path: root/drivers/net/wireless/mediatek
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
commit     5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch)
tree       a94efe259b9009378be6d90eb30d2b019d95c194  /drivers/net/wireless/mediatek
parent     Initial commit. (diff)
Adding upstream version 5.10.209.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/wireless/mediatek')
-rw-r--r-- drivers/net/wireless/mediatek/Kconfig | 16
-rw-r--r-- drivers/net/wireless/mediatek/Makefile | 3
-rw-r--r-- drivers/net/wireless/mediatek/mt76/Kconfig | 31
-rw-r--r-- drivers/net/wireless/mediatek/mt76/Makefile | 33
-rw-r--r-- drivers/net/wireless/mediatek/mt76/agg-rx.c | 300
-rw-r--r-- drivers/net/wireless/mediatek/mt76/debugfs.c | 115
-rw-r--r-- drivers/net/wireless/mediatek/mt76/dma.c | 680
-rw-r--r-- drivers/net/wireless/mediatek/mt76/dma.h | 51
-rw-r--r-- drivers/net/wireless/mediatek/mt76/eeprom.c | 122
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mac80211.c | 1206
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mcu.c | 52
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mmio.c | 100
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76.h | 1080
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7603/Kconfig | 12
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7603/Makefile | 7
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7603/beacon.c | 188
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7603/core.c | 67
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c | 118
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7603/dma.c | 250
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c | 186
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7603/eeprom.h | 91
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7603/init.c | 587
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7603/mac.c | 1862
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7603/mac.h | 242
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7603/main.c | 730
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7603/mcu.c | 479
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7603/mcu.h | 103
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h | 267
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7603/pci.c | 83
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7603/regs.h | 780
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7603/soc.c | 82
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/Kconfig | 55
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/Makefile | 20
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c | 417
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/dma.c | 301
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c | 353
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h | 116
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/init.c | 493
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/mac.c | 2336
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/mac.h | 438
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/main.c | 1268
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/mcu.c | 3997
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/mcu.h | 1060
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/mmio.c | 274
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h | 688
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/mt7615_trace.h | 56
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/pci.c | 199
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c | 170
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c | 184
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/regs.h | 582
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/sdio.c | 496
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/sdio.h | 115
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c | 172
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c | 336
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/soc.c | 75
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/testmode.c | 362
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/trace.c | 12
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/usb.c | 234
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c | 98
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c | 402
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig | 29
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x0/Makefile | 11
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c | 357
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h | 41
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x0/init.c | 263
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h | 85
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x0/initvals_init.h | 159
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h | 633
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x0/main.c | 66
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h | 46
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h | 60
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x0/pci.c | 242
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c | 132
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x0/phy.c | 1213
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x0/phy.h | 90
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x0/usb.c | 349
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c | 174
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02.h | 273
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c | 213
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c | 140
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c | 895
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h | 132
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_dma.h | 65
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c | 147
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h | 187
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_mac.c | 1218
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_mac.h | 208
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c | 165
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h | 98
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c | 569
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_phy.c | 204
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_phy.h | 49
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_regs.h | 706
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_trace.c | 12
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_trace.h | 87
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c | 183
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_usb.h | 25
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c | 276
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c | 309
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_util.c | 697
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig | 29
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/Makefile | 15
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c | 521
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h | 83
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/init.c | 175
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/mac.c | 46
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/mac.h | 24
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c | 106
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h | 68
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h | 78
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h | 43
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/pci.c | 178
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c | 319
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c | 159
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c | 197
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c | 311
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/phy.c | 349
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/usb.c | 149
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c | 248
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c | 174
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c | 124
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c | 255
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c | 201
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7915/Kconfig | 13
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7915/Makefile | 6
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c | 473
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7915/dma.c | 376
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c | 250
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h | 125
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7915/init.c | 723
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7915/mac.c | 1526
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7915/mac.h | 329
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7915/main.c | 845
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7915/mcu.c | 3409
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7915/mcu.h | 1067
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h | 466
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7915/pci.c | 192
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7915/regs.h | 395
-rw-r--r-- drivers/net/wireless/mediatek/mt76/pci.c | 47
-rw-r--r-- drivers/net/wireless/mediatek/mt76/sdio.c | 363
-rw-r--r-- drivers/net/wireless/mediatek/mt76/testmode.c | 503
-rw-r--r-- drivers/net/wireless/mediatek/mt76/testmode.h | 156
-rw-r--r-- drivers/net/wireless/mediatek/mt76/trace.c | 15
-rw-r--r-- drivers/net/wireless/mediatek/mt76/trace.h | 111
-rw-r--r-- drivers/net/wireless/mediatek/mt76/tx.c | 628
-rw-r--r-- drivers/net/wireless/mediatek/mt76/usb.c | 1147
-rw-r--r-- drivers/net/wireless/mediatek/mt76/usb_trace.c | 12
-rw-r--r-- drivers/net/wireless/mediatek/mt76/usb_trace.h | 86
-rw-r--r-- drivers/net/wireless/mediatek/mt76/util.c | 141
-rw-r--r-- drivers/net/wireless/mediatek/mt76/util.h | 124
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/Kconfig | 7
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/Makefile | 8
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/core.c | 70
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/debugfs.c | 140
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/dma.c | 540
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/dma.h | 117
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/eeprom.c | 390
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/eeprom.h | 143
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/init.c | 630
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/initvals.h | 157
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/initvals_phy.h | 283
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/mac.c | 593
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/mac.h | 171
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/main.c | 426
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/mcu.c | 535
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/mcu.h | 86
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/mt7601u.h | 392
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/phy.c | 1252
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/regs.h | 627
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/trace.c | 13
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/trace.h | 392
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/tx.c | 315
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/usb.c | 381
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/usb.h | 71
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/util.c | 34
175 files changed, 61193 insertions, 0 deletions
diff --git a/drivers/net/wireless/mediatek/Kconfig b/drivers/net/wireless/mediatek/Kconfig
new file mode 100644
index 000000000..bd4db1282
--- /dev/null
+++ b/drivers/net/wireless/mediatek/Kconfig
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config WLAN_VENDOR_MEDIATEK
+ bool "MediaTek devices"
+ default y
+ help
+ If you have a wireless card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all the
+ questions about these cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if WLAN_VENDOR_MEDIATEK
+source "drivers/net/wireless/mediatek/mt7601u/Kconfig"
+source "drivers/net/wireless/mediatek/mt76/Kconfig"
+endif # WLAN_VENDOR_MEDIATEK
diff --git a/drivers/net/wireless/mediatek/Makefile b/drivers/net/wireless/mediatek/Makefile
new file mode 100644
index 000000000..806172659
--- /dev/null
+++ b/drivers/net/wireless/mediatek/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_MT7601U) += mt7601u/
+obj-$(CONFIG_MT76_CORE) += mt76/
diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig
new file mode 100644
index 000000000..31015d2a8
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/Kconfig
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config MT76_CORE
+ tristate
+
+config MT76_LEDS
+ bool
+ depends on MT76_CORE
+ depends on LEDS_CLASS=y || MT76_CORE=LEDS_CLASS
+ default y
+
+config MT76_USB
+ tristate
+ depends on MT76_CORE
+
+config MT76_SDIO
+ tristate
+ depends on MT76_CORE
+
+config MT76x02_LIB
+ tristate
+ select MT76_CORE
+
+config MT76x02_USB
+ tristate
+ select MT76_USB
+
+source "drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig"
+source "drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig"
+source "drivers/net/wireless/mediatek/mt76/mt7603/Kconfig"
+source "drivers/net/wireless/mediatek/mt76/mt7615/Kconfig"
+source "drivers/net/wireless/mediatek/mt76/mt7915/Kconfig"
diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile
new file mode 100644
index 000000000..e53584db0
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/Makefile
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_MT76_CORE) += mt76.o
+obj-$(CONFIG_MT76_USB) += mt76-usb.o
+obj-$(CONFIG_MT76_SDIO) += mt76-sdio.o
+obj-$(CONFIG_MT76x02_LIB) += mt76x02-lib.o
+obj-$(CONFIG_MT76x02_USB) += mt76x02-usb.o
+
+mt76-y := \
+ mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o \
+ tx.o agg-rx.o mcu.o
+
+mt76-$(CONFIG_PCI) += pci.o
+mt76-$(CONFIG_NL80211_TESTMODE) += testmode.o
+
+mt76-usb-y := usb.o usb_trace.o
+mt76-sdio-y := sdio.o
+
+CFLAGS_trace.o := -I$(src)
+CFLAGS_usb_trace.o := -I$(src)
+CFLAGS_mt76x02_trace.o := -I$(src)
+
+mt76x02-lib-y := mt76x02_util.o mt76x02_mac.o mt76x02_mcu.o \
+ mt76x02_eeprom.o mt76x02_phy.o mt76x02_mmio.o \
+ mt76x02_txrx.o mt76x02_trace.o mt76x02_debugfs.o \
+ mt76x02_dfs.o mt76x02_beacon.o
+
+mt76x02-usb-y := mt76x02_usb_mcu.o mt76x02_usb_core.o
+
+obj-$(CONFIG_MT76x0_COMMON) += mt76x0/
+obj-$(CONFIG_MT76x2_COMMON) += mt76x2/
+obj-$(CONFIG_MT7603E) += mt7603/
+obj-$(CONFIG_MT7615_COMMON) += mt7615/
+obj-$(CONFIG_MT7915E) += mt7915/
diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c
new file mode 100644
index 000000000..df25c00d9
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c
@@ -0,0 +1,300 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2018 Felix Fietkau <nbd@nbd.name>
+ */
+#include "mt76.h"
+
+static unsigned long mt76_aggr_tid_to_timeo(u8 tidno)
+{
+ /* Voice traffic (AC_VO) currently runs without aggregation, so it needs
+ * no special handling. AC_BE/AC_BK use TIDs 0-3; any other TID (i.e. AC_VI)
+ * gets a shorter release timeout. */
+ return HZ / (tidno >= 4 ? 25 : 10);
+}
+
+static void
+mt76_aggr_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames, int idx)
+{
+ struct sk_buff *skb;
+
+ tid->head = ieee80211_sn_inc(tid->head);
+
+ skb = tid->reorder_buf[idx];
+ if (!skb)
+ return;
+
+ tid->reorder_buf[idx] = NULL;
+ tid->nframes--;
+ __skb_queue_tail(frames, skb);
+}
+
+static void
+mt76_rx_aggr_release_frames(struct mt76_rx_tid *tid,
+ struct sk_buff_head *frames,
+ u16 head)
+{
+ int idx;
+
+ while (ieee80211_sn_less(tid->head, head)) {
+ idx = tid->head % tid->size;
+ mt76_aggr_release(tid, frames, idx);
+ }
+}
+
+static void
+mt76_rx_aggr_release_head(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
+{
+ int idx = tid->head % tid->size;
+
+ while (tid->reorder_buf[idx]) {
+ mt76_aggr_release(tid, frames, idx);
+ idx = tid->head % tid->size;
+ }
+}
+
+static void
+mt76_rx_aggr_check_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
+{
+ struct mt76_rx_status *status;
+ struct sk_buff *skb;
+ int start, idx, nframes;
+
+ if (!tid->nframes)
+ return;
+
+ mt76_rx_aggr_release_head(tid, frames);
+
+ start = tid->head % tid->size;
+ nframes = tid->nframes;
+
+ for (idx = (tid->head + 1) % tid->size;
+ idx != start && nframes;
+ idx = (idx + 1) % tid->size) {
+ skb = tid->reorder_buf[idx];
+ if (!skb)
+ continue;
+
+ nframes--;
+ status = (struct mt76_rx_status *)skb->cb;
+ if (!time_after(jiffies,
+ status->reorder_time +
+ mt76_aggr_tid_to_timeo(tid->num)))
+ continue;
+
+ mt76_rx_aggr_release_frames(tid, frames, status->seqno);
+ }
+
+ mt76_rx_aggr_release_head(tid, frames);
+}
+
+static void
+mt76_rx_aggr_reorder_work(struct work_struct *work)
+{
+ struct mt76_rx_tid *tid = container_of(work, struct mt76_rx_tid,
+ reorder_work.work);
+ struct mt76_dev *dev = tid->dev;
+ struct sk_buff_head frames;
+ int nframes;
+
+ __skb_queue_head_init(&frames);
+
+ local_bh_disable();
+ rcu_read_lock();
+
+ spin_lock(&tid->lock);
+ mt76_rx_aggr_check_release(tid, &frames);
+ nframes = tid->nframes;
+ spin_unlock(&tid->lock);
+
+ if (nframes)
+ ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
+ mt76_aggr_tid_to_timeo(tid->num));
+ mt76_rx_complete(dev, &frames, NULL);
+
+ rcu_read_unlock();
+ local_bh_enable();
+}
+
+static void
+mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct ieee80211_bar *bar = mt76_skb_get_hdr(skb);
+ struct mt76_wcid *wcid = status->wcid;
+ struct mt76_rx_tid *tid;
+ u16 seqno;
+
+ if (!ieee80211_is_ctl(bar->frame_control))
+ return;
+
+ if (!ieee80211_is_back_req(bar->frame_control))
+ return;
+
+ status->tid = le16_to_cpu(bar->control) >> 12;
+ seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num));
+ tid = rcu_dereference(wcid->aggr[status->tid]);
+ if (!tid)
+ return;
+
+ spin_lock_bh(&tid->lock);
+ if (!tid->stopped) {
+ mt76_rx_aggr_release_frames(tid, frames, seqno);
+ mt76_rx_aggr_release_head(tid, frames);
+ }
+ spin_unlock_bh(&tid->lock);
+}
+
+void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
+ struct mt76_wcid *wcid = status->wcid;
+ struct ieee80211_sta *sta;
+ struct mt76_rx_tid *tid;
+ bool sn_less;
+ u16 seqno, head, size, idx;
+ u8 ackp;
+
+ __skb_queue_tail(frames, skb);
+
+ sta = wcid_to_sta(wcid);
+ if (!sta)
+ return;
+
+ if (!status->aggr) {
+ mt76_rx_aggr_check_ctl(skb, frames);
+ return;
+ }
+
+ /* not part of a BA session */
+ ackp = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_ACK_POLICY_MASK;
+ if (ackp != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
+ ackp != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
+ return;
+
+ tid = rcu_dereference(wcid->aggr[status->tid]);
+ if (!tid)
+ return;
+
+ status->flag |= RX_FLAG_DUP_VALIDATED;
+ spin_lock_bh(&tid->lock);
+
+ if (tid->stopped)
+ goto out;
+
+ head = tid->head;
+ seqno = status->seqno;
+ size = tid->size;
+ sn_less = ieee80211_sn_less(seqno, head);
+
+ if (!tid->started) {
+ if (sn_less)
+ goto out;
+
+ tid->started = true;
+ }
+
+ if (sn_less) {
+ __skb_unlink(skb, frames);
+ dev_kfree_skb(skb);
+ goto out;
+ }
+
+ if (seqno == head) {
+ tid->head = ieee80211_sn_inc(head);
+ if (tid->nframes)
+ mt76_rx_aggr_release_head(tid, frames);
+ goto out;
+ }
+
+ __skb_unlink(skb, frames);
+
+ /*
+ * Frame sequence number exceeds buffering window, free up some space
+ * by releasing previous frames
+ */
+ if (!ieee80211_sn_less(seqno, head + size)) {
+ head = ieee80211_sn_inc(ieee80211_sn_sub(seqno, size));
+ mt76_rx_aggr_release_frames(tid, frames, head);
+ }
+
+ idx = seqno % size;
+
+ /* Discard if the current slot is already in use */
+ if (tid->reorder_buf[idx]) {
+ dev_kfree_skb(skb);
+ goto out;
+ }
+
+ status->reorder_time = jiffies;
+ tid->reorder_buf[idx] = skb;
+ tid->nframes++;
+ mt76_rx_aggr_release_head(tid, frames);
+
+ ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
+ mt76_aggr_tid_to_timeo(tid->num));
+
+out:
+ spin_unlock_bh(&tid->lock);
+}
+
+int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno,
+ u16 ssn, u16 size)
+{
+ struct mt76_rx_tid *tid;
+
+ mt76_rx_aggr_stop(dev, wcid, tidno);
+
+ tid = kzalloc(struct_size(tid, reorder_buf, size), GFP_KERNEL);
+ if (!tid)
+ return -ENOMEM;
+
+ tid->dev = dev;
+ tid->head = ssn;
+ tid->size = size;
+ tid->num = tidno;
+ INIT_DELAYED_WORK(&tid->reorder_work, mt76_rx_aggr_reorder_work);
+ spin_lock_init(&tid->lock);
+
+ rcu_assign_pointer(wcid->aggr[tidno], tid);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_rx_aggr_start);
+
+static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
+{
+ u16 size = tid->size;
+ int i;
+
+ spin_lock_bh(&tid->lock);
+
+ tid->stopped = true;
+ for (i = 0; tid->nframes && i < size; i++) {
+ struct sk_buff *skb = tid->reorder_buf[i];
+
+ if (!skb)
+ continue;
+
+ tid->reorder_buf[i] = NULL;
+ tid->nframes--;
+ dev_kfree_skb(skb);
+ }
+
+ spin_unlock_bh(&tid->lock);
+
+ cancel_delayed_work_sync(&tid->reorder_work);
+}
+
+void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno)
+{
+ struct mt76_rx_tid *tid = NULL;
+
+ tid = rcu_replace_pointer(wcid->aggr[tidno], tid,
+ lockdep_is_held(&dev->mutex));
+ if (tid) {
+ mt76_rx_aggr_shutdown(dev, tid);
+ kfree_rcu(tid, rcu_head);
+ }
+}
+EXPORT_SYMBOL_GPL(mt76_rx_aggr_stop);
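
Editor's note: the reorder machinery above is driven from a driver's mac80211 ampdu_action callback. Below is a minimal sketch of that wiring (not part of the patch), assuming a driver-private station struct that embeds the mt76_wcid; "my_sta" and "my_ampdu_action" are illustrative names.

/* Sketch only: start/stop the mt76 RX reorder buffer from ampdu_action. */
#include "mt76.h"

struct my_sta {
	struct mt76_wcid wcid;	/* must live in sta->drv_priv */
};

static int my_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			   struct ieee80211_ampdu_params *params)
{
	struct mt76_dev *mdev = hw->priv;	/* set up by mt76_alloc_device() */
	struct my_sta *msta = (struct my_sta *)params->sta->drv_priv;

	switch (params->action) {
	case IEEE80211_AMPDU_RX_START:
		/* size the reorder window to the negotiated BA buffer size */
		return mt76_rx_aggr_start(mdev, &msta->wcid, params->tid,
					  params->ssn, params->buf_size);
	case IEEE80211_AMPDU_RX_STOP:
		/* release buffered frames and free the per-TID state */
		mt76_rx_aggr_stop(mdev, &msta->wcid, params->tid);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

TX-side A-MPDU actions are chip specific and handled by the individual drivers, so they are omitted from this sketch.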
diff --git a/drivers/net/wireless/mediatek/mt76/debugfs.c b/drivers/net/wireless/mediatek/mt76/debugfs.c
new file mode 100644
index 000000000..52f583cb1
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/debugfs.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+#include "mt76.h"
+
+static int
+mt76_reg_set(void *data, u64 val)
+{
+ struct mt76_dev *dev = data;
+
+ __mt76_wr(dev, dev->debugfs_reg, val);
+ return 0;
+}
+
+static int
+mt76_reg_get(void *data, u64 *val)
+{
+ struct mt76_dev *dev = data;
+
+ *val = __mt76_rr(dev, dev->debugfs_reg);
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set,
+ "0x%08llx\n");
+
+int mt76_queues_read(struct seq_file *s, void *data)
+{
+ struct mt76_dev *dev = dev_get_drvdata(s->private);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) {
+ struct mt76_queue *q = dev->q_tx[i];
+
+ if (!q)
+ continue;
+
+ seq_printf(s,
+ "%d: queued=%d head=%d tail=%d\n",
+ i, q->queued, q->head, q->tail);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_queues_read);
+
+static int mt76_rx_queues_read(struct seq_file *s, void *data)
+{
+ struct mt76_dev *dev = dev_get_drvdata(s->private);
+ int i, queued;
+
+ mt76_for_each_q_rx(dev, i) {
+ struct mt76_queue *q = &dev->q_rx[i];
+
+ queued = mt76_is_usb(dev) ? q->ndesc - q->queued : q->queued;
+ seq_printf(s, "%d: queued=%d head=%d tail=%d\n",
+ i, queued, q->head, q->tail);
+ }
+
+ return 0;
+}
+
+void mt76_seq_puts_array(struct seq_file *file, const char *str,
+ s8 *val, int len)
+{
+ int i;
+
+ seq_printf(file, "%10s:", str);
+ for (i = 0; i < len; i++)
+ seq_printf(file, " %2d", val[i]);
+ seq_puts(file, "\n");
+}
+EXPORT_SYMBOL_GPL(mt76_seq_puts_array);
+
+static int mt76_read_rate_txpower(struct seq_file *s, void *data)
+{
+ struct mt76_dev *dev = dev_get_drvdata(s->private);
+
+ mt76_seq_puts_array(s, "CCK", dev->rate_power.cck,
+ ARRAY_SIZE(dev->rate_power.cck));
+ mt76_seq_puts_array(s, "OFDM", dev->rate_power.ofdm,
+ ARRAY_SIZE(dev->rate_power.ofdm));
+ mt76_seq_puts_array(s, "STBC", dev->rate_power.stbc,
+ ARRAY_SIZE(dev->rate_power.stbc));
+ mt76_seq_puts_array(s, "HT", dev->rate_power.ht,
+ ARRAY_SIZE(dev->rate_power.ht));
+ mt76_seq_puts_array(s, "VHT", dev->rate_power.vht,
+ ARRAY_SIZE(dev->rate_power.vht));
+ return 0;
+}
+
+struct dentry *mt76_register_debugfs(struct mt76_dev *dev)
+{
+ struct dentry *dir;
+
+ dir = debugfs_create_dir("mt76", dev->hw->wiphy->debugfsdir);
+ if (!dir)
+ return NULL;
+
+ debugfs_create_u8("led_pin", 0600, dir, &dev->led_pin);
+ debugfs_create_u32("regidx", 0600, dir, &dev->debugfs_reg);
+ debugfs_create_file_unsafe("regval", 0600, dir, dev,
+ &fops_regval);
+ debugfs_create_blob("eeprom", 0400, dir, &dev->eeprom);
+ if (dev->otp.data)
+ debugfs_create_blob("otp", 0400, dir, &dev->otp);
+ debugfs_create_devm_seqfile(dev->dev, "rate_txpower", dir,
+ mt76_read_rate_txpower);
+ debugfs_create_devm_seqfile(dev->dev, "rx-queues", dir,
+ mt76_rx_queues_read);
+
+ return dir;
+}
+EXPORT_SYMBOL_GPL(mt76_register_debugfs);
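
Editor's note: mt76_register_debugfs() only creates the common entries; chip drivers add their own files under the returned dentry, and mt76_queues_read() is exported so they can expose their TX rings alongside the common "rx-queues" file. A small sketch of that pattern (not part of the patch; the "queues" filename is just an example):

/* Sketch only: extend the common mt76 debugfs directory from a chip driver. */
static int my_init_debugfs(struct mt76_dev *mdev)
{
	struct dentry *dir;

	dir = mt76_register_debugfs(mdev);
	if (!dir)
		return -ENOMEM;

	/* reuse the exported helper for a per-driver TX queue dump */
	debugfs_create_devm_seqfile(mdev->dev, "queues", dir,
				    mt76_queues_read);
	return 0;
}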
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
new file mode 100644
index 000000000..7991705e9
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -0,0 +1,680 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/dma-mapping.h>
+#include "mt76.h"
+#include "dma.h"
+
+static struct mt76_txwi_cache *
+mt76_alloc_txwi(struct mt76_dev *dev)
+{
+ struct mt76_txwi_cache *t;
+ dma_addr_t addr;
+ u8 *txwi;
+ int size;
+
+ size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
+ txwi = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
+ if (!txwi)
+ return NULL;
+
+ addr = dma_map_single(dev->dev, txwi, dev->drv->txwi_size,
+ DMA_TO_DEVICE);
+ t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
+ t->dma_addr = addr;
+
+ return t;
+}
+
+static struct mt76_txwi_cache *
+__mt76_get_txwi(struct mt76_dev *dev)
+{
+ struct mt76_txwi_cache *t = NULL;
+
+ spin_lock(&dev->lock);
+ if (!list_empty(&dev->txwi_cache)) {
+ t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
+ list);
+ list_del(&t->list);
+ }
+ spin_unlock(&dev->lock);
+
+ return t;
+}
+
+static struct mt76_txwi_cache *
+mt76_get_txwi(struct mt76_dev *dev)
+{
+ struct mt76_txwi_cache *t = __mt76_get_txwi(dev);
+
+ if (t)
+ return t;
+
+ return mt76_alloc_txwi(dev);
+}
+
+void
+mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+{
+ if (!t)
+ return;
+
+ spin_lock(&dev->lock);
+ list_add(&t->list, &dev->txwi_cache);
+ spin_unlock(&dev->lock);
+}
+EXPORT_SYMBOL_GPL(mt76_put_txwi);
+
+static void
+mt76_free_pending_txwi(struct mt76_dev *dev)
+{
+ struct mt76_txwi_cache *t;
+
+ local_bh_disable();
+ while ((t = __mt76_get_txwi(dev)) != NULL)
+ dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size,
+ DMA_TO_DEVICE);
+ local_bh_enable();
+}
+
+static int
+mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
+ int idx, int n_desc, int bufsize,
+ u32 ring_base)
+{
+ int size;
+ int i;
+
+ spin_lock_init(&q->lock);
+
+ q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
+ q->ndesc = n_desc;
+ q->buf_size = bufsize;
+ q->hw_idx = idx;
+
+ size = q->ndesc * sizeof(struct mt76_desc);
+ q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL);
+ if (!q->desc)
+ return -ENOMEM;
+
+ size = q->ndesc * sizeof(*q->entry);
+ q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
+ if (!q->entry)
+ return -ENOMEM;
+
+ /* clear descriptors */
+ for (i = 0; i < q->ndesc; i++)
+ q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+
+ writel(q->desc_dma, &q->regs->desc_base);
+ writel(0, &q->regs->cpu_idx);
+ writel(0, &q->regs->dma_idx);
+ writel(q->ndesc, &q->regs->ring_size);
+
+ return 0;
+}
+
+static int
+mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
+ struct mt76_queue_buf *buf, int nbufs, u32 info,
+ struct sk_buff *skb, void *txwi)
+{
+ struct mt76_queue_entry *entry;
+ struct mt76_desc *desc;
+ u32 ctrl;
+ int i, idx = -1;
+
+ if (txwi) {
+ q->entry[q->head].txwi = DMA_DUMMY_DATA;
+ q->entry[q->head].skip_buf0 = true;
+ }
+
+ for (i = 0; i < nbufs; i += 2, buf += 2) {
+ u32 buf0 = buf[0].addr, buf1 = 0;
+
+ idx = q->head;
+ q->head = (q->head + 1) % q->ndesc;
+
+ desc = &q->desc[idx];
+ entry = &q->entry[idx];
+
+ if (buf[0].skip_unmap)
+ entry->skip_buf0 = true;
+ entry->skip_buf1 = i == nbufs - 1;
+
+ entry->dma_addr[0] = buf[0].addr;
+ entry->dma_len[0] = buf[0].len;
+
+ ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
+ if (i < nbufs - 1) {
+ entry->dma_addr[1] = buf[1].addr;
+ entry->dma_len[1] = buf[1].len;
+ buf1 = buf[1].addr;
+ ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
+ if (buf[1].skip_unmap)
+ entry->skip_buf1 = true;
+ }
+
+ if (i == nbufs - 1)
+ ctrl |= MT_DMA_CTL_LAST_SEC0;
+ else if (i == nbufs - 2)
+ ctrl |= MT_DMA_CTL_LAST_SEC1;
+
+ WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
+ WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
+ WRITE_ONCE(desc->info, cpu_to_le32(info));
+ WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
+
+ q->queued++;
+ }
+
+ q->entry[idx].txwi = txwi;
+ q->entry[idx].skb = skb;
+
+ return idx;
+}
+
+static void
+mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
+ struct mt76_queue_entry *prev_e)
+{
+ struct mt76_queue_entry *e = &q->entry[idx];
+
+ if (!e->skip_buf0)
+ dma_unmap_single(dev->dev, e->dma_addr[0], e->dma_len[0],
+ DMA_TO_DEVICE);
+
+ if (!e->skip_buf1)
+ dma_unmap_single(dev->dev, e->dma_addr[1], e->dma_len[1],
+ DMA_TO_DEVICE);
+
+ if (e->txwi == DMA_DUMMY_DATA)
+ e->txwi = NULL;
+
+ if (e->skb == DMA_DUMMY_DATA)
+ e->skb = NULL;
+
+ *prev_e = *e;
+ memset(e, 0, sizeof(*e));
+}
+
+static void
+mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
+{
+ writel(q->desc_dma, &q->regs->desc_base);
+ writel(q->ndesc, &q->regs->ring_size);
+ q->head = readl(&q->regs->dma_idx);
+ q->tail = q->head;
+}
+
+static void
+mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
+{
+ wmb();
+ writel(q->head, &q->regs->cpu_idx);
+}
+
+static void
+mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
+{
+ struct mt76_queue *q = dev->q_tx[qid];
+ struct mt76_queue_entry entry;
+ bool wake = false;
+ int last;
+
+ if (!q)
+ return;
+
+ if (flush)
+ last = -1;
+ else
+ last = readl(&q->regs->dma_idx);
+
+ while (q->queued > 0 && q->tail != last) {
+ mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
+ mt76_queue_tx_complete(dev, q, &entry);
+
+ if (entry.txwi) {
+ if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
+ mt76_put_txwi(dev, entry.txwi);
+ wake = !flush;
+ }
+
+ if (!flush && q->tail == last)
+ last = readl(&q->regs->dma_idx);
+
+ }
+
+ if (flush) {
+ spin_lock_bh(&q->lock);
+ mt76_dma_sync_idx(dev, q);
+ mt76_dma_kick_queue(dev, q);
+ spin_unlock_bh(&q->lock);
+ }
+
+ wake = wake && q->stopped &&
+ qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
+ if (wake)
+ q->stopped = false;
+
+ if (!q->queued)
+ wake_up(&dev->tx_wait);
+
+ if (wake)
+ ieee80211_wake_queue(dev->hw, qid);
+}
+
+static void *
+mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
+ int *len, u32 *info, bool *more)
+{
+ struct mt76_queue_entry *e = &q->entry[idx];
+ struct mt76_desc *desc = &q->desc[idx];
+ dma_addr_t buf_addr;
+ void *buf = e->buf;
+ int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
+
+ buf_addr = e->dma_addr[0];
+ if (len) {
+ u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
+ *len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);
+ *more = !(ctl & MT_DMA_CTL_LAST_SEC0);
+ }
+
+ if (info)
+ *info = le32_to_cpu(desc->info);
+
+ dma_unmap_single(dev->dev, buf_addr, buf_len, DMA_FROM_DEVICE);
+ e->buf = NULL;
+
+ return buf;
+}
+
+static void *
+mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
+ int *len, u32 *info, bool *more)
+{
+ int idx = q->tail;
+
+ *more = false;
+ if (!q->queued)
+ return NULL;
+
+ if (flush)
+ q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+ else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
+ return NULL;
+
+ q->tail = (q->tail + 1) % q->ndesc;
+ q->queued--;
+
+ return mt76_dma_get_buf(dev, q, idx, len, info, more);
+}
+
+static int
+mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
+ struct sk_buff *skb, u32 tx_info)
+{
+ struct mt76_queue *q = dev->q_tx[qid];
+ struct mt76_queue_buf buf = {};
+ dma_addr_t addr;
+
+ if (q->queued + 1 >= q->ndesc - 1)
+ goto error;
+
+ addr = dma_map_single(dev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev->dev, addr)))
+ goto error;
+
+ buf.addr = addr;
+ buf.len = skb->len;
+
+ spin_lock_bh(&q->lock);
+ mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
+ mt76_dma_kick_queue(dev, q);
+ spin_unlock_bh(&q->lock);
+
+ return 0;
+
+error:
+ dev_kfree_skb(skb);
+ return -ENOMEM;
+}
+
+static int
+mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta)
+{
+ struct mt76_queue *q = dev->q_tx[qid];
+ struct mt76_tx_info tx_info = {
+ .skb = skb,
+ };
+ struct ieee80211_hw *hw;
+ int len, n = 0, ret = -ENOMEM;
+ struct mt76_txwi_cache *t;
+ struct sk_buff *iter;
+ dma_addr_t addr;
+ u8 *txwi;
+
+ t = mt76_get_txwi(dev);
+ if (!t) {
+ hw = mt76_tx_status_get_hw(dev, skb);
+ ieee80211_free_txskb(hw, skb);
+ return -ENOMEM;
+ }
+ txwi = mt76_get_txwi_ptr(dev, t);
+
+ skb->prev = skb->next = NULL;
+ if (dev->drv->drv_flags & MT_DRV_TX_ALIGNED4_SKBS)
+ mt76_insert_hdr_pad(skb);
+
+ len = skb_headlen(skb);
+ addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev->dev, addr)))
+ goto free;
+
+ tx_info.buf[n].addr = t->dma_addr;
+ tx_info.buf[n++].len = dev->drv->txwi_size;
+ tx_info.buf[n].addr = addr;
+ tx_info.buf[n++].len = len;
+
+ skb_walk_frags(skb, iter) {
+ if (n == ARRAY_SIZE(tx_info.buf))
+ goto unmap;
+
+ addr = dma_map_single(dev->dev, iter->data, iter->len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev->dev, addr)))
+ goto unmap;
+
+ tx_info.buf[n].addr = addr;
+ tx_info.buf[n++].len = iter->len;
+ }
+ tx_info.nbuf = n;
+
+ if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
+ ret = -ENOMEM;
+ goto unmap;
+ }
+
+ dma_sync_single_for_cpu(dev->dev, t->dma_addr, dev->drv->txwi_size,
+ DMA_TO_DEVICE);
+ ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
+ dma_sync_single_for_device(dev->dev, t->dma_addr, dev->drv->txwi_size,
+ DMA_TO_DEVICE);
+ if (ret < 0)
+ goto unmap;
+
+ return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
+ tx_info.info, tx_info.skb, t);
+
+unmap:
+ for (n--; n > 0; n--)
+ dma_unmap_single(dev->dev, tx_info.buf[n].addr,
+ tx_info.buf[n].len, DMA_TO_DEVICE);
+
+free:
+#ifdef CONFIG_NL80211_TESTMODE
+ /* fix tx_done accounting on queue overflow */
+ if (tx_info.skb == dev->test.tx_skb)
+ dev->test.tx_done--;
+#endif
+
+ dev_kfree_skb(tx_info.skb);
+ mt76_put_txwi(dev, t);
+ return ret;
+}
+
+static int
+mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
+{
+ dma_addr_t addr;
+ void *buf;
+ int frames = 0;
+ int len = SKB_WITH_OVERHEAD(q->buf_size);
+ int offset = q->buf_offset;
+
+ spin_lock_bh(&q->lock);
+
+ while (q->queued < q->ndesc - 1) {
+ struct mt76_queue_buf qbuf;
+
+ buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
+ if (!buf)
+ break;
+
+ addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev->dev, addr))) {
+ skb_free_frag(buf);
+ break;
+ }
+
+ qbuf.addr = addr + offset;
+ qbuf.len = len - offset;
+ qbuf.skip_unmap = false;
+ mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
+ frames++;
+ }
+
+ if (frames)
+ mt76_dma_kick_queue(dev, q);
+
+ spin_unlock_bh(&q->lock);
+
+ return frames;
+}
+
+static void
+mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
+{
+ struct page *page;
+ void *buf;
+ bool more;
+
+ spin_lock_bh(&q->lock);
+
+ do {
+ buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
+ if (!buf)
+ break;
+
+ skb_free_frag(buf);
+ } while (1);
+
+ if (q->rx_head) {
+ dev_kfree_skb(q->rx_head);
+ q->rx_head = NULL;
+ }
+
+ spin_unlock_bh(&q->lock);
+
+ if (!q->rx_page.va)
+ return;
+
+ page = virt_to_page(q->rx_page.va);
+ __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+ memset(&q->rx_page, 0, sizeof(q->rx_page));
+}
+
+static void
+mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
+{
+ struct mt76_queue *q = &dev->q_rx[qid];
+ int i;
+
+ for (i = 0; i < q->ndesc; i++)
+ q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+
+ mt76_dma_rx_cleanup(dev, q);
+ mt76_dma_sync_idx(dev, q);
+ mt76_dma_rx_fill(dev, q);
+}
+
+static void
+mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
+ int len, bool more)
+{
+ struct sk_buff *skb = q->rx_head;
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ int nr_frags = shinfo->nr_frags;
+
+ if (nr_frags < ARRAY_SIZE(shinfo->frags)) {
+ struct page *page = virt_to_head_page(data);
+ int offset = data - page_address(page) + q->buf_offset;
+
+ skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
+ } else {
+ skb_free_frag(data);
+ }
+
+ if (more)
+ return;
+
+ q->rx_head = NULL;
+ if (nr_frags < ARRAY_SIZE(shinfo->frags))
+ dev->drv->rx_skb(dev, q - dev->q_rx, skb);
+ else
+ dev_kfree_skb(skb);
+}
+
+static int
+mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
+{
+ int len, data_len, done = 0;
+ struct sk_buff *skb;
+ unsigned char *data;
+ bool more;
+
+ while (done < budget) {
+ u32 info;
+
+ data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
+ if (!data)
+ break;
+
+ if (q->rx_head)
+ data_len = q->buf_size;
+ else
+ data_len = SKB_WITH_OVERHEAD(q->buf_size);
+
+ if (data_len < len + q->buf_offset) {
+ dev_kfree_skb(q->rx_head);
+ q->rx_head = NULL;
+
+ skb_free_frag(data);
+ continue;
+ }
+
+ if (q->rx_head) {
+ mt76_add_fragment(dev, q, data, len, more);
+ continue;
+ }
+
+ skb = build_skb(data, q->buf_size);
+ if (!skb) {
+ skb_free_frag(data);
+ continue;
+ }
+ skb_reserve(skb, q->buf_offset);
+
+ if (q == &dev->q_rx[MT_RXQ_MCU]) {
+ u32 *rxfce = (u32 *)skb->cb;
+ *rxfce = info;
+ }
+
+ __skb_put(skb, len);
+ done++;
+
+ if (more) {
+ q->rx_head = skb;
+ continue;
+ }
+
+ dev->drv->rx_skb(dev, q - dev->q_rx, skb);
+ }
+
+ mt76_dma_rx_fill(dev, q);
+ return done;
+}
+
+static int
+mt76_dma_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct mt76_dev *dev;
+ int qid, done = 0, cur;
+
+ dev = container_of(napi->dev, struct mt76_dev, napi_dev);
+ qid = napi - dev->napi;
+
+ local_bh_disable();
+ rcu_read_lock();
+
+ do {
+ cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
+ mt76_rx_poll_complete(dev, qid, napi);
+ done += cur;
+ } while (cur && done < budget);
+
+ rcu_read_unlock();
+ local_bh_enable();
+
+ if (done < budget && napi_complete(napi))
+ dev->drv->rx_poll_complete(dev, qid);
+
+ return done;
+}
+
+static int
+mt76_dma_init(struct mt76_dev *dev)
+{
+ int i;
+
+ init_dummy_netdev(&dev->napi_dev);
+
+ mt76_for_each_q_rx(dev, i) {
+ netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll,
+ 64);
+ mt76_dma_rx_fill(dev, &dev->q_rx[i]);
+ napi_enable(&dev->napi[i]);
+ }
+
+ return 0;
+}
+
+static const struct mt76_queue_ops mt76_dma_ops = {
+ .init = mt76_dma_init,
+ .alloc = mt76_dma_alloc_queue,
+ .tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
+ .tx_queue_skb = mt76_dma_tx_queue_skb,
+ .tx_cleanup = mt76_dma_tx_cleanup,
+ .rx_reset = mt76_dma_rx_reset,
+ .kick = mt76_dma_kick_queue,
+};
+
+void mt76_dma_attach(struct mt76_dev *dev)
+{
+ dev->queue_ops = &mt76_dma_ops;
+}
+EXPORT_SYMBOL_GPL(mt76_dma_attach);
+
+void mt76_dma_cleanup(struct mt76_dev *dev)
+{
+ int i;
+
+ mt76_worker_disable(&dev->tx_worker);
+ netif_napi_del(&dev->tx_napi);
+ for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++)
+ mt76_dma_tx_cleanup(dev, i, true);
+
+ mt76_for_each_q_rx(dev, i) {
+ netif_napi_del(&dev->napi[i]);
+ mt76_dma_rx_cleanup(dev, &dev->q_rx[i]);
+ }
+
+ mt76_free_pending_txwi(dev);
+}
+EXPORT_SYMBOL_GPL(mt76_dma_cleanup);
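
Editor's note: mt76_dma_attach() only installs mt76_dma_ops as dev->queue_ops; the chip driver then creates its rings through those ops (the in-tree drivers do this via the mt76_queue_* wrappers in mt76.h) and finally calls the init op to fill the RX rings and enable NAPI. A rough sketch (not part of the patch), with the queue index, descriptor count, buffer size and register base all illustrative:

/* Sketch only: minimal DMA bring-up built on the queue ops above. */
static int my_dma_init(struct mt76_dev *dev, struct mt76_queue *q)
{
	int ret;

	mt76_dma_attach(dev);		/* dev->queue_ops = &mt76_dma_ops */

	/* hypothetical ring: hw index 0, 256 descriptors, 2048-byte buffers,
	 * descriptor registers at ring_base 0
	 */
	ret = dev->queue_ops->alloc(dev, q, 0, 256, 2048, 0);
	if (ret)
		return ret;

	/* fill all RX rings and enable per-queue NAPI */
	return dev->queue_ops->init(dev);
}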
diff --git a/drivers/net/wireless/mediatek/mt76/dma.h b/drivers/net/wireless/mediatek/mt76/dma.h
new file mode 100644
index 000000000..e7c27697e
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/dma.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+#ifndef __MT76_DMA_H
+#define __MT76_DMA_H
+
+#define DMA_DUMMY_DATA ((void *)~0)
+
+#define MT_RING_SIZE 0x10
+
+#define MT_DMA_CTL_SD_LEN1 GENMASK(13, 0)
+#define MT_DMA_CTL_LAST_SEC1 BIT(14)
+#define MT_DMA_CTL_BURST BIT(15)
+#define MT_DMA_CTL_SD_LEN0 GENMASK(29, 16)
+#define MT_DMA_CTL_LAST_SEC0 BIT(30)
+#define MT_DMA_CTL_DMA_DONE BIT(31)
+
+#define MT_DMA_HDR_LEN 4
+#define MT_RX_INFO_LEN 4
+#define MT_FCE_INFO_LEN 4
+#define MT_RX_RXWI_LEN 32
+
+struct mt76_desc {
+ __le32 buf0;
+ __le32 ctrl;
+ __le32 buf1;
+ __le32 info;
+} __packed __aligned(4);
+
+enum mt76_qsel {
+ MT_QSEL_MGMT,
+ MT_QSEL_HCCA,
+ MT_QSEL_EDCA,
+ MT_QSEL_EDCA_2,
+};
+
+enum mt76_mcu_evt_type {
+ EVT_CMD_DONE,
+ EVT_CMD_ERROR,
+ EVT_CMD_RETRY,
+ EVT_EVENT_PWR_RSP,
+ EVT_EVENT_WOW_RSP,
+ EVT_EVENT_CARRIER_DETECT_RSP,
+ EVT_EVENT_DFS_DETECT_RSP,
+};
+
+void mt76_dma_attach(struct mt76_dev *dev);
+void mt76_dma_cleanup(struct mt76_dev *dev);
+
+#endif
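
Editor's note: the MT_DMA_CTL_* masks above are combined with FIELD_PREP()/FIELD_GET() when descriptors are written and parsed (see mt76_dma_add_buf() and mt76_dma_get_buf() in dma.c). A tiny standalone sketch of that packing (not part of the patch), assuming dma.h and the usual kernel headers are in scope:

/* Sketch only: pack/unpack a descriptor control word with the masks above. */
#include <linux/bitfield.h>
#include "dma.h"

static __le32 my_pack_ctrl(u16 len0, bool last)
{
	u32 ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, len0);	/* bits 29:16 */

	if (last)
		ctrl |= MT_DMA_CTL_LAST_SEC0;			/* bit 30 */
	return cpu_to_le32(ctrl);
}

static u16 my_unpack_len0(__le32 ctrl)
{
	return FIELD_GET(MT_DMA_CTL_SD_LEN0, le32_to_cpu(ctrl));
}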
diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c
new file mode 100644
index 000000000..3044e0069
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/eeprom.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/etherdevice.h>
+#include "mt76.h"
+
+static int
+mt76_get_of_eeprom(struct mt76_dev *dev, int len)
+{
+#if defined(CONFIG_OF) && defined(CONFIG_MTD)
+ struct device_node *np = dev->dev->of_node;
+ struct mtd_info *mtd;
+ const __be32 *list;
+ const char *part;
+ phandle phandle;
+ int offset = 0;
+ int size;
+ size_t retlen;
+ int ret;
+
+ if (!np)
+ return -ENOENT;
+
+ list = of_get_property(np, "mediatek,mtd-eeprom", &size);
+ if (!list)
+ return -ENOENT;
+
+ phandle = be32_to_cpup(list++);
+ if (!phandle)
+ return -ENOENT;
+
+ np = of_find_node_by_phandle(phandle);
+ if (!np)
+ return -EINVAL;
+
+ part = of_get_property(np, "label", NULL);
+ if (!part)
+ part = np->name;
+
+ mtd = get_mtd_device_nm(part);
+ if (IS_ERR(mtd)) {
+ ret = PTR_ERR(mtd);
+ goto out_put_node;
+ }
+
+ if (size <= sizeof(*list)) {
+ ret = -EINVAL;
+ goto out_put_node;
+ }
+
+ offset = be32_to_cpup(list);
+ ret = mtd_read(mtd, offset, len, &retlen, dev->eeprom.data);
+ put_mtd_device(mtd);
+ if (ret)
+ goto out_put_node;
+
+ if (retlen < len) {
+ ret = -EINVAL;
+ goto out_put_node;
+ }
+
+ if (of_property_read_bool(dev->dev->of_node, "big-endian")) {
+ u8 *data = (u8 *)dev->eeprom.data;
+ int i;
+
+ /* convert EEPROM data to little endian */
+ for (i = 0; i < round_down(len, 2); i += 2)
+ put_unaligned_le16(get_unaligned_be16(&data[i]),
+ &data[i]);
+ }
+
+#ifdef CONFIG_NL80211_TESTMODE
+ dev->test.mtd_name = devm_kstrdup(dev->dev, part, GFP_KERNEL);
+ dev->test.mtd_offset = offset;
+#endif
+
+out_put_node:
+ of_node_put(np);
+ return ret;
+#else
+ return -ENOENT;
+#endif
+}
+
+void
+mt76_eeprom_override(struct mt76_dev *dev)
+{
+#ifdef CONFIG_OF
+ struct device_node *np = dev->dev->of_node;
+ const u8 *mac = NULL;
+
+ if (np)
+ mac = of_get_mac_address(np);
+ if (!IS_ERR_OR_NULL(mac))
+ ether_addr_copy(dev->macaddr, mac);
+#endif
+
+ if (!is_valid_ether_addr(dev->macaddr)) {
+ eth_random_addr(dev->macaddr);
+ dev_info(dev->dev,
+ "Invalid MAC address, using random address %pM\n",
+ dev->macaddr);
+ }
+}
+EXPORT_SYMBOL_GPL(mt76_eeprom_override);
+
+int
+mt76_eeprom_init(struct mt76_dev *dev, int len)
+{
+ dev->eeprom.size = len;
+ dev->eeprom.data = devm_kzalloc(dev->dev, len, GFP_KERNEL);
+ if (!dev->eeprom.data)
+ return -ENOMEM;
+
+ return !mt76_get_of_eeprom(dev, len);
+}
+EXPORT_SYMBOL_GPL(mt76_eeprom_init);
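
Editor's note: mt76_eeprom_init() returns a negative errno on allocation failure, 1 if calibration data was found via the "mediatek,mtd-eeprom" phandle, and 0 otherwise, in which case the chip driver falls back to reading its efuse. A sketch of the typical probe-time flow (not part of the patch); the EEPROM size and MAC address offset are purely illustrative, and the usual headers already included by this file are assumed:

/* Sketch only: probe-time EEPROM handling built on the helpers above. */
static int my_eeprom_init(struct mt76_dev *dev)
{
	int ret;

	ret = mt76_eeprom_init(dev, 512);	/* hypothetical EEPROM size */
	if (ret < 0)
		return ret;
	if (!ret) {
		/* no DT/MTD data: the chip driver would read its efuse into
		 * dev->eeprom.data here
		 */
	}

	/* hypothetical MAC address offset within the EEPROM */
	memcpy(dev->macaddr, dev->eeprom.data + 4, ETH_ALEN);

	/* honour a DT-provided MAC, or randomize if still invalid */
	mt76_eeprom_override(dev);
	return 0;
}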
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
new file mode 100644
index 000000000..dc1191aa0
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -0,0 +1,1206 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+#include <linux/sched.h>
+#include <linux/of.h>
+#include "mt76.h"
+
+#define CHAN2G(_idx, _freq) { \
+ .band = NL80211_BAND_2GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 30, \
+}
+
+#define CHAN5G(_idx, _freq) { \
+ .band = NL80211_BAND_5GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 30, \
+}
+
+static const struct ieee80211_channel mt76_channels_2ghz[] = {
+ CHAN2G(1, 2412),
+ CHAN2G(2, 2417),
+ CHAN2G(3, 2422),
+ CHAN2G(4, 2427),
+ CHAN2G(5, 2432),
+ CHAN2G(6, 2437),
+ CHAN2G(7, 2442),
+ CHAN2G(8, 2447),
+ CHAN2G(9, 2452),
+ CHAN2G(10, 2457),
+ CHAN2G(11, 2462),
+ CHAN2G(12, 2467),
+ CHAN2G(13, 2472),
+ CHAN2G(14, 2484),
+};
+
+static const struct ieee80211_channel mt76_channels_5ghz[] = {
+ CHAN5G(36, 5180),
+ CHAN5G(40, 5200),
+ CHAN5G(44, 5220),
+ CHAN5G(48, 5240),
+
+ CHAN5G(52, 5260),
+ CHAN5G(56, 5280),
+ CHAN5G(60, 5300),
+ CHAN5G(64, 5320),
+
+ CHAN5G(100, 5500),
+ CHAN5G(104, 5520),
+ CHAN5G(108, 5540),
+ CHAN5G(112, 5560),
+ CHAN5G(116, 5580),
+ CHAN5G(120, 5600),
+ CHAN5G(124, 5620),
+ CHAN5G(128, 5640),
+ CHAN5G(132, 5660),
+ CHAN5G(136, 5680),
+ CHAN5G(140, 5700),
+ CHAN5G(144, 5720),
+
+ CHAN5G(149, 5745),
+ CHAN5G(153, 5765),
+ CHAN5G(157, 5785),
+ CHAN5G(161, 5805),
+ CHAN5G(165, 5825),
+ CHAN5G(169, 5845),
+ CHAN5G(173, 5865),
+};
+
+static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
+ { .throughput = 0 * 1024, .blink_time = 334 },
+ { .throughput = 1 * 1024, .blink_time = 260 },
+ { .throughput = 5 * 1024, .blink_time = 220 },
+ { .throughput = 10 * 1024, .blink_time = 190 },
+ { .throughput = 20 * 1024, .blink_time = 170 },
+ { .throughput = 50 * 1024, .blink_time = 150 },
+ { .throughput = 70 * 1024, .blink_time = 130 },
+ { .throughput = 100 * 1024, .blink_time = 110 },
+ { .throughput = 200 * 1024, .blink_time = 80 },
+ { .throughput = 300 * 1024, .blink_time = 50 },
+};
+
+static int mt76_led_init(struct mt76_dev *dev)
+{
+ struct device_node *np = dev->dev->of_node;
+ struct ieee80211_hw *hw = dev->hw;
+ int led_pin;
+
+ if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set)
+ return 0;
+
+ snprintf(dev->led_name, sizeof(dev->led_name),
+ "mt76-%s", wiphy_name(hw->wiphy));
+
+ dev->led_cdev.name = dev->led_name;
+ dev->led_cdev.default_trigger =
+ ieee80211_create_tpt_led_trigger(hw,
+ IEEE80211_TPT_LEDTRIG_FL_RADIO,
+ mt76_tpt_blink,
+ ARRAY_SIZE(mt76_tpt_blink));
+
+ np = of_get_child_by_name(np, "led");
+ if (np) {
+ if (!of_property_read_u32(np, "led-sources", &led_pin))
+ dev->led_pin = led_pin;
+ dev->led_al = of_property_read_bool(np, "led-active-low");
+ of_node_put(np);
+ }
+
+ return led_classdev_register(dev->dev, &dev->led_cdev);
+}
+
+static void mt76_led_cleanup(struct mt76_dev *dev)
+{
+ if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set)
+ return;
+
+ led_classdev_unregister(&dev->led_cdev);
+}
+
+static void mt76_init_stream_cap(struct mt76_phy *phy,
+ struct ieee80211_supported_band *sband,
+ bool vht)
+{
+ struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
+ int i, nstream = hweight8(phy->antenna_mask);
+ struct ieee80211_sta_vht_cap *vht_cap;
+ u16 mcs_map = 0;
+
+ if (nstream > 1)
+ ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC;
+ else
+ ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;
+
+ for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
+ ht_cap->mcs.rx_mask[i] = i < nstream ? 0xff : 0;
+
+ if (!vht)
+ return;
+
+ vht_cap = &sband->vht_cap;
+ if (nstream > 1)
+ vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
+ else
+ vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;
+
+ for (i = 0; i < 8; i++) {
+ if (i < nstream)
+ mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2));
+ else
+ mcs_map |=
+ (IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
+ }
+ vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
+ vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
+}
+
+void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
+{
+ if (phy->cap.has_2ghz)
+ mt76_init_stream_cap(phy, &phy->sband_2g.sband, false);
+ if (phy->cap.has_5ghz)
+ mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht);
+}
+EXPORT_SYMBOL_GPL(mt76_set_stream_caps);
+
+static int
+mt76_init_sband(struct mt76_dev *dev, struct mt76_sband *msband,
+ const struct ieee80211_channel *chan, int n_chan,
+ struct ieee80211_rate *rates, int n_rates, bool vht)
+{
+ struct ieee80211_supported_band *sband = &msband->sband;
+ struct ieee80211_sta_ht_cap *ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap;
+ void *chanlist;
+ int size;
+
+ size = n_chan * sizeof(*chan);
+ chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
+ if (!chanlist)
+ return -ENOMEM;
+
+ msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan),
+ GFP_KERNEL);
+ if (!msband->chan)
+ return -ENOMEM;
+
+ sband->channels = chanlist;
+ sband->n_channels = n_chan;
+ sband->bitrates = rates;
+ sband->n_bitrates = n_rates;
+
+ ht_cap = &sband->ht_cap;
+ ht_cap->ht_supported = true;
+ ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_GRN_FLD |
+ IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_SGI_40 |
+ (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+
+ ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+ ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+
+ mt76_init_stream_cap(&dev->phy, sband, vht);
+
+ if (!vht)
+ return 0;
+
+ vht_cap = &sband->vht_cap;
+ vht_cap->vht_supported = true;
+ vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
+ IEEE80211_VHT_CAP_RXSTBC_1 |
+ IEEE80211_VHT_CAP_SHORT_GI_80 |
+ IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
+ IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
+ (3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);
+
+ return 0;
+}
+
+static int
+mt76_init_sband_2g(struct mt76_dev *dev, struct ieee80211_rate *rates,
+ int n_rates)
+{
+ dev->hw->wiphy->bands[NL80211_BAND_2GHZ] = &dev->phy.sband_2g.sband;
+
+ return mt76_init_sband(dev, &dev->phy.sband_2g,
+ mt76_channels_2ghz,
+ ARRAY_SIZE(mt76_channels_2ghz),
+ rates, n_rates, false);
+}
+
+static int
+mt76_init_sband_5g(struct mt76_dev *dev, struct ieee80211_rate *rates,
+ int n_rates, bool vht)
+{
+ dev->hw->wiphy->bands[NL80211_BAND_5GHZ] = &dev->phy.sband_5g.sband;
+
+ return mt76_init_sband(dev, &dev->phy.sband_5g,
+ mt76_channels_5ghz,
+ ARRAY_SIZE(mt76_channels_5ghz),
+ rates, n_rates, vht);
+}
+
+static void
+mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
+ enum nl80211_band band)
+{
+ struct ieee80211_supported_band *sband = &msband->sband;
+ bool found = false;
+ int i;
+
+ if (!sband)
+ return;
+
+ for (i = 0; i < sband->n_channels; i++) {
+ if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
+ continue;
+
+ found = true;
+ break;
+ }
+
+ if (found) {
+ phy->chandef.chan = &sband->channels[0];
+ phy->chan_state = &msband->chan[0];
+ return;
+ }
+
+ sband->n_channels = 0;
+ phy->hw->wiphy->bands[band] = NULL;
+}
+
+static void
+mt76_phy_init(struct mt76_dev *dev, struct ieee80211_hw *hw)
+{
+ struct wiphy *wiphy = hw->wiphy;
+
+ SET_IEEE80211_DEV(hw, dev->dev);
+ SET_IEEE80211_PERM_ADDR(hw, dev->macaddr);
+
+ wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
+ wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
+ WIPHY_FLAG_SUPPORTS_TDLS |
+ WIPHY_FLAG_AP_UAPSD;
+
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);
+
+ wiphy->available_antennas_tx = dev->phy.antenna_mask;
+ wiphy->available_antennas_rx = dev->phy.antenna_mask;
+
+ hw->txq_data_size = sizeof(struct mt76_txq);
+ hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;
+
+ if (!hw->max_tx_fragments)
+ hw->max_tx_fragments = 16;
+
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
+ ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
+ ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
+ ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
+ ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
+
+ if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD)) {
+ ieee80211_hw_set(hw, TX_AMSDU);
+ ieee80211_hw_set(hw, TX_FRAG_LIST);
+ }
+
+ ieee80211_hw_set(hw, MFP_CAPABLE);
+ ieee80211_hw_set(hw, AP_LINK_PS);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
+
+ wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+ wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_ADHOC);
+}
+
+struct mt76_phy *
+mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
+ const struct ieee80211_ops *ops)
+{
+ struct ieee80211_hw *hw;
+ struct mt76_phy *phy;
+ unsigned int phy_size, chan_size;
+ unsigned int size_2g, size_5g;
+ void *priv;
+
+ phy_size = ALIGN(sizeof(*phy), 8);
+ chan_size = sizeof(dev->phy.sband_2g.chan[0]);
+ size_2g = ALIGN(ARRAY_SIZE(mt76_channels_2ghz) * chan_size, 8);
+ size_5g = ALIGN(ARRAY_SIZE(mt76_channels_5ghz) * chan_size, 8);
+
+ size += phy_size + size_2g + size_5g;
+ hw = ieee80211_alloc_hw(size, ops);
+ if (!hw)
+ return NULL;
+
+ phy = hw->priv;
+ phy->dev = dev;
+ phy->hw = hw;
+
+ mt76_phy_init(dev, hw);
+
+ priv = hw->priv + phy_size;
+
+ phy->sband_2g = dev->phy.sband_2g;
+ phy->sband_2g.chan = priv;
+ priv += size_2g;
+
+ phy->sband_5g = dev->phy.sband_5g;
+ phy->sband_5g.chan = priv;
+ priv += size_5g;
+
+ phy->priv = priv;
+
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;
+ hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;
+
+ mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
+ mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
+
+ return phy;
+}
+EXPORT_SYMBOL_GPL(mt76_alloc_phy);
+
+int
+mt76_register_phy(struct mt76_phy *phy)
+{
+ int ret;
+
+ ret = ieee80211_register_hw(phy->hw);
+ if (ret)
+ return ret;
+
+ phy->dev->phy2 = phy;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_register_phy);
+
+void
+mt76_unregister_phy(struct mt76_phy *phy)
+{
+ struct mt76_dev *dev = phy->dev;
+
+ dev->phy2 = NULL;
+ mt76_tx_status_check(dev, NULL, true);
+ ieee80211_unregister_hw(phy->hw);
+}
+EXPORT_SYMBOL_GPL(mt76_unregister_phy);
+
+struct mt76_dev *
+mt76_alloc_device(struct device *pdev, unsigned int size,
+ const struct ieee80211_ops *ops,
+ const struct mt76_driver_ops *drv_ops)
+{
+ struct ieee80211_hw *hw;
+ struct mt76_phy *phy;
+ struct mt76_dev *dev;
+ int i;
+
+ hw = ieee80211_alloc_hw(size, ops);
+ if (!hw)
+ return NULL;
+
+ dev = hw->priv;
+ dev->hw = hw;
+ dev->dev = pdev;
+ dev->drv = drv_ops;
+
+ phy = &dev->phy;
+ phy->dev = dev;
+ phy->hw = hw;
+
+ spin_lock_init(&dev->rx_lock);
+ spin_lock_init(&dev->lock);
+ spin_lock_init(&dev->cc_lock);
+ mutex_init(&dev->mutex);
+ init_waitqueue_head(&dev->tx_wait);
+ skb_queue_head_init(&dev->status_list);
+
+ skb_queue_head_init(&dev->mcu.res_q);
+ init_waitqueue_head(&dev->mcu.wait);
+ mutex_init(&dev->mcu.mutex);
+ dev->tx_worker.fn = mt76_tx_worker;
+
+ INIT_LIST_HEAD(&dev->txwi_cache);
+
+ for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
+ skb_queue_head_init(&dev->rx_skb[i]);
+
+ dev->wq = alloc_ordered_workqueue("mt76", 0);
+ if (!dev->wq) {
+ ieee80211_free_hw(hw);
+ return NULL;
+ }
+
+ return dev;
+}
+EXPORT_SYMBOL_GPL(mt76_alloc_device);
+
+int mt76_register_device(struct mt76_dev *dev, bool vht,
+ struct ieee80211_rate *rates, int n_rates)
+{
+ struct ieee80211_hw *hw = dev->hw;
+ struct mt76_phy *phy = &dev->phy;
+ int ret;
+
+ dev_set_drvdata(dev->dev, dev);
+ mt76_phy_init(dev, hw);
+
+ if (phy->cap.has_2ghz) {
+ ret = mt76_init_sband_2g(dev, rates, n_rates);
+ if (ret)
+ return ret;
+ }
+
+ if (phy->cap.has_5ghz) {
+ ret = mt76_init_sband_5g(dev, rates + 4, n_rates - 4, vht);
+ if (ret)
+ return ret;
+ }
+
+ wiphy_read_of_freq_limits(hw->wiphy);
+ mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ);
+ mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ);
+
+ if (IS_ENABLED(CONFIG_MT76_LEDS)) {
+ ret = mt76_led_init(dev);
+ if (ret)
+ return ret;
+ }
+
+ ret = ieee80211_register_hw(hw);
+ if (ret)
+ return ret;
+
+ WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
+ sched_set_fifo_low(dev->tx_worker.task);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_register_device);
+
+void mt76_unregister_device(struct mt76_dev *dev)
+{
+ struct ieee80211_hw *hw = dev->hw;
+
+ if (IS_ENABLED(CONFIG_MT76_LEDS))
+ mt76_led_cleanup(dev);
+ mt76_tx_status_check(dev, NULL, true);
+ ieee80211_unregister_hw(hw);
+}
+EXPORT_SYMBOL_GPL(mt76_unregister_device);
+
+void mt76_free_device(struct mt76_dev *dev)
+{
+ mt76_worker_teardown(&dev->tx_worker);
+ if (dev->wq) {
+ destroy_workqueue(dev->wq);
+ dev->wq = NULL;
+ }
+ ieee80211_free_hw(dev->hw);
+}
+EXPORT_SYMBOL_GPL(mt76_free_device);
+
+void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct mt76_phy *phy = mt76_dev_phy(dev, status->ext_phy);
+
+ if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+#ifdef CONFIG_NL80211_TESTMODE
+ if (dev->test.state == MT76_TM_STATE_RX_FRAMES) {
+ dev->test.rx_stats.packets[q]++;
+ if (status->flag & RX_FLAG_FAILED_FCS_CRC)
+ dev->test.rx_stats.fcs_error[q]++;
+ }
+#endif
+ __skb_queue_tail(&dev->rx_skb[q], skb);
+}
+EXPORT_SYMBOL_GPL(mt76_rx);
+
+bool mt76_has_tx_pending(struct mt76_phy *phy)
+{
+ struct mt76_dev *dev = phy->dev;
+ struct mt76_queue *q;
+ int i, offset;
+
+ offset = __MT_TXQ_MAX * (phy != &dev->phy);
+
+ for (i = 0; i < __MT_TXQ_MAX; i++) {
+ q = dev->q_tx[offset + i];
+ if (q && q->queued)
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(mt76_has_tx_pending);
+
+static struct mt76_channel_state *
+mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
+{
+ struct mt76_sband *msband;
+ int idx;
+
+ if (c->band == NL80211_BAND_2GHZ)
+ msband = &phy->sband_2g;
+ else
+ msband = &phy->sband_5g;
+
+ idx = c - &msband->sband.channels[0];
+ return &msband->chan[idx];
+}
+
+void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
+{
+ struct mt76_channel_state *state = phy->chan_state;
+
+ state->cc_active += ktime_to_us(ktime_sub(time,
+ phy->survey_time));
+ phy->survey_time = time;
+}
+EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);
+
+void mt76_update_survey(struct mt76_dev *dev)
+{
+ ktime_t cur_time;
+
+ if (dev->drv->update_survey)
+ dev->drv->update_survey(dev);
+
+ cur_time = ktime_get_boottime();
+ mt76_update_survey_active_time(&dev->phy, cur_time);
+ if (dev->phy2)
+ mt76_update_survey_active_time(dev->phy2, cur_time);
+
+ if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
+ struct mt76_channel_state *state = dev->phy.chan_state;
+
+ spin_lock_bh(&dev->cc_lock);
+ state->cc_bss_rx += dev->cur_cc_bss_rx;
+ dev->cur_cc_bss_rx = 0;
+ spin_unlock_bh(&dev->cc_lock);
+ }
+}
+EXPORT_SYMBOL_GPL(mt76_update_survey);
+
+void mt76_set_channel(struct mt76_phy *phy)
+{
+ struct mt76_dev *dev = phy->dev;
+ struct ieee80211_hw *hw = phy->hw;
+ struct cfg80211_chan_def *chandef = &hw->conf.chandef;
+ bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
+ int timeout = HZ / 5;
+
+ wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
+ mt76_update_survey(dev);
+
+ phy->chandef = *chandef;
+ phy->chan_state = mt76_channel_state(phy, chandef->chan);
+
+ if (!offchannel)
+ phy->main_chan = chandef->chan;
+
+ if (chandef->chan != phy->main_chan)
+ memset(phy->chan_state, 0, sizeof(*phy->chan_state));
+}
+EXPORT_SYMBOL_GPL(mt76_set_channel);
+
+int mt76_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct mt76_phy *phy = hw->priv;
+ struct mt76_dev *dev = phy->dev;
+ struct mt76_sband *sband;
+ struct ieee80211_channel *chan;
+ struct mt76_channel_state *state;
+ int ret = 0;
+
+ mutex_lock(&dev->mutex);
+ if (idx == 0 && dev->drv->update_survey)
+ mt76_update_survey(dev);
+
+ sband = &phy->sband_2g;
+ if (idx >= sband->sband.n_channels) {
+ idx -= sband->sband.n_channels;
+ sband = &phy->sband_5g;
+ }
+
+ if (idx >= sband->sband.n_channels) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ chan = &sband->sband.channels[idx];
+ state = mt76_channel_state(phy, chan);
+
+ memset(survey, 0, sizeof(*survey));
+ survey->channel = chan;
+ survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
+ survey->filled |= dev->drv->survey_flags;
+ if (state->noise)
+ survey->filled |= SURVEY_INFO_NOISE_DBM;
+
+ if (chan == phy->main_chan) {
+ survey->filled |= SURVEY_INFO_IN_USE;
+
+ if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
+ survey->filled |= SURVEY_INFO_TIME_BSS_RX;
+ }
+
+ survey->time_busy = div_u64(state->cc_busy, 1000);
+ survey->time_rx = div_u64(state->cc_rx, 1000);
+ survey->time = div_u64(state->cc_active, 1000);
+ survey->noise = state->noise;
+
+ spin_lock_bh(&dev->cc_lock);
+ survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
+ survey->time_tx = div_u64(state->cc_tx, 1000);
+ spin_unlock_bh(&dev->cc_lock);
+
+out:
+ mutex_unlock(&dev->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76_get_survey);
+
+void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
+ struct ieee80211_key_conf *key)
+{
+ struct ieee80211_key_seq seq;
+ int i;
+
+ wcid->rx_check_pn = false;
+
+ if (!key)
+ return;
+
+ if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
+ return;
+
+ wcid->rx_check_pn = true;
+ for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
+ ieee80211_get_key_rx_seq(key, i, &seq);
+ memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
+ }
+}
+EXPORT_SYMBOL(mt76_wcid_key_setup);
+
+static void
+mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
+ struct ieee80211_hw **hw,
+ struct ieee80211_sta **sta)
+{
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ struct mt76_rx_status mstat;
+
+ mstat = *((struct mt76_rx_status *)skb->cb);
+ memset(status, 0, sizeof(*status));
+
+ status->flag = mstat.flag;
+ status->freq = mstat.freq;
+ status->enc_flags = mstat.enc_flags;
+ status->encoding = mstat.encoding;
+ status->bw = mstat.bw;
+ status->he_ru = mstat.he_ru;
+ status->he_gi = mstat.he_gi;
+ status->he_dcm = mstat.he_dcm;
+ status->rate_idx = mstat.rate_idx;
+ status->nss = mstat.nss;
+ status->band = mstat.band;
+ status->signal = mstat.signal;
+ status->chains = mstat.chains;
+ status->ampdu_reference = mstat.ampdu_ref;
+
+ BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
+ BUILD_BUG_ON(sizeof(status->chain_signal) !=
+ sizeof(mstat.chain_signal));
+ memcpy(status->chain_signal, mstat.chain_signal,
+ sizeof(mstat.chain_signal));
+
+ *sta = wcid_to_sta(mstat.wcid);
+ *hw = mt76_phy_hw(dev, mstat.ext_phy);
+}
+
+static int
+mt76_check_ccmp_pn(struct sk_buff *skb)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct mt76_wcid *wcid = status->wcid;
+ struct ieee80211_hdr *hdr;
+ int ret;
+
+ if (!(status->flag & RX_FLAG_DECRYPTED))
+ return 0;
+
+ if (!wcid || !wcid->rx_check_pn)
+ return 0;
+
+ if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
+ /*
+	 * Validate the first fragment both here and in mac80211.
+ * All further fragments will be validated by mac80211 only.
+ */
+ hdr = mt76_skb_get_hdr(skb);
+ if (ieee80211_is_frag(hdr) &&
+ !ieee80211_is_first_frag(hdr->frame_control))
+ return 0;
+ }
+
+ BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
+ ret = memcmp(status->iv, wcid->rx_key_pn[status->tid],
+ sizeof(status->iv));
+ if (ret <= 0)
+ return -EINVAL; /* replay */
+
+ memcpy(wcid->rx_key_pn[status->tid], status->iv, sizeof(status->iv));
+
+ if (status->flag & RX_FLAG_IV_STRIPPED)
+ status->flag |= RX_FLAG_PN_VALIDATED;
+
+ return 0;
+}
+
+static void
+mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
+ int len)
+{
+ struct mt76_wcid *wcid = status->wcid;
+ struct ieee80211_rx_status info = {
+ .enc_flags = status->enc_flags,
+ .rate_idx = status->rate_idx,
+ .encoding = status->encoding,
+ .band = status->band,
+ .nss = status->nss,
+ .bw = status->bw,
+ };
+ struct ieee80211_sta *sta;
+ u32 airtime;
+
+ airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len);
+ spin_lock(&dev->cc_lock);
+ dev->cur_cc_bss_rx += airtime;
+ spin_unlock(&dev->cc_lock);
+
+ if (!wcid || !wcid->sta)
+ return;
+
+ sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
+ ieee80211_sta_register_airtime(sta, status->tid, 0, airtime);
+}
+
+static void
+mt76_airtime_flush_ampdu(struct mt76_dev *dev)
+{
+ struct mt76_wcid *wcid;
+ int wcid_idx;
+
+ if (!dev->rx_ampdu_len)
+ return;
+
+ wcid_idx = dev->rx_ampdu_status.wcid_idx;
+ if (wcid_idx < ARRAY_SIZE(dev->wcid))
+ wcid = rcu_dereference(dev->wcid[wcid_idx]);
+ else
+ wcid = NULL;
+ dev->rx_ampdu_status.wcid = wcid;
+
+ mt76_airtime_report(dev, &dev->rx_ampdu_status, dev->rx_ampdu_len);
+
+ dev->rx_ampdu_len = 0;
+ dev->rx_ampdu_ref = 0;
+}
+
+static void
+mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct mt76_wcid *wcid = status->wcid;
+
+ if (!(dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME))
+ return;
+
+ if (!wcid || !wcid->sta) {
+ if (!ether_addr_equal(hdr->addr1, dev->macaddr))
+ return;
+
+ wcid = NULL;
+ }
+
+ if (!(status->flag & RX_FLAG_AMPDU_DETAILS) ||
+ status->ampdu_ref != dev->rx_ampdu_ref)
+ mt76_airtime_flush_ampdu(dev);
+
+ if (status->flag & RX_FLAG_AMPDU_DETAILS) {
+ if (!dev->rx_ampdu_len ||
+ status->ampdu_ref != dev->rx_ampdu_ref) {
+ dev->rx_ampdu_status = *status;
+ dev->rx_ampdu_status.wcid_idx = wcid ? wcid->idx : 0xff;
+ dev->rx_ampdu_ref = status->ampdu_ref;
+ }
+
+ dev->rx_ampdu_len += skb->len;
+ return;
+ }
+
+ mt76_airtime_report(dev, status, skb->len);
+}
+
+static void
+mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
+ struct ieee80211_sta *sta;
+ struct ieee80211_hw *hw;
+ struct mt76_wcid *wcid = status->wcid;
+ bool ps;
+
+ hw = mt76_phy_hw(dev, status->ext_phy);
+ if (ieee80211_is_pspoll(hdr->frame_control) && !wcid) {
+ sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
+ if (sta)
+ wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
+ }
+
+ mt76_airtime_check(dev, skb);
+
+ if (!wcid || !wcid->sta)
+ return;
+
+ sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
+
+ if (status->signal <= 0)
+ ewma_signal_add(&wcid->rssi, -status->signal);
+
+ wcid->inactive_count = 0;
+
+ if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
+ return;
+
+ if (ieee80211_is_pspoll(hdr->frame_control)) {
+ ieee80211_sta_pspoll(sta);
+ return;
+ }
+
+ if (ieee80211_has_morefrags(hdr->frame_control) ||
+ !(ieee80211_is_mgmt(hdr->frame_control) ||
+ ieee80211_is_data(hdr->frame_control)))
+ return;
+
+ ps = ieee80211_has_pm(hdr->frame_control);
+
+ if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
+ ieee80211_is_qos_nullfunc(hdr->frame_control)))
+ ieee80211_sta_uapsd_trigger(sta, status->tid);
+
+ if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
+ return;
+
+ if (ps)
+ set_bit(MT_WCID_FLAG_PS, &wcid->flags);
+ else
+ clear_bit(MT_WCID_FLAG_PS, &wcid->flags);
+
+ dev->drv->sta_ps(dev, sta, ps);
+ ieee80211_sta_ps_transition(sta, ps);
+}
+
+void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
+ struct napi_struct *napi)
+{
+ struct ieee80211_sta *sta;
+ struct ieee80211_hw *hw;
+ struct sk_buff *skb;
+
+ spin_lock(&dev->rx_lock);
+ while ((skb = __skb_dequeue(frames)) != NULL) {
+ if (mt76_check_ccmp_pn(skb)) {
+ dev_kfree_skb(skb);
+ continue;
+ }
+
+ mt76_rx_convert(dev, skb, &hw, &sta);
+ ieee80211_rx_napi(hw, sta, skb, napi);
+ }
+ spin_unlock(&dev->rx_lock);
+}
+
+void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
+ struct napi_struct *napi)
+{
+ struct sk_buff_head frames;
+ struct sk_buff *skb;
+
+ __skb_queue_head_init(&frames);
+
+ while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
+ mt76_check_sta(dev, skb);
+ mt76_rx_aggr_reorder(skb, &frames);
+ }
+
+ mt76_rx_complete(dev, &frames, napi);
+}
+EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
+
+static int
+mt76_sta_add(struct mt76_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool ext_phy)
+{
+ struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
+ int ret;
+ int i;
+
+ mutex_lock(&dev->mutex);
+
+ ret = dev->drv->sta_add(dev, vif, sta);
+ if (ret)
+ goto out;
+
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
+ struct mt76_txq *mtxq;
+
+ if (!sta->txq[i])
+ continue;
+
+ mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
+ mtxq->wcid = wcid;
+ }
+
+ ewma_signal_init(&wcid->rssi);
+ if (ext_phy)
+ mt76_wcid_mask_set(dev->wcid_phy_mask, wcid->idx);
+ wcid->ext_phy = ext_phy;
+ rcu_assign_pointer(dev->wcid[wcid->idx], wcid);
+
+out:
+ mutex_unlock(&dev->mutex);
+
+ return ret;
+}
+
+void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
+ int i, idx = wcid->idx;
+
+ for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
+ mt76_rx_aggr_stop(dev, wcid, i);
+
+ if (dev->drv->sta_remove)
+ dev->drv->sta_remove(dev, vif, sta);
+
+ mt76_tx_status_check(dev, wcid, true);
+ mt76_wcid_mask_clear(dev->wcid_mask, idx);
+ mt76_wcid_mask_clear(dev->wcid_phy_mask, idx);
+}
+EXPORT_SYMBOL_GPL(__mt76_sta_remove);
+
+static void
+mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ mutex_lock(&dev->mutex);
+ __mt76_sta_remove(dev, vif, sta);
+ mutex_unlock(&dev->mutex);
+}
+
+int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct mt76_phy *phy = hw->priv;
+ struct mt76_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE)
+ return mt76_sta_add(dev, vif, sta, ext_phy);
+
+ if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC &&
+ dev->drv->sta_assoc)
+ dev->drv->sta_assoc(dev, vif, sta);
+
+ if (old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST)
+ mt76_sta_remove(dev, vif, sta);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_sta_state);
+
+void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76_phy *phy = hw->priv;
+ struct mt76_dev *dev = phy->dev;
+ struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
+
+ mutex_lock(&dev->mutex);
+ rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
+ mutex_unlock(&dev->mutex);
+}
+EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);
+
+int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ int *dbm)
+{
+ struct mt76_phy *phy = hw->priv;
+ int n_chains = hweight8(phy->antenna_mask);
+ int delta = mt76_tx_power_nss_delta(n_chains);
+
+ *dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_get_txpower);
+
+static void
+__mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ if (vif->csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
+ ieee80211_csa_finish(vif);
+}
+
+void mt76_csa_finish(struct mt76_dev *dev)
+{
+ if (!dev->csa_complete)
+ return;
+
+ ieee80211_iterate_active_interfaces_atomic(dev->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ __mt76_csa_finish, dev);
+
+ dev->csa_complete = 0;
+}
+EXPORT_SYMBOL_GPL(mt76_csa_finish);
+
+static void
+__mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct mt76_dev *dev = priv;
+
+ if (!vif->csa_active)
+ return;
+
+ dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif);
+}
+
+void mt76_csa_check(struct mt76_dev *dev)
+{
+ ieee80211_iterate_active_interfaces_atomic(dev->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ __mt76_csa_check, dev);
+}
+EXPORT_SYMBOL_GPL(mt76_csa_check);
+
+int
+mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_set_tim);
+
+void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
+ u8 *hdr, *pn = status->iv;
+
+ __skb_push(skb, 8);
+ memmove(skb->data, skb->data + 8, hdr_len);
+ hdr = skb->data + hdr_len;
+
+ hdr[0] = pn[5];
+ hdr[1] = pn[4];
+ hdr[2] = 0;
+ hdr[3] = 0x20 | (key_id << 6);
+ hdr[4] = pn[3];
+ hdr[5] = pn[2];
+ hdr[6] = pn[1];
+ hdr[7] = pn[0];
+
+ status->flag &= ~RX_FLAG_IV_STRIPPED;
+}
+EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);
+
+int mt76_get_rate(struct mt76_dev *dev,
+ struct ieee80211_supported_band *sband,
+ int idx, bool cck)
+{
+ int i, offset = 0, len = sband->n_bitrates;
+
+ if (cck) {
+ if (sband == &dev->phy.sband_5g.sband)
+ return 0;
+
+ idx &= ~BIT(2); /* short preamble */
+ } else if (sband == &dev->phy.sband_2g.sband) {
+ offset = 4;
+ }
+
+ for (i = offset; i < len; i++) {
+ if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
+ return i;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_get_rate);
+
+void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ const u8 *mac)
+{
+ struct mt76_phy *phy = hw->priv;
+
+ set_bit(MT76_SCANNING, &phy->state);
+}
+EXPORT_SYMBOL_GPL(mt76_sw_scan);
+
+void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt76_phy *phy = hw->priv;
+
+ clear_bit(MT76_SCANNING, &phy->state);
+}
+EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);
+
+int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
+{
+ struct mt76_phy *phy = hw->priv;
+ struct mt76_dev *dev = phy->dev;
+
+ mutex_lock(&dev->mutex);
+ *tx_ant = phy->antenna_mask;
+ *rx_ant = phy->antenna_mask;
+ mutex_unlock(&dev->mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_get_antenna);
diff --git a/drivers/net/wireless/mediatek/mt76/mcu.c b/drivers/net/wireless/mediatek/mt76/mcu.c
new file mode 100644
index 000000000..ade61a533
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mcu.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2019 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include "mt76.h"
+
+struct sk_buff *
+mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
+ int data_len)
+{
+ const struct mt76_mcu_ops *ops = dev->mcu_ops;
+ int length = ops->headroom + data_len + ops->tailroom;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(length, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ memset(skb->head, 0, length);
+ skb_reserve(skb, ops->headroom);
+
+ if (data && data_len)
+ skb_put_data(skb, data, data_len);
+
+ return skb;
+}
+EXPORT_SYMBOL_GPL(mt76_mcu_msg_alloc);
+
+struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
+ unsigned long expires)
+{
+ unsigned long timeout;
+
+ if (!time_is_after_jiffies(expires))
+ return NULL;
+
+ timeout = expires - jiffies;
+ wait_event_timeout(dev->mcu.wait,
+ (!skb_queue_empty(&dev->mcu.res_q) ||
+ test_bit(MT76_MCU_RESET, &dev->phy.state)),
+ timeout);
+ return skb_dequeue(&dev->mcu.res_q);
+}
+EXPORT_SYMBOL_GPL(mt76_mcu_get_response);
+
+void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb)
+{
+ skb_queue_tail(&dev->mcu.res_q, skb);
+ wake_up(&dev->mcu.wait);
+}
+EXPORT_SYMBOL_GPL(mt76_mcu_rx_event);
diff --git a/drivers/net/wireless/mediatek/mt76/mmio.c b/drivers/net/wireless/mediatek/mt76/mmio.c
new file mode 100644
index 000000000..26353b6bc
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mmio.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#include "mt76.h"
+#include "trace.h"
+
+static u32 mt76_mmio_rr(struct mt76_dev *dev, u32 offset)
+{
+ u32 val;
+
+ val = readl(dev->mmio.regs + offset);
+ trace_reg_rr(dev, offset, val);
+
+ return val;
+}
+
+static void mt76_mmio_wr(struct mt76_dev *dev, u32 offset, u32 val)
+{
+ trace_reg_wr(dev, offset, val);
+ writel(val, dev->mmio.regs + offset);
+}
+
+static u32 mt76_mmio_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
+{
+ val |= mt76_mmio_rr(dev, offset) & ~mask;
+ mt76_mmio_wr(dev, offset, val);
+ return val;
+}
+
+static void mt76_mmio_write_copy(struct mt76_dev *dev, u32 offset,
+ const void *data, int len)
+{
+ __iowrite32_copy(dev->mmio.regs + offset, data, DIV_ROUND_UP(len, 4));
+}
+
+static void mt76_mmio_read_copy(struct mt76_dev *dev, u32 offset,
+ void *data, int len)
+{
+ __ioread32_copy(data, dev->mmio.regs + offset, DIV_ROUND_UP(len, 4));
+}
+
+static int mt76_mmio_wr_rp(struct mt76_dev *dev, u32 base,
+ const struct mt76_reg_pair *data, int len)
+{
+ while (len > 0) {
+ mt76_mmio_wr(dev, data->reg, data->value);
+ data++;
+ len--;
+ }
+
+ return 0;
+}
+
+static int mt76_mmio_rd_rp(struct mt76_dev *dev, u32 base,
+ struct mt76_reg_pair *data, int len)
+{
+ while (len > 0) {
+ data->value = mt76_mmio_rr(dev, data->reg);
+ data++;
+ len--;
+ }
+
+ return 0;
+}
+
+void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr,
+ u32 clear, u32 set)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->mmio.irq_lock, flags);
+ dev->mmio.irqmask &= ~clear;
+ dev->mmio.irqmask |= set;
+ if (addr)
+ mt76_mmio_wr(dev, addr, dev->mmio.irqmask);
+ spin_unlock_irqrestore(&dev->mmio.irq_lock, flags);
+}
+EXPORT_SYMBOL_GPL(mt76_set_irq_mask);
+
+void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs)
+{
+ static const struct mt76_bus_ops mt76_mmio_ops = {
+ .rr = mt76_mmio_rr,
+ .rmw = mt76_mmio_rmw,
+ .wr = mt76_mmio_wr,
+ .write_copy = mt76_mmio_write_copy,
+ .read_copy = mt76_mmio_read_copy,
+ .wr_rp = mt76_mmio_wr_rp,
+ .rd_rp = mt76_mmio_rd_rp,
+ .type = MT76_BUS_MMIO,
+ };
+
+ dev->bus = &mt76_mmio_ops;
+ dev->mmio.regs = regs;
+
+ spin_lock_init(&dev->mmio.irq_lock);
+}
+EXPORT_SYMBOL_GPL(mt76_mmio_init);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
new file mode 100644
index 000000000..16e65020a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -0,0 +1,1080 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#ifndef __MT76_H
+#define __MT76_H
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/leds.h>
+#include <linux/usb.h>
+#include <linux/average.h>
+#include <net/mac80211.h>
+#include "util.h"
+#include "testmode.h"
+
+#define MT_MCU_RING_SIZE 32
+#define MT_RX_BUF_SIZE 2048
+#define MT_SKB_HEAD_LEN 128
+
+#define MT_MAX_NON_AQL_PKT 16
+#define MT_TXQ_FREE_THR 32
+
+struct mt76_dev;
+struct mt76_phy;
+struct mt76_wcid;
+
+struct mt76_reg_pair {
+ u32 reg;
+ u32 value;
+};
+
+enum mt76_bus_type {
+ MT76_BUS_MMIO,
+ MT76_BUS_USB,
+ MT76_BUS_SDIO,
+};
+
+struct mt76_bus_ops {
+ u32 (*rr)(struct mt76_dev *dev, u32 offset);
+ void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
+ u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
+ void (*write_copy)(struct mt76_dev *dev, u32 offset, const void *data,
+ int len);
+ void (*read_copy)(struct mt76_dev *dev, u32 offset, void *data,
+ int len);
+ int (*wr_rp)(struct mt76_dev *dev, u32 base,
+ const struct mt76_reg_pair *rp, int len);
+ int (*rd_rp)(struct mt76_dev *dev, u32 base,
+ struct mt76_reg_pair *rp, int len);
+ enum mt76_bus_type type;
+};
+
+#define mt76_is_usb(dev) ((dev)->bus->type == MT76_BUS_USB)
+#define mt76_is_mmio(dev) ((dev)->bus->type == MT76_BUS_MMIO)
+#define mt76_is_sdio(dev) ((dev)->bus->type == MT76_BUS_SDIO)
+
+enum mt76_txq_id {
+ MT_TXQ_VO = IEEE80211_AC_VO,
+ MT_TXQ_VI = IEEE80211_AC_VI,
+ MT_TXQ_BE = IEEE80211_AC_BE,
+ MT_TXQ_BK = IEEE80211_AC_BK,
+ MT_TXQ_PSD,
+ MT_TXQ_MCU,
+ MT_TXQ_MCU_WA,
+ MT_TXQ_BEACON,
+ MT_TXQ_CAB,
+ MT_TXQ_FWDL,
+ __MT_TXQ_MAX
+};
+
+enum mt76_rxq_id {
+ MT_RXQ_MAIN,
+ MT_RXQ_MCU,
+ MT_RXQ_MCU_WA,
+ __MT_RXQ_MAX
+};
+
+struct mt76_queue_buf {
+ dma_addr_t addr;
+ u16 len;
+ bool skip_unmap;
+};
+
+struct mt76_tx_info {
+ struct mt76_queue_buf buf[32];
+ struct sk_buff *skb;
+ int nbuf;
+ u32 info;
+};
+
+struct mt76_queue_entry {
+ union {
+ void *buf;
+ struct sk_buff *skb;
+ };
+ union {
+ struct mt76_txwi_cache *txwi;
+ struct urb *urb;
+ int buf_sz;
+ };
+ u32 dma_addr[2];
+ u16 dma_len[2];
+ u16 wcid;
+ bool skip_buf0:1;
+ bool skip_buf1:1;
+ bool done:1;
+};
+
+struct mt76_queue_regs {
+ u32 desc_base;
+ u32 ring_size;
+ u32 cpu_idx;
+ u32 dma_idx;
+} __packed __aligned(4);
+
+struct mt76_queue {
+ struct mt76_queue_regs __iomem *regs;
+
+ spinlock_t lock;
+ struct mt76_queue_entry *entry;
+ struct mt76_desc *desc;
+
+ u16 first;
+ u16 head;
+ u16 tail;
+ int ndesc;
+ int queued;
+ int buf_size;
+ bool stopped;
+
+ u8 buf_offset;
+ u8 hw_idx;
+
+ dma_addr_t desc_dma;
+ struct sk_buff *rx_head;
+ struct page_frag_cache rx_page;
+};
+
+struct mt76_mcu_ops {
+ u32 headroom;
+ u32 tailroom;
+
+ int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data,
+ int len, bool wait_resp);
+ int (*mcu_skb_send_msg)(struct mt76_dev *dev, struct sk_buff *skb,
+ int cmd, bool wait_resp);
+ u32 (*mcu_rr)(struct mt76_dev *dev, u32 offset);
+ void (*mcu_wr)(struct mt76_dev *dev, u32 offset, u32 val);
+ int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base,
+ const struct mt76_reg_pair *rp, int len);
+ int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
+ struct mt76_reg_pair *rp, int len);
+ int (*mcu_restart)(struct mt76_dev *dev);
+};
+
+struct mt76_queue_ops {
+ int (*init)(struct mt76_dev *dev);
+
+ int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q,
+ int idx, int n_desc, int bufsize,
+ u32 ring_base);
+
+ int (*tx_queue_skb)(struct mt76_dev *dev, enum mt76_txq_id qid,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta);
+
+ int (*tx_queue_skb_raw)(struct mt76_dev *dev, enum mt76_txq_id qid,
+ struct sk_buff *skb, u32 tx_info);
+
+ void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
+ int *len, u32 *info, bool *more);
+
+ void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);
+
+ void (*tx_cleanup)(struct mt76_dev *dev, enum mt76_txq_id qid,
+ bool flush);
+
+ void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
+};
+
+enum mt76_wcid_flags {
+ MT_WCID_FLAG_CHECK_PS,
+ MT_WCID_FLAG_PS,
+};
+
+#define MT76_N_WCIDS 288
+
+/* stored in ieee80211_tx_info::hw_queue */
+#define MT_TX_HW_QUEUE_EXT_PHY BIT(3)
+
+DECLARE_EWMA(signal, 10, 8);
+
+#define MT_WCID_TX_INFO_RATE GENMASK(15, 0)
+#define MT_WCID_TX_INFO_NSS GENMASK(17, 16)
+#define MT_WCID_TX_INFO_TXPWR_ADJ GENMASK(25, 18)
+#define MT_WCID_TX_INFO_SET BIT(31)
+
+struct mt76_wcid {
+ struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];
+
+ atomic_t non_aql_packets;
+ unsigned long flags;
+
+ struct ewma_signal rssi;
+ int inactive_count;
+
+ u16 idx;
+ u8 hw_key_idx;
+
+ u8 sta:1;
+ u8 ext_phy:1;
+ u8 amsdu:1;
+
+ u8 rx_check_pn;
+ u8 rx_key_pn[IEEE80211_NUM_TIDS][6];
+ u16 cipher;
+
+ u32 tx_info;
+ bool sw_iv;
+
+ u8 packet_id;
+};
+
+struct mt76_txq {
+ struct mt76_wcid *wcid;
+
+ u16 agg_ssn;
+ bool send_bar;
+ bool aggr;
+};
+
+struct mt76_txwi_cache {
+ struct list_head list;
+ dma_addr_t dma_addr;
+
+ struct sk_buff *skb;
+};
+
+struct mt76_rx_tid {
+ struct rcu_head rcu_head;
+
+ struct mt76_dev *dev;
+
+ spinlock_t lock;
+ struct delayed_work reorder_work;
+
+ u16 head;
+ u16 size;
+ u16 nframes;
+
+ u8 num;
+
+ u8 started:1, stopped:1, timer_pending:1;
+
+ struct sk_buff *reorder_buf[];
+};
+
+#define MT_TX_CB_DMA_DONE BIT(0)
+#define MT_TX_CB_TXS_DONE BIT(1)
+#define MT_TX_CB_TXS_FAILED BIT(2)
+
+#define MT_PACKET_ID_MASK GENMASK(6, 0)
+#define MT_PACKET_ID_NO_ACK 0
+#define MT_PACKET_ID_NO_SKB 1
+#define MT_PACKET_ID_FIRST 2
+#define MT_PACKET_ID_HAS_RATE BIT(7)
+
+#define MT_TX_STATUS_SKB_TIMEOUT HZ
+
+struct mt76_tx_cb {
+ unsigned long jiffies;
+ u16 wcid;
+ u8 pktid;
+ u8 flags;
+};
+
+enum {
+ MT76_STATE_INITIALIZED,
+ MT76_STATE_RUNNING,
+ MT76_STATE_MCU_RUNNING,
+ MT76_SCANNING,
+ MT76_HW_SCANNING,
+ MT76_HW_SCHED_SCANNING,
+ MT76_RESTART,
+ MT76_RESET,
+ MT76_MCU_RESET,
+ MT76_REMOVED,
+ MT76_READING_STATS,
+ MT76_STATE_POWER_OFF,
+ MT76_STATE_SUSPEND,
+ MT76_STATE_ROC,
+ MT76_STATE_PM,
+};
+
+struct mt76_hw_cap {
+ bool has_2ghz;
+ bool has_5ghz;
+};
+
+#define MT_DRV_TXWI_NO_FREE BIT(0)
+#define MT_DRV_TX_ALIGNED4_SKBS BIT(1)
+#define MT_DRV_SW_RX_AIRTIME BIT(2)
+#define MT_DRV_RX_DMA_HDR BIT(3)
+#define MT_DRV_HW_MGMT_TXQ BIT(4)
+#define MT_DRV_AMSDU_OFFLOAD BIT(5)
+
+struct mt76_driver_ops {
+ u32 drv_flags;
+ u32 survey_flags;
+ u16 txwi_size;
+
+ void (*update_survey)(struct mt76_dev *dev);
+
+ int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info);
+
+ void (*tx_complete_skb)(struct mt76_dev *dev,
+ struct mt76_queue_entry *e);
+
+ bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);
+
+ void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
+ struct sk_buff *skb);
+
+ void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);
+
+ void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
+ bool ps);
+
+ int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+
+ void (*sta_assoc)(struct mt76_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+
+ void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+};
+
+struct mt76_channel_state {
+ u64 cc_active;
+ u64 cc_busy;
+ u64 cc_rx;
+ u64 cc_bss_rx;
+ u64 cc_tx;
+
+ s8 noise;
+};
+
+struct mt76_sband {
+ struct ieee80211_supported_band sband;
+ struct mt76_channel_state *chan;
+};
+
+struct mt76_rate_power {
+ union {
+ struct {
+ s8 cck[4];
+ s8 ofdm[8];
+ s8 stbc[10];
+ s8 ht[16];
+ s8 vht[10];
+ };
+ s8 all[48];
+ };
+};
+
+/* addr req mask */
+#define MT_VEND_TYPE_EEPROM BIT(31)
+#define MT_VEND_TYPE_CFG BIT(30)
+#define MT_VEND_TYPE_MASK (MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)
+
+#define MT_VEND_ADDR(type, n) (MT_VEND_TYPE_##type | (n))
+enum mt_vendor_req {
+ MT_VEND_DEV_MODE = 0x1,
+ MT_VEND_WRITE = 0x2,
+ MT_VEND_POWER_ON = 0x4,
+ MT_VEND_MULTI_WRITE = 0x6,
+ MT_VEND_MULTI_READ = 0x7,
+ MT_VEND_READ_EEPROM = 0x9,
+ MT_VEND_WRITE_FCE = 0x42,
+ MT_VEND_WRITE_CFG = 0x46,
+ MT_VEND_READ_CFG = 0x47,
+ MT_VEND_READ_EXT = 0x63,
+ MT_VEND_WRITE_EXT = 0x66,
+ MT_VEND_FEATURE_SET = 0x91,
+};
+
+enum mt76u_in_ep {
+ MT_EP_IN_PKT_RX,
+ MT_EP_IN_CMD_RESP,
+ __MT_EP_IN_MAX,
+};
+
+enum mt76u_out_ep {
+ MT_EP_OUT_INBAND_CMD,
+ MT_EP_OUT_AC_BE,
+ MT_EP_OUT_AC_BK,
+ MT_EP_OUT_AC_VI,
+ MT_EP_OUT_AC_VO,
+ MT_EP_OUT_HCCA,
+ __MT_EP_OUT_MAX,
+};
+
+struct mt76_mcu {
+ struct mutex mutex;
+ u32 msg_seq;
+
+ struct sk_buff_head res_q;
+ wait_queue_head_t wait;
+};
+
+#define MT_TX_SG_MAX_SIZE 8
+#define MT_RX_SG_MAX_SIZE 4
+#define MT_NUM_TX_ENTRIES 256
+#define MT_NUM_RX_ENTRIES 128
+#define MCU_RESP_URB_SIZE 1024
+struct mt76_usb {
+ struct mutex usb_ctrl_mtx;
+ u8 *data;
+ u16 data_len;
+
+ struct tasklet_struct rx_tasklet;
+ struct work_struct stat_work;
+
+ u8 out_ep[__MT_EP_OUT_MAX];
+ u8 in_ep[__MT_EP_IN_MAX];
+ bool sg_en;
+
+ struct mt76u_mcu {
+ u8 *data;
+ /* multiple reads */
+ struct mt76_reg_pair *rp;
+ int rp_len;
+ u32 base;
+ bool burst;
+ } mcu;
+};
+
+#define MT76S_XMIT_BUF_SZ (16 * PAGE_SIZE)
+struct mt76_sdio {
+ struct workqueue_struct *txrx_wq;
+ struct {
+ struct work_struct xmit_work;
+ struct work_struct status_work;
+ } tx;
+ struct {
+ struct work_struct recv_work;
+ struct work_struct net_work;
+ } rx;
+
+ struct work_struct stat_work;
+
+ u8 *xmit_buf[MT_TXQ_MCU_WA];
+
+ struct sdio_func *func;
+ void *intr_data;
+
+ struct {
+ struct mutex lock;
+ int pse_data_quota;
+ int ple_data_quota;
+ int pse_mcu_quota;
+ int deficit;
+ } sched;
+};
+
+struct mt76_mmio {
+ void __iomem *regs;
+ spinlock_t irq_lock;
+ u32 irqmask;
+};
+
+struct mt76_rx_status {
+ union {
+ struct mt76_wcid *wcid;
+ u16 wcid_idx;
+ };
+
+ unsigned long reorder_time;
+
+ u32 ampdu_ref;
+
+ u8 iv[6];
+
+ u8 ext_phy:1;
+ u8 aggr:1;
+ u8 tid;
+ u16 seqno;
+
+ u16 freq;
+ u32 flag;
+ u8 enc_flags;
+ u8 encoding:2, bw:3, he_ru:3;
+ u8 he_gi:2, he_dcm:1;
+ u8 rate_idx;
+ u8 nss;
+ u8 band;
+ s8 signal;
+ u8 chains;
+ s8 chain_signal[IEEE80211_MAX_CHAINS];
+};
+
+struct mt76_testmode_ops {
+ int (*set_state)(struct mt76_dev *dev, enum mt76_testmode_state state);
+ int (*set_params)(struct mt76_dev *dev, struct nlattr **tb,
+ enum mt76_testmode_state new_state);
+ int (*dump_stats)(struct mt76_dev *dev, struct sk_buff *msg);
+};
+
+struct mt76_testmode_data {
+ enum mt76_testmode_state state;
+
+ u32 param_set[DIV_ROUND_UP(NUM_MT76_TM_ATTRS, 32)];
+ struct sk_buff *tx_skb;
+
+ u32 tx_count;
+ u16 tx_msdu_len;
+
+ u8 tx_rate_mode;
+ u8 tx_rate_idx;
+ u8 tx_rate_nss;
+ u8 tx_rate_sgi;
+ u8 tx_rate_ldpc;
+
+ u8 tx_antenna_mask;
+
+ u32 freq_offset;
+
+ u8 tx_power[4];
+ u8 tx_power_control;
+
+ const char *mtd_name;
+ u32 mtd_offset;
+
+ u32 tx_pending;
+ u32 tx_queued;
+ u32 tx_done;
+ struct {
+ u64 packets[__MT_RXQ_MAX];
+ u64 fcs_error[__MT_RXQ_MAX];
+ } rx_stats;
+};
+
+struct mt76_phy {
+ struct ieee80211_hw *hw;
+ struct mt76_dev *dev;
+ void *priv;
+
+ unsigned long state;
+
+ struct cfg80211_chan_def chandef;
+ struct ieee80211_channel *main_chan;
+
+ struct mt76_channel_state *chan_state;
+ ktime_t survey_time;
+
+ struct mt76_hw_cap cap;
+ struct mt76_sband sband_2g;
+ struct mt76_sband sband_5g;
+
+ u32 vif_mask;
+
+ int txpower_cur;
+ u8 antenna_mask;
+};
+
+struct mt76_dev {
+ struct mt76_phy phy; /* must be first */
+
+ struct mt76_phy *phy2;
+
+ struct ieee80211_hw *hw;
+
+ spinlock_t lock;
+ spinlock_t cc_lock;
+
+ u32 cur_cc_bss_rx;
+
+ struct mt76_rx_status rx_ampdu_status;
+ u32 rx_ampdu_len;
+ u32 rx_ampdu_ref;
+
+ struct mutex mutex;
+
+ const struct mt76_bus_ops *bus;
+ const struct mt76_driver_ops *drv;
+ const struct mt76_mcu_ops *mcu_ops;
+ struct device *dev;
+
+ struct mt76_mcu mcu;
+
+ struct net_device napi_dev;
+ spinlock_t rx_lock;
+ struct napi_struct napi[__MT_RXQ_MAX];
+ struct sk_buff_head rx_skb[__MT_RXQ_MAX];
+
+ struct list_head txwi_cache;
+ struct mt76_queue *q_tx[2 * __MT_TXQ_MAX];
+ struct mt76_queue q_rx[__MT_RXQ_MAX];
+ const struct mt76_queue_ops *queue_ops;
+ int tx_dma_idx[4];
+
+ struct mt76_worker tx_worker;
+ struct napi_struct tx_napi;
+ struct delayed_work mac_work;
+
+ wait_queue_head_t tx_wait;
+ struct sk_buff_head status_list;
+
+ u32 wcid_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)];
+ u32 wcid_phy_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)];
+
+ struct mt76_wcid global_wcid;
+ struct mt76_wcid __rcu *wcid[MT76_N_WCIDS];
+
+ u8 macaddr[ETH_ALEN];
+ u32 rev;
+
+ u32 aggr_stats[32];
+
+ struct tasklet_struct pre_tbtt_tasklet;
+ int beacon_int;
+ u8 beacon_mask;
+
+ struct debugfs_blob_wrapper eeprom;
+ struct debugfs_blob_wrapper otp;
+
+ struct mt76_rate_power rate_power;
+
+ enum nl80211_dfs_regions region;
+
+ u32 debugfs_reg;
+
+ struct led_classdev led_cdev;
+ char led_name[32];
+ bool led_al;
+ u8 led_pin;
+
+ u8 csa_complete;
+
+ u32 rxfilter;
+
+#ifdef CONFIG_NL80211_TESTMODE
+ const struct mt76_testmode_ops *test_ops;
+ struct mt76_testmode_data test;
+#endif
+
+ struct workqueue_struct *wq;
+
+ union {
+ struct mt76_mmio mmio;
+ struct mt76_usb usb;
+ struct mt76_sdio sdio;
+ };
+};
+
+enum mt76_phy_type {
+ MT_PHY_TYPE_CCK,
+ MT_PHY_TYPE_OFDM,
+ MT_PHY_TYPE_HT,
+ MT_PHY_TYPE_HT_GF,
+ MT_PHY_TYPE_VHT,
+ MT_PHY_TYPE_HE_SU = 8,
+ MT_PHY_TYPE_HE_EXT_SU,
+ MT_PHY_TYPE_HE_TB,
+ MT_PHY_TYPE_HE_MU,
+};
+
+#define __mt76_rr(dev, ...) (dev)->bus->rr((dev), __VA_ARGS__)
+#define __mt76_wr(dev, ...) (dev)->bus->wr((dev), __VA_ARGS__)
+#define __mt76_rmw(dev, ...) (dev)->bus->rmw((dev), __VA_ARGS__)
+#define __mt76_wr_copy(dev, ...) (dev)->bus->write_copy((dev), __VA_ARGS__)
+#define __mt76_rr_copy(dev, ...) (dev)->bus->read_copy((dev), __VA_ARGS__)
+
+#define __mt76_set(dev, offset, val) __mt76_rmw(dev, offset, 0, val)
+#define __mt76_clear(dev, offset, val) __mt76_rmw(dev, offset, val, 0)
+
+#define mt76_rr(dev, ...) (dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
+#define mt76_wr(dev, ...) (dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
+#define mt76_rmw(dev, ...) (dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
+#define mt76_wr_copy(dev, ...) (dev)->mt76.bus->write_copy(&((dev)->mt76), __VA_ARGS__)
+#define mt76_rr_copy(dev, ...) (dev)->mt76.bus->read_copy(&((dev)->mt76), __VA_ARGS__)
+#define mt76_wr_rp(dev, ...) (dev)->mt76.bus->wr_rp(&((dev)->mt76), __VA_ARGS__)
+#define mt76_rd_rp(dev, ...) (dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__)
+
+#define mt76_mcu_send_msg(dev, ...) (dev)->mt76.mcu_ops->mcu_send_msg(&((dev)->mt76), __VA_ARGS__)
+
+#define __mt76_mcu_send_msg(dev, ...) (dev)->mcu_ops->mcu_send_msg((dev), __VA_ARGS__)
+#define __mt76_mcu_skb_send_msg(dev, ...) (dev)->mcu_ops->mcu_skb_send_msg((dev), __VA_ARGS__)
+#define mt76_mcu_restart(dev, ...) (dev)->mt76.mcu_ops->mcu_restart(&((dev)->mt76))
+#define __mt76_mcu_restart(dev, ...) (dev)->mcu_ops->mcu_restart((dev))
+
+#define mt76_set(dev, offset, val) mt76_rmw(dev, offset, 0, val)
+#define mt76_clear(dev, offset, val) mt76_rmw(dev, offset, val, 0)
+
+#define mt76_get_field(_dev, _reg, _field) \
+ FIELD_GET(_field, mt76_rr(dev, _reg))
+
+#define mt76_rmw_field(_dev, _reg, _field, _val) \
+ mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
+
+#define __mt76_rmw_field(_dev, _reg, _field, _val) \
+ __mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
+
+#define mt76_hw(dev) (dev)->mphy.hw
+
+static inline struct ieee80211_hw *
+mt76_wcid_hw(struct mt76_dev *dev, u16 wcid)
+{
+ if (wcid <= MT76_N_WCIDS &&
+ mt76_wcid_mask_test(dev->wcid_phy_mask, wcid))
+ return dev->phy2->hw;
+
+ return dev->phy.hw;
+}
+
+bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+ int timeout);
+
+#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)
+
+bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+ int timeout);
+
+#define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)
+
+void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
+void mt76_pci_disable_aspm(struct pci_dev *pdev);
+
+static inline u16 mt76_chip(struct mt76_dev *dev)
+{
+ return dev->rev >> 16;
+}
+
+static inline u16 mt76_rev(struct mt76_dev *dev)
+{
+ return dev->rev & 0xffff;
+}
+
+#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
+#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))
+
+#define mt76_init_queues(dev) (dev)->mt76.queue_ops->init(&((dev)->mt76))
+#define mt76_queue_alloc(dev, ...) (dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
+#define mt76_tx_queue_skb_raw(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__)
+#define mt76_tx_queue_skb(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mt76), __VA_ARGS__)
+#define mt76_queue_rx_reset(dev, ...) (dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
+#define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
+#define mt76_queue_kick(dev, ...) (dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
+
+#define mt76_for_each_q_rx(dev, i) \
+ for (i = 0; i < ARRAY_SIZE((dev)->q_rx) && \
+ (dev)->q_rx[i].ndesc; i++)
+
+struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
+ const struct ieee80211_ops *ops,
+ const struct mt76_driver_ops *drv_ops);
+int mt76_register_device(struct mt76_dev *dev, bool vht,
+ struct ieee80211_rate *rates, int n_rates);
+void mt76_unregister_device(struct mt76_dev *dev);
+void mt76_free_device(struct mt76_dev *dev);
+void mt76_unregister_phy(struct mt76_phy *phy);
+
+struct mt76_phy *mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
+ const struct ieee80211_ops *ops);
+int mt76_register_phy(struct mt76_phy *phy);
+
+struct dentry *mt76_register_debugfs(struct mt76_dev *dev);
+int mt76_queues_read(struct seq_file *s, void *data);
+void mt76_seq_puts_array(struct seq_file *file, const char *str,
+ s8 *val, int len);
+
+int mt76_eeprom_init(struct mt76_dev *dev, int len);
+void mt76_eeprom_override(struct mt76_dev *dev);
+
+static inline struct mt76_phy *
+mt76_dev_phy(struct mt76_dev *dev, bool phy_ext)
+{
+ if (phy_ext && dev->phy2)
+ return dev->phy2;
+ return &dev->phy;
+}
+
+static inline struct ieee80211_hw *
+mt76_phy_hw(struct mt76_dev *dev, bool phy_ext)
+{
+ return mt76_dev_phy(dev, phy_ext)->hw;
+}
+
+static inline u8 *
+mt76_get_txwi_ptr(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+{
+ return (u8 *)t - dev->drv->txwi_size;
+}
+
+/* increment with wrap-around */
+static inline int mt76_incr(int val, int size)
+{
+ return (val + 1) & (size - 1);
+}
+
+/* decrement with wrap-around */
+static inline int mt76_decr(int val, int size)
+{
+ return (val - 1) & (size - 1);
+}
+
+u8 mt76_ac_to_hwq(u8 ac);
+
+static inline struct ieee80211_txq *
+mtxq_to_txq(struct mt76_txq *mtxq)
+{
+ void *ptr = mtxq;
+
+ return container_of(ptr, struct ieee80211_txq, drv_priv);
+}
+
+static inline struct ieee80211_sta *
+wcid_to_sta(struct mt76_wcid *wcid)
+{
+ void *ptr = wcid;
+
+ if (!wcid || !wcid->sta)
+ return NULL;
+
+ return container_of(ptr, struct ieee80211_sta, drv_priv);
+}
+
+static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct mt76_tx_cb) >
+ sizeof(IEEE80211_SKB_CB(skb)->status.status_driver_data));
+ return ((void *)IEEE80211_SKB_CB(skb)->status.status_driver_data);
+}
+
+static inline void *mt76_skb_get_hdr(struct sk_buff *skb)
+{
+ struct mt76_rx_status mstat;
+ u8 *data = skb->data;
+
+ /* Alignment concerns */
+ BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) % 4);
+ BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) % 4);
+
+ mstat = *((struct mt76_rx_status *)skb->cb);
+
+ if (mstat.flag & RX_FLAG_RADIOTAP_HE)
+ data += sizeof(struct ieee80211_radiotap_he);
+ if (mstat.flag & RX_FLAG_RADIOTAP_HE_MU)
+ data += sizeof(struct ieee80211_radiotap_he_mu);
+
+ return data;
+}
+
+static inline void mt76_insert_hdr_pad(struct sk_buff *skb)
+{
+ int len = ieee80211_get_hdrlen_from_skb(skb);
+
+ if (len % 4 == 0)
+ return;
+
+ skb_push(skb, 2);
+ memmove(skb->data, skb->data + 2, len);
+
+ skb->data[len] = 0;
+ skb->data[len + 1] = 0;
+}
+
+static inline bool mt76_is_skb_pktid(u8 pktid)
+{
+ if (pktid & MT_PACKET_ID_HAS_RATE)
+ return false;
+
+ return pktid >= MT_PACKET_ID_FIRST;
+}
+
+static inline u8 mt76_tx_power_nss_delta(u8 nss)
+{
+ static const u8 nss_delta[4] = { 0, 6, 9, 12 };
+ u8 idx = nss - 1;
+
+ return (idx < ARRAY_SIZE(nss_delta)) ? nss_delta[idx] : 0;
+}
+
+static inline bool mt76_testmode_enabled(struct mt76_dev *dev)
+{
+#ifdef CONFIG_NL80211_TESTMODE
+ return dev->test.state != MT76_TM_STATE_OFF;
+#else
+ return false;
+#endif
+}
+
+void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
+void mt76_tx(struct mt76_phy *dev, struct ieee80211_sta *sta,
+ struct mt76_wcid *wcid, struct sk_buff *skb);
+void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
+void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
+ bool send_bar);
+void mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb);
+void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid);
+void mt76_txq_schedule_all(struct mt76_phy *phy);
+void mt76_tx_worker(struct mt76_worker *w);
+void mt76_release_buffered_frames(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u16 tids, int nframes,
+ enum ieee80211_frame_release_type reason,
+ bool more_data);
+bool mt76_has_tx_pending(struct mt76_phy *phy);
+void mt76_set_channel(struct mt76_phy *phy);
+void mt76_update_survey(struct mt76_dev *dev);
+void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time);
+int mt76_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey);
+void mt76_set_stream_caps(struct mt76_phy *phy, bool vht);
+
+int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
+ u16 ssn, u16 size);
+void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);
+
+void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
+ struct ieee80211_key_conf *key);
+
+void mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
+ __acquires(&dev->status_list.lock);
+void mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
+ __releases(&dev->status_list.lock);
+
+int mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
+ struct sk_buff *skb);
+struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
+ struct mt76_wcid *wcid, int pktid,
+ struct sk_buff_head *list);
+void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
+ struct sk_buff_head *list);
+void mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb);
+void mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid,
+ bool flush);
+int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state);
+void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+
+int mt76_get_min_avg_rssi(struct mt76_dev *dev, bool ext_phy);
+
+int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ int *dbm);
+
+void mt76_csa_check(struct mt76_dev *dev);
+void mt76_csa_finish(struct mt76_dev *dev);
+
+int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
+int mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set);
+void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id);
+int mt76_get_rate(struct mt76_dev *dev,
+ struct ieee80211_supported_band *sband,
+ int idx, bool cck);
+void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ const u8 *mac);
+void mt76_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ void *data, int len);
+int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
+ struct netlink_callback *cb, void *data, int len);
+int mt76_testmode_set_state(struct mt76_dev *dev, enum mt76_testmode_state state);
+
+static inline void mt76_testmode_reset(struct mt76_dev *dev, bool disable)
+{
+#ifdef CONFIG_NL80211_TESTMODE
+ enum mt76_testmode_state state = MT76_TM_STATE_IDLE;
+
+ if (disable || dev->test.state == MT76_TM_STATE_OFF)
+ state = MT76_TM_STATE_OFF;
+
+ mt76_testmode_set_state(dev, state);
+#endif
+}
+
+
+/* internal */
+static inline struct ieee80211_hw *
+mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hw *hw = dev->phy.hw;
+
+ if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && dev->phy2)
+ hw = dev->phy2->hw;
+
+ info->hw_queue &= ~MT_TX_HW_QUEUE_EXT_PHY;
+
+ return hw;
+}
+
+void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
+void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
+ struct napi_struct *napi);
+void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
+ struct napi_struct *napi);
+void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
+void mt76_testmode_tx_pending(struct mt76_dev *dev);
+void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
+ struct mt76_queue_entry *e);
+
+/* usb */
+static inline bool mt76u_urb_error(struct urb *urb)
+{
+ return urb->status &&
+ urb->status != -ECONNRESET &&
+ urb->status != -ESHUTDOWN &&
+ urb->status != -ENOENT;
+}
+
+/* Map hardware queues to usb endpoints */
+static inline u8 q2ep(u8 qid)
+{
+ /* TODO: take management packets to queue 5 */
+ return qid + 1;
+}
+
+static inline int
+mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
+ int timeout, int ep)
+{
+ struct usb_interface *uintf = to_usb_interface(dev->dev);
+ struct usb_device *udev = interface_to_usbdev(uintf);
+ struct mt76_usb *usb = &dev->usb;
+ unsigned int pipe;
+
+ if (actual_len)
+ pipe = usb_rcvbulkpipe(udev, usb->in_ep[ep]);
+ else
+ pipe = usb_sndbulkpipe(udev, usb->out_ep[ep]);
+
+ return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
+}
+
+int mt76_skb_adjust_pad(struct sk_buff *skb, int pad);
+int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
+ u8 req_type, u16 val, u16 offset,
+ void *buf, size_t len);
+void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
+ const u16 offset, const u32 val);
+int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
+ bool ext);
+int mt76u_alloc_mcu_queue(struct mt76_dev *dev);
+int mt76u_alloc_queues(struct mt76_dev *dev);
+void mt76u_stop_tx(struct mt76_dev *dev);
+void mt76u_stop_rx(struct mt76_dev *dev);
+int mt76u_resume_rx(struct mt76_dev *dev);
+void mt76u_queues_deinit(struct mt76_dev *dev);
+
+int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
+ const struct mt76_bus_ops *bus_ops);
+int mt76s_alloc_queues(struct mt76_dev *dev);
+void mt76s_stop_txrx(struct mt76_dev *dev);
+void mt76s_deinit(struct mt76_dev *dev);
+
+struct sk_buff *
+mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
+ int data_len);
+void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
+struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
+ unsigned long expires);
+
+void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr, u32 clear, u32 set);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7603/Kconfig
new file mode 100644
index 000000000..6a0080f1d
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config MT7603E
+ tristate "MediaTek MT7603E (PCIe) and MT76x8 WLAN support"
+ select MT76_CORE
+ depends on MAC80211
+ depends on PCI
+ help
+ This adds support for MT7603E wireless PCIe devices and the WLAN core
+ on MT7628/MT7688 SoC devices. This family supports IEEE 802.11n 2x2
+	  up to a 300 Mbps PHY rate.
+
+ To compile this driver as a module, choose M here.
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/Makefile b/drivers/net/wireless/mediatek/mt76/mt7603/Makefile
new file mode 100644
index 000000000..6878e305c
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_MT7603E) += mt7603e.o
+
+mt7603e-y := \
+ pci.o soc.o main.o init.o mcu.o \
+ core.o dma.o mac.o eeprom.o \
+ beacon.o debugfs.o
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
new file mode 100644
index 000000000..d728c5e43
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: ISC
+
+#include "mt7603.h"
+
+struct beacon_bc_data {
+ struct mt7603_dev *dev;
+ struct sk_buff_head q;
+ struct sk_buff *tail[MT7603_MAX_INTERFACES];
+ int count[MT7603_MAX_INTERFACES];
+};
+
+static void
+mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct mt7603_dev *dev = (struct mt7603_dev *)priv;
+ struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
+ struct sk_buff *skb = NULL;
+
+ if (!(dev->mt76.beacon_mask & BIT(mvif->idx)))
+ return;
+
+ skb = ieee80211_beacon_get(mt76_hw(dev), vif);
+ if (!skb)
+ return;
+
+ mt76_tx_queue_skb(dev, MT_TXQ_BEACON, skb, &mvif->sta.wcid, NULL);
+
+ spin_lock_bh(&dev->ps_lock);
+ mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY |
+ FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, mvif->sta.wcid.idx) |
+ FIELD_PREP(MT_DMA_FQCR0_TARGET_QID,
+ dev->mt76.q_tx[MT_TXQ_CAB]->hw_idx) |
+ FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, 3) |
+ FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, 8));
+
+ if (!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY, 0, 5000))
+ dev->beacon_check = MT7603_WATCHDOG_TIMEOUT;
+
+ spin_unlock_bh(&dev->ps_lock);
+}
+
+static void
+mt7603_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct beacon_bc_data *data = priv;
+ struct mt7603_dev *dev = data->dev;
+ struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
+ struct ieee80211_tx_info *info;
+ struct sk_buff *skb;
+
+ if (!(dev->mt76.beacon_mask & BIT(mvif->idx)))
+ return;
+
+ skb = ieee80211_get_buffered_bc(mt76_hw(dev), vif);
+ if (!skb)
+ return;
+
+ info = IEEE80211_SKB_CB(skb);
+ info->control.vif = vif;
+ info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
+ mt76_skb_set_moredata(skb, true);
+ __skb_queue_tail(&data->q, skb);
+ data->tail[mvif->idx] = skb;
+ data->count[mvif->idx]++;
+}
+
+void mt7603_pre_tbtt_tasklet(unsigned long arg)
+{
+ struct mt7603_dev *dev = (struct mt7603_dev *)arg;
+ struct mt76_queue *q;
+ struct beacon_bc_data data = {};
+ struct sk_buff *skb;
+ int i, nframes;
+
+ if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ return;
+
+ data.dev = dev;
+ __skb_queue_head_init(&data.q);
+
+ q = dev->mt76.q_tx[MT_TXQ_BEACON];
+ spin_lock_bh(&q->lock);
+ ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7603_update_beacon_iter, dev);
+ mt76_queue_kick(dev, q);
+ spin_unlock_bh(&q->lock);
+
+ /* Flush all previous CAB queue packets */
+ mt76_wr(dev, MT_WF_ARB_CAB_FLUSH, GENMASK(30, 16) | BIT(0));
+
+ mt76_queue_tx_cleanup(dev, MT_TXQ_CAB, false);
+
+ mt76_csa_check(&dev->mt76);
+ if (dev->mt76.csa_complete)
+ goto out;
+
+ q = dev->mt76.q_tx[MT_TXQ_CAB];
+ do {
+ nframes = skb_queue_len(&data.q);
+ ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7603_add_buffered_bc, &data);
+ } while (nframes != skb_queue_len(&data.q) &&
+ skb_queue_len(&data.q) < 8);
+
+ if (skb_queue_empty(&data.q))
+ goto out;
+
+ for (i = 0; i < ARRAY_SIZE(data.tail); i++) {
+ if (!data.tail[i])
+ continue;
+
+ mt76_skb_set_moredata(data.tail[i], false);
+ }
+
+ spin_lock_bh(&q->lock);
+ while ((skb = __skb_dequeue(&data.q)) != NULL) {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
+
+ mt76_tx_queue_skb(dev, MT_TXQ_CAB, skb, &mvif->sta.wcid, NULL);
+ }
+ mt76_queue_kick(dev, q);
+ spin_unlock_bh(&q->lock);
+
+ for (i = 0; i < ARRAY_SIZE(data.count); i++)
+ mt76_wr(dev, MT_WF_ARB_CAB_COUNT_B0_REG(i),
+ data.count[i] << MT_WF_ARB_CAB_COUNT_B0_SHIFT(i));
+
+ mt76_wr(dev, MT_WF_ARB_CAB_START,
+ MT_WF_ARB_CAB_START_BSSn(0) |
+ (MT_WF_ARB_CAB_START_BSS0n(1) *
+ ((1 << (MT7603_MAX_INTERFACES - 1)) - 1)));
+
+out:
+ mt76_queue_tx_cleanup(dev, MT_TXQ_BEACON, false);
+ if (dev->mt76.q_tx[MT_TXQ_BEACON]->queued >
+ hweight8(dev->mt76.beacon_mask))
+ dev->beacon_check++;
+}
+
+void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval)
+{
+ u32 pre_tbtt = MT7603_PRE_TBTT_TIME / 64;
+
+ if (idx >= 0) {
+ if (intval)
+ dev->mt76.beacon_mask |= BIT(idx);
+ else
+ dev->mt76.beacon_mask &= ~BIT(idx);
+ }
+
+ if (!dev->mt76.beacon_mask || (!intval && idx < 0)) {
+ mt7603_irq_disable(dev, MT_INT_MAC_IRQ3);
+ mt76_clear(dev, MT_ARB_SCR, MT_ARB_SCR_BCNQ_OPMODE_MASK);
+ mt76_wr(dev, MT_HW_INT_MASK(3), 0);
+ return;
+ }
+
+ dev->mt76.beacon_int = intval;
+ mt76_wr(dev, MT_TBTT,
+ FIELD_PREP(MT_TBTT_PERIOD, intval) | MT_TBTT_CAL_ENABLE);
+
+ mt76_wr(dev, MT_TBTT_TIMER_CFG, 0x99); /* start timer */
+
+ mt76_rmw_field(dev, MT_ARB_SCR, MT_ARB_SCR_BCNQ_OPMODE_MASK,
+ MT_BCNQ_OPMODE_AP);
+ mt76_clear(dev, MT_ARB_SCR, MT_ARB_SCR_TBTT_BCN_PRIO);
+ mt76_set(dev, MT_ARB_SCR, MT_ARB_SCR_TBTT_BCAST_PRIO);
+
+ mt76_wr(dev, MT_PRE_TBTT, pre_tbtt);
+
+ mt76_set(dev, MT_HW_INT_MASK(3),
+ MT_HW_INT3_PRE_TBTT0 | MT_HW_INT3_TBTT0);
+
+ mt76_set(dev, MT_WF_ARB_BCN_START,
+ MT_WF_ARB_BCN_START_BSSn(0) |
+ ((dev->mt76.beacon_mask >> 1) *
+ MT_WF_ARB_BCN_START_BSS0n(1)));
+ mt7603_irq_enable(dev, MT_INT_MAC_IRQ3);
+
+ if (dev->mt76.beacon_mask & ~BIT(0))
+ mt76_set(dev, MT_LPON_SBTOR(0), MT_LPON_SBTOR_SUB_BSS_EN);
+ else
+ mt76_clear(dev, MT_LPON_SBTOR(0), MT_LPON_SBTOR_SUB_BSS_EN);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/core.c b/drivers/net/wireless/mediatek/mt76/mt7603/core.c
new file mode 100644
index 000000000..915b83491
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/core.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: ISC
+
+#include "mt7603.h"
+#include "../trace.h"
+
+void mt7603_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
+{
+ struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+
+ mt7603_irq_enable(dev, MT_INT_RX_DONE(q));
+}
+
+irqreturn_t mt7603_irq_handler(int irq, void *dev_instance)
+{
+ struct mt7603_dev *dev = dev_instance;
+ u32 intr;
+
+ intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+ mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
+
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
+ return IRQ_NONE;
+
+ trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
+
+ intr &= dev->mt76.mmio.irqmask;
+
+ if (intr & MT_INT_MAC_IRQ3) {
+ u32 hwintr = mt76_rr(dev, MT_HW_INT_STATUS(3));
+
+ mt76_wr(dev, MT_HW_INT_STATUS(3), hwintr);
+ if (hwintr & MT_HW_INT3_PRE_TBTT0)
+ tasklet_schedule(&dev->mt76.pre_tbtt_tasklet);
+
+ if ((hwintr & MT_HW_INT3_TBTT0) && dev->mt76.csa_complete)
+ mt76_csa_finish(&dev->mt76);
+ }
+
+ if (intr & MT_INT_TX_DONE_ALL) {
+ mt7603_irq_disable(dev, MT_INT_TX_DONE_ALL);
+ napi_schedule(&dev->mt76.tx_napi);
+ }
+
+ if (intr & MT_INT_RX_DONE(0)) {
+ dev->rx_pse_check = 0;
+ mt7603_irq_disable(dev, MT_INT_RX_DONE(0));
+ napi_schedule(&dev->mt76.napi[0]);
+ }
+
+ if (intr & MT_INT_RX_DONE(1)) {
+ dev->rx_pse_check = 0;
+ mt7603_irq_disable(dev, MT_INT_RX_DONE(1));
+ napi_schedule(&dev->mt76.napi[1]);
+ }
+
+ return IRQ_HANDLED;
+}
+
+u32 mt7603_reg_map(struct mt7603_dev *dev, u32 addr)
+{
+ u32 base = addr & MT_MCU_PCIE_REMAP_2_BASE;
+ u32 offset = addr & MT_MCU_PCIE_REMAP_2_OFFSET;
+
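+	/* write the window base to the remap register, then access the target
+	 * register through the fixed PCIe remap region using the in-window offset
+	 */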
+ dev->bus_ops->wr(&dev->mt76, MT_MCU_PCIE_REMAP_2, base);
+
+ return MT_PCIE_REMAP_BASE_2 + offset;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c
new file mode 100644
index 000000000..f52165dff
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: ISC
+
+#include "mt7603.h"
+
+static int
+mt7603_reset_read(struct seq_file *s, void *data)
+{
+ struct mt7603_dev *dev = dev_get_drvdata(s->private);
+ static const char * const reset_cause_str[] = {
+ [RESET_CAUSE_TX_HANG] = "TX hang",
+ [RESET_CAUSE_TX_BUSY] = "TX DMA busy stuck",
+ [RESET_CAUSE_RX_BUSY] = "RX DMA busy stuck",
+ [RESET_CAUSE_RX_PSE_BUSY] = "RX PSE busy stuck",
+ [RESET_CAUSE_BEACON_STUCK] = "Beacon stuck",
+ [RESET_CAUSE_MCU_HANG] = "MCU hang",
+ [RESET_CAUSE_RESET_FAILED] = "PSE reset failed",
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(reset_cause_str); i++) {
+ if (!reset_cause_str[i])
+ continue;
+
+ seq_printf(s, "%20s: %u\n", reset_cause_str[i],
+ dev->reset_cause[i]);
+ }
+
+ return 0;
+}
+
+static int
+mt7603_radio_read(struct seq_file *s, void *data)
+{
+ struct mt7603_dev *dev = dev_get_drvdata(s->private);
+
+ seq_printf(s, "Sensitivity: %d\n", dev->sensitivity);
+ seq_printf(s, "False CCA: ofdm=%d cck=%d\n",
+ dev->false_cca_ofdm, dev->false_cca_cck);
+
+ return 0;
+}
+
+static int
+mt7603_edcca_set(void *data, u64 val)
+{
+ struct mt7603_dev *dev = data;
+
+ mutex_lock(&dev->mt76.mutex);
+
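+	/* energy-detect CCA monitoring is only armed when enabled here and the
+	 * current regulatory region is ETSI
+	 */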
+ dev->ed_monitor_enabled = !!val;
+ dev->ed_monitor = dev->ed_monitor_enabled &&
+ dev->mt76.region == NL80211_DFS_ETSI;
+ mt7603_init_edcca(dev);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ return 0;
+}
+
+static int
+mt7603_edcca_get(void *data, u64 *val)
+{
+ struct mt7603_dev *dev = data;
+
+ *val = dev->ed_monitor_enabled;
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_edcca, mt7603_edcca_get,
+ mt7603_edcca_set, "%lld\n");
+
+static int
+mt7603_ampdu_stat_show(struct seq_file *file, void *data)
+{
+ struct mt7603_dev *dev = file->private;
+ int bound[3], i, range;
+
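+	/* decode the A-MPDU length range boundaries from MT_AGG_ASRCR and print
+	 * the matching aggregation counters
+	 */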
+ range = mt76_rr(dev, MT_AGG_ASRCR);
+ for (i = 0; i < ARRAY_SIZE(bound); i++)
+ bound[i] = MT_AGG_ASRCR_RANGE(range, i) + 1;
+
+ seq_printf(file, "Length: %8d | ", bound[0]);
+ for (i = 0; i < ARRAY_SIZE(bound) - 1; i++)
+ seq_printf(file, "%3d -%3d | ",
+ bound[i], bound[i + 1]);
+ seq_puts(file, "\nCount: ");
+ for (i = 0; i < ARRAY_SIZE(bound); i++)
+ seq_printf(file, "%8d | ", dev->mt76.aggr_stats[i]);
+ seq_puts(file, "\n");
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mt7603_ampdu_stat);
+
+void mt7603_init_debugfs(struct mt7603_dev *dev)
+{
+ struct dentry *dir;
+
+ dir = mt76_register_debugfs(&dev->mt76);
+ if (!dir)
+ return;
+
+ debugfs_create_file("ampdu_stat", 0400, dir, dev,
+ &mt7603_ampdu_stat_fops);
+ debugfs_create_devm_seqfile(dev->mt76.dev, "xmit-queues", dir,
+ mt76_queues_read);
+ debugfs_create_file("edcca", 0600, dir, dev, &fops_edcca);
+ debugfs_create_u32("reset_test", 0600, dir, &dev->reset_test);
+ debugfs_create_devm_seqfile(dev->mt76.dev, "reset", dir,
+ mt7603_reset_read);
+ debugfs_create_devm_seqfile(dev->mt76.dev, "radio", dir,
+ mt7603_radio_read);
+ debugfs_create_u8("sensitivity_limit", 0600, dir,
+ &dev->sensitivity_limit);
+ debugfs_create_bool("dynamic_sensitivity", 0600, dir,
+ &dev->dynamic_sensitivity);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
new file mode 100644
index 000000000..d60d00f6f
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: ISC
+
+#include "mt7603.h"
+#include "mac.h"
+#include "../dma.h"
+
+static int
+mt7603_init_tx_queue(struct mt7603_dev *dev, int qid, int idx, int n_desc)
+{
+ struct mt76_queue *hwq;
+ int err;
+
+ hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
+ if (!hwq)
+ return -ENOMEM;
+
+ err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);
+ if (err < 0)
+ return err;
+
+ dev->mt76.q_tx[qid] = hwq;
+
+ mt7603_irq_enable(dev, MT_INT_TX_DONE(idx));
+
+ return 0;
+}
+
+static void
+mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
+{
+ static const u8 tid_to_ac[8] = {
+ IEEE80211_AC_BE,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BE,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VO,
+ IEEE80211_AC_VO
+ };
+ __le32 *txd = (__le32 *)skb->data;
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_sta *sta;
+ struct mt7603_sta *msta;
+ struct mt76_wcid *wcid;
+ void *priv;
+ int idx;
+ u32 val;
+ u8 tid = 0;
+
+ if (skb->len < MT_TXD_SIZE + sizeof(struct ieee80211_hdr))
+ goto free;
+
+ val = le32_to_cpu(txd[1]);
+ idx = FIELD_GET(MT_TXD1_WLAN_IDX, val);
+ skb->priority = FIELD_GET(MT_TXD1_TID, val);
+
+ if (idx >= MT7603_WTBL_STA - 1)
+ goto free;
+
+ wcid = rcu_dereference(dev->mt76.wcid[idx]);
+ if (!wcid)
+ goto free;
+
+ priv = msta = container_of(wcid, struct mt7603_sta, wcid);
+ val = le32_to_cpu(txd[0]);
+ val &= ~(MT_TXD0_P_IDX | MT_TXD0_Q_IDX);
+ val |= FIELD_PREP(MT_TXD0_Q_IDX, MT_TX_HW_QUEUE_MGMT);
+ txd[0] = cpu_to_le32(val);
+
+ sta = container_of(priv, struct ieee80211_sta, drv_priv);
+ hdr = (struct ieee80211_hdr *)&skb->data[MT_TXD_SIZE];
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ tid = *ieee80211_get_qos_ctl(hdr) &
+ IEEE80211_QOS_CTL_TAG1D_MASK;
+ skb_set_queue_mapping(skb, tid_to_ac[tid]);
+ ieee80211_sta_set_buffered(sta, tid, true);
+
+ spin_lock_bh(&dev->ps_lock);
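+	/* buffer the frame on the per-station powersave queue, dropping the
+	 * oldest entry once the queue holds 64 frames
+	 */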
+ __skb_queue_tail(&msta->psq, skb);
+ if (skb_queue_len(&msta->psq) >= 64) {
+ skb = __skb_dequeue(&msta->psq);
+ dev_kfree_skb(skb);
+ }
+ spin_unlock_bh(&dev->ps_lock);
+ return;
+
+free:
+ dev_kfree_skb(skb);
+}
+
+void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ struct sk_buff *skb)
+{
+ struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+ __le32 *rxd = (__le32 *)skb->data;
+ __le32 *end = (__le32 *)&skb->data[skb->len];
+ enum rx_pkt_type type;
+
+ type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
+
+ if (q == MT_RXQ_MCU) {
+ if (type == PKT_TYPE_RX_EVENT)
+ mt76_mcu_rx_event(&dev->mt76, skb);
+ else
+ mt7603_rx_loopback_skb(dev, skb);
+ return;
+ }
+
+ switch (type) {
+ case PKT_TYPE_TXS:
+ for (rxd++; rxd + 5 <= end; rxd += 5)
+ mt7603_mac_add_txs(dev, rxd);
+ dev_kfree_skb(skb);
+ break;
+ case PKT_TYPE_RX_EVENT:
+ mt76_mcu_rx_event(&dev->mt76, skb);
+ return;
+ case PKT_TYPE_NORMAL:
+ if (mt7603_mac_fill_rx(dev, skb) == 0) {
+ mt76_rx(&dev->mt76, q, skb);
+ return;
+ }
+ fallthrough;
+ default:
+ dev_kfree_skb(skb);
+ break;
+ }
+}
+
+static int
+mt7603_init_rx_queue(struct mt7603_dev *dev, struct mt76_queue *q,
+ int idx, int n_desc, int bufsize)
+{
+ int err;
+
+ err = mt76_queue_alloc(dev, q, idx, n_desc, bufsize,
+ MT_RX_RING_BASE);
+ if (err < 0)
+ return err;
+
+ mt7603_irq_enable(dev, MT_INT_RX_DONE(idx));
+
+ return 0;
+}
+
+static int mt7603_poll_tx(struct napi_struct *napi, int budget)
+{
+ struct mt7603_dev *dev;
+ int i;
+
+ dev = container_of(napi, struct mt7603_dev, mt76.tx_napi);
+ dev->tx_dma_check = 0;
+
+ for (i = MT_TXQ_MCU; i >= 0; i--)
+ mt76_queue_tx_cleanup(dev, i, false);
+
+ if (napi_complete_done(napi, 0))
+ mt7603_irq_enable(dev, MT_INT_TX_DONE_ALL);
+
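+	/* a second cleanup pass after re-enabling the interrupt catches
+	 * completions that may have raced with it
+	 */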
+ for (i = MT_TXQ_MCU; i >= 0; i--)
+ mt76_queue_tx_cleanup(dev, i, false);
+
+ mt7603_mac_sta_poll(dev);
+
+ mt76_worker_schedule(&dev->mt76.tx_worker);
+
+ return 0;
+}
+
+int mt7603_dma_init(struct mt7603_dev *dev)
+{
+ static const u8 wmm_queue_map[] = {
+ [IEEE80211_AC_BK] = 0,
+ [IEEE80211_AC_BE] = 1,
+ [IEEE80211_AC_VI] = 2,
+ [IEEE80211_AC_VO] = 3,
+ };
+ int ret;
+ int i;
+
+ mt76_dma_attach(&dev->mt76);
+
+ mt76_clear(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_EN |
+ MT_WPDMA_GLO_CFG_RX_DMA_EN |
+ MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
+ MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
+
+ mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
+ mt7603_pse_client_reset(dev);
+
+ for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
+ ret = mt7603_init_tx_queue(dev, i, wmm_queue_map[i],
+ MT7603_TX_RING_SIZE);
+ if (ret)
+ return ret;
+ }
+
+ ret = mt7603_init_tx_queue(dev, MT_TXQ_PSD,
+ MT_TX_HW_QUEUE_MGMT, MT7603_PSD_RING_SIZE);
+ if (ret)
+ return ret;
+
+ ret = mt7603_init_tx_queue(dev, MT_TXQ_MCU,
+ MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE);
+ if (ret)
+ return ret;
+
+ ret = mt7603_init_tx_queue(dev, MT_TXQ_BEACON,
+ MT_TX_HW_QUEUE_BCN, MT_MCU_RING_SIZE);
+ if (ret)
+ return ret;
+
+ ret = mt7603_init_tx_queue(dev, MT_TXQ_CAB,
+ MT_TX_HW_QUEUE_BMC, MT_MCU_RING_SIZE);
+ if (ret)
+ return ret;
+
+ ret = mt7603_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
+ MT7603_MCU_RX_RING_SIZE, MT_RX_BUF_SIZE);
+ if (ret)
+ return ret;
+
+ ret = mt7603_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], 0,
+ MT7603_RX_RING_SIZE, MT_RX_BUF_SIZE);
+ if (ret)
+ return ret;
+
+ mt76_wr(dev, MT_DELAY_INT_CFG, 0);
+ ret = mt76_init_queues(dev);
+ if (ret)
+ return ret;
+
+ netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi,
+ mt7603_poll_tx, NAPI_POLL_WEIGHT);
+ napi_enable(&dev->mt76.tx_napi);
+
+ return 0;
+}
+
+void mt7603_dma_cleanup(struct mt7603_dev *dev)
+{
+ mt76_clear(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_EN |
+ MT_WPDMA_GLO_CFG_RX_DMA_EN |
+ MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
+
+ mt76_dma_cleanup(&dev->mt76);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c
new file mode 100644
index 000000000..a6df733ac
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: ISC
+
+#include <linux/of.h>
+#include "mt7603.h"
+#include "eeprom.h"
+
+static int
+mt7603_efuse_read(struct mt7603_dev *dev, u32 base, u16 addr, u8 *data)
+{
+ u32 val;
+ int i;
+
+ val = mt76_rr(dev, base + MT_EFUSE_CTRL);
+ val &= ~(MT_EFUSE_CTRL_AIN |
+ MT_EFUSE_CTRL_MODE);
+ val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf);
+ val |= MT_EFUSE_CTRL_KICK;
+ mt76_wr(dev, base + MT_EFUSE_CTRL, val);
+
+ if (!mt76_poll(dev, base + MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
+ return -ETIMEDOUT;
+
+ udelay(2);
+
+ val = mt76_rr(dev, base + MT_EFUSE_CTRL);
+ if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT ||
+ WARN_ON_ONCE(!(val & MT_EFUSE_CTRL_VALID))) {
+ memset(data, 0xff, 16);
+ return 0;
+ }
+
+ for (i = 0; i < 4; i++) {
+ val = mt76_rr(dev, base + MT_EFUSE_RDATA(i));
+ put_unaligned_le32(val, data + 4 * i);
+ }
+
+ return 0;
+}
+
+static int
+mt7603_efuse_init(struct mt7603_dev *dev)
+{
+ u32 base = mt7603_reg_map(dev, MT_EFUSE_BASE);
+ int len = MT7603_EEPROM_SIZE;
+ void *buf;
+ int ret, i;
+
+ if (mt76_rr(dev, base + MT_EFUSE_BASE_CTRL) & MT_EFUSE_BASE_CTRL_EMPTY)
+ return 0;
+
+ dev->mt76.otp.data = devm_kzalloc(dev->mt76.dev, len, GFP_KERNEL);
+ dev->mt76.otp.size = len;
+ if (!dev->mt76.otp.data)
+ return -ENOMEM;
+
+ buf = dev->mt76.otp.data;
+ for (i = 0; i + 16 <= len; i += 16) {
+ ret = mt7603_efuse_read(dev, base, i, buf + i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static bool
+mt7603_has_cal_free_data(struct mt7603_dev *dev, u8 *efuse)
+{
+ if (!efuse[MT_EE_TEMP_SENSOR_CAL])
+ return false;
+
+ if (get_unaligned_le16(efuse + MT_EE_TX_POWER_0_START_2G) == 0)
+ return false;
+
+ if (get_unaligned_le16(efuse + MT_EE_TX_POWER_1_START_2G) == 0)
+ return false;
+
+ if (!efuse[MT_EE_CP_FT_VERSION])
+ return false;
+
+ if (!efuse[MT_EE_XTAL_FREQ_OFFSET])
+ return false;
+
+ if (!efuse[MT_EE_XTAL_WF_RFCAL])
+ return false;
+
+ return true;
+}
+
+static void
+mt7603_apply_cal_free_data(struct mt7603_dev *dev, u8 *efuse)
+{
+ static const u8 cal_free_bytes[] = {
+ MT_EE_TEMP_SENSOR_CAL,
+ MT_EE_CP_FT_VERSION,
+ MT_EE_XTAL_FREQ_OFFSET,
+ MT_EE_XTAL_WF_RFCAL,
+ /* Skip for MT7628 */
+ MT_EE_TX_POWER_0_START_2G,
+ MT_EE_TX_POWER_0_START_2G + 1,
+ MT_EE_TX_POWER_1_START_2G,
+ MT_EE_TX_POWER_1_START_2G + 1,
+ };
+ struct device_node *np = dev->mt76.dev->of_node;
+ u8 *eeprom = dev->mt76.eeprom.data;
+ int n = ARRAY_SIZE(cal_free_bytes);
+ int i;
+
+ if (!np || !of_property_read_bool(np, "mediatek,eeprom-merge-otp"))
+ return;
+
+ if (!mt7603_has_cal_free_data(dev, efuse))
+ return;
+
+ if (is_mt7628(dev))
+ n -= 4;
+
+ for (i = 0; i < n; i++) {
+ int offset = cal_free_bytes[i];
+
+ eeprom[offset] = efuse[offset];
+ }
+}
+
+static int
+mt7603_eeprom_load(struct mt7603_dev *dev)
+{
+ int ret;
+
+ ret = mt76_eeprom_init(&dev->mt76, MT7603_EEPROM_SIZE);
+ if (ret < 0)
+ return ret;
+
+ return mt7603_efuse_init(dev);
+}
+
+static int mt7603_check_eeprom(struct mt76_dev *dev)
+{
+ u16 val = get_unaligned_le16(dev->eeprom.data);
+
+ switch (val) {
+ case 0x7628:
+ case 0x7603:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static inline bool is_mt7688(struct mt7603_dev *dev)
+{
+ return mt76_rr(dev, MT_EFUSE_BASE + 0x64) & BIT(4);
+}
+
+int mt7603_eeprom_init(struct mt7603_dev *dev)
+{
+ u8 *eeprom;
+ int ret;
+
+ ret = mt7603_eeprom_load(dev);
+ if (ret < 0)
+ return ret;
+
+ if (dev->mt76.otp.data) {
+ if (mt7603_check_eeprom(&dev->mt76) == 0)
+ mt7603_apply_cal_free_data(dev, dev->mt76.otp.data);
+ else
+ memcpy(dev->mt76.eeprom.data, dev->mt76.otp.data,
+ MT7603_EEPROM_SIZE);
+ }
+
+ eeprom = (u8 *)dev->mt76.eeprom.data;
+ dev->mphy.cap.has_2ghz = true;
+ memcpy(dev->mt76.macaddr, eeprom + MT_EE_MAC_ADDR, ETH_ALEN);
+
+ /* Check for 1SS devices */
+ dev->mphy.antenna_mask = 3;
+ if (FIELD_GET(MT_EE_NIC_CONF_0_RX_PATH, eeprom[MT_EE_NIC_CONF_0]) == 1 ||
+ FIELD_GET(MT_EE_NIC_CONF_0_TX_PATH, eeprom[MT_EE_NIC_CONF_0]) == 1 ||
+ is_mt7688(dev))
+ dev->mphy.antenna_mask = 1;
+
+ mt76_eeprom_override(&dev->mt76);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.h
new file mode 100644
index 000000000..4687d6dc0
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: ISC */
+
+#ifndef __MT7603_EEPROM_H
+#define __MT7603_EEPROM_H
+
+#include "mt7603.h"
+
+enum mt7603_eeprom_field {
+ MT_EE_CHIP_ID = 0x000,
+ MT_EE_VERSION = 0x002,
+ MT_EE_MAC_ADDR = 0x004,
+ MT_EE_NIC_CONF_0 = 0x034,
+ MT_EE_NIC_CONF_1 = 0x036,
+ MT_EE_NIC_CONF_2 = 0x042,
+
+ MT_EE_XTAL_TRIM_1 = 0x03a,
+
+ MT_EE_RSSI_OFFSET_2G = 0x046,
+ MT_EE_WIFI_RF_SETTING = 0x048,
+ MT_EE_RSSI_OFFSET_5G = 0x04a,
+
+ MT_EE_TX_POWER_DELTA_BW40 = 0x050,
+ MT_EE_TX_POWER_DELTA_BW80 = 0x052,
+
+ MT_EE_TX_POWER_EXT_PA_5G = 0x054,
+
+ MT_EE_TEMP_SENSOR_CAL = 0x055,
+
+ MT_EE_TX_POWER_0_START_2G = 0x056,
+ MT_EE_TX_POWER_1_START_2G = 0x05c,
+
+ /* used as byte arrays */
+#define MT_TX_POWER_GROUP_SIZE_5G 5
+#define MT_TX_POWER_GROUPS_5G 6
+ MT_EE_TX_POWER_0_START_5G = 0x062,
+
+ MT_EE_TX_POWER_0_GRP3_TX_POWER_DELTA = 0x074,
+ MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE = 0x076,
+
+ MT_EE_TX_POWER_1_START_5G = 0x080,
+
+ MT_EE_TX_POWER_CCK = 0x0a0,
+ MT_EE_TX_POWER_OFDM_2G_6M = 0x0a2,
+ MT_EE_TX_POWER_OFDM_2G_24M = 0x0a4,
+ MT_EE_TX_POWER_OFDM_2G_54M = 0x0a6,
+ MT_EE_TX_POWER_HT_BPSK_QPSK = 0x0a8,
+ MT_EE_TX_POWER_HT_16_64_QAM = 0x0aa,
+ MT_EE_TX_POWER_HT_64_QAM = 0x0ac,
+
+ MT_EE_ELAN_RX_MODE_GAIN = 0x0c0,
+ MT_EE_ELAN_RX_MODE_NF = 0x0c1,
+ MT_EE_ELAN_RX_MODE_P1DB = 0x0c2,
+
+ MT_EE_ELAN_BYPASS_MODE_GAIN = 0x0c3,
+ MT_EE_ELAN_BYPASS_MODE_NF = 0x0c4,
+ MT_EE_ELAN_BYPASS_MODE_P1DB = 0x0c5,
+
+ MT_EE_STEP_NUM_NEG_6_7 = 0x0c6,
+ MT_EE_STEP_NUM_NEG_4_5 = 0x0c8,
+ MT_EE_STEP_NUM_NEG_2_3 = 0x0ca,
+ MT_EE_STEP_NUM_NEG_0_1 = 0x0cc,
+
+ MT_EE_REF_STEP_24G = 0x0ce,
+
+ MT_EE_STEP_NUM_PLUS_1_2 = 0x0d0,
+ MT_EE_STEP_NUM_PLUS_3_4 = 0x0d2,
+ MT_EE_STEP_NUM_PLUS_5_6 = 0x0d4,
+ MT_EE_STEP_NUM_PLUS_7 = 0x0d6,
+
+ MT_EE_CP_FT_VERSION = 0x0f0,
+
+ MT_EE_TX_POWER_TSSI_OFF = 0x0f2,
+
+ MT_EE_XTAL_FREQ_OFFSET = 0x0f4,
+ MT_EE_XTAL_TRIM_2_COMP = 0x0f5,
+ MT_EE_XTAL_TRIM_3_COMP = 0x0f6,
+ MT_EE_XTAL_WF_RFCAL = 0x0f7,
+
+ __MT_EE_MAX
+};
+
+enum mt7603_eeprom_source {
+ MT_EE_SRC_PROM,
+ MT_EE_SRC_EFUSE,
+ MT_EE_SRC_FLASH,
+};
+
+#define MT_EE_NIC_CONF_0_RX_PATH GENMASK(3, 0)
+#define MT_EE_NIC_CONF_0_TX_PATH GENMASK(7, 4)
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
new file mode 100644
index 000000000..c4848fafd
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
@@ -0,0 +1,587 @@
+// SPDX-License-Identifier: ISC
+
+#include <linux/etherdevice.h>
+#include "mt7603.h"
+#include "mac.h"
+#include "eeprom.h"
+
+const struct mt76_driver_ops mt7603_drv_ops = {
+ .txwi_size = MT_TXD_SIZE,
+ .drv_flags = MT_DRV_SW_RX_AIRTIME,
+ .survey_flags = SURVEY_INFO_TIME_TX,
+ .tx_prepare_skb = mt7603_tx_prepare_skb,
+ .tx_complete_skb = mt7603_tx_complete_skb,
+ .rx_skb = mt7603_queue_rx_skb,
+ .rx_poll_complete = mt7603_rx_poll_complete,
+ .sta_ps = mt7603_sta_ps,
+ .sta_add = mt7603_sta_add,
+ .sta_assoc = mt7603_sta_assoc,
+ .sta_remove = mt7603_sta_remove,
+ .update_survey = mt7603_update_channel,
+};
+
+static void
+mt7603_set_tmac_template(struct mt7603_dev *dev)
+{
+ u32 desc[5] = {
+ [1] = FIELD_PREP(MT_TXD3_REM_TX_COUNT, 0xf),
+ [3] = MT_TXD5_SW_POWER_MGMT
+ };
+ u32 addr;
+ int i;
+
+ addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR);
+ addr += MT_CLIENT_TMAC_INFO_TEMPLATE;
+ for (i = 0; i < ARRAY_SIZE(desc); i++)
+ mt76_wr(dev, addr + 4 * i, desc[i]);
+}
+
+static void
+mt7603_dma_sched_init(struct mt7603_dev *dev)
+{
+ int page_size = 128;
+ int page_count;
+ int max_len = 1792;
+ int max_amsdu_pages = 4096 / page_size;
+ int max_mcu_len = 4096;
+ int max_beacon_len = 512 * 4 + max_len;
+ int max_mcast_pages = 4 * max_len / page_size;
+ int reserved_count = 0;
+ int beacon_pages;
+ int mcu_pages;
+ int i;
+
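+	/* partition the PSE buffer (128-byte pages) between per-AC A-MSDU
+	 * traffic, MCU frames, beacons and multicast, and subtract the reserved
+	 * pages from the shared pool threshold
+	 */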
+ page_count = mt76_get_field(dev, MT_PSE_FC_P0,
+ MT_PSE_FC_P0_MAX_QUOTA);
+ beacon_pages = 4 * (max_beacon_len / page_size);
+ mcu_pages = max_mcu_len / page_size;
+
+ mt76_wr(dev, MT_PSE_FRP,
+ FIELD_PREP(MT_PSE_FRP_P0, 7) |
+ FIELD_PREP(MT_PSE_FRP_P1, 6) |
+ FIELD_PREP(MT_PSE_FRP_P2_RQ2, 4));
+
+ mt76_wr(dev, MT_HIGH_PRIORITY_1, 0x55555553);
+ mt76_wr(dev, MT_HIGH_PRIORITY_2, 0x78555555);
+
+ mt76_wr(dev, MT_QUEUE_PRIORITY_1, 0x2b1a096e);
+ mt76_wr(dev, MT_QUEUE_PRIORITY_2, 0x785f4d3c);
+
+ mt76_wr(dev, MT_PRIORITY_MASK, 0xffffffff);
+
+ mt76_wr(dev, MT_SCH_1, page_count | (2 << 28));
+ mt76_wr(dev, MT_SCH_2, max_amsdu_pages);
+
+ for (i = 0; i <= 4; i++)
+ mt76_wr(dev, MT_PAGE_COUNT(i), max_amsdu_pages);
+ reserved_count += 5 * max_amsdu_pages;
+
+ mt76_wr(dev, MT_PAGE_COUNT(5), mcu_pages);
+ reserved_count += mcu_pages;
+
+ mt76_wr(dev, MT_PAGE_COUNT(7), beacon_pages);
+ reserved_count += beacon_pages;
+
+ mt76_wr(dev, MT_PAGE_COUNT(8), max_mcast_pages);
+ reserved_count += max_mcast_pages;
+
+ if (is_mt7603(dev))
+ reserved_count = 0;
+
+ mt76_wr(dev, MT_RSV_MAX_THRESH, page_count - reserved_count);
+
+ if (is_mt7603(dev) && mt76xx_rev(dev) >= MT7603_REV_E2) {
+ mt76_wr(dev, MT_GROUP_THRESH(0),
+ page_count - beacon_pages - mcu_pages);
+ mt76_wr(dev, MT_GROUP_THRESH(1), beacon_pages);
+ mt76_wr(dev, MT_BMAP_0, 0x0080ff5f);
+ mt76_wr(dev, MT_GROUP_THRESH(2), mcu_pages);
+ mt76_wr(dev, MT_BMAP_1, 0x00000020);
+ } else {
+ mt76_wr(dev, MT_GROUP_THRESH(0), page_count);
+ mt76_wr(dev, MT_BMAP_0, 0xffff);
+ }
+
+ mt76_wr(dev, MT_SCH_4, 0);
+
+ for (i = 0; i <= 15; i++)
+ mt76_wr(dev, MT_TXTIME_THRESH(i), 0xfffff);
+
+ mt76_set(dev, MT_SCH_4, BIT(6));
+}
+
+static void
+mt7603_phy_init(struct mt7603_dev *dev)
+{
+ int rx_chains = dev->mphy.antenna_mask;
+ int tx_chains = hweight8(rx_chains) - 1;
+
+ mt76_rmw(dev, MT_WF_RMAC_RMCR,
+ (MT_WF_RMAC_RMCR_SMPS_MODE |
+ MT_WF_RMAC_RMCR_RX_STREAMS),
+ (FIELD_PREP(MT_WF_RMAC_RMCR_SMPS_MODE, 3) |
+ FIELD_PREP(MT_WF_RMAC_RMCR_RX_STREAMS, rx_chains)));
+
+ mt76_rmw_field(dev, MT_TMAC_TCR, MT_TMAC_TCR_TX_STREAMS,
+ tx_chains);
+
+ dev->agc0 = mt76_rr(dev, MT_AGC(0));
+ dev->agc3 = mt76_rr(dev, MT_AGC(3));
+}
+
+static void
+mt7603_mac_init(struct mt7603_dev *dev)
+{
+ u8 bc_addr[ETH_ALEN];
+ u32 addr;
+ int i;
+
+ mt76_wr(dev, MT_AGG_BA_SIZE_LIMIT_0,
+ (MT_AGG_SIZE_LIMIT(0) << 0 * MT_AGG_BA_SIZE_LIMIT_SHIFT) |
+ (MT_AGG_SIZE_LIMIT(1) << 1 * MT_AGG_BA_SIZE_LIMIT_SHIFT) |
+ (MT_AGG_SIZE_LIMIT(2) << 2 * MT_AGG_BA_SIZE_LIMIT_SHIFT) |
+ (MT_AGG_SIZE_LIMIT(3) << 3 * MT_AGG_BA_SIZE_LIMIT_SHIFT));
+
+ mt76_wr(dev, MT_AGG_BA_SIZE_LIMIT_1,
+ (MT_AGG_SIZE_LIMIT(4) << 0 * MT_AGG_BA_SIZE_LIMIT_SHIFT) |
+ (MT_AGG_SIZE_LIMIT(5) << 1 * MT_AGG_BA_SIZE_LIMIT_SHIFT) |
+ (MT_AGG_SIZE_LIMIT(6) << 2 * MT_AGG_BA_SIZE_LIMIT_SHIFT) |
+ (MT_AGG_SIZE_LIMIT(7) << 3 * MT_AGG_BA_SIZE_LIMIT_SHIFT));
+
+ mt76_wr(dev, MT_AGG_LIMIT,
+ FIELD_PREP(MT_AGG_LIMIT_AC(0), 24) |
+ FIELD_PREP(MT_AGG_LIMIT_AC(1), 24) |
+ FIELD_PREP(MT_AGG_LIMIT_AC(2), 24) |
+ FIELD_PREP(MT_AGG_LIMIT_AC(3), 24));
+
+ mt76_wr(dev, MT_AGG_LIMIT_1,
+ FIELD_PREP(MT_AGG_LIMIT_AC(0), 24) |
+ FIELD_PREP(MT_AGG_LIMIT_AC(1), 24) |
+ FIELD_PREP(MT_AGG_LIMIT_AC(2), 24) |
+ FIELD_PREP(MT_AGG_LIMIT_AC(3), 24));
+
+ mt76_wr(dev, MT_AGG_CONTROL,
+ FIELD_PREP(MT_AGG_CONTROL_BAR_RATE, 0x4b) |
+ FIELD_PREP(MT_AGG_CONTROL_CFEND_RATE, 0x69) |
+ MT_AGG_CONTROL_NO_BA_AR_RULE);
+
+ mt76_wr(dev, MT_AGG_RETRY_CONTROL,
+ FIELD_PREP(MT_AGG_RETRY_CONTROL_BAR_LIMIT, 1) |
+ FIELD_PREP(MT_AGG_RETRY_CONTROL_RTS_LIMIT, 15));
+
+ mt76_wr(dev, MT_DMA_DCR0, MT_DMA_DCR0_RX_VEC_DROP |
+ FIELD_PREP(MT_DMA_DCR0_MAX_RX_LEN, 4096));
+
+ mt76_rmw(dev, MT_DMA_VCFR0, BIT(0), BIT(13));
+ mt76_rmw(dev, MT_DMA_TMCFR0, BIT(0) | BIT(1), BIT(13));
+
+ mt76_clear(dev, MT_WF_RMAC_TMR_PA, BIT(31));
+
+ mt76_set(dev, MT_WF_RMACDR, MT_WF_RMACDR_MAXLEN_20BIT);
+ mt76_rmw(dev, MT_WF_RMAC_MAXMINLEN, 0xffffff, 0x19000);
+
+ mt76_wr(dev, MT_WF_RFCR1, 0);
+
+ mt76_set(dev, MT_TMAC_TCR, MT_TMAC_TCR_RX_RIFS_MODE);
+
+ mt7603_set_tmac_template(dev);
+
+ /* Enable RX group to HIF */
+ addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR);
+ mt76_set(dev, addr + MT_CLIENT_RXINF, MT_CLIENT_RXINF_RXSH_GROUPS);
+
+ /* Enable RX group to MCU */
+ mt76_set(dev, MT_DMA_DCR1, GENMASK(13, 11));
+
+ mt76_rmw_field(dev, MT_AGG_PCR_RTS, MT_AGG_PCR_RTS_PKT_THR, 3);
+ mt76_set(dev, MT_TMAC_PCR, MT_TMAC_PCR_SPE_EN);
+
+ /* include preamble detection in CCA trigger signal */
+ mt76_rmw_field(dev, MT_TXREQ, MT_TXREQ_CCA_SRC_SEL, 2);
+
+ mt76_wr(dev, MT_RXREQ, 4);
+
+ /* Configure all rx packets to HIF */
+ mt76_wr(dev, MT_DMA_RCFR0, 0xc0000000);
+
+ /* Configure MCU txs selection with aggregation */
+ mt76_wr(dev, MT_DMA_TCFR0,
+ FIELD_PREP(MT_DMA_TCFR_TXS_AGGR_TIMEOUT, 1) | /* 32 us */
+ MT_DMA_TCFR_TXS_AGGR_COUNT);
+
+ /* Configure HIF txs selection with aggregation */
+ mt76_wr(dev, MT_DMA_TCFR1,
+ FIELD_PREP(MT_DMA_TCFR_TXS_AGGR_TIMEOUT, 1) | /* 32 us */
+ MT_DMA_TCFR_TXS_AGGR_COUNT | /* Maximum count */
+ MT_DMA_TCFR_TXS_BIT_MAP);
+
+ mt76_wr(dev, MT_MCU_PCIE_REMAP_1, MT_PSE_WTBL_2_PHYS_ADDR);
+
+ for (i = 0; i < MT7603_WTBL_SIZE; i++)
+ mt7603_wtbl_clear(dev, i);
+
+ eth_broadcast_addr(bc_addr);
+ mt7603_wtbl_init(dev, MT7603_WTBL_RESERVED, -1, bc_addr);
+ dev->global_sta.wcid.idx = MT7603_WTBL_RESERVED;
+ rcu_assign_pointer(dev->mt76.wcid[MT7603_WTBL_RESERVED],
+ &dev->global_sta.wcid);
+
+ mt76_rmw_field(dev, MT_LPON_BTEIR, MT_LPON_BTEIR_MBSS_MODE, 2);
+ mt76_rmw_field(dev, MT_WF_RMACDR, MT_WF_RMACDR_MBSSID_MASK, 2);
+
+ mt76_wr(dev, MT_AGG_ARUCR,
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), 7) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(1), 2) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(2), 2) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(3), 2) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(4), 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(5), 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(6), 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(7), 1));
+
+ mt76_wr(dev, MT_AGG_ARDCR,
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), MT7603_RATE_RETRY - 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(1), MT7603_RATE_RETRY - 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(2), MT7603_RATE_RETRY - 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(3), MT7603_RATE_RETRY - 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(4), MT7603_RATE_RETRY - 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(5), MT7603_RATE_RETRY - 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(6), MT7603_RATE_RETRY - 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(7), MT7603_RATE_RETRY - 1));
+
+ mt76_wr(dev, MT_AGG_ARCR,
+ (FIELD_PREP(MT_AGG_ARCR_RTS_RATE_THR, 2) |
+ MT_AGG_ARCR_RATE_DOWN_RATIO_EN |
+ FIELD_PREP(MT_AGG_ARCR_RATE_DOWN_RATIO, 1) |
+ FIELD_PREP(MT_AGG_ARCR_RATE_UP_EXTRA_TH, 4)));
+
+ mt76_set(dev, MT_WTBL_RMVTCR, MT_WTBL_RMVTCR_RX_MV_MODE);
+
+ mt76_clear(dev, MT_SEC_SCR, MT_SEC_SCR_MASK_ORDER);
+ mt76_clear(dev, MT_SEC_SCR, BIT(18));
+
+ /* Set secondary beacon time offsets */
+ for (i = 0; i <= 4; i++)
+ mt76_rmw_field(dev, MT_LPON_SBTOR(i), MT_LPON_SBTOR_TIME_OFFSET,
+ (i + 1) * (20 + 4096));
+}
+
+static int
+mt7603_init_hardware(struct mt7603_dev *dev)
+{
+ int i, ret;
+
+ mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
+
+ ret = mt7603_eeprom_init(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = mt7603_dma_init(dev);
+ if (ret)
+ return ret;
+
+ mt76_wr(dev, MT_WPDMA_GLO_CFG, 0x52000850);
+ mt7603_mac_dma_start(dev);
+ dev->rxfilter = mt76_rr(dev, MT_WF_RFCR);
+ set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
+
+ for (i = 0; i < MT7603_WTBL_SIZE; i++) {
+ mt76_wr(dev, MT_PSE_RTA, MT_PSE_RTA_BUSY | MT_PSE_RTA_WRITE |
+ FIELD_PREP(MT_PSE_RTA_TAG_ID, i));
+ mt76_poll(dev, MT_PSE_RTA, MT_PSE_RTA_BUSY, 0, 5000);
+ }
+
+ ret = mt7603_mcu_init(dev);
+ if (ret)
+ return ret;
+
+ mt7603_dma_sched_init(dev);
+ mt7603_mcu_set_eeprom(dev);
+ mt7603_phy_init(dev);
+ mt7603_mac_init(dev);
+
+ return 0;
+}
+
+#define CCK_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE, \
+ .hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx), \
+ .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + _idx), \
+}
+
+#define OFDM_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
+ .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
+}
+
+static struct ieee80211_rate mt7603_rates[] = {
+ CCK_RATE(0, 10),
+ CCK_RATE(1, 20),
+ CCK_RATE(2, 55),
+ CCK_RATE(3, 110),
+ OFDM_RATE(11, 60),
+ OFDM_RATE(15, 90),
+ OFDM_RATE(10, 120),
+ OFDM_RATE(14, 180),
+ OFDM_RATE(9, 240),
+ OFDM_RATE(13, 360),
+ OFDM_RATE(8, 480),
+ OFDM_RATE(12, 540),
+};
+
+static const struct ieee80211_iface_limit if_limits[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_ADHOC)
+ }, {
+ .max = MT7603_MAX_INTERFACES,
+ .types = BIT(NL80211_IFTYPE_STATION) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_AP)
+ },
+};
+
+static const struct ieee80211_iface_combination if_comb[] = {
+ {
+ .limits = if_limits,
+ .n_limits = ARRAY_SIZE(if_limits),
+ .max_interfaces = 4,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+ }
+};
+
+static void mt7603_led_set_config(struct mt76_dev *mt76, u8 delay_on,
+ u8 delay_off)
+{
+ struct mt7603_dev *dev = container_of(mt76, struct mt7603_dev,
+ mt76);
+ u32 val, addr;
+
+ val = FIELD_PREP(MT_LED_STATUS_DURATION, 0xffff) |
+ FIELD_PREP(MT_LED_STATUS_OFF, delay_off) |
+ FIELD_PREP(MT_LED_STATUS_ON, delay_on);
+
+ addr = mt7603_reg_map(dev, MT_LED_STATUS_0(mt76->led_pin));
+ mt76_wr(dev, addr, val);
+ addr = mt7603_reg_map(dev, MT_LED_STATUS_1(mt76->led_pin));
+ mt76_wr(dev, addr, val);
+
+ val = MT_LED_CTRL_REPLAY(mt76->led_pin) |
+ MT_LED_CTRL_KICK(mt76->led_pin);
+ if (mt76->led_al)
+ val |= MT_LED_CTRL_POLARITY(mt76->led_pin);
+ addr = mt7603_reg_map(dev, MT_LED_CTRL);
+ mt76_wr(dev, addr, val);
+}
+
+static int mt7603_led_set_blink(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct mt76_dev *mt76 = container_of(led_cdev, struct mt76_dev,
+ led_cdev);
+ u8 delta_on, delta_off;
+
+ delta_off = max_t(u8, *delay_off / 10, 1);
+ delta_on = max_t(u8, *delay_on / 10, 1);
+
+ mt7603_led_set_config(mt76, delta_on, delta_off);
+ return 0;
+}
+
+static void mt7603_led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct mt76_dev *mt76 = container_of(led_cdev, struct mt76_dev,
+ led_cdev);
+
+ if (!brightness)
+ mt7603_led_set_config(mt76, 0, 0xff);
+ else
+ mt7603_led_set_config(mt76, 0xff, 0);
+}
+
+static u32 __mt7603_reg_addr(struct mt7603_dev *dev, u32 addr)
+{
+ if (addr < 0x100000)
+ return addr;
+
+ return mt7603_reg_map(dev, addr);
+}
+
+static u32 mt7603_rr(struct mt76_dev *mdev, u32 offset)
+{
+ struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+ u32 addr = __mt7603_reg_addr(dev, offset);
+
+ return dev->bus_ops->rr(mdev, addr);
+}
+
+static void mt7603_wr(struct mt76_dev *mdev, u32 offset, u32 val)
+{
+ struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+ u32 addr = __mt7603_reg_addr(dev, offset);
+
+ dev->bus_ops->wr(mdev, addr, val);
+}
+
+static u32 mt7603_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
+{
+ struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+ u32 addr = __mt7603_reg_addr(dev, offset);
+
+ return dev->bus_ops->rmw(mdev, addr, mask, val);
+}
+
+static void
+mt7603_regd_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct mt7603_dev *dev = hw->priv;
+
+ dev->mt76.region = request->dfs_region;
+ dev->ed_monitor = dev->ed_monitor_enabled &&
+ dev->mt76.region == NL80211_DFS_ETSI;
+}
+
+static int
+mt7603_txpower_signed(int val)
+{
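+	/* sign-magnitude encoded power offset: BIT(7) marks the value as valid,
+	 * BIT(6) selects a positive offset, bits 5:0 hold the magnitude
+	 */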
+ bool sign = val & BIT(6);
+
+ if (!(val & BIT(7)))
+ return 0;
+
+ val &= GENMASK(5, 0);
+ if (!sign)
+ val = -val;
+
+ return val;
+}
+
+static void
+mt7603_init_txpower(struct mt7603_dev *dev,
+ struct ieee80211_supported_band *sband)
+{
+ struct ieee80211_channel *chan;
+ u8 *eeprom = (u8 *)dev->mt76.eeprom.data;
+ int target_power = eeprom[MT_EE_TX_POWER_0_START_2G + 2] & ~BIT(7);
+ u8 *rate_power = &eeprom[MT_EE_TX_POWER_CCK];
+ bool ext_pa = eeprom[MT_EE_NIC_CONF_0 + 1] & BIT(1);
+ int max_offset, cur_offset;
+ int i;
+
+ if (ext_pa && is_mt7603(dev))
+ target_power = eeprom[MT_EE_TX_POWER_TSSI_OFF] & ~BIT(7);
+
+ if (target_power & BIT(6))
+ target_power = -(target_power & GENMASK(5, 0));
+
+ max_offset = 0;
+ for (i = 0; i < 14; i++) {
+ cur_offset = mt7603_txpower_signed(rate_power[i]);
+ max_offset = max(max_offset, cur_offset);
+ }
+
+ target_power += max_offset;
+
+ dev->tx_power_limit = target_power;
+ dev->mphy.txpower_cur = target_power;
+
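+	/* target_power is presumably in 0.5 dB EEPROM units; halve it before
+	 * comparing against the per-channel regulatory limits below
+	 */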
+ target_power = DIV_ROUND_UP(target_power, 2);
+
+ /* add 3 dBm for 2SS devices (combined output) */
+ if (dev->mphy.antenna_mask & BIT(1))
+ target_power += 3;
+
+ for (i = 0; i < sband->n_channels; i++) {
+ chan = &sband->channels[i];
+ chan->max_power = min_t(int, chan->max_reg_power, target_power);
+ chan->orig_mpwr = target_power;
+ }
+}
+
+int mt7603_register_device(struct mt7603_dev *dev)
+{
+ struct mt76_bus_ops *bus_ops;
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ struct wiphy *wiphy = hw->wiphy;
+ int ret;
+
+ dev->bus_ops = dev->mt76.bus;
+ bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
+ GFP_KERNEL);
+ if (!bus_ops)
+ return -ENOMEM;
+
+ bus_ops->rr = mt7603_rr;
+ bus_ops->wr = mt7603_wr;
+ bus_ops->rmw = mt7603_rmw;
+ dev->mt76.bus = bus_ops;
+
+ INIT_LIST_HEAD(&dev->sta_poll_list);
+ spin_lock_init(&dev->sta_poll_lock);
+ spin_lock_init(&dev->ps_lock);
+
+ INIT_DELAYED_WORK(&dev->mt76.mac_work, mt7603_mac_work);
+ tasklet_init(&dev->mt76.pre_tbtt_tasklet, mt7603_pre_tbtt_tasklet,
+ (unsigned long)dev);
+
+ dev->slottime = 9;
+ dev->sensitivity_limit = 28;
+ dev->dynamic_sensitivity = true;
+
+ ret = mt7603_init_hardware(dev);
+ if (ret)
+ return ret;
+
+ hw->queues = 4;
+ hw->max_rates = 3;
+ hw->max_report_rates = 7;
+ hw->max_rate_tries = 11;
+
+ hw->sta_data_size = sizeof(struct mt7603_sta);
+ hw->vif_data_size = sizeof(struct mt7603_vif);
+
+ wiphy->iface_combinations = if_comb;
+ wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+
+ ieee80211_hw_set(hw, TX_STATUS_NO_AMPDU_LEN);
+ ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
+
+ /* init led callbacks */
+ if (IS_ENABLED(CONFIG_MT76_LEDS)) {
+ dev->mt76.led_cdev.brightness_set = mt7603_led_set_brightness;
+ dev->mt76.led_cdev.blink_set = mt7603_led_set_blink;
+ }
+
+ wiphy->reg_notifier = mt7603_regd_notifier;
+
+ ret = mt76_register_device(&dev->mt76, true, mt7603_rates,
+ ARRAY_SIZE(mt7603_rates));
+ if (ret)
+ return ret;
+
+ mt7603_init_debugfs(dev);
+ mt7603_init_txpower(dev, &dev->mphy.sband_2g.sband);
+
+ return 0;
+}
+
+void mt7603_unregister_device(struct mt7603_dev *dev)
+{
+ tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
+ mt76_unregister_device(&dev->mt76);
+ mt7603_mcu_exit(dev);
+ mt7603_dma_cleanup(dev);
+ mt76_free_device(&dev->mt76);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
new file mode 100644
index 000000000..9eb898ebb
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
@@ -0,0 +1,1862 @@
+// SPDX-License-Identifier: ISC
+
+#include <linux/etherdevice.h>
+#include <linux/timekeeping.h>
+#include "mt7603.h"
+#include "mac.h"
+#include "../trace.h"
+
+#define MT_PSE_PAGE_SIZE 128
+
+static u32
+mt7603_ac_queue_mask0(u32 mask)
+{
+ u32 ret = 0;
+
+ ret |= GENMASK(3, 0) * !!(mask & BIT(0));
+ ret |= GENMASK(8, 5) * !!(mask & BIT(1));
+ ret |= GENMASK(13, 10) * !!(mask & BIT(2));
+ ret |= GENMASK(19, 16) * !!(mask & BIT(3));
+ return ret;
+}
+
+static void
+mt76_stop_tx_ac(struct mt7603_dev *dev, u32 mask)
+{
+ mt76_set(dev, MT_WF_ARB_TX_STOP_0, mt7603_ac_queue_mask0(mask));
+}
+
+static void
+mt76_start_tx_ac(struct mt7603_dev *dev, u32 mask)
+{
+ mt76_set(dev, MT_WF_ARB_TX_START_0, mt7603_ac_queue_mask0(mask));
+}
+
+void mt7603_mac_reset_counters(struct mt7603_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < 2; i++)
+ mt76_rr(dev, MT_TX_AGG_CNT(i));
+
+ memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats));
+}
+
+void mt7603_mac_set_timing(struct mt7603_dev *dev)
+{
+ u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
+ FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
+ u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
+ FIELD_PREP(MT_TIMEOUT_VAL_CCA, 24);
+ int offset = 3 * dev->coverage_class;
+ u32 reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
+ FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
+ bool is_5ghz = dev->mphy.chandef.chan->band == NL80211_BAND_5GHZ;
+ int sifs;
+ u32 val;
+
+ if (is_5ghz)
+ sifs = 16;
+ else
+ sifs = 10;
+
+ mt76_set(dev, MT_ARB_SCR,
+ MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
+ udelay(1);
+
+ mt76_wr(dev, MT_TIMEOUT_CCK, cck + reg_offset);
+ mt76_wr(dev, MT_TIMEOUT_OFDM, ofdm + reg_offset);
+ mt76_wr(dev, MT_IFS,
+ FIELD_PREP(MT_IFS_EIFS, 360) |
+ FIELD_PREP(MT_IFS_RIFS, 2) |
+ FIELD_PREP(MT_IFS_SIFS, sifs) |
+ FIELD_PREP(MT_IFS_SLOT, dev->slottime));
+
+ if (dev->slottime < 20 || is_5ghz)
+ val = MT7603_CFEND_RATE_DEFAULT;
+ else
+ val = MT7603_CFEND_RATE_11B;
+
+ mt76_rmw_field(dev, MT_AGG_CONTROL, MT_AGG_CONTROL_CFEND_RATE, val);
+
+ mt76_clear(dev, MT_ARB_SCR,
+ MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
+}
+
+static void
+mt7603_wtbl_update(struct mt7603_dev *dev, int idx, u32 mask)
+{
+ mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
+ FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);
+
+ mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
+}
+
+static u32
+mt7603_wtbl1_addr(int idx)
+{
+ return MT_WTBL1_BASE + idx * MT_WTBL1_SIZE;
+}
+
+static u32
+mt7603_wtbl2_addr(int idx)
+{
+ /* Mapped to WTBL2 */
+ return MT_PCIE_REMAP_BASE_1 + idx * MT_WTBL2_SIZE;
+}
+
+static u32
+mt7603_wtbl3_addr(int idx)
+{
+ u32 base = mt7603_wtbl2_addr(MT7603_WTBL_SIZE);
+
+ return base + idx * MT_WTBL3_SIZE;
+}
+
+static u32
+mt7603_wtbl4_addr(int idx)
+{
+ u32 base = mt7603_wtbl3_addr(MT7603_WTBL_SIZE);
+
+ return base + idx * MT_WTBL4_SIZE;
+}
+
+void mt7603_wtbl_init(struct mt7603_dev *dev, int idx, int vif,
+ const u8 *mac_addr)
+{
+ const void *_mac = mac_addr;
+ u32 addr = mt7603_wtbl1_addr(idx);
+ u32 w0 = 0, w1 = 0;
+ int i;
+
+ if (_mac) {
+ w0 = FIELD_PREP(MT_WTBL1_W0_ADDR_HI,
+ get_unaligned_le16(_mac + 4));
+ w1 = FIELD_PREP(MT_WTBL1_W1_ADDR_LO,
+ get_unaligned_le32(_mac));
+ }
+
+ if (vif < 0)
+ vif = 0;
+ else
+ w0 |= MT_WTBL1_W0_RX_CHECK_A1;
+ w0 |= FIELD_PREP(MT_WTBL1_W0_MUAR_IDX, vif);
+
+ mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
+
+ mt76_set(dev, addr + 0 * 4, w0);
+ mt76_set(dev, addr + 1 * 4, w1);
+ mt76_set(dev, addr + 2 * 4, MT_WTBL1_W2_ADMISSION_CONTROL);
+
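+	/* stop the AC queues while the WTBL2 entry is cleared and flushed */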
+ mt76_stop_tx_ac(dev, GENMASK(3, 0));
+ addr = mt7603_wtbl2_addr(idx);
+ for (i = 0; i < MT_WTBL2_SIZE; i += 4)
+ mt76_wr(dev, addr + i, 0);
+ mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_WTBL2);
+ mt76_start_tx_ac(dev, GENMASK(3, 0));
+
+ addr = mt7603_wtbl3_addr(idx);
+ for (i = 0; i < MT_WTBL3_SIZE; i += 4)
+ mt76_wr(dev, addr + i, 0);
+
+ addr = mt7603_wtbl4_addr(idx);
+ for (i = 0; i < MT_WTBL4_SIZE; i += 4)
+ mt76_wr(dev, addr + i, 0);
+
+ mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+}
+
+static void
+mt7603_wtbl_set_skip_tx(struct mt7603_dev *dev, int idx, bool enabled)
+{
+ u32 addr = mt7603_wtbl1_addr(idx);
+ u32 val = mt76_rr(dev, addr + 3 * 4);
+
+ val &= ~MT_WTBL1_W3_SKIP_TX;
+ val |= enabled * MT_WTBL1_W3_SKIP_TX;
+
+ mt76_wr(dev, addr + 3 * 4, val);
+}
+
+void mt7603_filter_tx(struct mt7603_dev *dev, int idx, bool abort)
+{
+ int i, port, queue;
+
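+	/* flush frames queued for this wcid: on abort they are dropped via the
+	 * PSE free queue, otherwise they are redirected to the HIF MCU queue and
+	 * looped back to the host (see mt7603_rx_loopback_skb)
+	 */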
+ if (abort) {
+ port = 3; /* PSE */
+ queue = 8; /* free queue */
+ } else {
+ port = 0; /* HIF */
+ queue = 1; /* MCU queue */
+ }
+
+ mt7603_wtbl_set_skip_tx(dev, idx, true);
+
+ mt76_wr(dev, MT_TX_ABORT, MT_TX_ABORT_EN |
+ FIELD_PREP(MT_TX_ABORT_WCID, idx));
+
+ for (i = 0; i < 4; i++) {
+ mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY |
+ FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, idx) |
+ FIELD_PREP(MT_DMA_FQCR0_TARGET_QID, i) |
+ FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, port) |
+ FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, queue));
+
+ WARN_ON_ONCE(!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY,
+ 0, 5000));
+ }
+
+ mt76_wr(dev, MT_TX_ABORT, 0);
+
+ mt7603_wtbl_set_skip_tx(dev, idx, false);
+}
+
+void mt7603_wtbl_set_smps(struct mt7603_dev *dev, struct mt7603_sta *sta,
+ bool enabled)
+{
+ u32 addr = mt7603_wtbl1_addr(sta->wcid.idx);
+
+ if (sta->smps == enabled)
+ return;
+
+ mt76_rmw_field(dev, addr + 2 * 4, MT_WTBL1_W2_SMPS, enabled);
+ sta->smps = enabled;
+}
+
+void mt7603_wtbl_set_ps(struct mt7603_dev *dev, struct mt7603_sta *sta,
+ bool enabled)
+{
+ int idx = sta->wcid.idx;
+ u32 addr;
+
+ spin_lock_bh(&dev->ps_lock);
+
+ if (sta->ps == enabled)
+ goto out;
+
+ mt76_wr(dev, MT_PSE_RTA,
+ FIELD_PREP(MT_PSE_RTA_TAG_ID, idx) |
+ FIELD_PREP(MT_PSE_RTA_PORT_ID, 0) |
+ FIELD_PREP(MT_PSE_RTA_QUEUE_ID, 1) |
+ FIELD_PREP(MT_PSE_RTA_REDIRECT_EN, enabled) |
+ MT_PSE_RTA_WRITE | MT_PSE_RTA_BUSY);
+
+ mt76_poll(dev, MT_PSE_RTA, MT_PSE_RTA_BUSY, 0, 5000);
+
+ if (enabled)
+ mt7603_filter_tx(dev, idx, false);
+
+ addr = mt7603_wtbl1_addr(idx);
+ mt76_set(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);
+ mt76_rmw(dev, addr + 3 * 4, MT_WTBL1_W3_POWER_SAVE,
+ enabled * MT_WTBL1_W3_POWER_SAVE);
+ mt76_clear(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);
+ sta->ps = enabled;
+
+out:
+ spin_unlock_bh(&dev->ps_lock);
+}
+
+void mt7603_wtbl_clear(struct mt7603_dev *dev, int idx)
+{
+ int wtbl2_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL2_SIZE;
+ int wtbl2_frame = idx / wtbl2_frame_size;
+ int wtbl2_entry = idx % wtbl2_frame_size;
+
+ int wtbl3_base_frame = MT_WTBL3_OFFSET / MT_PSE_PAGE_SIZE;
+ int wtbl3_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL3_SIZE;
+ int wtbl3_frame = wtbl3_base_frame + idx / wtbl3_frame_size;
+ int wtbl3_entry = (idx % wtbl3_frame_size) * 2;
+
+ int wtbl4_base_frame = MT_WTBL4_OFFSET / MT_PSE_PAGE_SIZE;
+ int wtbl4_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL4_SIZE;
+ int wtbl4_frame = wtbl4_base_frame + idx / wtbl4_frame_size;
+ int wtbl4_entry = idx % wtbl4_frame_size;
+
+ u32 addr = MT_WTBL1_BASE + idx * MT_WTBL1_SIZE;
+ int i;
+
+ mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
+
+ mt76_wr(dev, addr + 0 * 4,
+ MT_WTBL1_W0_RX_CHECK_A1 |
+ MT_WTBL1_W0_RX_CHECK_A2 |
+ MT_WTBL1_W0_RX_VALID);
+ mt76_wr(dev, addr + 1 * 4, 0);
+ mt76_wr(dev, addr + 2 * 4, 0);
+
+ mt76_set(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);
+
+ mt76_wr(dev, addr + 3 * 4,
+ FIELD_PREP(MT_WTBL1_W3_WTBL2_FRAME_ID, wtbl2_frame) |
+ FIELD_PREP(MT_WTBL1_W3_WTBL2_ENTRY_ID, wtbl2_entry) |
+ FIELD_PREP(MT_WTBL1_W3_WTBL4_FRAME_ID, wtbl4_frame) |
+ MT_WTBL1_W3_I_PSM | MT_WTBL1_W3_KEEP_I_PSM);
+ mt76_wr(dev, addr + 4 * 4,
+ FIELD_PREP(MT_WTBL1_W4_WTBL3_FRAME_ID, wtbl3_frame) |
+ FIELD_PREP(MT_WTBL1_W4_WTBL3_ENTRY_ID, wtbl3_entry) |
+ FIELD_PREP(MT_WTBL1_W4_WTBL4_ENTRY_ID, wtbl4_entry));
+
+ mt76_clear(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);
+
+ addr = mt7603_wtbl2_addr(idx);
+
+ /* Clear BA information */
+ mt76_wr(dev, addr + (15 * 4), 0);
+
+ mt76_stop_tx_ac(dev, GENMASK(3, 0));
+ for (i = 2; i <= 4; i++)
+ mt76_wr(dev, addr + (i * 4), 0);
+ mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_WTBL2);
+ mt76_start_tx_ac(dev, GENMASK(3, 0));
+
+ mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_RX_COUNT_CLEAR);
+ mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_TX_COUNT_CLEAR);
+ mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+}
+
+void mt7603_wtbl_update_cap(struct mt7603_dev *dev, struct ieee80211_sta *sta)
+{
+ struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
+ int idx = msta->wcid.idx;
+ u8 ampdu_density;
+ u32 addr;
+ u32 val;
+
+ addr = mt7603_wtbl1_addr(idx);
+
+ ampdu_density = sta->ht_cap.ampdu_density;
+ if (ampdu_density < IEEE80211_HT_MPDU_DENSITY_4)
+ ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;
+
+ val = mt76_rr(dev, addr + 2 * 4);
+ val &= MT_WTBL1_W2_KEY_TYPE | MT_WTBL1_W2_ADMISSION_CONTROL;
+ val |= FIELD_PREP(MT_WTBL1_W2_AMPDU_FACTOR, sta->ht_cap.ampdu_factor) |
+ FIELD_PREP(MT_WTBL1_W2_MPDU_DENSITY, sta->ht_cap.ampdu_density) |
+ MT_WTBL1_W2_TXS_BAF_REPORT;
+
+ if (sta->ht_cap.cap)
+ val |= MT_WTBL1_W2_HT;
+ if (sta->vht_cap.cap)
+ val |= MT_WTBL1_W2_VHT;
+
+ mt76_wr(dev, addr + 2 * 4, val);
+
+ addr = mt7603_wtbl2_addr(idx);
+ val = mt76_rr(dev, addr + 9 * 4);
+ val &= ~(MT_WTBL2_W9_SHORT_GI_20 | MT_WTBL2_W9_SHORT_GI_40 |
+ MT_WTBL2_W9_SHORT_GI_80);
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
+ val |= MT_WTBL2_W9_SHORT_GI_20;
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
+ val |= MT_WTBL2_W9_SHORT_GI_40;
+ mt76_wr(dev, addr + 9 * 4, val);
+}
+
+void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid)
+{
+ mt76_wr(dev, MT_BA_CONTROL_0, get_unaligned_le32(addr));
+ mt76_wr(dev, MT_BA_CONTROL_1,
+ (get_unaligned_le16(addr + 4) |
+ FIELD_PREP(MT_BA_CONTROL_1_TID, tid) |
+ MT_BA_CONTROL_1_RESET));
+}
+
+void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid,
+ int ba_size)
+{
+ u32 addr = mt7603_wtbl2_addr(wcid);
+ u32 tid_mask = FIELD_PREP(MT_WTBL2_W15_BA_EN_TIDS, BIT(tid)) |
+ (MT_WTBL2_W15_BA_WIN_SIZE <<
+ (tid * MT_WTBL2_W15_BA_WIN_SIZE_SHIFT));
+ u32 tid_val;
+ int i;
+
+ if (ba_size < 0) {
+ /* disable */
+ mt76_clear(dev, addr + (15 * 4), tid_mask);
+ return;
+ }
+
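+	/* pick the largest aggregation size limit index that still fits the
+	 * requested BA window size
+	 */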
+ for (i = 7; i > 0; i--) {
+ if (ba_size >= MT_AGG_SIZE_LIMIT(i))
+ break;
+ }
+
+ tid_val = FIELD_PREP(MT_WTBL2_W15_BA_EN_TIDS, BIT(tid)) |
+ i << (tid * MT_WTBL2_W15_BA_WIN_SIZE_SHIFT);
+
+ mt76_rmw(dev, addr + (15 * 4), tid_mask, tid_val);
+}
+
+void mt7603_mac_sta_poll(struct mt7603_dev *dev)
+{
+ static const u8 ac_to_tid[4] = {
+ [IEEE80211_AC_BE] = 0,
+ [IEEE80211_AC_BK] = 1,
+ [IEEE80211_AC_VI] = 4,
+ [IEEE80211_AC_VO] = 6
+ };
+ struct ieee80211_sta *sta;
+ struct mt7603_sta *msta;
+ u32 total_airtime = 0;
+ u32 airtime[4];
+ u32 addr;
+ int i;
+
+ rcu_read_lock();
+
+ while (1) {
+ bool clear = false;
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (list_empty(&dev->sta_poll_list)) {
+ spin_unlock_bh(&dev->sta_poll_lock);
+ break;
+ }
+
+ msta = list_first_entry(&dev->sta_poll_list, struct mt7603_sta,
+ poll_list);
+ list_del_init(&msta->poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+
+ addr = mt7603_wtbl4_addr(msta->wcid.idx);
+ for (i = 0; i < 4; i++) {
+ u32 airtime_last = msta->tx_airtime_ac[i];
+
+ msta->tx_airtime_ac[i] = mt76_rr(dev, addr + i * 8);
+ airtime[i] = msta->tx_airtime_ac[i] - airtime_last;
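+			/* the WTBL4 admission counters appear to tick in 32 us
+			 * units; the scaled delta is reported to mac80211 as
+			 * microseconds of airtime
+			 */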
+ airtime[i] *= 32;
+ total_airtime += airtime[i];
+
+ if (msta->tx_airtime_ac[i] & BIT(22))
+ clear = true;
+ }
+
+ if (clear) {
+ mt7603_wtbl_update(dev, msta->wcid.idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+ memset(msta->tx_airtime_ac, 0,
+ sizeof(msta->tx_airtime_ac));
+ }
+
+ if (!msta->wcid.sta)
+ continue;
+
+ sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
+ for (i = 0; i < 4; i++) {
+ struct mt76_queue *q = dev->mt76.q_tx[i];
+ u8 qidx = q->hw_idx;
+ u8 tid = ac_to_tid[i];
+ u32 txtime = airtime[qidx];
+
+ if (!txtime)
+ continue;
+
+ ieee80211_sta_register_airtime(sta, tid, txtime, 0);
+ }
+ }
+
+ rcu_read_unlock();
+
+ if (!total_airtime)
+ return;
+
+ spin_lock_bh(&dev->mt76.cc_lock);
+ dev->mphy.chan_state->cc_tx += total_airtime;
+ spin_unlock_bh(&dev->mt76.cc_lock);
+}
+
+static struct mt76_wcid *
+mt7603_rx_get_wcid(struct mt7603_dev *dev, u8 idx, bool unicast)
+{
+ struct mt7603_sta *sta;
+ struct mt76_wcid *wcid;
+
+ if (idx >= MT7603_WTBL_SIZE)
+ return NULL;
+
+ wcid = rcu_dereference(dev->mt76.wcid[idx]);
+ if (unicast || !wcid)
+ return wcid;
+
+ if (!wcid->sta)
+ return NULL;
+
+ sta = container_of(wcid, struct mt7603_sta, wcid);
+ if (!sta->vif)
+ return NULL;
+
+ return &sta->vif->sta.wcid;
+}
+
+int
+mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_hdr *hdr;
+ __le32 *rxd = (__le32 *)skb->data;
+ u32 rxd0 = le32_to_cpu(rxd[0]);
+ u32 rxd1 = le32_to_cpu(rxd[1]);
+ u32 rxd2 = le32_to_cpu(rxd[2]);
+ bool unicast = rxd1 & MT_RXD1_NORMAL_U2M;
+ bool insert_ccmp_hdr = false;
+ bool remove_pad;
+ int idx;
+ int i;
+
+ memset(status, 0, sizeof(*status));
+
+ i = FIELD_GET(MT_RXD1_NORMAL_CH_FREQ, rxd1);
+ sband = (i & 1) ? &dev->mphy.sband_5g.sband : &dev->mphy.sband_2g.sband;
+ i >>= 1;
+
+ idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
+ status->wcid = mt7603_rx_get_wcid(dev, idx, unicast);
+
+ status->band = sband->band;
+ if (i < sband->n_channels)
+ status->freq = sband->channels[i].center_freq;
+
+ if (rxd2 & MT_RXD2_NORMAL_FCS_ERR)
+ status->flag |= RX_FLAG_FAILED_FCS_CRC;
+
+ if (rxd2 & MT_RXD2_NORMAL_TKIP_MIC_ERR)
+ status->flag |= RX_FLAG_MMIC_ERROR;
+
+ if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
+ !(rxd2 & (MT_RXD2_NORMAL_CLM | MT_RXD2_NORMAL_CM))) {
+ status->flag |= RX_FLAG_DECRYPTED;
+ status->flag |= RX_FLAG_IV_STRIPPED;
+ status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
+ }
+
+ if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB |
+ MT_RXD2_NORMAL_NON_AMPDU))) {
+ status->flag |= RX_FLAG_AMPDU_DETAILS;
+
+ /* all subframes of an A-MPDU have the same timestamp */
+ if (dev->rx_ampdu_ts != rxd[12]) {
+ if (!++dev->ampdu_ref)
+ dev->ampdu_ref++;
+ }
+ dev->rx_ampdu_ts = rxd[12];
+
+ status->ampdu_ref = dev->ampdu_ref;
+ }
+
+ remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;
+
+ if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
+ return -EINVAL;
+
+ if (!sband->channels)
+ return -EINVAL;
+
+ rxd += 4;
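+	/* skip over the optional RXD groups flagged in rxd0; group 3 carries the
+	 * RX vector with rate and RSSI information used below
+	 */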
+ if (rxd0 & MT_RXD0_NORMAL_GROUP_4) {
+ rxd += 4;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+ }
+ if (rxd0 & MT_RXD0_NORMAL_GROUP_1) {
+ u8 *data = (u8 *)rxd;
+
+ if (status->flag & RX_FLAG_DECRYPTED) {
+ status->iv[0] = data[5];
+ status->iv[1] = data[4];
+ status->iv[2] = data[3];
+ status->iv[3] = data[2];
+ status->iv[4] = data[1];
+ status->iv[5] = data[0];
+
+ insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
+ }
+
+ rxd += 4;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+ }
+ if (rxd0 & MT_RXD0_NORMAL_GROUP_2) {
+ rxd += 2;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+ }
+ if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
+ u32 rxdg0 = le32_to_cpu(rxd[0]);
+ u32 rxdg3 = le32_to_cpu(rxd[3]);
+ bool cck = false;
+
+ i = FIELD_GET(MT_RXV1_TX_RATE, rxdg0);
+ switch (FIELD_GET(MT_RXV1_TX_MODE, rxdg0)) {
+ case MT_PHY_TYPE_CCK:
+ cck = true;
+ fallthrough;
+ case MT_PHY_TYPE_OFDM:
+ i = mt76_get_rate(&dev->mt76, sband, i, cck);
+ break;
+ case MT_PHY_TYPE_HT_GF:
+ case MT_PHY_TYPE_HT:
+ status->encoding = RX_ENC_HT;
+ if (i > 15)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (rxdg0 & MT_RXV1_HT_SHORT_GI)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ if (rxdg0 & MT_RXV1_HT_AD_CODE)
+ status->enc_flags |= RX_ENC_FLAG_LDPC;
+
+ status->enc_flags |= RX_ENC_FLAG_STBC_MASK *
+ FIELD_GET(MT_RXV1_HT_STBC, rxdg0);
+
+ status->rate_idx = i;
+
+ status->chains = dev->mphy.antenna_mask;
+ status->chain_signal[0] = FIELD_GET(MT_RXV4_IB_RSSI0, rxdg3) +
+ dev->rssi_offset[0];
+ status->chain_signal[1] = FIELD_GET(MT_RXV4_IB_RSSI1, rxdg3) +
+ dev->rssi_offset[1];
+
+ status->signal = status->chain_signal[0];
+ if (status->chains & BIT(1))
+ status->signal = max(status->signal,
+ status->chain_signal[1]);
+
+ if (FIELD_GET(MT_RXV1_FRAME_MODE, rxdg0) == 1)
+ status->bw = RATE_INFO_BW_40;
+
+ rxd += 6;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+ } else {
+ return -EINVAL;
+ }
+
+ skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);
+
+ if (insert_ccmp_hdr) {
+ u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
+
+ mt76_insert_ccmp_hdr(skb, key_id);
+ }
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
+ return 0;
+
+ status->aggr = unicast &&
+ !ieee80211_is_qos_nullfunc(hdr->frame_control);
+ status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+ status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+
+ return 0;
+}
+
+static u16
+mt7603_mac_tx_rate_val(struct mt7603_dev *dev,
+ const struct ieee80211_tx_rate *rate, bool stbc, u8 *bw)
+{
+ u8 phy, nss, rate_idx;
+ u16 rateval;
+
+ *bw = 0;
+ if (rate->flags & IEEE80211_TX_RC_MCS) {
+ rate_idx = rate->idx;
+ nss = 1 + (rate->idx >> 3);
+ phy = MT_PHY_TYPE_HT;
+ if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
+ phy = MT_PHY_TYPE_HT_GF;
+ if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ *bw = 1;
+ } else {
+ const struct ieee80211_rate *r;
+ int band = dev->mphy.chandef.chan->band;
+ u16 val;
+
+ nss = 1;
+ r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
+ if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+ val = r->hw_value_short;
+ else
+ val = r->hw_value;
+
+ phy = val >> 8;
+ rate_idx = val & 0xff;
+ }
+
+ rateval = (FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
+ FIELD_PREP(MT_TX_RATE_MODE, phy));
+
+ if (stbc && nss == 1)
+ rateval |= MT_TX_RATE_STBC;
+
+ return rateval;
+}
+
+void mt7603_wtbl_set_rates(struct mt7603_dev *dev, struct mt7603_sta *sta,
+ struct ieee80211_tx_rate *probe_rate,
+ struct ieee80211_tx_rate *rates)
+{
+ struct ieee80211_tx_rate *ref;
+ int wcid = sta->wcid.idx;
+ u32 addr = mt7603_wtbl2_addr(wcid);
+ bool stbc = false;
+ int n_rates = sta->n_rates;
+ u8 bw, bw_prev, bw_idx = 0;
+ u16 val[4];
+ u16 probe_val;
+ u32 w9 = mt76_rr(dev, addr + 9 * 4);
+ bool rateset;
+ int i, k;
+
+ if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
+ return;
+
+ for (i = n_rates; i < 4; i++)
+ rates[i] = rates[n_rates - 1];
+
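+	/* rate sets are double-buffered; bit 0 of the stored TSF value records
+	 * which set was programmed last, so write the other one
+	 */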
+ rateset = !(sta->rate_set_tsf & BIT(0));
+ memcpy(sta->rateset[rateset].rates, rates,
+ sizeof(sta->rateset[rateset].rates));
+ if (probe_rate) {
+ sta->rateset[rateset].probe_rate = *probe_rate;
+ ref = &sta->rateset[rateset].probe_rate;
+ } else {
+ sta->rateset[rateset].probe_rate.idx = -1;
+ ref = &sta->rateset[rateset].rates[0];
+ }
+
+ rates = sta->rateset[rateset].rates;
+ for (i = 0; i < ARRAY_SIZE(sta->rateset[rateset].rates); i++) {
+ /*
+ * We don't support switching between short and long GI
+ * within the rate set. For accurate tx status reporting, we
+ * need to make sure that flags match.
+ * For improved performance, avoid duplicate entries by
+ * decrementing the MCS index if necessary
+ */
+ if ((ref->flags ^ rates[i].flags) & IEEE80211_TX_RC_SHORT_GI)
+ rates[i].flags ^= IEEE80211_TX_RC_SHORT_GI;
+
+ for (k = 0; k < i; k++) {
+ if (rates[i].idx != rates[k].idx)
+ continue;
+ if ((rates[i].flags ^ rates[k].flags) &
+ IEEE80211_TX_RC_40_MHZ_WIDTH)
+ continue;
+
+ if (!rates[i].idx)
+ continue;
+
+ rates[i].idx--;
+ }
+ }
+
+ w9 &= MT_WTBL2_W9_SHORT_GI_20 | MT_WTBL2_W9_SHORT_GI_40 |
+ MT_WTBL2_W9_SHORT_GI_80;
+
+ val[0] = mt7603_mac_tx_rate_val(dev, &rates[0], stbc, &bw);
+ bw_prev = bw;
+
+ if (probe_rate) {
+ probe_val = mt7603_mac_tx_rate_val(dev, probe_rate, stbc, &bw);
+ if (bw)
+ bw_idx = 1;
+ else
+ bw_prev = 0;
+ } else {
+ probe_val = val[0];
+ }
+
+ w9 |= FIELD_PREP(MT_WTBL2_W9_CC_BW_SEL, bw);
+ w9 |= FIELD_PREP(MT_WTBL2_W9_BW_CAP, bw);
+
+ val[1] = mt7603_mac_tx_rate_val(dev, &rates[1], stbc, &bw);
+ if (bw_prev) {
+ bw_idx = 3;
+ bw_prev = bw;
+ }
+
+ val[2] = mt7603_mac_tx_rate_val(dev, &rates[2], stbc, &bw);
+ if (bw_prev) {
+ bw_idx = 5;
+ bw_prev = bw;
+ }
+
+ val[3] = mt7603_mac_tx_rate_val(dev, &rates[3], stbc, &bw);
+ if (bw_prev)
+ bw_idx = 7;
+
+ w9 |= FIELD_PREP(MT_WTBL2_W9_CHANGE_BW_RATE,
+ bw_idx ? bw_idx - 1 : 7);
+
+ mt76_wr(dev, MT_WTBL_RIUCR0, w9);
+
+ mt76_wr(dev, MT_WTBL_RIUCR1,
+ FIELD_PREP(MT_WTBL_RIUCR1_RATE0, probe_val) |
+ FIELD_PREP(MT_WTBL_RIUCR1_RATE1, val[0]) |
+ FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, val[1]));
+
+ mt76_wr(dev, MT_WTBL_RIUCR2,
+ FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, val[1] >> 8) |
+ FIELD_PREP(MT_WTBL_RIUCR2_RATE3, val[1]) |
+ FIELD_PREP(MT_WTBL_RIUCR2_RATE4, val[2]) |
+ FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, val[2]));
+
+ mt76_wr(dev, MT_WTBL_RIUCR3,
+ FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, val[2] >> 4) |
+ FIELD_PREP(MT_WTBL_RIUCR3_RATE6, val[3]) |
+ FIELD_PREP(MT_WTBL_RIUCR3_RATE7, val[3]));
+
+ mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
+ sta->rate_set_tsf = (mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0)) | rateset;
+
+ mt76_wr(dev, MT_WTBL_UPDATE,
+ FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid) |
+ MT_WTBL_UPDATE_RATE_UPDATE |
+ MT_WTBL_UPDATE_TX_COUNT_CLEAR);
+
+ if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET))
+ mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
+
+ sta->rate_count = 2 * MT7603_RATE_RETRY * n_rates;
+ sta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
+}
+
+static enum mt7603_cipher_type
+mt7603_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
+{
+ memset(key_data, 0, 32);
+ if (!key)
+ return MT_CIPHER_NONE;
+
+ if (key->keylen > 32)
+ return MT_CIPHER_NONE;
+
+ memcpy(key_data, key->key, key->keylen);
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return MT_CIPHER_WEP40;
+ case WLAN_CIPHER_SUITE_WEP104:
+ return MT_CIPHER_WEP104;
+ case WLAN_CIPHER_SUITE_TKIP:
+ /* Rx/Tx MIC keys are swapped */
+ memcpy(key_data + 16, key->key + 24, 8);
+ memcpy(key_data + 24, key->key + 16, 8);
+ return MT_CIPHER_TKIP;
+ case WLAN_CIPHER_SUITE_CCMP:
+ return MT_CIPHER_AES_CCMP;
+ default:
+ return MT_CIPHER_NONE;
+ }
+}
+
+int mt7603_wtbl_set_key(struct mt7603_dev *dev, int wcid,
+ struct ieee80211_key_conf *key)
+{
+ enum mt7603_cipher_type cipher;
+ u32 addr = mt7603_wtbl3_addr(wcid);
+ u8 key_data[32];
+ int key_len = sizeof(key_data);
+
+ cipher = mt7603_mac_get_key_info(key, key_data);
+ if (cipher == MT_CIPHER_NONE && key)
+ return -EOPNOTSUPP;
+
+ if (key && (cipher == MT_CIPHER_WEP40 || cipher == MT_CIPHER_WEP104)) {
+ addr += key->keyidx * 16;
+ key_len = 16;
+ }
+
+ mt76_wr_copy(dev, addr, key_data, key_len);
+
+ addr = mt7603_wtbl1_addr(wcid);
+ mt76_rmw_field(dev, addr + 2 * 4, MT_WTBL1_W2_KEY_TYPE, cipher);
+ if (key)
+ mt76_rmw_field(dev, addr, MT_WTBL1_W0_KEY_IDX, key->keyidx);
+ mt76_rmw_field(dev, addr, MT_WTBL1_W0_RX_KEY_VALID, !!key);
+
+ return 0;
+}
+
+static int
+mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
+ struct sk_buff *skb, enum mt76_txq_id qid,
+ struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+ int pid, struct ieee80211_key_conf *key)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_rate *rate = &info->control.rates[0];
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt76_queue *q = dev->mt76.q_tx[qid];
+ struct mt7603_vif *mvif;
+ int wlan_idx;
+ int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
+ int tx_count = 8;
+ u8 frame_type, frame_subtype;
+ u16 fc = le16_to_cpu(hdr->frame_control);
+ u16 seqno = 0;
+ u8 vif_idx = 0;
+ u32 val;
+ u8 bw;
+
+ if (vif) {
+ mvif = (struct mt7603_vif *)vif->drv_priv;
+ vif_idx = mvif->idx;
+ if (vif_idx && qid >= MT_TXQ_BEACON)
+ vif_idx += 0x10;
+ }
+
+ if (sta) {
+ struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
+
+ tx_count = msta->rate_count;
+ }
+
+ if (wcid)
+ wlan_idx = wcid->idx;
+ else
+ wlan_idx = MT7603_WTBL_RESERVED;
+
+ frame_type = (fc & IEEE80211_FCTL_FTYPE) >> 2;
+ frame_subtype = (fc & IEEE80211_FCTL_STYPE) >> 4;
+
+ val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
+ FIELD_PREP(MT_TXD0_Q_IDX, q->hw_idx);
+ txwi[0] = cpu_to_le32(val);
+
+ val = MT_TXD1_LONG_FORMAT |
+ FIELD_PREP(MT_TXD1_OWN_MAC, vif_idx) |
+ FIELD_PREP(MT_TXD1_TID,
+ skb->priority & IEEE80211_QOS_CTL_TID_MASK) |
+ FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
+ FIELD_PREP(MT_TXD1_HDR_INFO, hdr_len / 2) |
+ FIELD_PREP(MT_TXD1_WLAN_IDX, wlan_idx) |
+ FIELD_PREP(MT_TXD1_PROTECTED, !!key);
+ txwi[1] = cpu_to_le32(val);
+
+ if (info->flags & IEEE80211_TX_CTL_NO_ACK)
+ txwi[1] |= cpu_to_le32(MT_TXD1_NO_ACK);
+
+ val = FIELD_PREP(MT_TXD2_FRAME_TYPE, frame_type) |
+ FIELD_PREP(MT_TXD2_SUB_TYPE, frame_subtype) |
+ FIELD_PREP(MT_TXD2_MULTICAST,
+ is_multicast_ether_addr(hdr->addr1));
+ txwi[2] = cpu_to_le32(val);
+
+ if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
+ txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);
+
+ txwi[4] = 0;
+
+ val = MT_TXD5_TX_STATUS_HOST | MT_TXD5_SW_POWER_MGMT |
+ FIELD_PREP(MT_TXD5_PID, pid);
+ txwi[5] = cpu_to_le32(val);
+
+ txwi[6] = 0;
+
+ if (rate->idx >= 0 && rate->count &&
+ !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
+ bool stbc = info->flags & IEEE80211_TX_CTL_STBC;
+ u16 rateval = mt7603_mac_tx_rate_val(dev, rate, stbc, &bw);
+
+ txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);
+
+ val = MT_TXD6_FIXED_BW |
+ FIELD_PREP(MT_TXD6_BW, bw) |
+ FIELD_PREP(MT_TXD6_TX_RATE, rateval);
+ txwi[6] |= cpu_to_le32(val);
+
+ if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+ txwi[6] |= cpu_to_le32(MT_TXD6_SGI);
+
+ if (!(rate->flags & IEEE80211_TX_RC_MCS))
+ txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);
+
+ tx_count = rate->count;
+ }
+
+ /* use maximum tx count for beacons and buffered multicast */
+ if (qid >= MT_TXQ_BEACON)
+ tx_count = 0x1f;
+
+ val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count) |
+ MT_TXD3_SN_VALID;
+
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ seqno = le16_to_cpu(hdr->seq_ctrl);
+ else if (ieee80211_is_back_req(hdr->frame_control))
+ seqno = le16_to_cpu(bar->start_seq_num);
+ else
+ val &= ~MT_TXD3_SN_VALID;
+
+ val |= FIELD_PREP(MT_TXD3_SEQ, seqno >> 4);
+
+ txwi[3] = cpu_to_le32(val);
+
+ if (key) {
+ u64 pn = atomic64_inc_return(&key->tx_pn);
+
+ txwi[3] |= cpu_to_le32(MT_TXD3_PN_VALID);
+ txwi[4] = cpu_to_le32(pn & GENMASK(31, 0));
+ txwi[5] |= cpu_to_le32(FIELD_PREP(MT_TXD5_PN_HIGH, pn >> 32));
+ }
+
+ txwi[7] = 0;
+
+ return 0;
+}
+
+int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info)
+{
+ struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+ struct mt7603_sta *msta = container_of(wcid, struct mt7603_sta, wcid);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
+ struct ieee80211_key_conf *key = info->control.hw_key;
+ int pid;
+
+ if (!wcid)
+ wcid = &dev->global_sta.wcid;
+
+ if (sta) {
+ msta = (struct mt7603_sta *)sta->drv_priv;
+
+ if ((info->flags & (IEEE80211_TX_CTL_NO_PS_BUFFER |
+ IEEE80211_TX_CTL_CLEAR_PS_FILT)) ||
+ (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
+ mt7603_wtbl_set_ps(dev, msta, false);
+
+ mt76_tx_check_agg_ssn(sta, tx_info->skb);
+ }
+
+ pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
+
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
+ spin_lock_bh(&dev->mt76.lock);
+ mt7603_wtbl_set_rates(dev, msta, &info->control.rates[0],
+ msta->rates);
+ msta->rate_probe = true;
+ spin_unlock_bh(&dev->mt76.lock);
+ }
+
+ mt7603_mac_write_txwi(dev, txwi_ptr, tx_info->skb, qid, wcid,
+ sta, pid, key);
+
+ return 0;
+}
+
+static bool
+mt7603_fill_txs(struct mt7603_dev *dev, struct mt7603_sta *sta,
+ struct ieee80211_tx_info *info, __le32 *txs_data)
+{
+ struct ieee80211_supported_band *sband;
+ struct mt7603_rate_set *rs;
+ int first_idx = 0, last_idx;
+ u32 rate_set_tsf;
+ u32 final_rate;
+ u32 final_rate_flags;
+ bool rs_idx;
+ bool ack_timeout;
+ bool fixed_rate;
+ bool probe;
+ bool ampdu;
+ bool cck = false;
+ int count;
+ u32 txs;
+ int idx;
+ int i;
+
+ fixed_rate = info->status.rates[0].count;
+ probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
+
+ txs = le32_to_cpu(txs_data[4]);
+ ampdu = !fixed_rate && (txs & MT_TXS4_AMPDU);
+ count = FIELD_GET(MT_TXS4_TX_COUNT, txs);
+ last_idx = FIELD_GET(MT_TXS4_LAST_TX_RATE, txs);
+
+ txs = le32_to_cpu(txs_data[0]);
+ final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
+ ack_timeout = txs & MT_TXS0_ACK_TIMEOUT;
+
+ if (!ampdu && (txs & MT_TXS0_RTS_TIMEOUT))
+ return false;
+
+ if (txs & MT_TXS0_QUEUE_TIMEOUT)
+ return false;
+
+ if (!ack_timeout)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ info->status.ampdu_len = 1;
+ info->status.ampdu_ack_len = !!(info->flags &
+ IEEE80211_TX_STAT_ACK);
+
+ if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU))
+ info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU;
+
+ first_idx = max_t(int, 0, last_idx - (count - 1) / MT7603_RATE_RETRY);
+
+ if (fixed_rate && !probe) {
+ info->status.rates[0].count = count;
+ i = 0;
+ goto out;
+ }
+
+ rate_set_tsf = READ_ONCE(sta->rate_set_tsf);
+ rs_idx = !((u32)(FIELD_GET(MT_TXS1_F0_TIMESTAMP, le32_to_cpu(txs_data[1])) -
+ rate_set_tsf) < 1000000);
+ rs_idx ^= rate_set_tsf & BIT(0);
+ rs = &sta->rateset[rs_idx];
+
+ if (!first_idx && rs->probe_rate.idx >= 0) {
+ info->status.rates[0] = rs->probe_rate;
+
+ spin_lock_bh(&dev->mt76.lock);
+ if (sta->rate_probe) {
+ mt7603_wtbl_set_rates(dev, sta, NULL,
+ sta->rates);
+ sta->rate_probe = false;
+ }
+ spin_unlock_bh(&dev->mt76.lock);
+ } else {
+ info->status.rates[0] = rs->rates[first_idx / 2];
+ }
+ info->status.rates[0].count = 0;
+
+ for (i = 0, idx = first_idx; count && idx <= last_idx; idx++) {
+ struct ieee80211_tx_rate *cur_rate;
+ int cur_count;
+
+ cur_rate = &rs->rates[idx / 2];
+ cur_count = min_t(int, MT7603_RATE_RETRY, count);
+ count -= cur_count;
+
+ if (idx && (cur_rate->idx != info->status.rates[i].idx ||
+ cur_rate->flags != info->status.rates[i].flags)) {
+ i++;
+ if (i == ARRAY_SIZE(info->status.rates)) {
+ i--;
+ break;
+ }
+
+ info->status.rates[i] = *cur_rate;
+ info->status.rates[i].count = 0;
+ }
+
+ info->status.rates[i].count += cur_count;
+ }
+
+out:
+ final_rate_flags = info->status.rates[i].flags;
+
+ switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) {
+ case MT_PHY_TYPE_CCK:
+ cck = true;
+ fallthrough;
+ case MT_PHY_TYPE_OFDM:
+ if (dev->mphy.chandef.chan->band == NL80211_BAND_5GHZ)
+ sband = &dev->mphy.sband_5g.sband;
+ else
+ sband = &dev->mphy.sband_2g.sband;
+ final_rate &= GENMASK(5, 0);
+ final_rate = mt76_get_rate(&dev->mt76, sband, final_rate,
+ cck);
+ final_rate_flags = 0;
+ break;
+ case MT_PHY_TYPE_HT_GF:
+ case MT_PHY_TYPE_HT:
+ final_rate_flags |= IEEE80211_TX_RC_MCS;
+ final_rate &= GENMASK(5, 0);
+ if (final_rate > 15)
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ info->status.rates[i].idx = final_rate;
+ info->status.rates[i].flags = final_rate_flags;
+
+ return true;
+}
+
+static bool
+mt7603_mac_add_txs_skb(struct mt7603_dev *dev, struct mt7603_sta *sta, int pid,
+ __le32 *txs_data)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ struct sk_buff_head list;
+ struct sk_buff *skb;
+
+ if (pid < MT_PACKET_ID_FIRST)
+ return false;
+
+ trace_mac_txdone(mdev, sta->wcid.idx, pid);
+
+ mt76_tx_status_lock(mdev, &list);
+ skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list);
+ if (skb) {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ if (!mt7603_fill_txs(dev, sta, info, txs_data)) {
+ ieee80211_tx_info_clear_status(info);
+ info->status.rates[0].idx = -1;
+ }
+
+ mt76_tx_status_skb_done(mdev, skb, &list);
+ }
+ mt76_tx_status_unlock(mdev, &list);
+
+ return !!skb;
+}
+
+void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data)
+{
+ struct ieee80211_tx_info info = {};
+ struct ieee80211_sta *sta = NULL;
+ struct mt7603_sta *msta = NULL;
+ struct mt76_wcid *wcid;
+ __le32 *txs_data = data;
+ u32 txs;
+ u8 wcidx;
+ u8 pid;
+
+ txs = le32_to_cpu(txs_data[4]);
+ pid = FIELD_GET(MT_TXS4_PID, txs);
+ txs = le32_to_cpu(txs_data[3]);
+ wcidx = FIELD_GET(MT_TXS3_WCID, txs);
+
+ if (pid == MT_PACKET_ID_NO_ACK)
+ return;
+
+ if (wcidx >= MT7603_WTBL_SIZE)
+ return;
+
+ rcu_read_lock();
+
+ wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
+ if (!wcid)
+ goto out;
+
+ msta = container_of(wcid, struct mt7603_sta, wcid);
+ sta = wcid_to_sta(wcid);
+
+ if (list_empty(&msta->poll_list)) {
+ spin_lock_bh(&dev->sta_poll_lock);
+ list_add_tail(&msta->poll_list, &dev->sta_poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+ }
+
+ if (mt7603_mac_add_txs_skb(dev, msta, pid, txs_data))
+ goto out;
+
+ if (wcidx >= MT7603_WTBL_STA || !sta)
+ goto out;
+
+ if (mt7603_fill_txs(dev, msta, &info, txs_data))
+ ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);
+
+out:
+ rcu_read_unlock();
+}
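mt7603_mac_add_txs() recovers the owning station from MT_TXS3_WCID and then asks mt76_tx_status_skb_get() to match the report back to a queued frame by its packet ID. Below is a toy standalone model of that ID-based matching; PID_FIRST, PID_MASK, the slot table and the function names are all invented for this demo, the real bookkeeping lives in mt76's tx status code.

#include <stdint.h>
#include <stdio.h>

/* Toy model: every queued frame gets a small rolling PID, and the hardware
 * echoes that PID back in the status event so the original frame can be
 * found again.
 */
#define PID_FIRST	1	/* values below this mean "no status tracked" */
#define PID_MASK	0x7f

struct pending {
	int in_use;
	char desc[16];
};

static struct pending slots[PID_MASK + 1];
static uint8_t next_pid = PID_FIRST;

static uint8_t tx_enqueue(const char *desc)
{
	uint8_t pid = next_pid;

	next_pid = (next_pid + 1) & PID_MASK;
	if (next_pid < PID_FIRST)
		next_pid = PID_FIRST;

	slots[pid].in_use = 1;
	snprintf(slots[pid].desc, sizeof(slots[pid].desc), "%s", desc);
	return pid;
}

static void txs_event(uint8_t pid)
{
	if (pid < PID_FIRST || !slots[pid].in_use)
		return;		/* stale or untracked status, ignore it */

	printf("completed: %s (pid %u)\n", slots[pid].desc, pid);
	slots[pid].in_use = 0;
}

int main(void)
{
	uint8_t a = tx_enqueue("frame-a");
	uint8_t b = tx_enqueue("frame-b");

	txs_event(b);
	txs_event(a);
	return 0;
}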
+
+void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
+{
+ struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+ struct sk_buff *skb = e->skb;
+
+ if (!e->txwi) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ dev->tx_hang_check = 0;
+ mt76_tx_complete_skb(mdev, e->wcid, skb);
+}
+
+static bool
+wait_for_wpdma(struct mt7603_dev *dev)
+{
+ return mt76_poll(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
+ 0, 1000);
+}
+
+static void mt7603_pse_reset(struct mt7603_dev *dev)
+{
+ /* Clear previous reset result */
+ if (!dev->reset_cause[RESET_CAUSE_RESET_FAILED])
+ mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE_S);
+
+ /* Reset PSE */
+ mt76_set(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE);
+
+ if (!mt76_poll_msec(dev, MT_MCU_DEBUG_RESET,
+ MT_MCU_DEBUG_RESET_PSE_S,
+ MT_MCU_DEBUG_RESET_PSE_S, 500)) {
+ dev->reset_cause[RESET_CAUSE_RESET_FAILED]++;
+ mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE);
+ } else {
+ dev->reset_cause[RESET_CAUSE_RESET_FAILED] = 0;
+ mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_QUEUES);
+ }
+
+ if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] >= 3)
+ dev->reset_cause[RESET_CAUSE_RESET_FAILED] = 0;
+}
+
+void mt7603_mac_dma_start(struct mt7603_dev *dev)
+{
+ mt7603_mac_start(dev);
+
+ wait_for_wpdma(dev);
+ usleep_range(50, 100);
+
+ mt76_set(dev, MT_WPDMA_GLO_CFG,
+ (MT_WPDMA_GLO_CFG_TX_DMA_EN |
+ MT_WPDMA_GLO_CFG_RX_DMA_EN |
+ FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3) |
+ MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE));
+
+ mt7603_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL);
+}
+
+void mt7603_mac_start(struct mt7603_dev *dev)
+{
+ mt76_clear(dev, MT_ARB_SCR,
+ MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
+ mt76_wr(dev, MT_WF_ARB_TX_START_0, ~0);
+ mt76_set(dev, MT_WF_ARB_RQCR, MT_WF_ARB_RQCR_RX_START);
+}
+
+void mt7603_mac_stop(struct mt7603_dev *dev)
+{
+ mt76_set(dev, MT_ARB_SCR,
+ MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
+ mt76_wr(dev, MT_WF_ARB_TX_START_0, 0);
+ mt76_clear(dev, MT_WF_ARB_RQCR, MT_WF_ARB_RQCR_RX_START);
+}
+
+void mt7603_pse_client_reset(struct mt7603_dev *dev)
+{
+ u32 addr;
+
+ addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR +
+ MT_CLIENT_RESET_TX);
+
+ /* Clear previous reset state */
+ mt76_clear(dev, addr,
+ MT_CLIENT_RESET_TX_R_E_1 |
+ MT_CLIENT_RESET_TX_R_E_2 |
+ MT_CLIENT_RESET_TX_R_E_1_S |
+ MT_CLIENT_RESET_TX_R_E_2_S);
+
+ /* Start PSE client TX abort */
+ mt76_set(dev, addr, MT_CLIENT_RESET_TX_R_E_1);
+ mt76_poll_msec(dev, addr, MT_CLIENT_RESET_TX_R_E_1_S,
+ MT_CLIENT_RESET_TX_R_E_1_S, 500);
+
+ mt76_set(dev, addr, MT_CLIENT_RESET_TX_R_E_2);
+ mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_SW_RESET);
+
+ /* Wait for PSE client to clear TX FIFO */
+ mt76_poll_msec(dev, addr, MT_CLIENT_RESET_TX_R_E_2_S,
+ MT_CLIENT_RESET_TX_R_E_2_S, 500);
+
+ /* Clear PSE client TX abort state */
+ mt76_clear(dev, addr,
+ MT_CLIENT_RESET_TX_R_E_1 |
+ MT_CLIENT_RESET_TX_R_E_2);
+}
+
+static void mt7603_dma_sched_reset(struct mt7603_dev *dev)
+{
+ if (!is_mt7628(dev))
+ return;
+
+ mt76_set(dev, MT_SCH_4, MT_SCH_4_RESET);
+ mt76_clear(dev, MT_SCH_4, MT_SCH_4_RESET);
+}
+
+static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
+{
+ int beacon_int = dev->mt76.beacon_int;
+ u32 mask = dev->mt76.mmio.irqmask;
+ int i;
+
+ ieee80211_stop_queues(dev->mt76.hw);
+ set_bit(MT76_RESET, &dev->mphy.state);
+
+ /* lock/unlock all queues to ensure that no tx is pending */
+ mt76_txq_schedule_all(&dev->mphy);
+
+ mt76_worker_disable(&dev->mt76.tx_worker);
+ tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
+ napi_disable(&dev->mt76.napi[0]);
+ napi_disable(&dev->mt76.napi[1]);
+ napi_disable(&dev->mt76.tx_napi);
+
+ mutex_lock(&dev->mt76.mutex);
+
+ mt7603_beacon_set_timer(dev, -1, 0);
+
+ if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] ||
+ dev->cur_reset_cause == RESET_CAUSE_RX_PSE_BUSY ||
+ dev->cur_reset_cause == RESET_CAUSE_BEACON_STUCK ||
+ dev->cur_reset_cause == RESET_CAUSE_TX_HANG)
+ mt7603_pse_reset(dev);
+
+ if (dev->reset_cause[RESET_CAUSE_RESET_FAILED])
+ goto skip_dma_reset;
+
+ mt7603_mac_stop(dev);
+
+ mt76_clear(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
+ MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
+ usleep_range(1000, 2000);
+
+ mt7603_irq_disable(dev, mask);
+
+ mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_FORCE_TX_EOF);
+
+ mt7603_pse_client_reset(dev);
+
+ for (i = 0; i < __MT_TXQ_MAX; i++)
+ mt76_queue_tx_cleanup(dev, i, true);
+
+ mt76_for_each_q_rx(&dev->mt76, i) {
+ mt76_queue_rx_reset(dev, i);
+ }
+
+ mt7603_dma_sched_reset(dev);
+
+ mt7603_mac_dma_start(dev);
+
+ mt7603_irq_enable(dev, mask);
+
+skip_dma_reset:
+ clear_bit(MT76_RESET, &dev->mphy.state);
+ mutex_unlock(&dev->mt76.mutex);
+
+ mt76_worker_enable(&dev->mt76.tx_worker);
+ napi_enable(&dev->mt76.tx_napi);
+ napi_schedule(&dev->mt76.tx_napi);
+
+ tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
+ mt7603_beacon_set_timer(dev, -1, beacon_int);
+
+ napi_enable(&dev->mt76.napi[0]);
+ napi_schedule(&dev->mt76.napi[0]);
+
+ napi_enable(&dev->mt76.napi[1]);
+ napi_schedule(&dev->mt76.napi[1]);
+
+ ieee80211_wake_queues(dev->mt76.hw);
+ mt76_txq_schedule_all(&dev->mphy);
+}
+
+static u32 mt7603_dma_debug(struct mt7603_dev *dev, u8 index)
+{
+ u32 val;
+
+ mt76_wr(dev, MT_WPDMA_DEBUG,
+ FIELD_PREP(MT_WPDMA_DEBUG_IDX, index) |
+ MT_WPDMA_DEBUG_SEL);
+
+ val = mt76_rr(dev, MT_WPDMA_DEBUG);
+ return FIELD_GET(MT_WPDMA_DEBUG_VALUE, val);
+}
+
+static bool mt7603_rx_fifo_busy(struct mt7603_dev *dev)
+{
+ if (is_mt7628(dev))
+ return mt7603_dma_debug(dev, 9) & BIT(9);
+
+ return mt7603_dma_debug(dev, 2) & BIT(8);
+}
+
+static bool mt7603_rx_dma_busy(struct mt7603_dev *dev)
+{
+ if (!(mt76_rr(dev, MT_WPDMA_GLO_CFG) & MT_WPDMA_GLO_CFG_RX_DMA_BUSY))
+ return false;
+
+ return mt7603_rx_fifo_busy(dev);
+}
+
+static bool mt7603_tx_dma_busy(struct mt7603_dev *dev)
+{
+ u32 val;
+
+ if (!(mt76_rr(dev, MT_WPDMA_GLO_CFG) & MT_WPDMA_GLO_CFG_TX_DMA_BUSY))
+ return false;
+
+ val = mt7603_dma_debug(dev, 9);
+ return (val & BIT(8)) && (val & 0xf) != 0xf;
+}
+
+static bool mt7603_tx_hang(struct mt7603_dev *dev)
+{
+ struct mt76_queue *q;
+ u32 dma_idx, prev_dma_idx;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ q = dev->mt76.q_tx[i];
+
+ if (!q->queued)
+ continue;
+
+ prev_dma_idx = dev->tx_dma_idx[i];
+ dma_idx = readl(&q->regs->dma_idx);
+ dev->tx_dma_idx[i] = dma_idx;
+
+ if (dma_idx == prev_dma_idx &&
+ dma_idx != readl(&q->regs->cpu_idx))
+ break;
+ }
+
+ return i < 4;
+}
+
+static bool mt7603_rx_pse_busy(struct mt7603_dev *dev)
+{
+ u32 addr, val;
+
+ if (mt7603_rx_fifo_busy(dev))
+ goto out;
+
+ addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR + MT_CLIENT_STATUS);
+ mt76_wr(dev, addr, 3);
+ val = mt76_rr(dev, addr) >> 16;
+
+ if (!(val & BIT(0)))
+ return false;
+
+ if (is_mt7628(dev))
+ val &= 0xa000;
+ else
+ val &= 0x8000;
+ if (!val)
+ return false;
+
+out:
+ if (mt76_rr(dev, MT_INT_SOURCE_CSR) &
+ (MT_INT_RX_DONE(0) | MT_INT_RX_DONE(1)))
+ return false;
+
+ return true;
+}
+
+static bool
+mt7603_watchdog_check(struct mt7603_dev *dev, u8 *counter,
+ enum mt7603_reset_cause cause,
+ bool (*check)(struct mt7603_dev *dev))
+{
+ if (dev->reset_test == cause + 1) {
+ dev->reset_test = 0;
+ goto trigger;
+ }
+
+ if (check) {
+ if (!check(dev) && *counter < MT7603_WATCHDOG_TIMEOUT) {
+ *counter = 0;
+ return false;
+ }
+
+ (*counter)++;
+ }
+
+ if (*counter < MT7603_WATCHDOG_TIMEOUT)
+ return false;
+trigger:
+ dev->cur_reset_cause = cause;
+ dev->reset_cause[cause]++;
+ return true;
+}
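mt7603_watchdog_check() only fires once a busy condition has persisted across several polling rounds, so transient load never triggers a reset. A simplified standalone sketch of that counter pattern; the threshold value and helper name here are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define WATCHDOG_TIMEOUT 10	/* illustrative threshold, in polling ticks */

/* Simplified form of the pattern in mt7603_watchdog_check(): the counter
 * only grows while the busy condition keeps holding and is cleared as soon
 * as the hardware looks idle, so only a sustained hang requests a reset.
 */
static bool watchdog_check(uint8_t *counter, bool busy)
{
	if (!busy) {
		*counter = 0;
		return false;
	}

	return ++(*counter) >= WATCHDOG_TIMEOUT;
}

int main(void)
{
	uint8_t rx_busy_count = 0;
	int tick;

	for (tick = 0; tick < 15; tick++)
		if (watchdog_check(&rx_busy_count, tick > 3))
			printf("tick %d: reset requested\n", tick);
	return 0;
}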
+
+void mt7603_update_channel(struct mt76_dev *mdev)
+{
+ struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+ struct mt76_channel_state *state;
+
+ state = mdev->phy.chan_state;
+ state->cc_busy += mt76_rr(dev, MT_MIB_STAT_CCA);
+}
+
+void
+mt7603_edcca_set_strict(struct mt7603_dev *dev, bool val)
+{
+ u32 rxtd_6 = 0xd7c80000;
+
+ if (val == dev->ed_strict_mode)
+ return;
+
+ dev->ed_strict_mode = val;
+
+ /* Ensure that ED/CCA does not trigger if disabled */
+ if (!dev->ed_monitor)
+ rxtd_6 |= FIELD_PREP(MT_RXTD_6_CCAED_TH, 0x34);
+ else
+ rxtd_6 |= FIELD_PREP(MT_RXTD_6_CCAED_TH, 0x7d);
+
+ if (dev->ed_monitor && !dev->ed_strict_mode)
+ rxtd_6 |= FIELD_PREP(MT_RXTD_6_ACI_TH, 0x0f);
+ else
+ rxtd_6 |= FIELD_PREP(MT_RXTD_6_ACI_TH, 0x10);
+
+ mt76_wr(dev, MT_RXTD(6), rxtd_6);
+
+ mt76_rmw_field(dev, MT_RXTD(13), MT_RXTD_13_ACI_TH_EN,
+ dev->ed_monitor && !dev->ed_strict_mode);
+}
+
+static void
+mt7603_edcca_check(struct mt7603_dev *dev)
+{
+ u32 val = mt76_rr(dev, MT_AGC(41));
+ ktime_t cur_time;
+ int rssi0, rssi1;
+ u32 active;
+ u32 ed_busy;
+
+ if (!dev->ed_monitor)
+ return;
+
+ rssi0 = FIELD_GET(MT_AGC_41_RSSI_0, val);
+ if (rssi0 > 128)
+ rssi0 -= 256;
+
+ rssi1 = FIELD_GET(MT_AGC_41_RSSI_1, val);
+ if (rssi1 > 128)
+ rssi1 -= 256;
+
+ if (max(rssi0, rssi1) >= -40 &&
+ dev->ed_strong_signal < MT7603_EDCCA_BLOCK_TH)
+ dev->ed_strong_signal++;
+ else if (dev->ed_strong_signal > 0)
+ dev->ed_strong_signal--;
+
+ cur_time = ktime_get_boottime();
+ ed_busy = mt76_rr(dev, MT_MIB_STAT_ED) & MT_MIB_STAT_ED_MASK;
+
+ active = ktime_to_us(ktime_sub(cur_time, dev->ed_time));
+ dev->ed_time = cur_time;
+
+ if (!active)
+ return;
+
+ if (100 * ed_busy / active > 90) {
+ if (dev->ed_trigger < 0)
+ dev->ed_trigger = 0;
+ dev->ed_trigger++;
+ } else {
+ if (dev->ed_trigger > 0)
+ dev->ed_trigger = 0;
+ dev->ed_trigger--;
+ }
+
+ if (dev->ed_trigger > MT7603_EDCCA_BLOCK_TH ||
+ dev->ed_strong_signal < MT7603_EDCCA_BLOCK_TH / 2) {
+ mt7603_edcca_set_strict(dev, true);
+ } else if (dev->ed_trigger < -MT7603_EDCCA_BLOCK_TH) {
+ mt7603_edcca_set_strict(dev, false);
+ }
+
+ if (dev->ed_trigger > MT7603_EDCCA_BLOCK_TH)
+ dev->ed_trigger = MT7603_EDCCA_BLOCK_TH;
+ else if (dev->ed_trigger < -MT7603_EDCCA_BLOCK_TH)
+ dev->ed_trigger = -MT7603_EDCCA_BLOCK_TH;
+}
+
+void mt7603_cca_stats_reset(struct mt7603_dev *dev)
+{
+ mt76_set(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_RESET);
+ mt76_clear(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_RESET);
+ mt76_set(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_EN);
+}
+
+static void
+mt7603_adjust_sensitivity(struct mt7603_dev *dev)
+{
+ u32 agc0 = dev->agc0, agc3 = dev->agc3;
+ u32 adj;
+
+ if (!dev->sensitivity || dev->sensitivity < -100) {
+ dev->sensitivity = 0;
+ } else if (dev->sensitivity <= -84) {
+ adj = 7 + (dev->sensitivity + 92) / 2;
+
+ agc0 = 0x56f0076f;
+ agc0 |= adj << 12;
+ agc0 |= adj << 16;
+ agc3 = 0x81d0d5e3;
+ } else if (dev->sensitivity <= -72) {
+ adj = 7 + (dev->sensitivity + 80) / 2;
+
+ agc0 = 0x6af0006f;
+ agc0 |= adj << 8;
+ agc0 |= adj << 12;
+ agc0 |= adj << 16;
+
+ agc3 = 0x8181d5e3;
+ } else {
+ if (dev->sensitivity > -54)
+ dev->sensitivity = -54;
+
+ adj = 7 + (dev->sensitivity + 80) / 2;
+
+ agc0 = 0x7ff0000f;
+ agc0 |= adj << 4;
+ agc0 |= adj << 8;
+ agc0 |= adj << 12;
+ agc0 |= adj << 16;
+
+ agc3 = 0x818181e3;
+ }
+
+ mt76_wr(dev, MT_AGC(0), agc0);
+ mt76_wr(dev, MT_AGC1(0), agc0);
+
+ mt76_wr(dev, MT_AGC(3), agc3);
+ mt76_wr(dev, MT_AGC1(3), agc3);
+}
+
+static void
+mt7603_false_cca_check(struct mt7603_dev *dev)
+{
+ int pd_cck, pd_ofdm, mdrdy_cck, mdrdy_ofdm;
+ int false_cca;
+ int min_signal;
+ u32 val;
+
+ if (!dev->dynamic_sensitivity)
+ return;
+
+ val = mt76_rr(dev, MT_PHYCTRL_STAT_PD);
+ pd_cck = FIELD_GET(MT_PHYCTRL_STAT_PD_CCK, val);
+ pd_ofdm = FIELD_GET(MT_PHYCTRL_STAT_PD_OFDM, val);
+
+ val = mt76_rr(dev, MT_PHYCTRL_STAT_MDRDY);
+ mdrdy_cck = FIELD_GET(MT_PHYCTRL_STAT_MDRDY_CCK, val);
+ mdrdy_ofdm = FIELD_GET(MT_PHYCTRL_STAT_MDRDY_OFDM, val);
+
+ dev->false_cca_ofdm = pd_ofdm - mdrdy_ofdm;
+ dev->false_cca_cck = pd_cck - mdrdy_cck;
+
+ mt7603_cca_stats_reset(dev);
+
+ min_signal = mt76_get_min_avg_rssi(&dev->mt76, false);
+ if (!min_signal) {
+ dev->sensitivity = 0;
+ dev->last_cca_adj = jiffies;
+ goto out;
+ }
+
+ min_signal -= 15;
+
+ false_cca = dev->false_cca_ofdm + dev->false_cca_cck;
+ if (false_cca > 600 &&
+ dev->sensitivity < -100 + dev->sensitivity_limit) {
+ if (!dev->sensitivity)
+ dev->sensitivity = -92;
+ else
+ dev->sensitivity += 2;
+ dev->last_cca_adj = jiffies;
+ } else if (false_cca < 100 ||
+ time_after(jiffies, dev->last_cca_adj + 10 * HZ)) {
+ dev->last_cca_adj = jiffies;
+ if (!dev->sensitivity)
+ goto out;
+
+ dev->sensitivity -= 2;
+ }
+
+ if (dev->sensitivity && dev->sensitivity > min_signal) {
+ dev->sensitivity = min_signal;
+ dev->last_cca_adj = jiffies;
+ }
+
+out:
+ mt7603_adjust_sensitivity(dev);
+}
+
+void mt7603_mac_work(struct work_struct *work)
+{
+ struct mt7603_dev *dev = container_of(work, struct mt7603_dev,
+ mt76.mac_work.work);
+ bool reset = false;
+ int i, idx;
+
+ mt76_tx_status_check(&dev->mt76, NULL, false);
+
+ mutex_lock(&dev->mt76.mutex);
+
+ dev->mac_work_count++;
+ mt76_update_survey(&dev->mt76);
+ mt7603_edcca_check(dev);
+
+ for (i = 0, idx = 0; i < 2; i++) {
+ u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));
+
+ dev->mt76.aggr_stats[idx++] += val & 0xffff;
+ dev->mt76.aggr_stats[idx++] += val >> 16;
+ }
+
+ if (dev->mac_work_count == 10)
+ mt7603_false_cca_check(dev);
+
+ if (mt7603_watchdog_check(dev, &dev->rx_pse_check,
+ RESET_CAUSE_RX_PSE_BUSY,
+ mt7603_rx_pse_busy) ||
+ mt7603_watchdog_check(dev, &dev->beacon_check,
+ RESET_CAUSE_BEACON_STUCK,
+ NULL) ||
+ mt7603_watchdog_check(dev, &dev->tx_hang_check,
+ RESET_CAUSE_TX_HANG,
+ mt7603_tx_hang) ||
+ mt7603_watchdog_check(dev, &dev->tx_dma_check,
+ RESET_CAUSE_TX_BUSY,
+ mt7603_tx_dma_busy) ||
+ mt7603_watchdog_check(dev, &dev->rx_dma_check,
+ RESET_CAUSE_RX_BUSY,
+ mt7603_rx_dma_busy) ||
+ mt7603_watchdog_check(dev, &dev->mcu_hang,
+ RESET_CAUSE_MCU_HANG,
+ NULL) ||
+ dev->reset_cause[RESET_CAUSE_RESET_FAILED]) {
+ dev->beacon_check = 0;
+ dev->tx_dma_check = 0;
+ dev->tx_hang_check = 0;
+ dev->rx_dma_check = 0;
+ dev->rx_pse_check = 0;
+ dev->mcu_hang = 0;
+ dev->rx_dma_idx = ~0;
+ memset(dev->tx_dma_idx, 0xff, sizeof(dev->tx_dma_idx));
+ reset = true;
+ dev->mac_work_count = 0;
+ }
+
+ if (dev->mac_work_count >= 10)
+ dev->mac_work_count = 0;
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ if (reset)
+ mt7603_mac_watchdog_reset(dev);
+
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
+ msecs_to_jiffies(MT7603_WATCHDOG_TIME));
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.h b/drivers/net/wireless/mediatek/mt76/mt7603/mac.h
new file mode 100644
index 000000000..17e34ecf2
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.h
@@ -0,0 +1,242 @@
+/* SPDX-License-Identifier: ISC */
+
+#ifndef __MT7603_MAC_H
+#define __MT7603_MAC_H
+
+#define MT_RXD0_LENGTH GENMASK(15, 0)
+#define MT_RXD0_PKT_TYPE GENMASK(31, 29)
+
+#define MT_RXD0_NORMAL_ETH_TYPE_OFS GENMASK(22, 16)
+#define MT_RXD0_NORMAL_IP_SUM BIT(23)
+#define MT_RXD0_NORMAL_UDP_TCP_SUM BIT(24)
+#define MT_RXD0_NORMAL_GROUP_1 BIT(25)
+#define MT_RXD0_NORMAL_GROUP_2 BIT(26)
+#define MT_RXD0_NORMAL_GROUP_3 BIT(27)
+#define MT_RXD0_NORMAL_GROUP_4 BIT(28)
+
+enum rx_pkt_type {
+ PKT_TYPE_TXS = 0,
+ PKT_TYPE_TXRXV = 1,
+ PKT_TYPE_NORMAL = 2,
+ PKT_TYPE_RX_DUP_RFB = 3,
+ PKT_TYPE_RX_TMR = 4,
+ PKT_TYPE_RETRIEVE = 5,
+ PKT_TYPE_RX_EVENT = 7,
+};
+
+#define MT_RXD1_NORMAL_BSSID GENMASK(31, 26)
+#define MT_RXD1_NORMAL_PAYLOAD_FORMAT GENMASK(25, 24)
+#define MT_RXD1_NORMAL_HDR_TRANS BIT(23)
+#define MT_RXD1_NORMAL_HDR_OFFSET BIT(22)
+#define MT_RXD1_NORMAL_MAC_HDR_LEN GENMASK(21, 16)
+#define MT_RXD1_NORMAL_CH_FREQ GENMASK(15, 8)
+#define MT_RXD1_NORMAL_KEY_ID GENMASK(7, 6)
+#define MT_RXD1_NORMAL_BEACON_UC BIT(5)
+#define MT_RXD1_NORMAL_BEACON_MC BIT(4)
+#define MT_RXD1_NORMAL_BCAST BIT(3)
+#define MT_RXD1_NORMAL_MCAST BIT(2)
+#define MT_RXD1_NORMAL_U2M BIT(1)
+#define MT_RXD1_NORMAL_HTC_VLD BIT(0)
+
+#define MT_RXD2_NORMAL_NON_AMPDU BIT(31)
+#define MT_RXD2_NORMAL_NON_AMPDU_SUB BIT(30)
+#define MT_RXD2_NORMAL_NDATA BIT(29)
+#define MT_RXD2_NORMAL_NULL_FRAME BIT(28)
+#define MT_RXD2_NORMAL_FRAG BIT(27)
+#define MT_RXD2_NORMAL_UDF_VALID BIT(26)
+#define MT_RXD2_NORMAL_LLC_MIS BIT(25)
+#define MT_RXD2_NORMAL_MAX_LEN_ERROR BIT(24)
+#define MT_RXD2_NORMAL_AMSDU_ERR BIT(23)
+#define MT_RXD2_NORMAL_LEN_MISMATCH BIT(22)
+#define MT_RXD2_NORMAL_TKIP_MIC_ERR BIT(21)
+#define MT_RXD2_NORMAL_ICV_ERR BIT(20)
+#define MT_RXD2_NORMAL_CLM BIT(19)
+#define MT_RXD2_NORMAL_CM BIT(18)
+#define MT_RXD2_NORMAL_FCS_ERR BIT(17)
+#define MT_RXD2_NORMAL_SW_BIT BIT(16)
+#define MT_RXD2_NORMAL_SEC_MODE GENMASK(15, 12)
+#define MT_RXD2_NORMAL_TID GENMASK(11, 8)
+#define MT_RXD2_NORMAL_WLAN_IDX GENMASK(7, 0)
+
+#define MT_RXD3_NORMAL_PF_STS GENMASK(31, 30)
+#define MT_RXD3_NORMAL_PF_MODE BIT(29)
+#define MT_RXD3_NORMAL_CLS_BITMAP GENMASK(28, 19)
+#define MT_RXD3_NORMAL_WOL GENMASK(18, 14)
+#define MT_RXD3_NORMAL_MAGIC_PKT BIT(13)
+#define MT_RXD3_NORMAL_OFLD GENMASK(12, 11)
+#define MT_RXD3_NORMAL_CLS BIT(10)
+#define MT_RXD3_NORMAL_PATTERN_DROP BIT(9)
+#define MT_RXD3_NORMAL_TSF_COMPARE_LOSS BIT(8)
+#define MT_RXD3_NORMAL_RXV_SEQ GENMASK(7, 0)
+
+#define MT_RXV1_VHTA1_B5_B4 GENMASK(31, 30)
+#define MT_RXV1_VHTA2_B8_B1 GENMASK(29, 22)
+#define MT_RXV1_HT_NO_SOUND BIT(21)
+#define MT_RXV1_HT_SMOOTH BIT(20)
+#define MT_RXV1_HT_SHORT_GI BIT(19)
+#define MT_RXV1_HT_AGGR BIT(18)
+#define MT_RXV1_VHTA1_B22 BIT(17)
+#define MT_RXV1_FRAME_MODE GENMASK(16, 15)
+#define MT_RXV1_TX_MODE GENMASK(14, 12)
+#define MT_RXV1_HT_EXT_LTF GENMASK(11, 10)
+#define MT_RXV1_HT_AD_CODE BIT(9)
+#define MT_RXV1_HT_STBC GENMASK(8, 7)
+#define MT_RXV1_TX_RATE GENMASK(6, 0)
+
+#define MT_RXV2_VHTA1_B16_B6 GENMASK(31, 21)
+#define MT_RXV2_LENGTH GENMASK(20, 0)
+
+#define MT_RXV3_F_AGC1_CAL_GAIN GENMASK(31, 29)
+#define MT_RXV3_F_AGC1_EQ_CAL BIT(28)
+#define MT_RXV3_RCPI1 GENMASK(27, 20)
+#define MT_RXV3_F_AGC0_CAL_GAIN GENMASK(19, 17)
+#define MT_RXV3_F_AGC0_EQ_CAL BIT(16)
+#define MT_RXV3_RCPI0 GENMASK(15, 8)
+#define MT_RXV3_SEL_ANT BIT(7)
+#define MT_RXV3_ACI_DET_X BIT(6)
+#define MT_RXV3_OFDM_FREQ_TRANS_DETECT BIT(5)
+#define MT_RXV3_VHTA1_B21_B17 GENMASK(4, 0)
+
+#define MT_RXV4_F_AGC_CAL_GAIN GENMASK(31, 29)
+#define MT_RXV4_F_AGC2_EQ_CAL BIT(28)
+#define MT_RXV4_IB_RSSI1 GENMASK(27, 20)
+#define MT_RXV4_F_AGC_LPF_GAIN_X GENMASK(19, 16)
+#define MT_RXV4_WB_RSSI_X GENMASK(15, 8)
+#define MT_RXV4_IB_RSSI0 GENMASK(7, 0)
+
+#define MT_RXV5_LTF_SNR0 GENMASK(31, 26)
+#define MT_RXV5_LTF_PROC_TIME GENMASK(25, 19)
+#define MT_RXV5_FOE GENMASK(18, 7)
+#define MT_RXV5_C_AGC_SATE GENMASK(6, 4)
+#define MT_RXV5_F_AGC_LNA_GAIN_0 GENMASK(3, 2)
+#define MT_RXV5_F_AGC_LNA_GAIN_1 GENMASK(1, 0)
+
+#define MT_RXV6_C_AGC_STATE GENMASK(30, 28)
+#define MT_RXV6_NS_TS_FIELD GENMASK(27, 25)
+#define MT_RXV6_RX_VALID BIT(24)
+#define MT_RXV6_NF2 GENMASK(23, 16)
+#define MT_RXV6_NF1 GENMASK(15, 8)
+#define MT_RXV6_NF0 GENMASK(7, 0)
+
+enum mt7603_tx_header_format {
+ MT_HDR_FORMAT_802_3,
+ MT_HDR_FORMAT_CMD,
+ MT_HDR_FORMAT_802_11,
+ MT_HDR_FORMAT_802_11_EXT,
+};
+
+#define MT_TXD_SIZE (8 * 4)
+
+#define MT_TXD0_P_IDX BIT(31)
+#define MT_TXD0_Q_IDX GENMASK(30, 27)
+#define MT_TXD0_UTXB BIT(26)
+#define MT_TXD0_UNXV BIT(25)
+#define MT_TXD0_UDP_TCP_SUM BIT(24)
+#define MT_TXD0_IP_SUM BIT(23)
+#define MT_TXD0_ETH_TYPE_OFFSET GENMASK(22, 16)
+#define MT_TXD0_TX_BYTES GENMASK(15, 0)
+
+#define MT_TXD1_OWN_MAC GENMASK(31, 26)
+#define MT_TXD1_PROTECTED BIT(23)
+#define MT_TXD1_TID GENMASK(22, 20)
+#define MT_TXD1_NO_ACK BIT(19)
+#define MT_TXD1_HDR_PAD GENMASK(18, 16)
+#define MT_TXD1_LONG_FORMAT BIT(15)
+#define MT_TXD1_HDR_FORMAT GENMASK(14, 13)
+#define MT_TXD1_HDR_INFO GENMASK(12, 8)
+#define MT_TXD1_WLAN_IDX GENMASK(7, 0)
+
+#define MT_TXD2_FIX_RATE BIT(31)
+#define MT_TXD2_TIMING_MEASURE BIT(30)
+#define MT_TXD2_BA_DISABLE BIT(29)
+#define MT_TXD2_POWER_OFFSET GENMASK(28, 24)
+#define MT_TXD2_MAX_TX_TIME GENMASK(23, 16)
+#define MT_TXD2_FRAG GENMASK(15, 14)
+#define MT_TXD2_HTC_VLD BIT(13)
+#define MT_TXD2_DURATION BIT(12)
+#define MT_TXD2_BIP BIT(11)
+#define MT_TXD2_MULTICAST BIT(10)
+#define MT_TXD2_RTS BIT(9)
+#define MT_TXD2_SOUNDING BIT(8)
+#define MT_TXD2_NDPA BIT(7)
+#define MT_TXD2_NDP BIT(6)
+#define MT_TXD2_FRAME_TYPE GENMASK(5, 4)
+#define MT_TXD2_SUB_TYPE GENMASK(3, 0)
+
+#define MT_TXD3_SN_VALID BIT(31)
+#define MT_TXD3_PN_VALID BIT(30)
+#define MT_TXD3_SEQ GENMASK(27, 16)
+#define MT_TXD3_REM_TX_COUNT GENMASK(15, 11)
+#define MT_TXD3_TX_COUNT GENMASK(10, 6)
+
+#define MT_TXD4_PN_LOW GENMASK(31, 0)
+
+#define MT_TXD5_PN_HIGH GENMASK(31, 16)
+#define MT_TXD5_SW_POWER_MGMT BIT(13)
+#define MT_TXD5_BA_SEQ_CTRL BIT(12)
+#define MT_TXD5_DA_SELECT BIT(11)
+#define MT_TXD5_TX_STATUS_HOST BIT(10)
+#define MT_TXD5_TX_STATUS_MCU BIT(9)
+#define MT_TXD5_TX_STATUS_FMT BIT(8)
+#define MT_TXD5_PID GENMASK(7, 0)
+
+#define MT_TXD6_SGI BIT(31)
+#define MT_TXD6_LDPC BIT(30)
+#define MT_TXD6_TX_RATE GENMASK(29, 18)
+#define MT_TXD6_I_TXBF BIT(17)
+#define MT_TXD6_E_TXBF BIT(16)
+#define MT_TXD6_DYN_BW BIT(15)
+#define MT_TXD6_ANT_PRI GENMASK(14, 12)
+#define MT_TXD6_SPE_EN BIT(11)
+#define MT_TXD6_FIXED_BW BIT(10)
+#define MT_TXD6_BW GENMASK(9, 8)
+#define MT_TXD6_ANT_ID GENMASK(7, 2)
+#define MT_TXD6_FIXED_RATE BIT(0)
+
+#define MT_TX_RATE_STBC BIT(11)
+#define MT_TX_RATE_NSS GENMASK(10, 9)
+#define MT_TX_RATE_MODE GENMASK(8, 6)
+#define MT_TX_RATE_IDX GENMASK(5, 0)
+
+#define MT_TXS0_ANTENNA GENMASK(31, 26)
+#define MT_TXS0_TID GENMASK(25, 22)
+#define MT_TXS0_BA_ERROR BIT(22)
+#define MT_TXS0_PS_FLAG BIT(21)
+#define MT_TXS0_TXOP_TIMEOUT BIT(20)
+#define MT_TXS0_BIP_ERROR BIT(19)
+
+#define MT_TXS0_QUEUE_TIMEOUT BIT(18)
+#define MT_TXS0_RTS_TIMEOUT BIT(17)
+#define MT_TXS0_ACK_TIMEOUT BIT(16)
+#define MT_TXS0_ACK_ERROR_MASK GENMASK(18, 16)
+
+#define MT_TXS0_TX_STATUS_HOST BIT(15)
+#define MT_TXS0_TX_STATUS_MCU BIT(14)
+#define MT_TXS0_TXS_FORMAT BIT(13)
+#define MT_TXS0_FIXED_RATE BIT(12)
+#define MT_TXS0_TX_RATE GENMASK(11, 0)
+
+#define MT_TXS1_F0_TIMESTAMP GENMASK(31, 0)
+#define MT_TXS1_F1_NOISE_2 GENMASK(23, 16)
+#define MT_TXS1_F1_NOISE_1 GENMASK(15, 8)
+#define MT_TXS1_F1_NOISE_0 GENMASK(7, 0)
+
+#define MT_TXS2_F0_FRONT_TIME GENMASK(24, 0)
+#define MT_TXS2_F1_RCPI_2 GENMASK(23, 16)
+#define MT_TXS2_F1_RCPI_1 GENMASK(15, 8)
+#define MT_TXS2_F1_RCPI_0 GENMASK(7, 0)
+
+#define MT_TXS3_WCID GENMASK(31, 24)
+#define MT_TXS3_RXV_SEQNO GENMASK(23, 16)
+#define MT_TXS3_TX_DELAY GENMASK(15, 0)
+
+#define MT_TXS4_LAST_TX_RATE GENMASK(31, 29)
+#define MT_TXS4_TX_COUNT GENMASK(28, 24)
+#define MT_TXS4_AMPDU BIT(23)
+#define MT_TXS4_ACKED_MPDU BIT(22)
+#define MT_TXS4_PID GENMASK(21, 14)
+#define MT_TXS4_BW GENMASK(13, 12)
+#define MT_TXS4_F0_SEQNO GENMASK(11, 0)
+#define MT_TXS4_F1_TSSI GENMASK(11, 0)
+
+#endif
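All of the descriptor words above are defined purely as GENMASK()/BIT() fields and are packed and unpacked with FIELD_PREP()/FIELD_GET(), as done in mt7603_mac_write_txwi(). A small userspace sketch with stand-in versions of those kernel helpers; the stand-ins are assumptions for the demo, not the kernel definitions:

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel helpers used with these definitions. */
#define GENMASK(h, l)		((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val)	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET(mask, reg)	(((reg) & (mask)) >> __builtin_ctz(mask))

#define MT_TXD0_Q_IDX		GENMASK(30, 27)
#define MT_TXD0_TX_BYTES	GENMASK(15, 0)

int main(void)
{
	/* Compose TXD word 0 the same way mt7603_mac_write_txwi() does. */
	uint32_t txd0 = FIELD_PREP(MT_TXD0_TX_BYTES, 1500 + 32) |
			FIELD_PREP(MT_TXD0_Q_IDX, 3);

	printf("txd0 = 0x%08x, q_idx = %u, bytes = %u\n",
	       txd0, FIELD_GET(MT_TXD0_Q_IDX, txd0),
	       FIELD_GET(MT_TXD0_TX_BYTES, txd0));
	return 0;
}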
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
new file mode 100644
index 000000000..bdff89cc3
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -0,0 +1,730 @@
+// SPDX-License-Identifier: ISC
+
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include "mt7603.h"
+#include "mac.h"
+#include "eeprom.h"
+
+static int
+mt7603_start(struct ieee80211_hw *hw)
+{
+ struct mt7603_dev *dev = hw->priv;
+
+ mt7603_mac_reset_counters(dev);
+ mt7603_mac_start(dev);
+ dev->mphy.survey_time = ktime_get_boottime();
+ set_bit(MT76_STATE_RUNNING, &dev->mphy.state);
+ mt7603_mac_work(&dev->mt76.mac_work.work);
+
+ return 0;
+}
+
+static void
+mt7603_stop(struct ieee80211_hw *hw)
+{
+ struct mt7603_dev *dev = hw->priv;
+
+ clear_bit(MT76_STATE_RUNNING, &dev->mphy.state);
+ cancel_delayed_work_sync(&dev->mt76.mac_work);
+ mt7603_mac_stop(dev);
+}
+
+static int
+mt7603_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
+ struct mt7603_dev *dev = hw->priv;
+ struct mt76_txq *mtxq;
+ u8 bc_addr[ETH_ALEN];
+ int idx;
+ int ret = 0;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ mvif->idx = ffs(~dev->mphy.vif_mask) - 1;
+ if (mvif->idx >= MT7603_MAX_INTERFACES) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ mt76_wr(dev, MT_MAC_ADDR0(mvif->idx),
+ get_unaligned_le32(vif->addr));
+ mt76_wr(dev, MT_MAC_ADDR1(mvif->idx),
+ (get_unaligned_le16(vif->addr + 4) |
+ MT_MAC_ADDR1_VALID));
+
+ if (vif->type == NL80211_IFTYPE_AP) {
+ mt76_wr(dev, MT_BSSID0(mvif->idx),
+ get_unaligned_le32(vif->addr));
+ mt76_wr(dev, MT_BSSID1(mvif->idx),
+ (get_unaligned_le16(vif->addr + 4) |
+ MT_BSSID1_VALID));
+ }
+
+ idx = MT7603_WTBL_RESERVED - 1 - mvif->idx;
+ dev->mphy.vif_mask |= BIT(mvif->idx);
+ INIT_LIST_HEAD(&mvif->sta.poll_list);
+ mvif->sta.wcid.idx = idx;
+ mvif->sta.wcid.hw_key_idx = -1;
+
+ eth_broadcast_addr(bc_addr);
+ mt7603_wtbl_init(dev, idx, mvif->idx, bc_addr);
+
+ mtxq = (struct mt76_txq *)vif->txq->drv_priv;
+ mtxq->wcid = &mvif->sta.wcid;
+ rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
+
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
+}
+
+static void
+mt7603_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
+ struct mt7603_sta *msta = &mvif->sta;
+ struct mt7603_dev *dev = hw->priv;
+ int idx = msta->wcid.idx;
+
+ mt76_wr(dev, MT_MAC_ADDR0(mvif->idx), 0);
+ mt76_wr(dev, MT_MAC_ADDR1(mvif->idx), 0);
+ mt76_wr(dev, MT_BSSID0(mvif->idx), 0);
+ mt76_wr(dev, MT_BSSID1(mvif->idx), 0);
+ mt7603_beacon_set_timer(dev, mvif->idx, 0);
+
+ rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (!list_empty(&msta->poll_list))
+ list_del_init(&msta->poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+
+ mutex_lock(&dev->mt76.mutex);
+ dev->mphy.vif_mask &= ~BIT(mvif->idx);
+ mutex_unlock(&dev->mt76.mutex);
+}
+
+void mt7603_init_edcca(struct mt7603_dev *dev)
+{
+ /* Set lower signal level to -65dBm */
+ mt76_rmw_field(dev, MT_RXTD(8), MT_RXTD_8_LOWER_SIGNAL, 0x23);
+
+ /* clear previous energy detect monitor results */
+ mt76_rr(dev, MT_MIB_STAT_ED);
+
+ if (dev->ed_monitor)
+ mt76_set(dev, MT_MIB_CTL, MT_MIB_CTL_ED_TIME);
+ else
+ mt76_clear(dev, MT_MIB_CTL, MT_MIB_CTL_ED_TIME);
+
+ dev->ed_strict_mode = 0xff;
+ dev->ed_strong_signal = 0;
+ dev->ed_time = ktime_get_boottime();
+
+ mt7603_edcca_set_strict(dev, false);
+}
+
+static int
+mt7603_set_channel(struct mt7603_dev *dev, struct cfg80211_chan_def *def)
+{
+ u8 *rssi_data = (u8 *)dev->mt76.eeprom.data;
+ int idx, ret;
+ u8 bw = MT_BW_20;
+ bool failed = false;
+
+ cancel_delayed_work_sync(&dev->mt76.mac_work);
+ tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
+
+ mutex_lock(&dev->mt76.mutex);
+ set_bit(MT76_RESET, &dev->mphy.state);
+
+ mt7603_beacon_set_timer(dev, -1, 0);
+ mt76_set_channel(&dev->mphy);
+ mt7603_mac_stop(dev);
+
+ if (def->width == NL80211_CHAN_WIDTH_40)
+ bw = MT_BW_40;
+
+ dev->mphy.chandef = *def;
+ mt76_rmw_field(dev, MT_AGG_BWCR, MT_AGG_BWCR_BW, bw);
+ ret = mt7603_mcu_set_channel(dev);
+ if (ret) {
+ failed = true;
+ goto out;
+ }
+
+ if (def->chan->band == NL80211_BAND_5GHZ) {
+ idx = 1;
+ rssi_data += MT_EE_RSSI_OFFSET_5G;
+ } else {
+ idx = 0;
+ rssi_data += MT_EE_RSSI_OFFSET_2G;
+ }
+
+ memcpy(dev->rssi_offset, rssi_data, sizeof(dev->rssi_offset));
+
+ idx |= (def->chan -
+ mt76_hw(dev)->wiphy->bands[def->chan->band]->channels) << 1;
+ mt76_wr(dev, MT_WF_RMAC_CH_FREQ, idx);
+ mt7603_mac_set_timing(dev);
+ mt7603_mac_start(dev);
+
+ clear_bit(MT76_RESET, &dev->mphy.state);
+
+ mt76_txq_schedule_all(&dev->mphy);
+
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
+ msecs_to_jiffies(MT7603_WATCHDOG_TIME));
+
+ /* reset channel stats */
+ mt76_clear(dev, MT_MIB_CTL, MT_MIB_CTL_READ_CLR_DIS);
+ mt76_set(dev, MT_MIB_CTL,
+ MT_MIB_CTL_CCA_NAV_TX | MT_MIB_CTL_PSCCA_TIME);
+ mt76_rr(dev, MT_MIB_STAT_CCA);
+ mt7603_cca_stats_reset(dev);
+
+ dev->mphy.survey_time = ktime_get_boottime();
+
+ mt7603_init_edcca(dev);
+
+out:
+ if (!(mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL))
+ mt7603_beacon_set_timer(dev, -1, dev->mt76.beacon_int);
+ mutex_unlock(&dev->mt76.mutex);
+
+ tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
+
+ if (failed)
+ mt7603_mac_work(&dev->mt76.mac_work.work);
+
+ return ret;
+}
+
+static int
+mt7603_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct mt7603_dev *dev = hw->priv;
+ int ret = 0;
+
+ if (changed & (IEEE80211_CONF_CHANGE_CHANNEL |
+ IEEE80211_CONF_CHANGE_POWER)) {
+ ieee80211_stop_queues(hw);
+ ret = mt7603_set_channel(dev, &hw->conf.chandef);
+ ieee80211_wake_queues(hw);
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ mutex_lock(&dev->mt76.mutex);
+
+ if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
+ dev->rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
+ else
+ dev->rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC;
+
+ mt76_wr(dev, MT_WF_RFCR, dev->rxfilter);
+
+ mutex_unlock(&dev->mt76.mutex);
+ }
+
+ return ret;
+}
+
+static void
+mt7603_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast)
+{
+ struct mt7603_dev *dev = hw->priv;
+ u32 flags = 0;
+
+#define MT76_FILTER(_flag, _hw) do { \
+ flags |= *total_flags & FIF_##_flag; \
+ dev->rxfilter &= ~(_hw); \
+ dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
+ } while (0)
+
+ dev->rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
+ MT_WF_RFCR_DROP_OTHER_BEACON |
+ MT_WF_RFCR_DROP_FRAME_REPORT |
+ MT_WF_RFCR_DROP_PROBEREQ |
+ MT_WF_RFCR_DROP_MCAST_FILTERED |
+ MT_WF_RFCR_DROP_MCAST |
+ MT_WF_RFCR_DROP_BCAST |
+ MT_WF_RFCR_DROP_DUPLICATE |
+ MT_WF_RFCR_DROP_A2_BSSID |
+ MT_WF_RFCR_DROP_UNWANTED_CTL |
+ MT_WF_RFCR_DROP_STBC_MULTI);
+
+ MT76_FILTER(OTHER_BSS, MT_WF_RFCR_DROP_OTHER_TIM |
+ MT_WF_RFCR_DROP_A3_MAC |
+ MT_WF_RFCR_DROP_A3_BSSID);
+
+ MT76_FILTER(FCSFAIL, MT_WF_RFCR_DROP_FCSFAIL);
+
+ MT76_FILTER(CONTROL, MT_WF_RFCR_DROP_CTS |
+ MT_WF_RFCR_DROP_RTS |
+ MT_WF_RFCR_DROP_CTL_RSV |
+ MT_WF_RFCR_DROP_NDPA);
+
+ *total_flags = flags;
+ mt76_wr(dev, MT_WF_RFCR, dev->rxfilter);
+}
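The MT76_FILTER() macro above maps each mac80211 FIF_* flag onto one or more hardware drop bits: a requested flag clears the drop bits, an absent one sets them, and the accepted flags are handed back through *total_flags. A standalone model of that expansion for a single flag; the FIF_FCSFAIL and DROP_FCSFAIL values here are made up for the demo:

#include <stdint.h>
#include <stdio.h>

/* Models one MT76_FILTER() expansion from mt7603_configure_filter(). */
#define FIF_FCSFAIL	0x01
#define DROP_FCSFAIL	0x100

static uint32_t rxfilter;

static uint32_t apply_filter(uint32_t total_flags)
{
	uint32_t flags = 0;

	flags |= total_flags & FIF_FCSFAIL;
	rxfilter &= ~DROP_FCSFAIL;
	rxfilter |= !(flags & FIF_FCSFAIL) * DROP_FCSFAIL;

	return flags;
}

int main(void)
{
	uint32_t flags;

	flags = apply_filter(FIF_FCSFAIL);
	printf("pass FCS errors: flags=%#x rxfilter=%#x\n", flags, rxfilter);

	flags = apply_filter(0);
	printf("drop FCS errors: flags=%#x rxfilter=%#x\n", flags, rxfilter);
	return 0;
}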
+
+static void
+mt7603_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u32 changed)
+{
+ struct mt7603_dev *dev = hw->priv;
+ struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BSSID)) {
+ if (info->assoc || info->ibss_joined) {
+ mt76_wr(dev, MT_BSSID0(mvif->idx),
+ get_unaligned_le32(info->bssid));
+ mt76_wr(dev, MT_BSSID1(mvif->idx),
+ (get_unaligned_le16(info->bssid + 4) |
+ MT_BSSID1_VALID));
+ } else {
+ mt76_wr(dev, MT_BSSID0(mvif->idx), 0);
+ mt76_wr(dev, MT_BSSID1(mvif->idx), 0);
+ }
+ }
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ int slottime = info->use_short_slot ? 9 : 20;
+
+ if (slottime != dev->slottime) {
+ dev->slottime = slottime;
+ mt7603_mac_set_timing(dev);
+ }
+ }
+
+ if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON_INT)) {
+ int beacon_int = !!info->enable_beacon * info->beacon_int;
+
+ tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
+ mt7603_beacon_set_timer(dev, mvif->idx, beacon_int);
+ tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
+ }
+
+ mutex_unlock(&dev->mt76.mutex);
+}
+
+int
+mt7603_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+ struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
+ struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
+ int idx;
+ int ret = 0;
+
+ idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7603_WTBL_STA - 1);
+ if (idx < 0)
+ return -ENOSPC;
+
+ INIT_LIST_HEAD(&msta->poll_list);
+ __skb_queue_head_init(&msta->psq);
+ msta->ps = ~0;
+ msta->smps = ~0;
+ msta->wcid.sta = 1;
+ msta->wcid.idx = idx;
+ mt7603_wtbl_init(dev, idx, mvif->idx, sta->addr);
+ mt7603_wtbl_set_ps(dev, msta, false);
+
+ if (vif->type == NL80211_IFTYPE_AP)
+ set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags);
+
+ return ret;
+}
+
+void
+mt7603_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+
+ mt7603_wtbl_update_cap(dev, sta);
+}
+
+void
+mt7603_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+ struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
+ struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
+
+ spin_lock_bh(&dev->ps_lock);
+ __skb_queue_purge(&msta->psq);
+ mt7603_filter_tx(dev, wcid->idx, true);
+ spin_unlock_bh(&dev->ps_lock);
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (!list_empty(&msta->poll_list))
+ list_del_init(&msta->poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+
+ mt7603_wtbl_clear(dev, wcid->idx);
+}
+
+static void
+mt7603_ps_tx_list(struct mt7603_dev *dev, struct sk_buff_head *list)
+{
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue(list)) != NULL)
+ mt76_tx_queue_skb_raw(dev, skb_get_queue_mapping(skb),
+ skb, 0);
+}
+
+void
+mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
+{
+ struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+ struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
+ struct sk_buff_head list;
+
+ mt76_stop_tx_queues(&dev->mt76, sta, true);
+ mt7603_wtbl_set_ps(dev, msta, ps);
+ if (ps)
+ return;
+
+ __skb_queue_head_init(&list);
+
+ spin_lock_bh(&dev->ps_lock);
+ skb_queue_splice_tail_init(&msta->psq, &list);
+ spin_unlock_bh(&dev->ps_lock);
+
+ mt7603_ps_tx_list(dev, &list);
+}
+
+static void
+mt7603_ps_set_more_data(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+
+ hdr = (struct ieee80211_hdr *)&skb->data[MT_TXD_SIZE];
+ hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+}
+
+static void
+mt7603_release_buffered_frames(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u16 tids, int nframes,
+ enum ieee80211_frame_release_type reason,
+ bool more_data)
+{
+ struct mt7603_dev *dev = hw->priv;
+ struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
+ struct sk_buff_head list;
+ struct sk_buff *skb, *tmp;
+
+ __skb_queue_head_init(&list);
+
+ mt7603_wtbl_set_ps(dev, msta, false);
+
+ spin_lock_bh(&dev->ps_lock);
+ skb_queue_walk_safe(&msta->psq, skb, tmp) {
+ if (!nframes)
+ break;
+
+ if (!(tids & BIT(skb->priority)))
+ continue;
+
+ skb_set_queue_mapping(skb, MT_TXQ_PSD);
+ __skb_unlink(skb, &msta->psq);
+ mt7603_ps_set_more_data(skb);
+ __skb_queue_tail(&list, skb);
+ nframes--;
+ }
+ spin_unlock_bh(&dev->ps_lock);
+
+ if (!skb_queue_empty(&list))
+ ieee80211_sta_eosp(sta);
+
+ mt7603_ps_tx_list(dev, &list);
+
+ if (nframes)
+ mt76_release_buffered_frames(hw, sta, tids, nframes, reason,
+ more_data);
+}
+
+static int
+mt7603_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct mt7603_dev *dev = hw->priv;
+ struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
+ struct mt7603_sta *msta = sta ? (struct mt7603_sta *)sta->drv_priv :
+ &mvif->sta;
+ struct mt76_wcid *wcid = &msta->wcid;
+ int idx = key->keyidx;
+
+ /* fall back to sw encryption for unsupported ciphers */
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /*
+ * The hardware does not support per-STA RX GTK, fall back
+ * to software mode for these.
+ */
+ if ((vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) &&
+ (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
+ key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return -EOPNOTSUPP;
+
+ if (cmd == SET_KEY) {
+ key->hw_key_idx = wcid->idx;
+ wcid->hw_key_idx = idx;
+ } else {
+ if (idx == wcid->hw_key_idx)
+ wcid->hw_key_idx = -1;
+
+ key = NULL;
+ }
+ mt76_wcid_key_setup(&dev->mt76, wcid, key);
+
+ return mt7603_wtbl_set_key(dev, wcid->idx, key);
+}
+
+static int
+mt7603_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct mt7603_dev *dev = hw->priv;
+ u16 cw_min = (1 << 5) - 1;
+ u16 cw_max = (1 << 10) - 1;
+ u32 val;
+
+ queue = dev->mt76.q_tx[queue]->hw_idx;
+
+ if (params->cw_min)
+ cw_min = params->cw_min;
+ if (params->cw_max)
+ cw_max = params->cw_max;
+
+ mutex_lock(&dev->mt76.mutex);
+ mt7603_mac_stop(dev);
+
+ val = mt76_rr(dev, MT_WMM_TXOP(queue));
+ val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(queue));
+ val |= params->txop << MT_WMM_TXOP_SHIFT(queue);
+ mt76_wr(dev, MT_WMM_TXOP(queue), val);
+
+ val = mt76_rr(dev, MT_WMM_AIFSN);
+ val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(queue));
+ val |= params->aifs << MT_WMM_AIFSN_SHIFT(queue);
+ mt76_wr(dev, MT_WMM_AIFSN, val);
+
+ val = mt76_rr(dev, MT_WMM_CWMIN);
+ val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(queue));
+ val |= cw_min << MT_WMM_CWMIN_SHIFT(queue);
+ mt76_wr(dev, MT_WMM_CWMIN, val);
+
+ val = mt76_rr(dev, MT_WMM_CWMAX(queue));
+ val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(queue));
+ val |= cw_max << MT_WMM_CWMAX_SHIFT(queue);
+ mt76_wr(dev, MT_WMM_CWMAX(queue), val);
+
+ mt7603_mac_start(dev);
+ mutex_unlock(&dev->mt76.mutex);
+
+ return 0;
+}
+
+static void
+mt7603_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+}
+
+static int
+mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ struct mt7603_dev *dev = hw->priv;
+ struct ieee80211_sta *sta = params->sta;
+ struct ieee80211_txq *txq = sta->txq[params->tid];
+ struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
+ u16 tid = params->tid;
+ u16 ssn = params->ssn;
+ u8 ba_size = params->buf_size;
+ struct mt76_txq *mtxq;
+ int ret = 0;
+
+ if (!txq)
+ return -EINVAL;
+
+ mtxq = (struct mt76_txq *)txq->drv_priv;
+
+ mutex_lock(&dev->mt76.mutex);
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn,
+ params->buf_size);
+ mt7603_mac_rx_ba_reset(dev, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ mtxq->aggr = true;
+ mtxq->send_bar = false;
+ mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, ba_size);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ mtxq->aggr = false;
+ mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, -1);
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(ssn);
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
+ break;
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ mtxq->aggr = false;
+ mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, -1);
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ }
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
+}
+
+static void
+mt7603_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7603_dev *dev = hw->priv;
+ struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
+ struct ieee80211_sta_rates *sta_rates = rcu_dereference(sta->rates);
+ int i;
+
+ if (!sta_rates)
+ return;
+
+ spin_lock_bh(&dev->mt76.lock);
+ for (i = 0; i < ARRAY_SIZE(msta->rates); i++) {
+ msta->rates[i].idx = sta_rates->rate[i].idx;
+ msta->rates[i].count = sta_rates->rate[i].count;
+ msta->rates[i].flags = sta_rates->rate[i].flags;
+
+ if (msta->rates[i].idx < 0 || !msta->rates[i].count)
+ break;
+ }
+ msta->n_rates = i;
+ mt7603_wtbl_set_rates(dev, msta, NULL, msta->rates);
+ msta->rate_probe = false;
+ mt7603_wtbl_set_smps(dev, msta,
+ sta->smps_mode == IEEE80211_SMPS_DYNAMIC);
+ spin_unlock_bh(&dev->mt76.lock);
+}
+
+static void
+mt7603_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
+{
+ struct mt7603_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mt76.mutex);
+ dev->coverage_class = max_t(s16, coverage_class, 0);
+ mt7603_mac_set_timing(dev);
+ mutex_unlock(&dev->mt76.mutex);
+}
+
+static void mt7603_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt7603_dev *dev = hw->priv;
+ struct mt76_wcid *wcid = &dev->global_sta.wcid;
+
+ if (control->sta) {
+ struct mt7603_sta *msta;
+
+ msta = (struct mt7603_sta *)control->sta->drv_priv;
+ wcid = &msta->wcid;
+ } else if (vif) {
+ struct mt7603_vif *mvif;
+
+ mvif = (struct mt7603_vif *)vif->drv_priv;
+ wcid = &mvif->sta.wcid;
+ }
+
+ mt76_tx(&dev->mphy, control->sta, wcid, skb);
+}
+
+const struct ieee80211_ops mt7603_ops = {
+ .tx = mt7603_tx,
+ .start = mt7603_start,
+ .stop = mt7603_stop,
+ .add_interface = mt7603_add_interface,
+ .remove_interface = mt7603_remove_interface,
+ .config = mt7603_config,
+ .configure_filter = mt7603_configure_filter,
+ .bss_info_changed = mt7603_bss_info_changed,
+ .sta_state = mt76_sta_state,
+ .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
+ .set_key = mt7603_set_key,
+ .conf_tx = mt7603_conf_tx,
+ .sw_scan_start = mt76_sw_scan,
+ .sw_scan_complete = mt76_sw_scan_complete,
+ .flush = mt7603_flush,
+ .ampdu_action = mt7603_ampdu_action,
+ .get_txpower = mt76_get_txpower,
+ .wake_tx_queue = mt76_wake_tx_queue,
+ .sta_rate_tbl_update = mt7603_sta_rate_tbl_update,
+ .release_buffered_frames = mt7603_release_buffered_frames,
+ .set_coverage_class = mt7603_set_coverage_class,
+ .set_tim = mt76_set_tim,
+ .get_survey = mt76_get_survey,
+ .get_antenna = mt76_get_antenna,
+};
+
+MODULE_LICENSE("Dual BSD/GPL");
+
+static int __init mt7603_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&mt76_wmac_driver);
+ if (ret)
+ return ret;
+
+#ifdef CONFIG_PCI
+ ret = pci_register_driver(&mt7603_pci_driver);
+ if (ret)
+ platform_driver_unregister(&mt76_wmac_driver);
+#endif
+ return ret;
+}
+
+static void __exit mt7603_exit(void)
+{
+#ifdef CONFIG_PCI
+ pci_unregister_driver(&mt7603_pci_driver);
+#endif
+ platform_driver_unregister(&mt76_wmac_driver);
+}
+
+module_init(mt7603_init);
+module_exit(mt7603_exit);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
new file mode 100644
index 000000000..a47a3a644
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
@@ -0,0 +1,479 @@
+// SPDX-License-Identifier: ISC
+
+#include <linux/firmware.h>
+#include "mt7603.h"
+#include "mcu.h"
+#include "eeprom.h"
+
+#define MCU_SKB_RESERVE 8
+
+struct mt7603_fw_trailer {
+ char fw_ver[10];
+ char build_date[15];
+ __le32 dl_len;
+} __packed;
+
+static int
+__mt7603_mcu_msg_send(struct mt7603_dev *dev, struct sk_buff *skb,
+ int cmd, int *wait_seq)
+{
+ int hdrlen = dev->mcu_running ? sizeof(struct mt7603_mcu_txd) : 12;
+ struct mt76_dev *mdev = &dev->mt76;
+ struct mt7603_mcu_txd *txd;
+ u8 seq;
+
+ seq = ++mdev->mcu.msg_seq & 0xf;
+ if (!seq)
+ seq = ++mdev->mcu.msg_seq & 0xf;
+
+ txd = (struct mt7603_mcu_txd *)skb_push(skb, hdrlen);
+
+ txd->len = cpu_to_le16(skb->len);
+ if (cmd == -MCU_CMD_FW_SCATTER)
+ txd->pq_id = cpu_to_le16(MCU_PORT_QUEUE_FW);
+ else
+ txd->pq_id = cpu_to_le16(MCU_PORT_QUEUE);
+ txd->pkt_type = MCU_PKT_ID;
+ txd->seq = seq;
+
+ if (cmd < 0) {
+ txd->cid = -cmd;
+ txd->set_query = MCU_Q_NA;
+ } else {
+ txd->cid = MCU_CMD_EXT_CID;
+ txd->ext_cid = cmd;
+ txd->set_query = MCU_Q_SET;
+ txd->ext_cid_ack = 1;
+ }
+
+ if (wait_seq)
+ *wait_seq = seq;
+
+ return mt76_tx_queue_skb_raw(dev, MT_TXQ_MCU, skb, 0);
+}
+
+static int
+mt7603_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
+ int len, bool wait_resp)
+{
+ struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+ unsigned long expires = jiffies + 3 * HZ;
+ struct mt7603_mcu_rxd *rxd;
+ struct sk_buff *skb;
+ int ret, seq;
+
+ skb = mt76_mcu_msg_alloc(mdev, data, len);
+ if (!skb)
+ return -ENOMEM;
+
+ mutex_lock(&mdev->mcu.mutex);
+
+ ret = __mt7603_mcu_msg_send(dev, skb, cmd, &seq);
+ if (ret)
+ goto out;
+
+ while (wait_resp) {
+ bool check_seq = false;
+
+ skb = mt76_mcu_get_response(&dev->mt76, expires);
+ if (!skb) {
+ dev_err(mdev->dev,
+ "MCU message %d (seq %d) timed out\n",
+ cmd, seq);
+ dev->mcu_hang = MT7603_WATCHDOG_TIMEOUT;
+ ret = -ETIMEDOUT;
+ break;
+ }
+
+ rxd = (struct mt7603_mcu_rxd *)skb->data;
+ if (seq == rxd->seq)
+ check_seq = true;
+
+ dev_kfree_skb(skb);
+
+ if (check_seq)
+ break;
+ }
+
+out:
+ mutex_unlock(&mdev->mcu.mutex);
+
+ return ret;
+}
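__mt7603_mcu_msg_send() hands out a rolling 4-bit sequence number and skips the value 0, and mt7603_mcu_msg_send() keeps reading responses until one with a matching seq arrives or the timeout hits. A small sketch of just the sequence wrap-around; next_mcu_seq() is an invented helper name:

#include <stdint.h>
#include <stdio.h>

/* Stand-alone sketch of the rolling 4-bit MCU sequence number handling at
 * the top of __mt7603_mcu_msg_send().
 */
static uint8_t msg_seq;

static uint8_t next_mcu_seq(void)
{
	uint8_t seq = ++msg_seq & 0xf;

	if (!seq)			/* never hand out seq 0 */
		seq = ++msg_seq & 0xf;
	return seq;
}

int main(void)
{
	int i;

	for (i = 0; i < 20; i++)
		printf("%u ", next_mcu_seq());
	printf("\n");			/* 1..15, then wraps back to 1 */
	return 0;
}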
+
+static int
+mt7603_mcu_init_download(struct mt7603_dev *dev, u32 addr, u32 len)
+{
+ struct {
+ __le32 addr;
+ __le32 len;
+ __le32 mode;
+ } req = {
+ .addr = cpu_to_le32(addr),
+ .len = cpu_to_le32(len),
+ .mode = cpu_to_le32(BIT(31)),
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_TARGET_ADDRESS_LEN_REQ,
+ &req, sizeof(req), true);
+}
+
+static int
+mt7603_mcu_send_firmware(struct mt7603_dev *dev, const void *data, int len)
+{
+ int cur_len, ret = 0;
+
+ while (len > 0) {
+ cur_len = min_t(int, 4096 - sizeof(struct mt7603_mcu_txd),
+ len);
+
+ ret = __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_FW_SCATTER,
+ data, cur_len, false);
+ if (ret)
+ break;
+
+ data += cur_len;
+ len -= cur_len;
+ }
+
+ return ret;
+}
+
+static int
+mt7603_mcu_start_firmware(struct mt7603_dev *dev, u32 addr)
+{
+ struct {
+ __le32 override;
+ __le32 addr;
+ } req = {
+ .override = cpu_to_le32(addr ? 1 : 0),
+ .addr = cpu_to_le32(addr),
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_FW_START_REQ,
+ &req, sizeof(req), true);
+}
+
+static int
+mt7603_mcu_restart(struct mt76_dev *dev)
+{
+ return __mt76_mcu_send_msg(dev, -MCU_CMD_RESTART_DL_REQ,
+ NULL, 0, true);
+}
+
+static int mt7603_load_firmware(struct mt7603_dev *dev)
+{
+ const struct firmware *fw;
+ const struct mt7603_fw_trailer *hdr;
+ const char *firmware;
+ int dl_len;
+ u32 addr, val;
+ int ret;
+
+ if (is_mt7628(dev)) {
+ if (mt76xx_rev(dev) == MT7628_REV_E1)
+ firmware = MT7628_FIRMWARE_E1;
+ else
+ firmware = MT7628_FIRMWARE_E2;
+ } else {
+ if (mt76xx_rev(dev) < MT7603_REV_E2)
+ firmware = MT7603_FIRMWARE_E1;
+ else
+ firmware = MT7603_FIRMWARE_E2;
+ }
+
+ ret = request_firmware(&fw, firmware, dev->mt76.dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+ dev_err(dev->mt76.dev, "Invalid firmware\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ hdr = (const struct mt7603_fw_trailer *)(fw->data + fw->size -
+ sizeof(*hdr));
+
+ dev_info(dev->mt76.dev, "Firmware Version: %.10s\n", hdr->fw_ver);
+ dev_info(dev->mt76.dev, "Build Time: %.15s\n", hdr->build_date);
+
+ addr = mt7603_reg_map(dev, 0x50012498);
+ mt76_wr(dev, addr, 0x5);
+ mt76_wr(dev, addr, 0x5);
+ udelay(1);
+
+ /* switch to bypass mode */
+ mt76_rmw(dev, MT_SCH_4, MT_SCH_4_FORCE_QID,
+ MT_SCH_4_BYPASS | FIELD_PREP(MT_SCH_4_FORCE_QID, 5));
+
+ val = mt76_rr(dev, MT_TOP_MISC2);
+ if (val & BIT(1)) {
+ dev_info(dev->mt76.dev, "Firmware already running...\n");
+ goto running;
+ }
+
+ if (!mt76_poll_msec(dev, MT_TOP_MISC2, BIT(0) | BIT(1), BIT(0), 500)) {
+ dev_err(dev->mt76.dev, "Timeout waiting for ROM code to become ready\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ dl_len = le32_to_cpu(hdr->dl_len) + 4;
+ ret = mt7603_mcu_init_download(dev, MCU_FIRMWARE_ADDRESS, dl_len);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Download request failed\n");
+ goto out;
+ }
+
+ ret = mt7603_mcu_send_firmware(dev, fw->data, dl_len);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Failed to send firmware to device\n");
+ goto out;
+ }
+
+ ret = mt7603_mcu_start_firmware(dev, MCU_FIRMWARE_ADDRESS);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Failed to start firmware\n");
+ goto out;
+ }
+
+ if (!mt76_poll_msec(dev, MT_TOP_MISC2, BIT(1), BIT(1), 500)) {
+ dev_err(dev->mt76.dev, "Timeout waiting for firmware to initialize\n");
+ ret = -EIO;
+ goto out;
+ }
+
+running:
+ mt76_clear(dev, MT_SCH_4, MT_SCH_4_FORCE_QID | MT_SCH_4_BYPASS);
+
+ mt76_set(dev, MT_SCH_4, BIT(8));
+ mt76_clear(dev, MT_SCH_4, BIT(8));
+
+ dev->mcu_running = true;
+ snprintf(dev->mt76.hw->wiphy->fw_version,
+ sizeof(dev->mt76.hw->wiphy->fw_version),
+ "%.10s-%.15s", hdr->fw_ver, hdr->build_date);
+ dev_info(dev->mt76.dev, "firmware init done\n");
+
+out:
+ release_firmware(fw);
+
+ return ret;
+}
+
+int mt7603_mcu_init(struct mt7603_dev *dev)
+{
+ static const struct mt76_mcu_ops mt7603_mcu_ops = {
+ .headroom = sizeof(struct mt7603_mcu_txd),
+ .mcu_send_msg = mt7603_mcu_msg_send,
+ .mcu_restart = mt7603_mcu_restart,
+ };
+
+ dev->mt76.mcu_ops = &mt7603_mcu_ops;
+ return mt7603_load_firmware(dev);
+}
+
+void mt7603_mcu_exit(struct mt7603_dev *dev)
+{
+ __mt76_mcu_restart(&dev->mt76);
+ skb_queue_purge(&dev->mt76.mcu.res_q);
+}
+
+int mt7603_mcu_set_eeprom(struct mt7603_dev *dev)
+{
+ static const u16 req_fields[] = {
+#define WORD(_start) \
+ _start, \
+ _start + 1
+#define GROUP_2G(_start) \
+ WORD(_start), \
+ WORD(_start + 2), \
+ WORD(_start + 4)
+
+ MT_EE_NIC_CONF_0 + 1,
+ WORD(MT_EE_NIC_CONF_1),
+ MT_EE_WIFI_RF_SETTING,
+ MT_EE_TX_POWER_DELTA_BW40,
+ MT_EE_TX_POWER_DELTA_BW80 + 1,
+ MT_EE_TX_POWER_EXT_PA_5G,
+ MT_EE_TEMP_SENSOR_CAL,
+ GROUP_2G(MT_EE_TX_POWER_0_START_2G),
+ GROUP_2G(MT_EE_TX_POWER_1_START_2G),
+ WORD(MT_EE_TX_POWER_CCK),
+ WORD(MT_EE_TX_POWER_OFDM_2G_6M),
+ WORD(MT_EE_TX_POWER_OFDM_2G_24M),
+ WORD(MT_EE_TX_POWER_OFDM_2G_54M),
+ WORD(MT_EE_TX_POWER_HT_BPSK_QPSK),
+ WORD(MT_EE_TX_POWER_HT_16_64_QAM),
+ WORD(MT_EE_TX_POWER_HT_64_QAM),
+ MT_EE_ELAN_RX_MODE_GAIN,
+ MT_EE_ELAN_RX_MODE_NF,
+ MT_EE_ELAN_RX_MODE_P1DB,
+ MT_EE_ELAN_BYPASS_MODE_GAIN,
+ MT_EE_ELAN_BYPASS_MODE_NF,
+ MT_EE_ELAN_BYPASS_MODE_P1DB,
+ WORD(MT_EE_STEP_NUM_NEG_6_7),
+ WORD(MT_EE_STEP_NUM_NEG_4_5),
+ WORD(MT_EE_STEP_NUM_NEG_2_3),
+ WORD(MT_EE_STEP_NUM_NEG_0_1),
+ WORD(MT_EE_REF_STEP_24G),
+ WORD(MT_EE_STEP_NUM_PLUS_1_2),
+ WORD(MT_EE_STEP_NUM_PLUS_3_4),
+ WORD(MT_EE_STEP_NUM_PLUS_5_6),
+ MT_EE_STEP_NUM_PLUS_7,
+ MT_EE_XTAL_FREQ_OFFSET,
+ MT_EE_XTAL_TRIM_2_COMP,
+ MT_EE_XTAL_TRIM_3_COMP,
+ MT_EE_XTAL_WF_RFCAL,
+
+ /* unknown fields below */
+ WORD(0x24),
+ 0x34,
+ 0x39,
+ 0x3b,
+ WORD(0x42),
+ WORD(0x9e),
+ 0xf2,
+ WORD(0xf8),
+ 0xfa,
+ 0x12e,
+ WORD(0x130), WORD(0x132), WORD(0x134), WORD(0x136),
+ WORD(0x138), WORD(0x13a), WORD(0x13c), WORD(0x13e),
+
+#undef GROUP_2G
+#undef WORD
+
+ };
+ struct req_data {
+ __le16 addr;
+ u8 val;
+ u8 pad;
+ } __packed;
+ struct {
+ u8 buffer_mode;
+ u8 len;
+ u8 pad[2];
+ } req_hdr = {
+ .buffer_mode = 1,
+ .len = ARRAY_SIZE(req_fields) - 1,
+ };
+ const int size = 0xff * sizeof(struct req_data);
+ u8 *req, *eep = (u8 *)dev->mt76.eeprom.data;
+ int i, ret, len = sizeof(req_hdr) + size;
+ struct req_data *data;
+
+ BUILD_BUG_ON(ARRAY_SIZE(req_fields) * sizeof(*data) > size);
+
+ req = kmalloc(len, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ memcpy(req, &req_hdr, sizeof(req_hdr));
+ data = (struct req_data *)(req + sizeof(req_hdr));
+ memset(data, 0, size);
+ for (i = 0; i < ARRAY_SIZE(req_fields); i++) {
+ data[i].addr = cpu_to_le16(req_fields[i]);
+ data[i].val = eep[req_fields[i]];
+ }
+
+ ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EFUSE_BUFFER_MODE,
+ req, len, true);
+ kfree(req);
+
+ return ret;
+}
+
+static int mt7603_mcu_set_tx_power(struct mt7603_dev *dev)
+{
+ struct {
+ u8 center_channel;
+ u8 tssi;
+ u8 temp_comp;
+ u8 target_power[2];
+ u8 rate_power_delta[14];
+ u8 bw_power_delta;
+ u8 ch_power_delta[6];
+ u8 temp_comp_power[17];
+ u8 reserved;
+ } req = {
+ .center_channel = dev->mphy.chandef.chan->hw_value,
+#define EEP_VAL(n) ((u8 *)dev->mt76.eeprom.data)[n]
+ .tssi = EEP_VAL(MT_EE_NIC_CONF_1 + 1),
+ .temp_comp = EEP_VAL(MT_EE_NIC_CONF_1),
+ .target_power = {
+ EEP_VAL(MT_EE_TX_POWER_0_START_2G + 2),
+ EEP_VAL(MT_EE_TX_POWER_1_START_2G + 2)
+ },
+ .bw_power_delta = EEP_VAL(MT_EE_TX_POWER_DELTA_BW40),
+ .ch_power_delta = {
+ EEP_VAL(MT_EE_TX_POWER_0_START_2G + 3),
+ EEP_VAL(MT_EE_TX_POWER_0_START_2G + 4),
+ EEP_VAL(MT_EE_TX_POWER_0_START_2G + 5),
+ EEP_VAL(MT_EE_TX_POWER_1_START_2G + 3),
+ EEP_VAL(MT_EE_TX_POWER_1_START_2G + 4),
+ EEP_VAL(MT_EE_TX_POWER_1_START_2G + 5)
+ },
+#undef EEP_VAL
+ };
+ u8 *eep = (u8 *)dev->mt76.eeprom.data;
+
+ memcpy(req.rate_power_delta, eep + MT_EE_TX_POWER_CCK,
+ sizeof(req.rate_power_delta));
+
+ memcpy(req.temp_comp_power, eep + MT_EE_STEP_NUM_NEG_6_7,
+ sizeof(req.temp_comp_power));
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_TX_POWER_CTRL,
+ &req, sizeof(req), true);
+}
+
+int mt7603_mcu_set_channel(struct mt7603_dev *dev)
+{
+ struct cfg80211_chan_def *chandef = &dev->mphy.chandef;
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ int n_chains = hweight8(dev->mphy.antenna_mask);
+ struct {
+ u8 control_chan;
+ u8 center_chan;
+ u8 bw;
+ u8 tx_streams;
+ u8 rx_streams;
+ u8 _res0[7];
+ u8 txpower[21];
+ u8 _res1[3];
+ } req = {
+ .control_chan = chandef->chan->hw_value,
+ .center_chan = chandef->chan->hw_value,
+ .bw = MT_BW_20,
+ .tx_streams = n_chains,
+ .rx_streams = n_chains,
+ };
+ s8 tx_power;
+ int i, ret;
+
+ if (dev->mphy.chandef.width == NL80211_CHAN_WIDTH_40) {
+ req.bw = MT_BW_40;
+ if (chandef->center_freq1 > chandef->chan->center_freq)
+ req.center_chan += 2;
+ else
+ req.center_chan -= 2;
+ }
+
+ tx_power = hw->conf.power_level * 2;
+ if (dev->mphy.antenna_mask == 3)
+ tx_power -= 6;
+ tx_power = min(tx_power, dev->tx_power_limit);
+
+ dev->mphy.txpower_cur = tx_power;
+
+ for (i = 0; i < ARRAY_SIZE(req.txpower); i++)
+ req.txpower[i] = tx_power;
+
+ ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_CHANNEL_SWITCH,
+ &req, sizeof(req), true);
+ if (ret)
+ return ret;
+
+ return mt7603_mcu_set_tx_power(dev);
+}
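
The scatter loop in mt7603_mcu_send_firmware() above splits the firmware image so that every MCU_CMD_FW_SCATTER message, header included, fits in 4096 bytes. Below is a minimal standalone sketch of that chunking, assuming a 32-byte TXD header (the size of struct mt7603_mcu_txd shown later in this patch) and a hypothetical send_chunk() callback standing in for __mt76_mcu_send_msg():

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define MCU_MSG_MAX	4096
#define TXD_HDR_LEN	32	/* assumed: sizeof(struct mt7603_mcu_txd) */

typedef int (*send_chunk_fn)(const void *data, size_t len);

static int send_scattered(const void *data, size_t len, send_chunk_fn send_chunk)
{
	const char *p = data;

	while (len > 0) {
		size_t cur = len;
		int ret;

		/* cap each message so header + payload stays within 4096 bytes */
		if (cur > MCU_MSG_MAX - TXD_HDR_LEN)
			cur = MCU_MSG_MAX - TXD_HDR_LEN;

		ret = send_chunk(p, cur);
		if (ret)
			return ret;

		p += cur;
		len -= cur;
	}

	return 0;
}

static int print_chunk(const void *data, size_t len)
{
	(void)data;
	printf("chunk of %zu bytes\n", len);
	return 0;
}

int main(void)
{
	static unsigned char fw[10000];	/* stand-in firmware blob */

	memset(fw, 0xff, sizeof(fw));
	return send_scattered(fw, sizeof(fw), print_chunk);
}
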
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.h
new file mode 100644
index 000000000..30df8a3fd
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: ISC */
+
+#ifndef __MT7603_MCU_H
+#define __MT7603_MCU_H
+
+struct mt7603_mcu_txd {
+ __le16 len;
+ __le16 pq_id;
+
+ u8 cid;
+ u8 pkt_type;
+ u8 set_query;
+ u8 seq;
+
+ u8 uc_d2b0_rev;
+ u8 ext_cid;
+ u8 uc_d2b2_rev;
+ u8 ext_cid_ack;
+
+ u32 au4_d3_to_d7_rev[5];
+} __packed __aligned(4);
+
+struct mt7603_mcu_rxd {
+ __le16 len;
+ __le16 pkt_type_id;
+
+ u8 eid;
+ u8 seq;
+ __le16 __rsv;
+
+ u8 ext_eid;
+ u8 __rsv1[3];
+};
+
+#define MCU_PKT_ID 0xa0
+#define MCU_PORT_QUEUE 0x8000
+#define MCU_PORT_QUEUE_FW 0xc000
+
+#define MCU_FIRMWARE_ADDRESS 0x100000
+
+enum {
+ MCU_Q_QUERY,
+ MCU_Q_SET,
+ MCU_Q_RESERVED,
+ MCU_Q_NA
+};
+
+enum {
+ MCU_CMD_TARGET_ADDRESS_LEN_REQ = 0x01,
+ MCU_CMD_FW_START_REQ = 0x02,
+ MCU_CMD_INIT_ACCESS_REG = 0x3,
+ MCU_CMD_PATCH_START_REQ = 0x05,
+ MCU_CMD_PATCH_FINISH_REQ = 0x07,
+ MCU_CMD_PATCH_SEM_CONTROL = 0x10,
+ MCU_CMD_HIF_LOOPBACK = 0x20,
+ MCU_CMD_CH_PRIVILEGE = 0x20,
+ MCU_CMD_ACCESS_REG = 0xC2,
+ MCU_CMD_EXT_CID = 0xED,
+ MCU_CMD_FW_SCATTER = 0xEE,
+ MCU_CMD_RESTART_DL_REQ = 0xEF,
+};
+
+enum {
+ MCU_EXT_CMD_RF_REG_ACCESS = 0x02,
+ MCU_EXT_CMD_RF_TEST = 0x04,
+ MCU_EXT_CMD_RADIO_ON_OFF_CTRL = 0x05,
+ MCU_EXT_CMD_WIFI_RX_DISABLE = 0x06,
+ MCU_EXT_CMD_PM_STATE_CTRL = 0x07,
+ MCU_EXT_CMD_CHANNEL_SWITCH = 0x08,
+ MCU_EXT_CMD_NIC_CAPABILITY = 0x09,
+ MCU_EXT_CMD_PWR_SAVING = 0x0A,
+ MCU_EXT_CMD_MULTIPLE_REG_ACCESS = 0x0E,
+ MCU_EXT_CMD_AP_PWR_SAVING_CAPABILITY = 0xF,
+ MCU_EXT_CMD_SEC_ADDREMOVE_KEY = 0x10,
+ MCU_EXT_CMD_SET_TX_POWER_CTRL = 0x11,
+ MCU_EXT_CMD_FW_LOG_2_HOST = 0x13,
+ MCU_EXT_CMD_PS_RETRIEVE_START = 0x14,
+ MCU_EXT_CMD_LED_CTRL = 0x17,
+ MCU_EXT_CMD_PACKET_FILTER = 0x18,
+ MCU_EXT_CMD_PWR_MGT_BIT_WIFI = 0x1B,
+ MCU_EXT_CMD_EFUSE_BUFFER_MODE = 0x21,
+ MCU_EXT_CMD_THERMAL_PROTECT = 0x23,
+ MCU_EXT_CMD_EDCA_SET = 0x27,
+ MCU_EXT_CMD_SLOT_TIME_SET = 0x28,
+ MCU_EXT_CMD_CONFIG_INTERNAL_SETTING = 0x29,
+ MCU_EXT_CMD_NOA_OFFLOAD_CTRL = 0x2B,
+ MCU_EXT_CMD_GET_THEMAL_SENSOR = 0x2C,
+ MCU_EXT_CMD_WAKEUP_OPTION = 0x2E,
+ MCU_EXT_CMD_AC_QUEUE_CONTROL = 0x31,
+ MCU_EXT_CMD_BCN_UPDATE = 0x33
+};
+
+enum {
+ MCU_EXT_EVENT_CMD_RESULT = 0x0,
+ MCU_EXT_EVENT_RF_REG_ACCESS = 0x2,
+ MCU_EXT_EVENT_MULTI_CR_ACCESS = 0x0E,
+ MCU_EXT_EVENT_FW_LOG_2_HOST = 0x13,
+ MCU_EXT_EVENT_BEACON_LOSS = 0x1A,
+ MCU_EXT_EVENT_THERMAL_PROTECT = 0x22,
+ MCU_EXT_EVENT_BCN_UPDATE = 0x31,
+};
+
+#endif
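
The command IDs above are passed to __mt76_mcu_send_msg() either negated (firmware-download commands such as -MCU_CMD_FW_SCATTER) or as positive extended-command IDs. The send path that builds struct mt7603_mcu_txd is not part of this hunk, so the sketch below only illustrates the apparent convention and is not the driver's exact code: negative values become the cid directly, positive values are wrapped under MCU_CMD_EXT_CID, and firmware-download traffic is routed to the FW port queue (the real driver may restrict that queue to scatter data only).

#include <stdint.h>
#include <stdio.h>

#define MCU_PKT_ID		0xa0
#define MCU_PORT_QUEUE		0x8000
#define MCU_PORT_QUEUE_FW	0xc000
#define MCU_CMD_EXT_CID		0xED
#define MCU_CMD_FW_SCATTER	0xEE
#define MCU_Q_SET		1
#define MCU_Q_NA		3

struct txd_sketch {			/* stand-in for struct mt7603_mcu_txd */
	uint16_t pq_id;
	uint8_t cid;
	uint8_t pkt_type;
	uint8_t set_query;
	uint8_t ext_cid;
	uint8_t ext_cid_ack;
};

static void fill_txd(struct txd_sketch *txd, int cmd)
{
	txd->pkt_type = MCU_PKT_ID;

	if (cmd < 0) {			/* firmware download command */
		txd->cid = -cmd;
		txd->set_query = MCU_Q_NA;
		txd->pq_id = MCU_PORT_QUEUE_FW;
	} else {			/* extended command */
		txd->cid = MCU_CMD_EXT_CID;
		txd->ext_cid = cmd;
		txd->ext_cid_ack = 1;
		txd->set_query = MCU_Q_SET;
		txd->pq_id = MCU_PORT_QUEUE;
	}
}

int main(void)
{
	struct txd_sketch txd = { 0 };

	fill_txd(&txd, -MCU_CMD_FW_SCATTER);
	printf("cid=0x%02x pq_id=0x%04x\n", txd.cid, txd.pq_id);
	return 0;
}
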
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
new file mode 100644
index 000000000..2a6e4332a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
@@ -0,0 +1,267 @@
+/* SPDX-License-Identifier: ISC */
+
+#ifndef __MT7603_H
+#define __MT7603_H
+
+#include <linux/interrupt.h>
+#include <linux/ktime.h>
+#include "../mt76.h"
+#include "regs.h"
+
+#define MT7603_MAX_INTERFACES 4
+#define MT7603_WTBL_SIZE 128
+#define MT7603_WTBL_RESERVED (MT7603_WTBL_SIZE - 1)
+#define MT7603_WTBL_STA (MT7603_WTBL_RESERVED - MT7603_MAX_INTERFACES)
+
+#define MT7603_RATE_RETRY 2
+
+#define MT7603_MCU_RX_RING_SIZE 64
+#define MT7603_RX_RING_SIZE 128
+#define MT7603_TX_RING_SIZE 256
+#define MT7603_PSD_RING_SIZE 128
+
+#define MT7603_FIRMWARE_E1 "mt7603_e1.bin"
+#define MT7603_FIRMWARE_E2 "mt7603_e2.bin"
+#define MT7628_FIRMWARE_E1 "mt7628_e1.bin"
+#define MT7628_FIRMWARE_E2 "mt7628_e2.bin"
+
+#define MT7603_EEPROM_SIZE 1024
+
+#define MT_AGG_SIZE_LIMIT(_n) (((_n) + 1) * 4)
+
+#define MT7603_PRE_TBTT_TIME 5000 /* ms */
+
+#define MT7603_WATCHDOG_TIME 100 /* ms */
+#define MT7603_WATCHDOG_TIMEOUT 10 /* number of checks */
+
+#define MT7603_EDCCA_BLOCK_TH 10
+
+#define MT7603_CFEND_RATE_DEFAULT 0x69 /* chip default (24M) */
+#define MT7603_CFEND_RATE_11B 0x03 /* 11B LP, 11M */
+
+struct mt7603_vif;
+struct mt7603_sta;
+
+enum {
+ MT7603_REV_E1 = 0x00,
+ MT7603_REV_E2 = 0x10,
+ MT7628_REV_E1 = 0x8a00,
+};
+
+enum mt7603_bw {
+ MT_BW_20,
+ MT_BW_40,
+ MT_BW_80,
+};
+
+struct mt7603_rate_set {
+ struct ieee80211_tx_rate probe_rate;
+ struct ieee80211_tx_rate rates[4];
+};
+
+struct mt7603_sta {
+ struct mt76_wcid wcid; /* must be first */
+
+ struct mt7603_vif *vif;
+
+ struct list_head poll_list;
+ u32 tx_airtime_ac[4];
+
+ struct sk_buff_head psq;
+
+ struct ieee80211_tx_rate rates[4];
+
+ struct mt7603_rate_set rateset[2];
+ u32 rate_set_tsf;
+
+ u8 rate_count;
+ u8 n_rates;
+
+ u8 rate_probe;
+ u8 smps;
+
+ u8 ps;
+};
+
+struct mt7603_vif {
+ struct mt7603_sta sta; /* must be first */
+
+ u8 idx;
+};
+
+enum mt7603_reset_cause {
+ RESET_CAUSE_TX_HANG,
+ RESET_CAUSE_TX_BUSY,
+ RESET_CAUSE_RX_BUSY,
+ RESET_CAUSE_BEACON_STUCK,
+ RESET_CAUSE_RX_PSE_BUSY,
+ RESET_CAUSE_MCU_HANG,
+ RESET_CAUSE_RESET_FAILED,
+ __RESET_CAUSE_MAX
+};
+
+struct mt7603_dev {
+ union { /* must be first */
+ struct mt76_dev mt76;
+ struct mt76_phy mphy;
+ };
+
+ const struct mt76_bus_ops *bus_ops;
+
+ u32 rxfilter;
+
+ struct list_head sta_poll_list;
+ spinlock_t sta_poll_lock;
+
+ struct mt7603_sta global_sta;
+
+ u32 agc0, agc3;
+ u32 false_cca_ofdm, false_cca_cck;
+ unsigned long last_cca_adj;
+
+ u32 ampdu_ref;
+ __le32 rx_ampdu_ts;
+ u8 rssi_offset[3];
+
+ u8 slottime;
+ s16 coverage_class;
+
+ s8 tx_power_limit;
+
+ ktime_t ed_time;
+
+ spinlock_t ps_lock;
+
+ u8 mac_work_count;
+
+ u8 mcu_running;
+
+ u8 ed_monitor_enabled;
+ u8 ed_monitor;
+ s8 ed_trigger;
+ u8 ed_strict_mode;
+ u8 ed_strong_signal;
+
+ bool dynamic_sensitivity;
+ s8 sensitivity;
+ u8 sensitivity_limit;
+
+ u8 beacon_check;
+ u8 tx_hang_check;
+ u8 tx_dma_check;
+ u8 rx_dma_check;
+ u8 rx_pse_check;
+ u8 mcu_hang;
+
+ enum mt7603_reset_cause cur_reset_cause;
+
+ u16 tx_dma_idx[4];
+ u16 rx_dma_idx;
+
+ u32 reset_test;
+
+ unsigned int reset_cause[__RESET_CAUSE_MAX];
+};
+
+extern const struct mt76_driver_ops mt7603_drv_ops;
+extern const struct ieee80211_ops mt7603_ops;
+extern struct pci_driver mt7603_pci_driver;
+extern struct platform_driver mt76_wmac_driver;
+
+static inline bool is_mt7603(struct mt7603_dev *dev)
+{
+ return mt76xx_chip(dev) == 0x7603;
+}
+
+static inline bool is_mt7628(struct mt7603_dev *dev)
+{
+ return mt76xx_chip(dev) == 0x7628;
+}
+
+/* need offset to prevent conflict with ampdu_ack_len */
+#define MT_RATE_DRIVER_DATA_OFFSET 4
+
+u32 mt7603_reg_map(struct mt7603_dev *dev, u32 addr);
+
+irqreturn_t mt7603_irq_handler(int irq, void *dev_instance);
+
+int mt7603_register_device(struct mt7603_dev *dev);
+void mt7603_unregister_device(struct mt7603_dev *dev);
+int mt7603_eeprom_init(struct mt7603_dev *dev);
+int mt7603_dma_init(struct mt7603_dev *dev);
+void mt7603_dma_cleanup(struct mt7603_dev *dev);
+int mt7603_mcu_init(struct mt7603_dev *dev);
+void mt7603_init_debugfs(struct mt7603_dev *dev);
+
+static inline void mt7603_irq_enable(struct mt7603_dev *dev, u32 mask)
+{
+ mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, 0, mask);
+}
+
+static inline void mt7603_irq_disable(struct mt7603_dev *dev, u32 mask)
+{
+ mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
+}
+
+void mt7603_mac_reset_counters(struct mt7603_dev *dev);
+void mt7603_mac_dma_start(struct mt7603_dev *dev);
+void mt7603_mac_start(struct mt7603_dev *dev);
+void mt7603_mac_stop(struct mt7603_dev *dev);
+void mt7603_mac_work(struct work_struct *work);
+void mt7603_mac_set_timing(struct mt7603_dev *dev);
+void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval);
+int mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb);
+void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data);
+void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid);
+void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid,
+ int ba_size);
+void mt7603_mac_sta_poll(struct mt7603_dev *dev);
+
+void mt7603_pse_client_reset(struct mt7603_dev *dev);
+
+int mt7603_mcu_set_channel(struct mt7603_dev *dev);
+int mt7603_mcu_set_eeprom(struct mt7603_dev *dev);
+void mt7603_mcu_exit(struct mt7603_dev *dev);
+
+void mt7603_wtbl_init(struct mt7603_dev *dev, int idx, int vif,
+ const u8 *mac_addr);
+void mt7603_wtbl_clear(struct mt7603_dev *dev, int idx);
+void mt7603_wtbl_update_cap(struct mt7603_dev *dev, struct ieee80211_sta *sta);
+void mt7603_wtbl_set_rates(struct mt7603_dev *dev, struct mt7603_sta *sta,
+ struct ieee80211_tx_rate *probe_rate,
+ struct ieee80211_tx_rate *rates);
+int mt7603_wtbl_set_key(struct mt7603_dev *dev, int wcid,
+ struct ieee80211_key_conf *key);
+void mt7603_wtbl_set_ps(struct mt7603_dev *dev, struct mt7603_sta *sta,
+ bool enabled);
+void mt7603_wtbl_set_smps(struct mt7603_dev *dev, struct mt7603_sta *sta,
+ bool enabled);
+void mt7603_filter_tx(struct mt7603_dev *dev, int idx, bool abort);
+
+int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info);
+
+void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
+
+void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ struct sk_buff *skb);
+void mt7603_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
+void mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
+int mt7603_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void mt7603_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void mt7603_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+
+void mt7603_pre_tbtt_tasklet(unsigned long arg);
+
+void mt7603_update_channel(struct mt76_dev *mdev);
+
+void mt7603_edcca_set_strict(struct mt7603_dev *dev, bool val);
+void mt7603_cca_stats_reset(struct mt7603_dev *dev);
+
+void mt7603_init_edcca(struct mt7603_dev *dev);
+#endif
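
struct mt7603_sta and struct mt7603_dev both rely on their "must be first" members: with the embedded mt76 object at offset 0, a pointer handed to the shared mt76 core can be converted back to the driver structure cheaply, as the pci/soc probe code does with container_of(). Here is a self-contained sketch of that pattern, using stand-in struct names rather than the real mt76 types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct core_wcid {		/* stands in for struct mt76_wcid */
	int idx;
};

struct drv_sta {		/* stands in for struct mt7603_sta */
	struct core_wcid wcid;	/* must be first */
	int n_rates;
};

int main(void)
{
	struct drv_sta sta = { .wcid = { .idx = 5 }, .n_rates = 2 };
	struct core_wcid *wcid = &sta.wcid;
	struct drv_sta *back = container_of(wcid, struct drv_sta, wcid);

	/* offset 0 means the conversion back is effectively a plain cast */
	printf("offset=%zu idx=%d n_rates=%d\n",
	       offsetof(struct drv_sta, wcid), back->wcid.idx, back->n_rates);
	return 0;
}
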
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/pci.c b/drivers/net/wireless/mediatek/mt76/mt7603/pci.c
new file mode 100644
index 000000000..06fa28f64
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/pci.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: ISC
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "mt7603.h"
+
+static const struct pci_device_id mt76pci_device_table[] = {
+ { PCI_DEVICE(0x14c3, 0x7603) },
+ { },
+};
+
+static int
+mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct mt7603_dev *dev;
+ struct mt76_dev *mdev;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7603_ops,
+ &mt7603_drv_ops);
+ if (!mdev)
+ return -ENOMEM;
+
+ dev = container_of(mdev, struct mt7603_dev, mt76);
+ mt76_mmio_init(mdev, pcim_iomap_table(pdev)[0]);
+
+ mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
+ (mt76_rr(dev, MT_HW_REV) & 0xff);
+ dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+
+ ret = devm_request_irq(mdev->dev, pdev->irq, mt7603_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+ if (ret)
+ goto error;
+
+ ret = mt7603_register_device(dev);
+ if (ret)
+ goto error;
+
+ return 0;
+error:
+ mt76_free_device(&dev->mt76);
+
+ return ret;
+}
+
+static void
+mt76pci_remove(struct pci_dev *pdev)
+{
+ struct mt76_dev *mdev = pci_get_drvdata(pdev);
+ struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+
+ mt7603_unregister_device(dev);
+}
+
+MODULE_DEVICE_TABLE(pci, mt76pci_device_table);
+MODULE_FIRMWARE(MT7603_FIRMWARE_E1);
+MODULE_FIRMWARE(MT7603_FIRMWARE_E2);
+
+struct pci_driver mt7603_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mt76pci_device_table,
+ .probe = mt76pci_probe,
+ .remove = mt76pci_remove,
+};
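
Both the PCI and SoC probe paths compose mdev->rev from MT_HW_CHIPID and MT_HW_REV; helpers such as is_mt7603()/is_mt7628() and mt76xx_rev() then pick the revision word apart, and mt7603_load_firmware() uses the low half to choose between E1 and E2 firmware. A standalone sketch with invented register readouts:

#include <stdint.h>
#include <stdio.h>

static unsigned int chip_of(uint32_t rev) { return rev >> 16; }
static unsigned int step_of(uint32_t rev) { return rev & 0xffff; }

int main(void)
{
	uint32_t hw_chipid = 0x7603;	/* MT_HW_CHIPID readout (invented) */
	uint32_t hw_rev = 0x0010;	/* MT_HW_REV readout (invented) */
	uint32_t rev = (hw_chipid << 16) | (hw_rev & 0xff);

	printf("ASIC revision: %04x\n", (unsigned int)rev);
	printf("chip=0x%04x step=0x%02x -> %s firmware\n",
	       chip_of(rev), step_of(rev),
	       step_of(rev) >= 0x10 ? "E2" : "E1");	/* MT7603_REV_E2 = 0x10 */
	return 0;
}
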
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/regs.h b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
new file mode 100644
index 000000000..6741e6907
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
@@ -0,0 +1,780 @@
+/* SPDX-License-Identifier: ISC */
+
+#ifndef __MT7603_REGS_H
+#define __MT7603_REGS_H
+
+#define MT_HW_REV 0x1000
+#define MT_HW_CHIPID 0x1008
+#define MT_TOP_MISC2 0x1134
+
+#define MT_MCU_BASE 0x2000
+#define MT_MCU(ofs) (MT_MCU_BASE + (ofs))
+
+#define MT_MCU_PCIE_REMAP_1 MT_MCU(0x500)
+#define MT_MCU_PCIE_REMAP_1_OFFSET GENMASK(17, 0)
+#define MT_MCU_PCIE_REMAP_1_BASE GENMASK(31, 18)
+
+#define MT_MCU_PCIE_REMAP_2 MT_MCU(0x504)
+#define MT_MCU_PCIE_REMAP_2_OFFSET GENMASK(18, 0)
+#define MT_MCU_PCIE_REMAP_2_BASE GENMASK(31, 19)
+
+#define MT_HIF_BASE 0x4000
+#define MT_HIF(ofs) (MT_HIF_BASE + (ofs))
+
+#define MT_INT_SOURCE_CSR MT_HIF(0x200)
+#define MT_INT_MASK_CSR MT_HIF(0x204)
+#define MT_DELAY_INT_CFG MT_HIF(0x210)
+
+#define MT_INT_RX_DONE(_n) BIT(_n)
+#define MT_INT_RX_DONE_ALL GENMASK(1, 0)
+#define MT_INT_TX_DONE_ALL GENMASK(19, 4)
+#define MT_INT_TX_DONE(_n) BIT((_n) + 4)
+
+#define MT_INT_RX_COHERENT BIT(20)
+#define MT_INT_TX_COHERENT BIT(21)
+#define MT_INT_MAC_IRQ3 BIT(27)
+
+#define MT_INT_MCU_CMD BIT(30)
+
+#define MT_WPDMA_GLO_CFG MT_HIF(0x208)
+#define MT_WPDMA_GLO_CFG_TX_DMA_EN BIT(0)
+#define MT_WPDMA_GLO_CFG_TX_DMA_BUSY BIT(1)
+#define MT_WPDMA_GLO_CFG_RX_DMA_EN BIT(2)
+#define MT_WPDMA_GLO_CFG_RX_DMA_BUSY BIT(3)
+#define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE GENMASK(5, 4)
+#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE BIT(6)
+#define MT_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7)
+#define MT_WPDMA_GLO_CFG_HDR_SEG_LEN GENMASK(15, 8)
+#define MT_WPDMA_GLO_CFG_SW_RESET BIT(24)
+#define MT_WPDMA_GLO_CFG_FORCE_TX_EOF BIT(25)
+#define MT_WPDMA_GLO_CFG_CLK_GATE_DIS BIT(30)
+#define MT_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31)
+
+#define MT_WPDMA_RST_IDX MT_HIF(0x20c)
+
+#define MT_WPDMA_DEBUG MT_HIF(0x244)
+#define MT_WPDMA_DEBUG_VALUE GENMASK(17, 0)
+#define MT_WPDMA_DEBUG_SEL BIT(27)
+#define MT_WPDMA_DEBUG_IDX GENMASK(31, 28)
+
+#define MT_TX_RING_BASE MT_HIF(0x300)
+#define MT_RX_RING_BASE MT_HIF(0x400)
+
+#define MT_TXTIME_THRESH_BASE MT_HIF(0x500)
+#define MT_TXTIME_THRESH(n) (MT_TXTIME_THRESH_BASE + ((n) * 4))
+
+#define MT_PAGE_COUNT_BASE MT_HIF(0x540)
+#define MT_PAGE_COUNT(n) (MT_PAGE_COUNT_BASE + ((n) * 4))
+
+#define MT_SCH_1 MT_HIF(0x588)
+#define MT_SCH_2 MT_HIF(0x58c)
+#define MT_SCH_3 MT_HIF(0x590)
+
+#define MT_SCH_4 MT_HIF(0x594)
+#define MT_SCH_4_FORCE_QID GENMASK(4, 0)
+#define MT_SCH_4_BYPASS BIT(5)
+#define MT_SCH_4_RESET BIT(8)
+
+#define MT_GROUP_THRESH_BASE MT_HIF(0x598)
+#define MT_GROUP_THRESH(n) (MT_GROUP_THRESH_BASE + ((n) * 4))
+
+#define MT_QUEUE_PRIORITY_1 MT_HIF(0x580)
+#define MT_QUEUE_PRIORITY_2 MT_HIF(0x584)
+
+#define MT_BMAP_0 MT_HIF(0x5b0)
+#define MT_BMAP_1 MT_HIF(0x5b4)
+#define MT_BMAP_2 MT_HIF(0x5b8)
+
+#define MT_HIGH_PRIORITY_1 MT_HIF(0x5bc)
+#define MT_HIGH_PRIORITY_2 MT_HIF(0x5c0)
+
+#define MT_PRIORITY_MASK MT_HIF(0x5c4)
+
+#define MT_RSV_MAX_THRESH MT_HIF(0x5c8)
+
+#define MT_PSE_BASE 0x8000
+#define MT_PSE(ofs) (MT_PSE_BASE + (ofs))
+
+#define MT_MCU_DEBUG_RESET MT_PSE(0x16c)
+#define MT_MCU_DEBUG_RESET_PSE BIT(0)
+#define MT_MCU_DEBUG_RESET_PSE_S BIT(1)
+#define MT_MCU_DEBUG_RESET_QUEUES GENMASK(6, 2)
+
+#define MT_PSE_FC_P0 MT_PSE(0x120)
+#define MT_PSE_FC_P0_MIN_RESERVE GENMASK(11, 0)
+#define MT_PSE_FC_P0_MAX_QUOTA GENMASK(27, 16)
+
+#define MT_PSE_FRP MT_PSE(0x138)
+#define MT_PSE_FRP_P0 GENMASK(2, 0)
+#define MT_PSE_FRP_P1 GENMASK(5, 3)
+#define MT_PSE_FRP_P2_RQ0 GENMASK(8, 6)
+#define MT_PSE_FRP_P2_RQ1 GENMASK(11, 9)
+#define MT_PSE_FRP_P2_RQ2 GENMASK(14, 12)
+
+#define MT_FC_RSV_COUNT_0 MT_PSE(0x13c)
+#define MT_FC_RSV_COUNT_0_P0 GENMASK(11, 0)
+#define MT_FC_RSV_COUNT_0_P1 GENMASK(27, 16)
+
+#define MT_FC_SP2_Q0Q1 MT_PSE(0x14c)
+#define MT_FC_SP2_Q0Q1_SRC_COUNT_Q0 GENMASK(11, 0)
+#define MT_FC_SP2_Q0Q1_SRC_COUNT_Q1 GENMASK(27, 16)
+
+#define MT_PSE_FW_SHARED MT_PSE(0x17c)
+
+#define MT_PSE_RTA MT_PSE(0x194)
+#define MT_PSE_RTA_QUEUE_ID GENMASK(4, 0)
+#define MT_PSE_RTA_PORT_ID GENMASK(6, 5)
+#define MT_PSE_RTA_REDIRECT_EN BIT(7)
+#define MT_PSE_RTA_TAG_ID GENMASK(15, 8)
+#define MT_PSE_RTA_WRITE BIT(16)
+#define MT_PSE_RTA_BUSY BIT(31)
+
+#define MT_WF_PHY_BASE 0x10000
+#define MT_WF_PHY_OFFSET 0x1000
+#define MT_WF_PHY(ofs) (MT_WF_PHY_BASE + (ofs))
+
+#define MT_AGC_BASE MT_WF_PHY(0x500)
+#define MT_AGC(n) (MT_AGC_BASE + ((n) * 4))
+
+#define MT_AGC1_BASE MT_WF_PHY(0x1500)
+#define MT_AGC1(n) (MT_AGC1_BASE + ((n) * 4))
+
+#define MT_AGC_41_RSSI_0 GENMASK(23, 16)
+#define MT_AGC_41_RSSI_1 GENMASK(7, 0)
+
+#define MT_RXTD_BASE MT_WF_PHY(0x600)
+#define MT_RXTD(n) (MT_RXTD_BASE + ((n) * 4))
+
+#define MT_RXTD_6_ACI_TH GENMASK(4, 0)
+#define MT_RXTD_6_CCAED_TH GENMASK(14, 8)
+
+#define MT_RXTD_8_LOWER_SIGNAL GENMASK(5, 0)
+
+#define MT_RXTD_13_ACI_TH_EN BIT(0)
+
+#define MT_WF_PHY_CR_TSSI_BASE MT_WF_PHY(0xd00)
+#define MT_WF_PHY_CR_TSSI(phy, n) (MT_WF_PHY_CR_TSSI_BASE + \
+ ((phy) * MT_WF_PHY_OFFSET) + \
+ ((n) * 4))
+
+#define MT_PHYCTRL_BASE MT_WF_PHY(0x4100)
+#define MT_PHYCTRL(n) (MT_PHYCTRL_BASE + ((n) * 4))
+
+#define MT_PHYCTRL_2_STATUS_RESET BIT(6)
+#define MT_PHYCTRL_2_STATUS_EN BIT(7)
+
+#define MT_PHYCTRL_STAT_PD MT_PHYCTRL(3)
+#define MT_PHYCTRL_STAT_PD_OFDM GENMASK(31, 16)
+#define MT_PHYCTRL_STAT_PD_CCK GENMASK(15, 0)
+
+#define MT_PHYCTRL_STAT_MDRDY MT_PHYCTRL(8)
+#define MT_PHYCTRL_STAT_MDRDY_OFDM GENMASK(31, 16)
+#define MT_PHYCTRL_STAT_MDRDY_CCK GENMASK(15, 0)
+
+#define MT_WF_AGG_BASE 0x21200
+#define MT_WF_AGG(ofs) (MT_WF_AGG_BASE + (ofs))
+
+#define MT_AGG_ARCR MT_WF_AGG(0x010)
+#define MT_AGG_ARCR_INIT_RATE1 BIT(0)
+#define MT_AGG_ARCR_FB_SGI_DISABLE BIT(1)
+#define MT_AGG_ARCR_RATE8_DOWN_WRAP BIT(2)
+#define MT_AGG_ARCR_RTS_RATE_THR GENMASK(12, 8)
+#define MT_AGG_ARCR_RATE_DOWN_RATIO GENMASK(17, 16)
+#define MT_AGG_ARCR_RATE_DOWN_RATIO_EN BIT(19)
+#define MT_AGG_ARCR_RATE_UP_EXTRA_TH GENMASK(22, 20)
+#define MT_AGG_ARCR_SPE_DIS_TH GENMASK(27, 24)
+
+#define MT_AGG_ARUCR MT_WF_AGG(0x014)
+#define MT_AGG_ARDCR MT_WF_AGG(0x018)
+#define MT_AGG_ARxCR_LIMIT_SHIFT(_n) (4 * (_n))
+#define MT_AGG_ARxCR_LIMIT(_n) GENMASK(2 + \
+ MT_AGG_ARxCR_LIMIT_SHIFT(_n), \
+ MT_AGG_ARxCR_LIMIT_SHIFT(_n))
+
+#define MT_AGG_LIMIT MT_WF_AGG(0x040)
+#define MT_AGG_LIMIT_1 MT_WF_AGG(0x044)
+#define MT_AGG_LIMIT_AC(_n) GENMASK(((_n) + 1) * 8 - 1, (_n) * 8)
+
+#define MT_AGG_BA_SIZE_LIMIT_0 MT_WF_AGG(0x048)
+#define MT_AGG_BA_SIZE_LIMIT_1 MT_WF_AGG(0x04c)
+#define MT_AGG_BA_SIZE_LIMIT_SHIFT 8
+
+#define MT_AGG_PCR MT_WF_AGG(0x050)
+#define MT_AGG_PCR_MM BIT(16)
+#define MT_AGG_PCR_GF BIT(17)
+#define MT_AGG_PCR_BW40 BIT(18)
+#define MT_AGG_PCR_RIFS BIT(19)
+#define MT_AGG_PCR_BW80 BIT(20)
+#define MT_AGG_PCR_BW160 BIT(21)
+#define MT_AGG_PCR_ERP BIT(22)
+
+#define MT_AGG_PCR_RTS MT_WF_AGG(0x054)
+#define MT_AGG_PCR_RTS_THR GENMASK(19, 0)
+#define MT_AGG_PCR_RTS_PKT_THR GENMASK(31, 25)
+
+#define MT_AGG_ASRCR MT_WF_AGG(0x060)
+#define MT_AGG_ASRCR_RANGE(val, n) (((val) >> ((n) << 3)) & GENMASK(5, 0))
+
+#define MT_AGG_CONTROL MT_WF_AGG(0x070)
+#define MT_AGG_CONTROL_NO_BA_RULE BIT(0)
+#define MT_AGG_CONTROL_NO_BA_AR_RULE BIT(1)
+#define MT_AGG_CONTROL_CFEND_SPE_EN BIT(3)
+#define MT_AGG_CONTROL_CFEND_RATE GENMASK(15, 4)
+#define MT_AGG_CONTROL_BAR_SPE_EN BIT(19)
+#define MT_AGG_CONTROL_BAR_RATE GENMASK(31, 20)
+
+#define MT_AGG_TMP MT_WF_AGG(0x0d8)
+
+#define MT_AGG_BWCR MT_WF_AGG(0x0ec)
+#define MT_AGG_BWCR_BW GENMASK(3, 2)
+
+#define MT_AGG_RETRY_CONTROL MT_WF_AGG(0x0f4)
+#define MT_AGG_RETRY_CONTROL_RTS_LIMIT GENMASK(11, 7)
+#define MT_AGG_RETRY_CONTROL_BAR_LIMIT GENMASK(15, 12)
+
+#define MT_WF_DMA_BASE 0x21c00
+#define MT_WF_DMA(ofs) (MT_WF_DMA_BASE + (ofs))
+
+#define MT_DMA_DCR0 MT_WF_DMA(0x000)
+#define MT_DMA_DCR0_MAX_RX_LEN GENMASK(15, 0)
+#define MT_DMA_DCR0_DAMSDU BIT(16)
+#define MT_DMA_DCR0_RX_VEC_DROP BIT(17)
+
+#define MT_DMA_DCR1 MT_WF_DMA(0x004)
+
+#define MT_DMA_FQCR0 MT_WF_DMA(0x008)
+#define MT_DMA_FQCR0_TARGET_WCID GENMASK(7, 0)
+#define MT_DMA_FQCR0_TARGET_BSS GENMASK(13, 8)
+#define MT_DMA_FQCR0_TARGET_QID GENMASK(20, 16)
+#define MT_DMA_FQCR0_DEST_PORT_ID GENMASK(23, 22)
+#define MT_DMA_FQCR0_DEST_QUEUE_ID GENMASK(28, 24)
+#define MT_DMA_FQCR0_MODE BIT(29)
+#define MT_DMA_FQCR0_STATUS BIT(30)
+#define MT_DMA_FQCR0_BUSY BIT(31)
+
+#define MT_DMA_RCFR0 MT_WF_DMA(0x070)
+#define MT_DMA_VCFR0 MT_WF_DMA(0x07c)
+
+#define MT_DMA_TCFR0 MT_WF_DMA(0x080)
+#define MT_DMA_TCFR1 MT_WF_DMA(0x084)
+#define MT_DMA_TCFR_TXS_AGGR_TIMEOUT GENMASK(27, 16)
+#define MT_DMA_TCFR_TXS_QUEUE BIT(14)
+#define MT_DMA_TCFR_TXS_AGGR_COUNT GENMASK(12, 8)
+#define MT_DMA_TCFR_TXS_BIT_MAP GENMASK(6, 0)
+
+#define MT_DMA_TMCFR0 MT_WF_DMA(0x088)
+
+#define MT_WF_ARB_BASE 0x21400
+#define MT_WF_ARB(ofs) (MT_WF_ARB_BASE + (ofs))
+
+#define MT_WMM_AIFSN MT_WF_ARB(0x020)
+#define MT_WMM_AIFSN_MASK GENMASK(3, 0)
+#define MT_WMM_AIFSN_SHIFT(_n) ((_n) * 4)
+
+#define MT_WMM_CWMAX_BASE MT_WF_ARB(0x028)
+#define MT_WMM_CWMAX(_n) (MT_WMM_CWMAX_BASE + (((_n) / 2) << 2))
+#define MT_WMM_CWMAX_SHIFT(_n) (((_n) & 1) * 16)
+#define MT_WMM_CWMAX_MASK GENMASK(15, 0)
+
+#define MT_WMM_CWMIN MT_WF_ARB(0x040)
+#define MT_WMM_CWMIN_MASK GENMASK(7, 0)
+#define MT_WMM_CWMIN_SHIFT(_n) ((_n) * 8)
+
+#define MT_WF_ARB_RQCR MT_WF_ARB(0x070)
+#define MT_WF_ARB_RQCR_RX_START BIT(0)
+#define MT_WF_ARB_RQCR_RXV_START BIT(4)
+#define MT_WF_ARB_RQCR_RXV_R_EN BIT(7)
+#define MT_WF_ARB_RQCR_RXV_T_EN BIT(8)
+
+#define MT_ARB_SCR MT_WF_ARB(0x080)
+#define MT_ARB_SCR_BCNQ_OPMODE_MASK GENMASK(1, 0)
+#define MT_ARB_SCR_BCNQ_OPMODE_SHIFT(n) ((n) * 2)
+#define MT_ARB_SCR_TX_DISABLE BIT(8)
+#define MT_ARB_SCR_RX_DISABLE BIT(9)
+#define MT_ARB_SCR_BCNQ_EMPTY_SKIP BIT(28)
+#define MT_ARB_SCR_TTTT_BTIM_PRIO BIT(29)
+#define MT_ARB_SCR_TBTT_BCN_PRIO BIT(30)
+#define MT_ARB_SCR_TBTT_BCAST_PRIO BIT(31)
+
+enum {
+ MT_BCNQ_OPMODE_STA = 0,
+ MT_BCNQ_OPMODE_AP = 1,
+ MT_BCNQ_OPMODE_ADHOC = 2,
+};
+
+#define MT_WF_ARB_TX_START_0 MT_WF_ARB(0x100)
+#define MT_WF_ARB_TX_START_1 MT_WF_ARB(0x104)
+#define MT_WF_ARB_TX_FLUSH_0 MT_WF_ARB(0x108)
+#define MT_WF_ARB_TX_FLUSH_1 MT_WF_ARB(0x10c)
+#define MT_WF_ARB_TX_STOP_0 MT_WF_ARB(0x110)
+#define MT_WF_ARB_TX_STOP_1 MT_WF_ARB(0x114)
+
+#define MT_WF_ARB_BCN_START MT_WF_ARB(0x118)
+#define MT_WF_ARB_BCN_START_BSSn(n) BIT(0 + (n))
+#define MT_WF_ARB_BCN_START_T_PRE_TTTT BIT(10)
+#define MT_WF_ARB_BCN_START_T_TTTT BIT(11)
+#define MT_WF_ARB_BCN_START_T_PRE_TBTT BIT(12)
+#define MT_WF_ARB_BCN_START_T_TBTT BIT(13)
+#define MT_WF_ARB_BCN_START_T_SLOT_IDLE BIT(14)
+#define MT_WF_ARB_BCN_START_T_TX_START BIT(15)
+#define MT_WF_ARB_BCN_START_BSS0n(n) BIT((n) ? 16 + ((n) - 1) : 0)
+
+#define MT_WF_ARB_BCN_FLUSH MT_WF_ARB(0x11c)
+#define MT_WF_ARB_BCN_FLUSH_BSSn(n) BIT(0 + (n))
+#define MT_WF_ARB_BCN_FLUSH_BSS0n(n) BIT((n) ? 16 + ((n) - 1) : 0)
+
+#define MT_WF_ARB_CAB_START MT_WF_ARB(0x120)
+#define MT_WF_ARB_CAB_START_BSSn(n) BIT(0 + (n))
+#define MT_WF_ARB_CAB_START_BSS0n(n) BIT((n) ? 16 + ((n) - 1) : 0)
+
+#define MT_WF_ARB_CAB_FLUSH MT_WF_ARB(0x124)
+#define MT_WF_ARB_CAB_FLUSH_BSSn(n) BIT(0 + (n))
+#define MT_WF_ARB_CAB_FLUSH_BSS0n(n) BIT((n) ? 16 + ((n) - 1) : 0)
+
+#define MT_WF_ARB_CAB_COUNT(n) MT_WF_ARB(0x128 + (n) * 4)
+#define MT_WF_ARB_CAB_COUNT_SHIFT 4
+#define MT_WF_ARB_CAB_COUNT_MASK GENMASK(3, 0)
+#define MT_WF_ARB_CAB_COUNT_B0_REG(n) MT_WF_ARB_CAB_COUNT(((n) > 12 ? 2 : \
+ ((n) > 4 ? 1 : 0)))
+#define MT_WF_ARB_CAB_COUNT_B0_SHIFT(n) (((n) > 12 ? (n) - 12 : \
+ ((n) > 4 ? (n) - 4 : \
+ (n) ? (n) + 3 : 0)) * 4)
+
+#define MT_TX_ABORT MT_WF_ARB(0x134)
+#define MT_TX_ABORT_EN BIT(0)
+#define MT_TX_ABORT_WCID GENMASK(15, 8)
+
+#define MT_WF_TMAC_BASE 0x21600
+#define MT_WF_TMAC(ofs) (MT_WF_TMAC_BASE + (ofs))
+
+#define MT_TMAC_TCR MT_WF_TMAC(0x000)
+#define MT_TMAC_TCR_BLINK_SEL GENMASK(7, 6)
+#define MT_TMAC_TCR_PRE_RTS_GUARD GENMASK(11, 8)
+#define MT_TMAC_TCR_PRE_RTS_SEC_IDLE GENMASK(13, 12)
+#define MT_TMAC_TCR_RTS_SIGTA BIT(14)
+#define MT_TMAC_TCR_LDPC_OFS BIT(15)
+#define MT_TMAC_TCR_TX_STREAMS GENMASK(17, 16)
+#define MT_TMAC_TCR_SCH_IDLE_SEL GENMASK(19, 18)
+#define MT_TMAC_TCR_SCH_DET_PER_IOD BIT(20)
+#define MT_TMAC_TCR_DCH_DET_DISABLE BIT(21)
+#define MT_TMAC_TCR_TX_RIFS BIT(22)
+#define MT_TMAC_TCR_RX_RIFS_MODE BIT(23)
+#define MT_TMAC_TCR_TXOP_TBTT_CTL BIT(24)
+#define MT_TMAC_TCR_TBTT_TX_STOP_CTL BIT(25)
+#define MT_TMAC_TCR_TXOP_BURST_STOP BIT(26)
+#define MT_TMAC_TCR_RDG_RA_MODE BIT(27)
+#define MT_TMAC_TCR_RDG_RESP BIT(29)
+#define MT_TMAC_TCR_RDG_NO_PENDING BIT(30)
+#define MT_TMAC_TCR_SMOOTHING BIT(31)
+
+#define MT_WMM_TXOP_BASE MT_WF_TMAC(0x010)
+#define MT_WMM_TXOP(_n) (MT_WMM_TXOP_BASE + \
+ ((((_n) / 2) ^ 0x1) << 2))
+#define MT_WMM_TXOP_SHIFT(_n) (((_n) & 1) * 16)
+#define MT_WMM_TXOP_MASK GENMASK(15, 0)
+
+#define MT_TIMEOUT_CCK MT_WF_TMAC(0x090)
+#define MT_TIMEOUT_OFDM MT_WF_TMAC(0x094)
+#define MT_TIMEOUT_VAL_PLCP GENMASK(15, 0)
+#define MT_TIMEOUT_VAL_CCA GENMASK(31, 16)
+
+#define MT_TXREQ MT_WF_TMAC(0x09c)
+#define MT_TXREQ_CCA_SRC_SEL GENMASK(31, 30)
+
+#define MT_RXREQ MT_WF_TMAC(0x0a0)
+#define MT_RXREQ_DELAY GENMASK(8, 0)
+
+#define MT_IFS MT_WF_TMAC(0x0a4)
+#define MT_IFS_EIFS GENMASK(8, 0)
+#define MT_IFS_RIFS GENMASK(14, 10)
+#define MT_IFS_SIFS GENMASK(22, 16)
+#define MT_IFS_SLOT GENMASK(30, 24)
+
+#define MT_TMAC_PCR MT_WF_TMAC(0x0b4)
+#define MT_TMAC_PCR_RATE GENMASK(8, 0)
+#define MT_TMAC_PCR_RATE_FIXED BIT(15)
+#define MT_TMAC_PCR_ANT_ID GENMASK(21, 16)
+#define MT_TMAC_PCR_ANT_ID_SEL BIT(22)
+#define MT_TMAC_PCR_SPE_EN BIT(23)
+#define MT_TMAC_PCR_ANT_PRI GENMASK(26, 24)
+#define MT_TMAC_PCR_ANT_PRI_SEL		BIT(27)
+
+#define MT_WF_RMAC_BASE 0x21800
+#define MT_WF_RMAC(ofs) (MT_WF_RMAC_BASE + (ofs))
+
+#define MT_WF_RFCR MT_WF_RMAC(0x000)
+#define MT_WF_RFCR_DROP_STBC_MULTI BIT(0)
+#define MT_WF_RFCR_DROP_FCSFAIL BIT(1)
+#define MT_WF_RFCR_DROP_VERSION BIT(3)
+#define MT_WF_RFCR_DROP_PROBEREQ BIT(4)
+#define MT_WF_RFCR_DROP_MCAST BIT(5)
+#define MT_WF_RFCR_DROP_BCAST BIT(6)
+#define MT_WF_RFCR_DROP_MCAST_FILTERED BIT(7)
+#define MT_WF_RFCR_DROP_A3_MAC BIT(8)
+#define MT_WF_RFCR_DROP_A3_BSSID BIT(9)
+#define MT_WF_RFCR_DROP_A2_BSSID BIT(10)
+#define MT_WF_RFCR_DROP_OTHER_BEACON BIT(11)
+#define MT_WF_RFCR_DROP_FRAME_REPORT BIT(12)
+#define MT_WF_RFCR_DROP_CTL_RSV BIT(13)
+#define MT_WF_RFCR_DROP_CTS BIT(14)
+#define MT_WF_RFCR_DROP_RTS BIT(15)
+#define MT_WF_RFCR_DROP_DUPLICATE BIT(16)
+#define MT_WF_RFCR_DROP_OTHER_BSS BIT(17)
+#define MT_WF_RFCR_DROP_OTHER_UC BIT(18)
+#define MT_WF_RFCR_DROP_OTHER_TIM BIT(19)
+#define MT_WF_RFCR_DROP_NDPA BIT(20)
+#define MT_WF_RFCR_DROP_UNWANTED_CTL BIT(21)
+
+#define MT_BSSID0(idx) MT_WF_RMAC(0x004 + (idx) * 8)
+#define MT_BSSID1(idx) MT_WF_RMAC(0x008 + (idx) * 8)
+#define MT_BSSID1_VALID BIT(16)
+
+#define MT_MAC_ADDR0(idx) MT_WF_RMAC(0x024 + (idx) * 8)
+#define MT_MAC_ADDR1(idx) MT_WF_RMAC(0x028 + (idx) * 8)
+#define MT_MAC_ADDR1_ADDR GENMASK(15, 0)
+#define MT_MAC_ADDR1_VALID BIT(16)
+
+#define MT_BA_CONTROL_0 MT_WF_RMAC(0x068)
+#define MT_BA_CONTROL_1 MT_WF_RMAC(0x06c)
+#define MT_BA_CONTROL_1_ADDR GENMASK(15, 0)
+#define MT_BA_CONTROL_1_TID GENMASK(19, 16)
+#define MT_BA_CONTROL_1_IGNORE_TID BIT(20)
+#define MT_BA_CONTROL_1_IGNORE_ALL BIT(21)
+#define MT_BA_CONTROL_1_RESET BIT(22)
+
+#define MT_WF_RMACDR MT_WF_RMAC(0x078)
+#define MT_WF_RMACDR_TSF_PROBERSP_DIS BIT(0)
+#define MT_WF_RMACDR_TSF_TIM BIT(4)
+#define MT_WF_RMACDR_MBSSID_MASK GENMASK(25, 24)
+#define MT_WF_RMACDR_CHECK_HTC_BY_RATE BIT(26)
+#define MT_WF_RMACDR_MAXLEN_20BIT BIT(30)
+
+#define MT_WF_RMAC_RMCR MT_WF_RMAC(0x080)
+#define MT_WF_RMAC_RMCR_SMPS_MODE GENMASK(21, 20)
+#define MT_WF_RMAC_RMCR_RX_STREAMS GENMASK(24, 22)
+#define MT_WF_RMAC_RMCR_SMPS_RTS BIT(25)
+
+#define MT_WF_RMAC_CH_FREQ MT_WF_RMAC(0x090)
+#define MT_WF_RMAC_MAXMINLEN MT_WF_RMAC(0x098)
+#define MT_WF_RFCR1 MT_WF_RMAC(0x0a4)
+#define MT_WF_RMAC_TMR_PA MT_WF_RMAC(0x0e0)
+
+#define MT_WF_SEC_BASE 0x21a00
+#define MT_WF_SEC(ofs) (MT_WF_SEC_BASE + (ofs))
+
+#define MT_SEC_SCR MT_WF_SEC(0x004)
+#define MT_SEC_SCR_MASK_ORDER GENMASK(1, 0)
+
+#define MT_WTBL_OFF_BASE 0x23000
+#define MT_WTBL_OFF(n) (MT_WTBL_OFF_BASE + (n))
+
+#define MT_WTBL_UPDATE MT_WTBL_OFF(0x000)
+#define MT_WTBL_UPDATE_WLAN_IDX GENMASK(7, 0)
+#define MT_WTBL_UPDATE_WTBL2 BIT(11)
+#define MT_WTBL_UPDATE_ADM_COUNT_CLEAR BIT(12)
+#define MT_WTBL_UPDATE_RATE_UPDATE BIT(13)
+#define MT_WTBL_UPDATE_TX_COUNT_CLEAR BIT(14)
+#define MT_WTBL_UPDATE_RX_COUNT_CLEAR BIT(15)
+#define MT_WTBL_UPDATE_BUSY BIT(16)
+
+#define MT_WTBL_RMVTCR MT_WTBL_OFF(0x008)
+#define MT_WTBL_RMVTCR_RX_MV_MODE BIT(23)
+
+#define MT_LPON_BASE 0x24000
+#define MT_LPON(n) (MT_LPON_BASE + (n))
+
+#define MT_LPON_T0CR MT_LPON(0x010)
+#define MT_LPON_T0CR_MODE GENMASK(1, 0)
+
+#define MT_LPON_UTTR0 MT_LPON(0x018)
+#define MT_LPON_UTTR1 MT_LPON(0x01c)
+
+#define MT_LPON_BTEIR MT_LPON(0x020)
+#define MT_LPON_BTEIR_MBSS_MODE GENMASK(31, 29)
+
+#define MT_PRE_TBTT MT_LPON(0x030)
+#define MT_PRE_TBTT_MASK GENMASK(7, 0)
+#define MT_PRE_TBTT_SHIFT 8
+
+#define MT_TBTT MT_LPON(0x034)
+#define MT_TBTT_PERIOD GENMASK(15, 0)
+#define MT_TBTT_DTIM_PERIOD GENMASK(23, 16)
+#define MT_TBTT_TBTT_WAKE_PERIOD GENMASK(27, 24)
+#define MT_TBTT_DTIM_WAKE_PERIOD GENMASK(30, 28)
+#define MT_TBTT_CAL_ENABLE BIT(31)
+
+#define MT_TBTT_TIMER_CFG MT_LPON(0x05c)
+
+#define MT_LPON_SBTOR(n) MT_LPON(0x0a0)
+#define MT_LPON_SBTOR_SUB_BSS_EN BIT(29)
+#define MT_LPON_SBTOR_TIME_OFFSET GENMASK(19, 0)
+
+#define MT_INT_WAKEUP_BASE 0x24400
+#define MT_INT_WAKEUP(n) (MT_INT_WAKEUP_BASE + (n))
+
+#define MT_HW_INT_STATUS(n) MT_INT_WAKEUP(0x3c + (n) * 8)
+#define MT_HW_INT_MASK(n) MT_INT_WAKEUP(0x40 + (n) * 8)
+
+#define MT_HW_INT3_TBTT0 BIT(15)
+#define MT_HW_INT3_PRE_TBTT0 BIT(31)
+
+#define MT_WTBL1_BASE 0x28000
+
+#define MT_WTBL_ON_BASE (MT_WTBL1_BASE + 0x2000)
+#define MT_WTBL_ON(_n) (MT_WTBL_ON_BASE + (_n))
+
+#define MT_WTBL_RIUCR0 MT_WTBL_ON(0x200)
+
+#define MT_WTBL_RIUCR1 MT_WTBL_ON(0x204)
+#define MT_WTBL_RIUCR1_RATE0 GENMASK(11, 0)
+#define MT_WTBL_RIUCR1_RATE1 GENMASK(23, 12)
+#define MT_WTBL_RIUCR1_RATE2_LO GENMASK(31, 24)
+
+#define MT_WTBL_RIUCR2 MT_WTBL_ON(0x208)
+#define MT_WTBL_RIUCR2_RATE2_HI GENMASK(3, 0)
+#define MT_WTBL_RIUCR2_RATE3 GENMASK(15, 4)
+#define MT_WTBL_RIUCR2_RATE4 GENMASK(27, 16)
+#define MT_WTBL_RIUCR2_RATE5_LO GENMASK(31, 28)
+
+#define MT_WTBL_RIUCR3 MT_WTBL_ON(0x20c)
+#define MT_WTBL_RIUCR3_RATE5_HI GENMASK(7, 0)
+#define MT_WTBL_RIUCR3_RATE6 GENMASK(19, 8)
+#define MT_WTBL_RIUCR3_RATE7 GENMASK(31, 20)
+
+#define MT_MIB_BASE 0x2c000
+#define MT_MIB(_n) (MT_MIB_BASE + (_n))
+
+#define MT_MIB_CTL MT_MIB(0x00)
+#define MT_MIB_CTL_PSCCA_TIME GENMASK(13, 11)
+#define MT_MIB_CTL_CCA_NAV_TX GENMASK(16, 14)
+#define MT_MIB_CTL_ED_TIME GENMASK(30, 28)
+#define MT_MIB_CTL_READ_CLR_DIS BIT(31)
+
+#define MT_MIB_STAT(_n) MT_MIB(0x08 + (_n) * 4)
+
+#define MT_MIB_STAT_CCA MT_MIB_STAT(9)
+#define MT_MIB_STAT_CCA_MASK GENMASK(23, 0)
+
+#define MT_MIB_STAT_PSCCA MT_MIB_STAT(16)
+#define MT_MIB_STAT_PSCCA_MASK GENMASK(23, 0)
+
+#define MT_TX_AGG_CNT(n) MT_MIB(0xa8 + ((n) << 2))
+
+#define MT_MIB_STAT_ED MT_MIB_STAT(18)
+#define MT_MIB_STAT_ED_MASK GENMASK(23, 0)
+
+#define MT_PCIE_REMAP_BASE_1 0x40000
+#define MT_PCIE_REMAP_BASE_2 0x80000
+
+#define MT_TX_HW_QUEUE_MGMT 4
+#define MT_TX_HW_QUEUE_MCU 5
+#define MT_TX_HW_QUEUE_BCN 7
+#define MT_TX_HW_QUEUE_BMC 8
+
+#define MT_LED_BASE_PHYS 0x80024000
+#define MT_LED_PHYS(_n) (MT_LED_BASE_PHYS + (_n))
+
+#define MT_LED_CTRL MT_LED_PHYS(0x00)
+
+#define MT_LED_CTRL_REPLAY(_n) BIT(0 + (8 * (_n)))
+#define MT_LED_CTRL_POLARITY(_n) BIT(1 + (8 * (_n)))
+#define MT_LED_CTRL_TX_BLINK_MODE(_n) BIT(2 + (8 * (_n)))
+#define MT_LED_CTRL_TX_MANUAL_BLINK(_n) BIT(3 + (8 * (_n)))
+#define MT_LED_CTRL_TX_OVER_BLINK(_n) BIT(5 + (8 * (_n)))
+#define MT_LED_CTRL_KICK(_n) BIT(7 + (8 * (_n)))
+
+#define MT_LED_STATUS_0(_n) MT_LED_PHYS(0x10 + ((_n) * 8))
+#define MT_LED_STATUS_1(_n) MT_LED_PHYS(0x14 + ((_n) * 8))
+#define MT_LED_STATUS_OFF GENMASK(31, 24)
+#define MT_LED_STATUS_ON GENMASK(23, 16)
+#define MT_LED_STATUS_DURATION GENMASK(15, 0)
+
+#define MT_CLIENT_BASE_PHYS_ADDR 0x800c0000
+
+#define MT_CLIENT_TMAC_INFO_TEMPLATE 0x040
+
+#define MT_CLIENT_STATUS 0x06c
+
+#define MT_CLIENT_RESET_TX 0x070
+#define MT_CLIENT_RESET_TX_R_E_1 BIT(16)
+#define MT_CLIENT_RESET_TX_R_E_2 BIT(17)
+#define MT_CLIENT_RESET_TX_R_E_1_S BIT(20)
+#define MT_CLIENT_RESET_TX_R_E_2_S BIT(21)
+
+#define MT_EFUSE_BASE 0x81070000
+
+#define MT_EFUSE_BASE_CTRL 0x000
+#define MT_EFUSE_BASE_CTRL_EMPTY BIT(30)
+
+#define MT_EFUSE_CTRL 0x008
+#define MT_EFUSE_CTRL_AOUT GENMASK(5, 0)
+#define MT_EFUSE_CTRL_MODE GENMASK(7, 6)
+#define MT_EFUSE_CTRL_LDO_OFF_TIME GENMASK(13, 8)
+#define MT_EFUSE_CTRL_LDO_ON_TIME GENMASK(15, 14)
+#define MT_EFUSE_CTRL_AIN GENMASK(25, 16)
+#define MT_EFUSE_CTRL_VALID BIT(29)
+#define MT_EFUSE_CTRL_KICK BIT(30)
+#define MT_EFUSE_CTRL_SEL BIT(31)
+
+#define MT_EFUSE_WDATA(_i) (0x010 + ((_i) * 4))
+#define MT_EFUSE_RDATA(_i) (0x030 + ((_i) * 4))
+
+#define MT_CLIENT_RXINF 0x068
+#define MT_CLIENT_RXINF_RXSH_GROUPS GENMASK(2, 0)
+
+#define MT_PSE_BASE_PHYS_ADDR 0xa0000000
+
+#define MT_PSE_WTBL_2_PHYS_ADDR 0xa5000000
+
+#define MT_WTBL1_SIZE (8 * 4)
+#define MT_WTBL2_SIZE (16 * 4)
+#define MT_WTBL3_OFFSET (MT7603_WTBL_SIZE * MT_WTBL2_SIZE)
+#define MT_WTBL3_SIZE (16 * 4)
+#define MT_WTBL4_OFFSET (MT7603_WTBL_SIZE * MT_WTBL3_SIZE + \
+ MT_WTBL3_OFFSET)
+#define MT_WTBL4_SIZE (8 * 4)
+
+#define MT_WTBL1_W0_ADDR_HI GENMASK(15, 0)
+#define MT_WTBL1_W0_MUAR_IDX GENMASK(21, 16)
+#define MT_WTBL1_W0_RX_CHECK_A1 BIT(22)
+#define MT_WTBL1_W0_KEY_IDX GENMASK(24, 23)
+#define MT_WTBL1_W0_RX_CHECK_KEY_IDX BIT(25)
+#define MT_WTBL1_W0_RX_KEY_VALID BIT(26)
+#define MT_WTBL1_W0_RX_IK_VALID BIT(27)
+#define MT_WTBL1_W0_RX_VALID BIT(28)
+#define MT_WTBL1_W0_RX_CHECK_A2 BIT(29)
+#define MT_WTBL1_W0_RX_DATA_VALID BIT(30)
+#define MT_WTBL1_W0_WRITE_BURST BIT(31)
+
+#define MT_WTBL1_W1_ADDR_LO GENMASK(31, 0)
+
+#define MT_WTBL1_W2_MPDU_DENSITY GENMASK(2, 0)
+#define MT_WTBL1_W2_KEY_TYPE GENMASK(6, 3)
+#define MT_WTBL1_W2_EVEN_PN BIT(7)
+#define MT_WTBL1_W2_TO_DS BIT(8)
+#define MT_WTBL1_W2_FROM_DS BIT(9)
+#define MT_WTBL1_W2_HEADER_TRANS BIT(10)
+#define MT_WTBL1_W2_AMPDU_FACTOR GENMASK(13, 11)
+#define MT_WTBL1_W2_PWR_MGMT BIT(14)
+#define MT_WTBL1_W2_RDG BIT(15)
+#define MT_WTBL1_W2_RTS BIT(16)
+#define MT_WTBL1_W2_CFACK BIT(17)
+#define MT_WTBL1_W2_RDG_BA BIT(18)
+#define MT_WTBL1_W2_SMPS BIT(19)
+#define MT_WTBL1_W2_TXS_BAF_REPORT BIT(20)
+#define MT_WTBL1_W2_DYN_BW BIT(21)
+#define MT_WTBL1_W2_LDPC BIT(22)
+#define MT_WTBL1_W2_ITXBF BIT(23)
+#define MT_WTBL1_W2_ETXBF BIT(24)
+#define MT_WTBL1_W2_TXOP_PS BIT(25)
+#define MT_WTBL1_W2_MESH BIT(26)
+#define MT_WTBL1_W2_QOS BIT(27)
+#define MT_WTBL1_W2_HT BIT(28)
+#define MT_WTBL1_W2_VHT BIT(29)
+#define MT_WTBL1_W2_ADMISSION_CONTROL BIT(30)
+#define MT_WTBL1_W2_GROUP_ID BIT(31)
+
+#define MT_WTBL1_W3_WTBL2_FRAME_ID GENMASK(10, 0)
+#define MT_WTBL1_W3_WTBL2_ENTRY_ID GENMASK(15, 11)
+#define MT_WTBL1_W3_WTBL4_FRAME_ID GENMASK(26, 16)
+#define MT_WTBL1_W3_CHECK_PER BIT(27)
+#define MT_WTBL1_W3_KEEP_I_PSM BIT(28)
+#define MT_WTBL1_W3_I_PSM BIT(29)
+#define MT_WTBL1_W3_POWER_SAVE BIT(30)
+#define MT_WTBL1_W3_SKIP_TX BIT(31)
+
+#define MT_WTBL1_W4_WTBL3_FRAME_ID GENMASK(10, 0)
+#define MT_WTBL1_W4_WTBL3_ENTRY_ID GENMASK(16, 11)
+#define MT_WTBL1_W4_WTBL4_ENTRY_ID GENMASK(22, 17)
+#define MT_WTBL1_W4_PARTIAL_AID GENMASK(31, 23)
+
+#define MT_WTBL2_W0_PN_LO GENMASK(31, 0)
+
+#define MT_WTBL2_W1_PN_HI GENMASK(15, 0)
+#define MT_WTBL2_W1_NON_QOS_SEQNO GENMASK(27, 16)
+
+#define MT_WTBL2_W2_TID0_SN GENMASK(11, 0)
+#define MT_WTBL2_W2_TID1_SN GENMASK(23, 12)
+#define MT_WTBL2_W2_TID2_SN_LO GENMASK(31, 24)
+
+#define MT_WTBL2_W3_TID2_SN_HI GENMASK(3, 0)
+#define MT_WTBL2_W3_TID3_SN GENMASK(15, 4)
+#define MT_WTBL2_W3_TID4_SN GENMASK(27, 16)
+#define MT_WTBL2_W3_TID5_SN_LO GENMASK(31, 28)
+
+#define MT_WTBL2_W4_TID5_SN_HI GENMASK(7, 0)
+#define MT_WTBL2_W4_TID6_SN GENMASK(19, 8)
+#define MT_WTBL2_W4_TID7_SN GENMASK(31, 20)
+
+#define MT_WTBL2_W5_TX_COUNT_RATE1 GENMASK(15, 0)
+#define MT_WTBL2_W5_FAIL_COUNT_RATE1	GENMASK(31, 16)
+
+#define MT_WTBL2_W6_TX_COUNT_RATE2 GENMASK(7, 0)
+#define MT_WTBL2_W6_TX_COUNT_RATE3 GENMASK(15, 8)
+#define MT_WTBL2_W6_TX_COUNT_RATE4 GENMASK(23, 16)
+#define MT_WTBL2_W6_TX_COUNT_RATE5 GENMASK(31, 24)
+
+#define MT_WTBL2_W7_TX_COUNT_CUR_BW GENMASK(15, 0)
+#define MT_WTBL2_W7_FAIL_COUNT_CUR_BW GENMASK(31, 16)
+
+#define MT_WTBL2_W8_TX_COUNT_OTHER_BW GENMASK(15, 0)
+#define MT_WTBL2_W8_FAIL_COUNT_OTHER_BW GENMASK(31, 16)
+
+#define MT_WTBL2_W9_POWER_OFFSET GENMASK(4, 0)
+#define MT_WTBL2_W9_SPATIAL_EXT BIT(5)
+#define MT_WTBL2_W9_ANT_PRIORITY GENMASK(8, 6)
+#define MT_WTBL2_W9_CC_BW_SEL GENMASK(10, 9)
+#define MT_WTBL2_W9_CHANGE_BW_RATE GENMASK(13, 11)
+#define MT_WTBL2_W9_BW_CAP GENMASK(15, 14)
+#define MT_WTBL2_W9_SHORT_GI_20 BIT(16)
+#define MT_WTBL2_W9_SHORT_GI_40 BIT(17)
+#define MT_WTBL2_W9_SHORT_GI_80 BIT(18)
+#define MT_WTBL2_W9_SHORT_GI_160 BIT(19)
+#define MT_WTBL2_W9_MPDU_FAIL_COUNT GENMASK(25, 23)
+#define MT_WTBL2_W9_MPDU_OK_COUNT GENMASK(28, 26)
+#define MT_WTBL2_W9_RATE_IDX GENMASK(31, 29)
+
+#define MT_WTBL2_W10_RATE1 GENMASK(11, 0)
+#define MT_WTBL2_W10_RATE2 GENMASK(23, 12)
+#define MT_WTBL2_W10_RATE3_LO GENMASK(31, 24)
+
+#define MT_WTBL2_W11_RATE3_HI GENMASK(3, 0)
+#define MT_WTBL2_W11_RATE4 GENMASK(15, 4)
+#define MT_WTBL2_W11_RATE5 GENMASK(27, 16)
+#define MT_WTBL2_W11_RATE6_LO GENMASK(31, 28)
+
+#define MT_WTBL2_W12_RATE6_HI GENMASK(7, 0)
+#define MT_WTBL2_W12_RATE7 GENMASK(19, 8)
+#define MT_WTBL2_W12_RATE8 GENMASK(31, 20)
+
+#define MT_WTBL2_W13_AVG_RCPI0 GENMASK(7, 0)
+#define MT_WTBL2_W13_AVG_RCPI1 GENMASK(15, 8)
+#define MT_WTBL2_W13_AVG_RCPI2		GENMASK(23, 16)
+
+#define MT_WTBL2_W14_CC_NOISE_1S GENMASK(6, 0)
+#define MT_WTBL2_W14_CC_NOISE_2S GENMASK(13, 7)
+#define MT_WTBL2_W14_CC_NOISE_3S GENMASK(20, 14)
+#define MT_WTBL2_W14_CHAN_EST_RMS GENMASK(24, 21)
+#define MT_WTBL2_W14_CC_NOISE_SEL BIT(15)
+#define MT_WTBL2_W14_ANT_SEL GENMASK(31, 26)
+
+#define MT_WTBL2_W15_BA_WIN_SIZE GENMASK(2, 0)
+#define MT_WTBL2_W15_BA_WIN_SIZE_SHIFT 3
+#define MT_WTBL2_W15_BA_EN_TIDS GENMASK(31, 24)
+
+#define MT_WTBL1_OR (MT_WTBL1_BASE + 0x2300)
+#define MT_WTBL1_OR_PSM_WRITE BIT(31)
+
+enum mt7603_cipher_type {
+ MT_CIPHER_NONE,
+ MT_CIPHER_WEP40,
+ MT_CIPHER_TKIP,
+ MT_CIPHER_TKIP_NO_MIC,
+ MT_CIPHER_AES_CCMP,
+ MT_CIPHER_WEP104,
+ MT_CIPHER_BIP_CMAC_128,
+ MT_CIPHER_WEP128,
+ MT_CIPHER_WAPI,
+};
+
+#endif
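
The register layout above is consumed through BIT()/GENMASK() masks together with the kernel's FIELD_PREP()/FIELD_GET() helpers (see the bypass-mode setup in mt7603_load_firmware()). The self-contained sketch below approximates those helpers with GCC/Clang builtins so the field packing can be tried in user space; the register value is invented for the example:

#include <stdio.h>

/* approximations of the kernel macros; requires a GCC/Clang compiler */
#define BIT(n)			(1ULL << (n))
#define GENMASK(h, l)		(((~0ULL) >> (63 - (h))) & ((~0ULL) << (l)))
#define FIELD_PREP(mask, val)	(((unsigned long long)(val) << __builtin_ctzll(mask)) & (mask))
#define FIELD_GET(mask, reg)	(((reg) & (mask)) >> __builtin_ctzll(mask))

#define MT_SCH_4_FORCE_QID	GENMASK(4, 0)
#define MT_SCH_4_BYPASS		BIT(5)

int main(void)
{
	unsigned long long reg = 0;	/* pretend MT_SCH_4 readout */

	/* same shape as the bypass-mode read-modify-write in mt7603_load_firmware() */
	reg &= ~MT_SCH_4_FORCE_QID;
	reg |= MT_SCH_4_BYPASS | FIELD_PREP(MT_SCH_4_FORCE_QID, 5);

	printf("reg=0x%llx forced qid=%llu\n",
	       reg, FIELD_GET(MT_SCH_4_FORCE_QID, reg));
	return 0;
}
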
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
new file mode 100644
index 000000000..ba927033b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: ISC
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "mt7603.h"
+
+static int
+mt76_wmac_probe(struct platform_device *pdev)
+{
+ struct mt7603_dev *dev;
+ void __iomem *mem_base;
+ struct mt76_dev *mdev;
+ int irq;
+ int ret;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ mem_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mem_base))
+ return PTR_ERR(mem_base);
+
+ mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7603_ops,
+ &mt7603_drv_ops);
+ if (!mdev)
+ return -ENOMEM;
+
+ dev = container_of(mdev, struct mt7603_dev, mt76);
+ mt76_mmio_init(mdev, mem_base);
+
+ mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
+ (mt76_rr(dev, MT_HW_REV) & 0xff);
+ dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+
+ ret = devm_request_irq(mdev->dev, irq, mt7603_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+ if (ret)
+ goto error;
+
+ ret = mt7603_register_device(dev);
+ if (ret)
+ goto error;
+
+ return 0;
+error:
+ ieee80211_free_hw(mt76_hw(dev));
+ return ret;
+}
+
+static int
+mt76_wmac_remove(struct platform_device *pdev)
+{
+ struct mt76_dev *mdev = platform_get_drvdata(pdev);
+ struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+
+ mt7603_unregister_device(dev);
+
+ return 0;
+}
+
+static const struct of_device_id of_wmac_match[] = {
+ { .compatible = "mediatek,mt7628-wmac" },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, of_wmac_match);
+MODULE_FIRMWARE(MT7628_FIRMWARE_E1);
+MODULE_FIRMWARE(MT7628_FIRMWARE_E2);
+
+struct platform_driver mt76_wmac_driver = {
+ .probe = mt76_wmac_probe,
+ .remove = mt76_wmac_remove,
+ .driver = {
+ .name = "mt76_wmac",
+ .of_match_table = of_wmac_match,
+ },
+};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig
new file mode 100644
index 000000000..f372fb629
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config MT7615_COMMON
+ tristate
+ select MT76_CORE
+
+config MT7615E
+ tristate "MediaTek MT7615E and MT7663E (PCIe) support"
+ select MT7615_COMMON
+ depends on MAC80211
+ depends on PCI
+ help
+	  This adds support for MT7615-based wireless PCIe devices,
+	  which support concurrent dual-band operation on both the
+	  5 GHz and 2.4 GHz bands, an IEEE 802.11ac 4x4:4SS 1733 Mbps
+	  PHY rate, wave2 MU-MIMO with up to 4 users per group, and
+	  160 MHz channels.
+
+ To compile this driver as a module, choose M here.
+
+config MT7622_WMAC
+ bool "MT7622 (SoC) WMAC support"
+ depends on MT7615E
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select REGMAP
+ default y
+ help
+	  This adds support for the built-in WMAC on MT7622 SoC devices,
+	  which has the same feature set as MT7615 but is limited to
+	  the 2.4 GHz band only.
+
+config MT7663_USB_SDIO_COMMON
+ tristate
+ select MT7615_COMMON
+
+config MT7663U
+ tristate "MediaTek MT7663U (USB) support"
+ select MT76_USB
+ select MT7663_USB_SDIO_COMMON
+ depends on MAC80211
+ depends on USB
+ help
+ This adds support for MT7663U 802.11ac 2x2:2 wireless devices.
+
+ To compile this driver as a module, choose M here.
+
+config MT7663S
+ tristate "MediaTek MT7663S (SDIO) support"
+ select MT76_SDIO
+ select MT7663_USB_SDIO_COMMON
+ depends on MAC80211
+ depends on MMC
+ help
+ This adds support for MT7663S 802.11ac 2x2:2 wireless devices.
+
+ To compile this driver as a module, choose M here.
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/Makefile b/drivers/net/wireless/mediatek/mt76/mt7615/Makefile
new file mode 100644
index 000000000..e8fc4a7ae
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/Makefile
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: ISC
+
+obj-$(CONFIG_MT7615_COMMON) += mt7615-common.o
+obj-$(CONFIG_MT7615E) += mt7615e.o
+obj-$(CONFIG_MT7663_USB_SDIO_COMMON) += mt7663-usb-sdio-common.o
+obj-$(CONFIG_MT7663U) += mt7663u.o
+obj-$(CONFIG_MT7663S) += mt7663s.o
+
+CFLAGS_trace.o := -I$(src)
+
+mt7615-common-y := main.o init.o mcu.o eeprom.o mac.o \
+ debugfs.o trace.o
+mt7615-common-$(CONFIG_NL80211_TESTMODE) += testmode.o
+
+mt7615e-y := pci.o pci_init.o dma.o pci_mac.o mmio.o
+mt7615e-$(CONFIG_MT7622_WMAC) += soc.o
+
+mt7663-usb-sdio-common-y := usb_sdio.o
+mt7663u-y := usb.o usb_mcu.o
+mt7663s-y := sdio.o sdio_mcu.o sdio_txrx.o
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
new file mode 100644
index 000000000..00ba550fc
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
@@ -0,0 +1,417 @@
+// SPDX-License-Identifier: ISC
+
+#include "mt7615.h"
+
+static int
+mt7615_radar_pattern_set(void *data, u64 val)
+{
+ struct mt7615_dev *dev = data;
+ int err;
+
+ if (!mt7615_wait_for_mcu_init(dev))
+ return 0;
+
+ mt7615_mutex_acquire(dev);
+ err = mt7615_mcu_rdd_send_pattern(dev);
+ mt7615_mutex_release(dev);
+
+ return err;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_radar_pattern, NULL,
+ mt7615_radar_pattern_set, "%lld\n");
+
+static int
+mt7615_scs_set(void *data, u64 val)
+{
+ struct mt7615_dev *dev = data;
+ struct mt7615_phy *ext_phy;
+
+ if (!mt7615_wait_for_mcu_init(dev))
+ return 0;
+
+ mt7615_mac_set_scs(&dev->phy, val);
+ ext_phy = mt7615_ext_phy(dev);
+ if (ext_phy)
+ mt7615_mac_set_scs(ext_phy, val);
+
+ return 0;
+}
+
+static int
+mt7615_scs_get(void *data, u64 *val)
+{
+ struct mt7615_dev *dev = data;
+
+ *val = dev->phy.scs_en;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_scs, mt7615_scs_get,
+ mt7615_scs_set, "%lld\n");
+
+static int
+mt7615_pm_set(void *data, u64 val)
+{
+ struct mt7615_dev *dev = data;
+
+ if (!mt7615_wait_for_mcu_init(dev))
+ return 0;
+
+ return mt7615_pm_set_enable(dev, val);
+}
+
+static int
+mt7615_pm_get(void *data, u64 *val)
+{
+ struct mt7615_dev *dev = data;
+
+ *val = dev->pm.enable;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_pm, mt7615_pm_get, mt7615_pm_set, "%lld\n");
+
+static int
+mt7615_pm_idle_timeout_set(void *data, u64 val)
+{
+ struct mt7615_dev *dev = data;
+
+ dev->pm.idle_timeout = msecs_to_jiffies(val);
+
+ return 0;
+}
+
+static int
+mt7615_pm_idle_timeout_get(void *data, u64 *val)
+{
+ struct mt7615_dev *dev = data;
+
+ *val = jiffies_to_msecs(dev->pm.idle_timeout);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_pm_idle_timeout, mt7615_pm_idle_timeout_get,
+ mt7615_pm_idle_timeout_set, "%lld\n");
+
+static int
+mt7615_dbdc_set(void *data, u64 val)
+{
+ struct mt7615_dev *dev = data;
+
+ if (!mt7615_wait_for_mcu_init(dev))
+ return 0;
+
+ if (val)
+ mt7615_register_ext_phy(dev);
+ else
+ mt7615_unregister_ext_phy(dev);
+
+ return 0;
+}
+
+static int
+mt7615_dbdc_get(void *data, u64 *val)
+{
+ struct mt7615_dev *dev = data;
+
+ *val = !!mt7615_ext_phy(dev);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_dbdc, mt7615_dbdc_get,
+ mt7615_dbdc_set, "%lld\n");
+
+static int
+mt7615_fw_debug_set(void *data, u64 val)
+{
+ struct mt7615_dev *dev = data;
+
+ if (!mt7615_wait_for_mcu_init(dev))
+ return 0;
+
+ dev->fw_debug = val;
+
+ mt7615_mutex_acquire(dev);
+ mt7615_mcu_fw_log_2_host(dev, dev->fw_debug ? 2 : 0);
+ mt7615_mutex_release(dev);
+
+ return 0;
+}
+
+static int
+mt7615_fw_debug_get(void *data, u64 *val)
+{
+ struct mt7615_dev *dev = data;
+
+ *val = dev->fw_debug;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_fw_debug, mt7615_fw_debug_get,
+ mt7615_fw_debug_set, "%lld\n");
+
+static int
+mt7615_reset_test_set(void *data, u64 val)
+{
+ struct mt7615_dev *dev = data;
+ struct sk_buff *skb;
+
+ if (!mt7615_wait_for_mcu_init(dev))
+ return 0;
+
+ skb = alloc_skb(1, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, 1);
+
+ mt7615_mutex_acquire(dev);
+ mt76_tx_queue_skb_raw(dev, 0, skb, 0);
+ mt7615_mutex_release(dev);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_reset_test, NULL,
+ mt7615_reset_test_set, "%lld\n");
+
+static void
+mt7615_ampdu_stat_read_phy(struct mt7615_phy *phy,
+ struct seq_file *file)
+{
+ struct mt7615_dev *dev = file->private;
+ u32 reg = is_mt7663(&dev->mt76) ? MT_MIB_ARNG(0) : MT_AGG_ASRCR0;
+ bool ext_phy = phy != &dev->phy;
+ int bound[7], i, range;
+
+ if (!phy)
+ return;
+
+ range = mt76_rr(dev, reg);
+ for (i = 0; i < 4; i++)
+ bound[i] = MT_AGG_ASRCR_RANGE(range, i) + 1;
+
+ range = mt76_rr(dev, reg + 4);
+ for (i = 0; i < 3; i++)
+ bound[i + 4] = MT_AGG_ASRCR_RANGE(range, i) + 1;
+
+ seq_printf(file, "\nPhy %d\n", ext_phy);
+
+ seq_printf(file, "Length: %8d | ", bound[0]);
+ for (i = 0; i < ARRAY_SIZE(bound) - 1; i++)
+ seq_printf(file, "%3d -%3d | ",
+ bound[i], bound[i + 1]);
+ seq_puts(file, "\nCount: ");
+
+ range = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
+ for (i = 0; i < ARRAY_SIZE(bound); i++)
+ seq_printf(file, "%8d | ", dev->mt76.aggr_stats[i + range]);
+ seq_puts(file, "\n");
+
+ seq_printf(file, "BA miss count: %d\n", phy->mib.ba_miss_cnt);
+ seq_printf(file, "PER: %ld.%1ld%%\n",
+ phy->mib.aggr_per / 10, phy->mib.aggr_per % 10);
+}
+
+static int
+mt7615_ampdu_stat_show(struct seq_file *file, void *data)
+{
+ struct mt7615_dev *dev = file->private;
+
+ mt7615_mutex_acquire(dev);
+
+ mt7615_ampdu_stat_read_phy(&dev->phy, file);
+ mt7615_ampdu_stat_read_phy(mt7615_ext_phy(dev), file);
+
+ mt7615_mutex_release(dev);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mt7615_ampdu_stat);
+
+static void
+mt7615_radio_read_phy(struct mt7615_phy *phy, struct seq_file *s)
+{
+ struct mt7615_dev *dev = dev_get_drvdata(s->private);
+ bool ext_phy = phy != &dev->phy;
+
+ if (!phy)
+ return;
+
+ seq_printf(s, "Radio %d sensitivity: ofdm=%d cck=%d\n", ext_phy,
+ phy->ofdm_sensitivity, phy->cck_sensitivity);
+ seq_printf(s, "Radio %d false CCA: ofdm=%d cck=%d\n", ext_phy,
+ phy->false_cca_ofdm, phy->false_cca_cck);
+}
+
+static int
+mt7615_radio_read(struct seq_file *s, void *data)
+{
+ struct mt7615_dev *dev = dev_get_drvdata(s->private);
+
+ mt7615_radio_read_phy(&dev->phy, s);
+ mt7615_radio_read_phy(mt7615_ext_phy(dev), s);
+
+ return 0;
+}
+
+static int mt7615_read_temperature(struct seq_file *s, void *data)
+{
+ struct mt7615_dev *dev = dev_get_drvdata(s->private);
+ int temp;
+
+ if (!mt7615_wait_for_mcu_init(dev))
+ return 0;
+
+ /* cpu */
+ mt7615_mutex_acquire(dev);
+ temp = mt7615_mcu_get_temperature(dev, 0);
+ mt7615_mutex_release(dev);
+
+ seq_printf(s, "Temperature: %d\n", temp);
+
+ return 0;
+}
+
+static int
+mt7615_queues_acq(struct seq_file *s, void *data)
+{
+ struct mt7615_dev *dev = dev_get_drvdata(s->private);
+ int i;
+
+ mt7615_mutex_acquire(dev);
+
+ for (i = 0; i < 16; i++) {
+ int j, wmm_idx = i % MT7615_MAX_WMM_SETS;
+ int acs = i / MT7615_MAX_WMM_SETS;
+ u32 ctrl, val, qlen = 0;
+
+ val = mt76_rr(dev, MT_PLE_AC_QEMPTY(acs, wmm_idx));
+ ctrl = BIT(31) | BIT(15) | (acs << 8);
+
+ for (j = 0; j < 32; j++) {
+ if (val & BIT(j))
+ continue;
+
+ mt76_wr(dev, MT_PLE_FL_Q0_CTRL,
+ ctrl | (j + (wmm_idx << 5)));
+ qlen += mt76_get_field(dev, MT_PLE_FL_Q3_CTRL,
+ GENMASK(11, 0));
+ }
+ seq_printf(s, "AC%d%d: queued=%d\n", wmm_idx, acs, qlen);
+ }
+
+ mt7615_mutex_release(dev);
+
+ return 0;
+}
+
+static int
+mt7615_queues_read(struct seq_file *s, void *data)
+{
+ struct mt7615_dev *dev = dev_get_drvdata(s->private);
+ static const struct {
+ char *queue;
+ int id;
+ } queue_map[] = {
+ { "PDMA0", MT_TXQ_BE },
+ { "MCUQ", MT_TXQ_MCU },
+ { "MCUFWQ", MT_TXQ_FWDL },
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(queue_map); i++) {
+ struct mt76_queue *q = dev->mt76.q_tx[queue_map[i].id];
+
+ if (!q)
+ continue;
+
+ seq_printf(s,
+ "%s: queued=%d head=%d tail=%d\n",
+ queue_map[i].queue, q->queued, q->head,
+ q->tail);
+ }
+
+ return 0;
+}
+
+static int
+mt7615_rf_reg_set(void *data, u64 val)
+{
+ struct mt7615_dev *dev = data;
+
+ mt7615_rf_wr(dev, dev->debugfs_rf_wf, dev->debugfs_rf_reg, val);
+
+ return 0;
+}
+
+static int
+mt7615_rf_reg_get(void *data, u64 *val)
+{
+ struct mt7615_dev *dev = data;
+
+ *val = mt7615_rf_rr(dev, dev->debugfs_rf_wf, dev->debugfs_rf_reg);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_rf_reg, mt7615_rf_reg_get, mt7615_rf_reg_set,
+ "0x%08llx\n");
+
+int mt7615_init_debugfs(struct mt7615_dev *dev)
+{
+ struct dentry *dir;
+
+ dir = mt76_register_debugfs(&dev->mt76);
+ if (!dir)
+ return -ENOMEM;
+
+ if (is_mt7615(&dev->mt76))
+ debugfs_create_devm_seqfile(dev->mt76.dev, "xmit-queues", dir,
+ mt7615_queues_read);
+ else
+ debugfs_create_devm_seqfile(dev->mt76.dev, "xmit-queues", dir,
+ mt76_queues_read);
+ debugfs_create_devm_seqfile(dev->mt76.dev, "acq", dir,
+ mt7615_queues_acq);
+ debugfs_create_file("ampdu_stat", 0400, dir, dev, &mt7615_ampdu_stat_fops);
+ debugfs_create_file("scs", 0600, dir, dev, &fops_scs);
+ debugfs_create_file("dbdc", 0600, dir, dev, &fops_dbdc);
+ debugfs_create_file("fw_debug", 0600, dir, dev, &fops_fw_debug);
+ debugfs_create_file("runtime-pm", 0600, dir, dev, &fops_pm);
+ debugfs_create_file("idle-timeout", 0600, dir, dev,
+ &fops_pm_idle_timeout);
+ debugfs_create_devm_seqfile(dev->mt76.dev, "radio", dir,
+ mt7615_radio_read);
+ debugfs_create_u32("dfs_hw_pattern", 0400, dir, &dev->hw_pattern);
+ /* test pattern knobs */
+ debugfs_create_u8("pattern_len", 0600, dir,
+ &dev->radar_pattern.n_pulses);
+ debugfs_create_u32("pulse_period", 0600, dir,
+ &dev->radar_pattern.period);
+ debugfs_create_u16("pulse_width", 0600, dir,
+ &dev->radar_pattern.width);
+ debugfs_create_u16("pulse_power", 0600, dir,
+ &dev->radar_pattern.power);
+ debugfs_create_file("radar_trigger", 0200, dir, dev,
+ &fops_radar_pattern);
+ debugfs_create_file("reset_test", 0200, dir, dev,
+ &fops_reset_test);
+ debugfs_create_devm_seqfile(dev->mt76.dev, "temperature", dir,
+ mt7615_read_temperature);
+
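+ /* raw RF register access: select the WF chain and register offset via
+ * rf_wfidx/rf_regidx, then read or write the value through rf_regval
+ */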
+ debugfs_create_u32("rf_wfidx", 0600, dir, &dev->debugfs_rf_wf);
+ debugfs_create_u32("rf_regidx", 0600, dir, &dev->debugfs_rf_reg);
+ debugfs_create_file_unsafe("rf_regval", 0600, dir, dev,
+ &fops_rf_reg);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt7615_init_debugfs);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
new file mode 100644
index 000000000..637ef0882
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
@@ -0,0 +1,301 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Ryder Lee <ryder.lee@mediatek.com>
+ * Roy Luo <royluo@google.com>
+ * Lorenzo Bianconi <lorenzo@kernel.org>
+ * Felix Fietkau <nbd@nbd.name>
+ */
+
+#include "mt7615.h"
+#include "../dma.h"
+#include "mac.h"
+
+static int
+mt7615_init_tx_queue(struct mt7615_dev *dev, int qid, int idx, int n_desc)
+{
+ struct mt76_queue *hwq;
+ int err;
+
+ hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
+ if (!hwq)
+ return -ENOMEM;
+
+ err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);
+ if (err < 0)
+ return err;
+
+ dev->mt76.q_tx[qid] = hwq;
+
+ return 0;
+}
+
+static int
+mt7622_init_tx_queues_multi(struct mt7615_dev *dev)
+{
+ static const u8 wmm_queue_map[] = {
+ [IEEE80211_AC_BK] = MT7622_TXQ_AC0,
+ [IEEE80211_AC_BE] = MT7622_TXQ_AC1,
+ [IEEE80211_AC_VI] = MT7622_TXQ_AC2,
+ [IEEE80211_AC_VO] = MT7622_TXQ_AC3,
+ };
+ int ret;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
+ ret = mt7615_init_tx_queue(dev, i, wmm_queue_map[i],
+ MT7615_TX_RING_SIZE / 2);
+ if (ret)
+ return ret;
+ }
+
+ ret = mt7615_init_tx_queue(dev, MT_TXQ_PSD,
+ MT7622_TXQ_MGMT, MT7615_TX_MGMT_RING_SIZE);
+ if (ret)
+ return ret;
+
+ ret = mt7615_init_tx_queue(dev, MT_TXQ_MCU,
+ MT7622_TXQ_MCU, MT7615_TX_MCU_RING_SIZE);
+ return ret;
+}
+
+static int
+mt7615_init_tx_queues(struct mt7615_dev *dev)
+{
+ int ret, i;
+
+ ret = mt7615_init_tx_queue(dev, MT_TXQ_FWDL,
+ MT7615_TXQ_FWDL,
+ MT7615_TX_FWDL_RING_SIZE);
+ if (ret)
+ return ret;
+
+ if (!is_mt7615(&dev->mt76))
+ return mt7622_init_tx_queues_multi(dev);
+
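+ /* MT7615 uses a single hardware TX ring for all data ACs; every
+ * per-AC software queue is pointed at ring 0 below
+ */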
+ ret = mt7615_init_tx_queue(dev, 0, 0, MT7615_TX_RING_SIZE);
+ if (ret)
+ return ret;
+
+ for (i = 1; i < MT_TXQ_MCU; i++)
+ dev->mt76.q_tx[i] = dev->mt76.q_tx[0];
+
+ ret = mt7615_init_tx_queue(dev, MT_TXQ_MCU, MT7615_TXQ_MCU,
+ MT7615_TX_MCU_RING_SIZE);
+ return ret;
+}
+
+static int mt7615_poll_tx(struct napi_struct *napi, int budget)
+{
+ struct mt7615_dev *dev;
+
+ dev = container_of(napi, struct mt7615_dev, mt76.tx_napi);
+
+ mt76_queue_tx_cleanup(dev, MT_TXQ_MCU, false);
+
+ if (napi_complete_done(napi, 0))
+ mt7615_irq_enable(dev, mt7615_tx_mcu_int_mask(dev));
+
+ return 0;
+}
+
+int mt7615_wait_pdma_busy(struct mt7615_dev *dev)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+
+ if (!is_mt7663(mdev)) {
+ u32 mask = MT_PDMA_TX_BUSY | MT_PDMA_RX_BUSY;
+ u32 reg = mt7615_reg_map(dev, MT_PDMA_BUSY);
+
+ if (!mt76_poll_msec(dev, reg, mask, 0, 1000)) {
+ dev_err(mdev->dev, "PDMA engine busy\n");
+ return -EIO;
+ }
+
+ return 0;
+ }
+
+ if (!mt76_poll_msec(dev, MT_PDMA_BUSY_STATUS,
+ MT_PDMA_TX_IDX_BUSY, 0, 1000)) {
+ dev_err(mdev->dev, "PDMA engine tx busy\n");
+ return -EIO;
+ }
+
+ if (!mt76_poll_msec(dev, MT_PSE_PG_INFO,
+ MT_PSE_SRC_CNT, 0, 1000)) {
+ dev_err(mdev->dev, "PSE engine busy\n");
+ return -EIO;
+ }
+
+ if (!mt76_poll_msec(dev, MT_PDMA_BUSY_STATUS,
+ MT_PDMA_BUSY_IDX, 0, 1000)) {
+ dev_err(mdev->dev, "PDMA engine busy\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void mt7622_dma_sched_init(struct mt7615_dev *dev)
+{
+ u32 reg = mt7615_reg_map(dev, MT_DMASHDL_BASE);
+ int i;
+
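+ /* program the DMA scheduler: packet size limits, per-group quotas and
+ * queue-to-group mappings
+ */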
+ mt76_rmw(dev, reg + MT_DMASHDL_PKT_MAX_SIZE,
+ MT_DMASHDL_PKT_MAX_SIZE_PLE | MT_DMASHDL_PKT_MAX_SIZE_PSE,
+ FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PLE, 1) |
+ FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PSE, 8));
+
+ for (i = 0; i <= 5; i++)
+ mt76_wr(dev, reg + MT_DMASHDL_GROUP_QUOTA(i),
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x10) |
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0x800));
+
+ mt76_wr(dev, reg + MT_DMASHDL_Q_MAP(0), 0x42104210);
+ mt76_wr(dev, reg + MT_DMASHDL_Q_MAP(1), 0x42104210);
+ mt76_wr(dev, reg + MT_DMASHDL_Q_MAP(2), 0x5);
+ mt76_wr(dev, reg + MT_DMASHDL_Q_MAP(3), 0);
+
+ mt76_wr(dev, reg + MT_DMASHDL_SCHED_SET0, 0x6012345f);
+ mt76_wr(dev, reg + MT_DMASHDL_SCHED_SET1, 0xedcba987);
+}
+
+static void mt7663_dma_sched_init(struct mt7615_dev *dev)
+{
+ int i;
+
+ mt76_rmw(dev, MT_DMA_SHDL(MT_DMASHDL_PKT_MAX_SIZE),
+ MT_DMASHDL_PKT_MAX_SIZE_PLE | MT_DMASHDL_PKT_MAX_SIZE_PSE,
+ FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PLE, 1) |
+ FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PSE, 8));
+
+ /* enable refill control group 0, 1, 2, 4, 5 */
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_REFILL), 0xffc80000);
+ /* enable group 0, 1, 2, 4, 5, 15 */
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_OPTIONAL), 0x70068037);
+
+ /* each group's min quota must be larger than PLE_PKT_MAX_SIZE_NUM */
+ for (i = 0; i < 5; i++)
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_GROUP_QUOTA(i)),
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x40) |
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0x800));
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_GROUP_QUOTA(5)),
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x40) |
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0x40));
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_GROUP_QUOTA(15)),
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x20) |
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0x20));
+
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(0)), 0x42104210);
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(1)), 0x42104210);
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(2)), 0x00050005);
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(3)), 0);
+ /* ALTX0 and ALTX1 QIDs map to group 5 */
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_SCHED_SET0), 0x6012345f);
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_SCHED_SET1), 0xedcba987);
+}
+
+int mt7615_dma_init(struct mt7615_dev *dev)
+{
+ int rx_ring_size = MT7615_RX_RING_SIZE;
+ int rx_buf_size = MT_RX_BUF_SIZE;
+ int ret;
+
+ /* Increase buffer size to receive large VHT MPDUs */
+ if (dev->mphy.cap.has_5ghz)
+ rx_buf_size *= 2;
+
+ mt76_dma_attach(&dev->mt76);
+
+ mt76_wr(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE |
+ MT_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN |
+ MT_WPDMA_GLO_CFG_OMIT_TX_INFO);
+
+ mt76_rmw_field(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_BT_SIZE_BIT0, 0x1);
+
+ mt76_rmw_field(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_BT_SIZE_BIT21, 0x1);
+
+ mt76_rmw_field(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 0x3);
+
+ mt76_rmw_field(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_MULTI_DMA_EN, 0x3);
+
+ if (is_mt7615(&dev->mt76)) {
+ mt76_set(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_FIRST_TOKEN_ONLY);
+
+ mt76_wr(dev, MT_WPDMA_GLO_CFG1, 0x1);
+ mt76_wr(dev, MT_WPDMA_TX_PRE_CFG, 0xf0000);
+ mt76_wr(dev, MT_WPDMA_RX_PRE_CFG, 0xf7f0000);
+ mt76_wr(dev, MT_WPDMA_ABT_CFG, 0x4000026);
+ mt76_wr(dev, MT_WPDMA_ABT_CFG1, 0x18811881);
+ mt76_set(dev, 0x7158, BIT(16));
+ mt76_clear(dev, 0x7000, BIT(23));
+ }
+
+ mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
+
+ ret = mt7615_init_tx_queues(dev);
+ if (ret)
+ return ret;
+
+ /* init rx queues */
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
+ MT7615_RX_MCU_RING_SIZE, rx_buf_size,
+ MT_RX_RING_BASE);
+ if (ret)
+ return ret;
+
+ if (!is_mt7615(&dev->mt76))
+ rx_ring_size /= 2;
+
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], 0,
+ rx_ring_size, rx_buf_size, MT_RX_RING_BASE);
+ if (ret)
+ return ret;
+
+ mt76_wr(dev, MT_DELAY_INT_CFG, 0);
+
+ ret = mt76_init_queues(dev);
+ if (ret < 0)
+ return ret;
+
+ netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi,
+ mt7615_poll_tx, NAPI_POLL_WEIGHT);
+ napi_enable(&dev->mt76.tx_napi);
+
+ mt76_poll(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 1000);
+
+ /* start dma engine */
+ mt76_set(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_EN |
+ MT_WPDMA_GLO_CFG_RX_DMA_EN);
+
+ /* enable interrupts for TX/RX rings */
+ mt7615_irq_enable(dev, MT_INT_RX_DONE_ALL | mt7615_tx_mcu_int_mask(dev) |
+ MT_INT_MCU_CMD);
+
+ if (is_mt7622(&dev->mt76))
+ mt7622_dma_sched_init(dev);
+
+ if (is_mt7663(&dev->mt76))
+ mt7663_dma_sched_init(dev);
+
+ return 0;
+}
+
+void mt7615_dma_cleanup(struct mt7615_dev *dev)
+{
+ mt76_clear(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_EN |
+ MT_WPDMA_GLO_CFG_RX_DMA_EN);
+ mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_SW_RESET);
+
+ mt76_dma_cleanup(&dev->mt76);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
new file mode 100644
index 000000000..85f56487f
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Ryder Lee <ryder.lee@mediatek.com>
+ * Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/of.h>
+#include "mt7615.h"
+#include "eeprom.h"
+
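+/* Read one 16-byte block from the efuse at @addr; blocks that read back as
+ * blank/invalid are returned zero-filled.
+ */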
+static int mt7615_efuse_read(struct mt7615_dev *dev, u32 base,
+ u16 addr, u8 *data)
+{
+ u32 val;
+ int i;
+
+ val = mt76_rr(dev, base + MT_EFUSE_CTRL);
+ val &= ~(MT_EFUSE_CTRL_AIN | MT_EFUSE_CTRL_MODE);
+ val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf);
+ val |= MT_EFUSE_CTRL_KICK;
+ mt76_wr(dev, base + MT_EFUSE_CTRL, val);
+
+ if (!mt76_poll(dev, base + MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
+ return -ETIMEDOUT;
+
+ udelay(2);
+
+ val = mt76_rr(dev, base + MT_EFUSE_CTRL);
+ if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT ||
+ WARN_ON_ONCE(!(val & MT_EFUSE_CTRL_VALID))) {
+ memset(data, 0x0, 16);
+ return 0;
+ }
+
+ for (i = 0; i < 4; i++) {
+ val = mt76_rr(dev, base + MT_EFUSE_RDATA(i));
+ put_unaligned_le32(val, data + 4 * i);
+ }
+
+ return 0;
+}
+
+static int mt7615_efuse_init(struct mt7615_dev *dev, u32 base)
+{
+ int i, len = MT7615_EEPROM_SIZE;
+ void *buf;
+ u32 val;
+
+ val = mt76_rr(dev, base + MT_EFUSE_BASE_CTRL);
+ if (val & MT_EFUSE_BASE_CTRL_EMPTY)
+ return 0;
+
+ dev->mt76.otp.data = devm_kzalloc(dev->mt76.dev, len, GFP_KERNEL);
+ dev->mt76.otp.size = len;
+ if (!dev->mt76.otp.data)
+ return -ENOMEM;
+
+ buf = dev->mt76.otp.data;
+ for (i = 0; i + 16 <= len; i += 16) {
+ int ret;
+
+ ret = mt7615_efuse_read(dev, base, i, buf + i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mt7615_eeprom_load(struct mt7615_dev *dev, u32 addr)
+{
+ int ret;
+
+ ret = mt76_eeprom_init(&dev->mt76, MT7615_EEPROM_FULL_SIZE);
+ if (ret < 0)
+ return ret;
+
+ return mt7615_efuse_init(dev, addr);
+}
+
+static int mt7615_check_eeprom(struct mt76_dev *dev)
+{
+ u16 val = get_unaligned_le16(dev->eeprom.data);
+
+ switch (val) {
+ case 0x7615:
+ case 0x7622:
+ case 0x7663:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void
+mt7615_eeprom_parse_hw_band_cap(struct mt7615_dev *dev)
+{
+ u8 val, *eeprom = dev->mt76.eeprom.data;
+
+ if (is_mt7663(&dev->mt76)) {
+ /* dual band */
+ dev->mphy.cap.has_2ghz = true;
+ dev->mphy.cap.has_5ghz = true;
+ return;
+ }
+
+ if (is_mt7622(&dev->mt76)) {
+ /* 2GHz only */
+ dev->mphy.cap.has_2ghz = true;
+ return;
+ }
+
+ if (is_mt7611(&dev->mt76)) {
+ /* 5GHz only */
+ dev->mphy.cap.has_5ghz = true;
+ return;
+ }
+
+ val = FIELD_GET(MT_EE_NIC_WIFI_CONF_BAND_SEL,
+ eeprom[MT_EE_WIFI_CONF]);
+ switch (val) {
+ case MT_EE_5GHZ:
+ dev->mphy.cap.has_5ghz = true;
+ break;
+ case MT_EE_DBDC:
+ dev->dbdc_support = true;
+ fallthrough;
+ case MT_EE_2GHZ:
+ dev->mphy.cap.has_2ghz = true;
+ break;
+ default:
+ dev->mphy.cap.has_2ghz = true;
+ dev->mphy.cap.has_5ghz = true;
+ break;
+ }
+}
+
+static void mt7615_eeprom_parse_hw_cap(struct mt7615_dev *dev)
+{
+ u8 *eeprom = dev->mt76.eeprom.data;
+ u8 tx_mask, max_nss;
+
+ mt7615_eeprom_parse_hw_band_cap(dev);
+
+ if (is_mt7663(&dev->mt76)) {
+ max_nss = 2;
+ tx_mask = FIELD_GET(MT_EE_HW_CONF1_TX_MASK,
+ eeprom[MT7663_EE_HW_CONF1]);
+ } else {
+ u32 val;
+
+ /* read tx-rx mask from eeprom */
+ val = mt76_rr(dev, MT_TOP_STRAP_STA);
+ max_nss = val & MT_TOP_3NSS ? 3 : 4;
+
+ tx_mask = FIELD_GET(MT_EE_NIC_CONF_TX_MASK,
+ eeprom[MT_EE_NIC_CONF_0]);
+ }
+ if (!tx_mask || tx_mask > max_nss)
+ tx_mask = max_nss;
+
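+ /* tx_mask is a chain count; expand it into a contiguous chain bitmask */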
+ dev->chainmask = BIT(tx_mask) - 1;
+ dev->mphy.antenna_mask = dev->chainmask;
+ dev->phy.chainmask = dev->chainmask;
+}
+
+static int mt7663_eeprom_get_target_power_index(struct mt7615_dev *dev,
+ struct ieee80211_channel *chan,
+ u8 chain_idx)
+{
+ int index, group;
+
+ if (chain_idx > 1)
+ return -EINVAL;
+
+ if (chan->band == NL80211_BAND_2GHZ)
+ return MT7663_EE_TX0_2G_TARGET_POWER + (chain_idx << 4);
+
+ group = mt7615_get_channel_group(chan->hw_value);
+ if (chain_idx == 1)
+ index = MT7663_EE_TX1_5G_G0_TARGET_POWER;
+ else
+ index = MT7663_EE_TX0_5G_G0_TARGET_POWER;
+
+ return index + group * 3;
+}
+
+int mt7615_eeprom_get_target_power_index(struct mt7615_dev *dev,
+ struct ieee80211_channel *chan,
+ u8 chain_idx)
+{
+ int index;
+
+ if (is_mt7663(&dev->mt76))
+ return mt7663_eeprom_get_target_power_index(dev, chan,
+ chain_idx);
+
+ if (chain_idx > 3)
+ return -EINVAL;
+
+ /* TSSI disabled */
+ if (mt7615_ext_pa_enabled(dev, chan->band)) {
+ if (chan->band == NL80211_BAND_2GHZ)
+ return MT_EE_EXT_PA_2G_TARGET_POWER;
+ else
+ return MT_EE_EXT_PA_5G_TARGET_POWER;
+ }
+
+ /* TSSI enabled */
+ if (chan->band == NL80211_BAND_2GHZ) {
+ index = MT_EE_TX0_2G_TARGET_POWER + chain_idx * 6;
+ } else {
+ int group = mt7615_get_channel_group(chan->hw_value);
+
+ switch (chain_idx) {
+ case 1:
+ index = MT_EE_TX1_5G_G0_TARGET_POWER;
+ break;
+ case 2:
+ index = MT_EE_TX2_5G_G0_TARGET_POWER;
+ break;
+ case 3:
+ index = MT_EE_TX3_5G_G0_TARGET_POWER;
+ break;
+ case 0:
+ default:
+ index = MT_EE_TX0_5G_G0_TARGET_POWER;
+ break;
+ }
+ index += 5 * group;
+ }
+
+ return index;
+}
+
+int mt7615_eeprom_get_power_delta_index(struct mt7615_dev *dev,
+ enum nl80211_band band)
+{
+ /* assume the first rate has the highest power offset */
+ if (is_mt7663(&dev->mt76)) {
+ if (band == NL80211_BAND_2GHZ)
+ return MT_EE_TX0_5G_G0_TARGET_POWER;
+ else
+ return MT7663_EE_5G_RATE_POWER;
+ }
+
+ if (band == NL80211_BAND_2GHZ)
+ return MT_EE_2G_RATE_POWER;
+ else
+ return MT_EE_5G_RATE_POWER;
+}
+
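+/* Merge per-device calibration ("cal-free") data from OTP into the EEPROM
+ * image; the checked offsets are only applied if every one of them is
+ * populated in OTP.
+ */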
+static void mt7615_apply_cal_free_data(struct mt7615_dev *dev)
+{
+ static const u16 ical[] = {
+ 0x53, 0x54, 0x55, 0x56, 0x57, 0x5c, 0x5d, 0x62, 0x63, 0x68,
+ 0x69, 0x6e, 0x6f, 0x73, 0x74, 0x78, 0x79, 0x82, 0x83, 0x87,
+ 0x88, 0x8c, 0x8d, 0x91, 0x92, 0x96, 0x97, 0x9b, 0x9c, 0xa0,
+ 0xa1, 0xaa, 0xab, 0xaf, 0xb0, 0xb4, 0xb5, 0xb9, 0xba, 0xf4,
+ 0xf7, 0xff,
+ 0x140, 0x141, 0x145, 0x146, 0x14a, 0x14b, 0x154, 0x155, 0x159,
+ 0x15a, 0x15e, 0x15f, 0x163, 0x164, 0x168, 0x169, 0x16d, 0x16e,
+ 0x172, 0x173, 0x17c, 0x17d, 0x181, 0x182, 0x186, 0x187, 0x18b,
+ 0x18c
+ };
+ static const u16 ical_nocheck[] = {
+ 0x110, 0x111, 0x112, 0x113, 0x114, 0x115, 0x116, 0x117, 0x118,
+ 0x1b5, 0x1b6, 0x1b7, 0x3ac, 0x3ad, 0x3ae, 0x3af, 0x3b0, 0x3b1,
+ 0x3b2
+ };
+ u8 *eeprom = dev->mt76.eeprom.data;
+ u8 *otp = dev->mt76.otp.data;
+ int i;
+
+ if (!otp)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(ical); i++)
+ if (!otp[ical[i]])
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(ical); i++)
+ eeprom[ical[i]] = otp[ical[i]];
+
+ for (i = 0; i < ARRAY_SIZE(ical_nocheck); i++)
+ eeprom[ical_nocheck[i]] = otp[ical_nocheck[i]];
+}
+
+static void mt7622_apply_cal_free_data(struct mt7615_dev *dev)
+{
+ static const u16 ical[] = {
+ 0x53, 0x54, 0x55, 0x56, 0xf4, 0xf7, 0x144, 0x156, 0x15b
+ };
+ u8 *eeprom = dev->mt76.eeprom.data;
+ u8 *otp = dev->mt76.otp.data;
+ int i;
+
+ if (!otp)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(ical); i++) {
+ if (!otp[ical[i]])
+ continue;
+
+ eeprom[ical[i]] = otp[ical[i]];
+ }
+}
+
+static void mt7615_cal_free_data(struct mt7615_dev *dev)
+{
+ struct device_node *np = dev->mt76.dev->of_node;
+
+ if (!np || !of_property_read_bool(np, "mediatek,eeprom-merge-otp"))
+ return;
+
+ switch (mt76_chip(&dev->mt76)) {
+ case 0x7622:
+ mt7622_apply_cal_free_data(dev);
+ break;
+ case 0x7615:
+ case 0x7611:
+ mt7615_apply_cal_free_data(dev);
+ break;
+ }
+}
+
+int mt7615_eeprom_init(struct mt7615_dev *dev, u32 addr)
+{
+ int ret;
+
+ ret = mt7615_eeprom_load(dev, addr);
+ if (ret < 0)
+ return ret;
+
+ ret = mt7615_check_eeprom(&dev->mt76);
+ if (ret && dev->mt76.otp.data) {
+ memcpy(dev->mt76.eeprom.data, dev->mt76.otp.data,
+ MT7615_EEPROM_SIZE);
+ } else {
+ dev->flash_eeprom = true;
+ mt7615_cal_free_data(dev);
+ }
+
+ mt7615_eeprom_parse_hw_cap(dev);
+ memcpy(dev->mt76.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
+ ETH_ALEN);
+
+ mt76_eeprom_override(&dev->mt76);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt7615_eeprom_init);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h
new file mode 100644
index 000000000..a024dee10
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2019 MediaTek Inc. */
+
+#ifndef __MT7615_EEPROM_H
+#define __MT7615_EEPROM_H
+
+#include "mt7615.h"
+
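+/* Full flash image layout: the base EEPROM area is followed by the DCOC and
+ * TXDPD calibration blocks.
+ */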
+#define MT7615_EEPROM_DCOC_OFFSET MT7615_EEPROM_SIZE
+#define MT7615_EEPROM_DCOC_SIZE 256
+#define MT7615_EEPROM_DCOC_COUNT 34
+
+#define MT7615_EEPROM_TXDPD_OFFSET (MT7615_EEPROM_SIZE + \
+ MT7615_EEPROM_DCOC_COUNT * \
+ MT7615_EEPROM_DCOC_SIZE)
+#define MT7615_EEPROM_TXDPD_SIZE 216
+#define MT7615_EEPROM_TXDPD_COUNT (44 + 3)
+
+#define MT7615_EEPROM_FULL_SIZE (MT7615_EEPROM_TXDPD_OFFSET + \
+ MT7615_EEPROM_TXDPD_COUNT * \
+ MT7615_EEPROM_TXDPD_SIZE)
+
+enum mt7615_eeprom_field {
+ MT_EE_CHIP_ID = 0x000,
+ MT_EE_VERSION = 0x002,
+ MT_EE_MAC_ADDR = 0x004,
+ MT_EE_NIC_CONF_0 = 0x034,
+ MT_EE_NIC_CONF_1 = 0x036,
+ MT_EE_WIFI_CONF = 0x03e,
+ MT_EE_CALDATA_FLASH = 0x052,
+ MT_EE_TX0_2G_TARGET_POWER = 0x058,
+ MT_EE_TX0_5G_G0_TARGET_POWER = 0x070,
+ MT7663_EE_5G_RATE_POWER = 0x089,
+ MT_EE_TX1_5G_G0_TARGET_POWER = 0x098,
+ MT_EE_2G_RATE_POWER = 0x0be,
+ MT_EE_5G_RATE_POWER = 0x0d5,
+ MT7663_EE_TX0_2G_TARGET_POWER = 0x0e3,
+ MT_EE_EXT_PA_2G_TARGET_POWER = 0x0f2,
+ MT_EE_EXT_PA_5G_TARGET_POWER = 0x0f3,
+ MT_EE_TX2_5G_G0_TARGET_POWER = 0x142,
+ MT_EE_TX3_5G_G0_TARGET_POWER = 0x16a,
+ MT7663_EE_HW_CONF1 = 0x1b0,
+ MT7663_EE_TX0_5G_G0_TARGET_POWER = 0x245,
+ MT7663_EE_TX1_5G_G0_TARGET_POWER = 0x2b5,
+
+ MT7615_EE_MAX = 0x3bf,
+ MT7622_EE_MAX = 0x3db,
+ MT7663_EE_MAX = 0x400,
+};
+
+#define MT_EE_RATE_POWER_MASK GENMASK(5, 0)
+#define MT_EE_RATE_POWER_SIGN BIT(6)
+#define MT_EE_RATE_POWER_EN BIT(7)
+
+#define MT_EE_CALDATA_FLASH_TX_DPD BIT(0)
+#define MT_EE_CALDATA_FLASH_RX_CAL BIT(1)
+
+#define MT_EE_NIC_CONF_TX_MASK GENMASK(7, 4)
+#define MT_EE_NIC_CONF_RX_MASK GENMASK(3, 0)
+
+#define MT_EE_HW_CONF1_TX_MASK GENMASK(2, 0)
+
+#define MT_EE_NIC_CONF_TSSI_2G BIT(5)
+#define MT_EE_NIC_CONF_TSSI_5G BIT(6)
+
+#define MT_EE_NIC_WIFI_CONF_BAND_SEL GENMASK(5, 4)
+enum mt7615_eeprom_band {
+ MT_EE_DUAL_BAND,
+ MT_EE_5GHZ,
+ MT_EE_2GHZ,
+ MT_EE_DBDC,
+};
+
+enum mt7615_channel_group {
+ MT_CH_5G_JAPAN,
+ MT_CH_5G_UNII_1,
+ MT_CH_5G_UNII_2A,
+ MT_CH_5G_UNII_2B,
+ MT_CH_5G_UNII_2E_1,
+ MT_CH_5G_UNII_2E_2,
+ MT_CH_5G_UNII_2E_3,
+ MT_CH_5G_UNII_3,
+ __MT_CH_MAX
+};
+
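+/* Map a 5 GHz channel number to its EEPROM target-power channel group. */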
+static inline enum mt7615_channel_group
+mt7615_get_channel_group(int channel)
+{
+ if (channel >= 184 && channel <= 196)
+ return MT_CH_5G_JAPAN;
+ if (channel <= 48)
+ return MT_CH_5G_UNII_1;
+ if (channel <= 64)
+ return MT_CH_5G_UNII_2A;
+ if (channel <= 114)
+ return MT_CH_5G_UNII_2E_1;
+ if (channel <= 144)
+ return MT_CH_5G_UNII_2E_2;
+ if (channel <= 161)
+ return MT_CH_5G_UNII_2E_3;
+ return MT_CH_5G_UNII_3;
+}
+
+static inline bool
+mt7615_ext_pa_enabled(struct mt7615_dev *dev, enum nl80211_band band)
+{
+ u8 *eep = dev->mt76.eeprom.data;
+
+ if (band == NL80211_BAND_5GHZ)
+ return !(eep[MT_EE_NIC_CONF_1 + 1] & MT_EE_NIC_CONF_TSSI_5G);
+ else
+ return !(eep[MT_EE_NIC_CONF_1 + 1] & MT_EE_NIC_CONF_TSSI_2G);
+}
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
new file mode 100644
index 000000000..e194259c8
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
@@ -0,0 +1,493 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Roy Luo <royluo@google.com>
+ * Ryder Lee <ryder.lee@mediatek.com>
+ * Felix Fietkau <nbd@nbd.name>
+ * Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/etherdevice.h>
+#include "mt7615.h"
+#include "mac.h"
+#include "eeprom.h"
+
+void mt7615_phy_init(struct mt7615_dev *dev)
+{
+ /* disable rf low power beacon mode */
+ mt76_set(dev, MT_WF_PHY_WF2_RFCTRL0(0), MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN);
+ mt76_set(dev, MT_WF_PHY_WF2_RFCTRL0(1), MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN);
+}
+EXPORT_SYMBOL_GPL(mt7615_phy_init);
+
+static void
+mt7615_init_mac_chain(struct mt7615_dev *dev, int chain)
+{
+ u32 val;
+
+ if (!chain)
+ val = MT_CFG_CCR_MAC_D0_1X_GC_EN | MT_CFG_CCR_MAC_D0_2X_GC_EN;
+ else
+ val = MT_CFG_CCR_MAC_D1_1X_GC_EN | MT_CFG_CCR_MAC_D1_2X_GC_EN;
+
+ /* enable band 0/1 clk */
+ mt76_set(dev, MT_CFG_CCR, val);
+
+ mt76_rmw(dev, MT_TMAC_TRCR(chain),
+ MT_TMAC_TRCR_CCA_SEL | MT_TMAC_TRCR_SEC_CCA_SEL,
+ FIELD_PREP(MT_TMAC_TRCR_CCA_SEL, 2) |
+ FIELD_PREP(MT_TMAC_TRCR_SEC_CCA_SEL, 0));
+
+ mt76_wr(dev, MT_AGG_ACR(chain),
+ MT_AGG_ACR_PKT_TIME_EN | MT_AGG_ACR_NO_BA_AR_RULE |
+ FIELD_PREP(MT_AGG_ACR_CFEND_RATE, MT7615_CFEND_RATE_DEFAULT) |
+ FIELD_PREP(MT_AGG_ACR_BAR_RATE, MT7615_BAR_RATE_DEFAULT));
+
+ mt76_wr(dev, MT_AGG_ARUCR(chain),
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), 7) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(1), 2) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(2), 2) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(3), 2) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(4), 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(5), 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(6), 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(7), 1));
+
+ mt76_wr(dev, MT_AGG_ARDCR(chain),
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), MT7615_RATE_RETRY - 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(1), MT7615_RATE_RETRY - 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(2), MT7615_RATE_RETRY - 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(3), MT7615_RATE_RETRY - 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(4), MT7615_RATE_RETRY - 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(5), MT7615_RATE_RETRY - 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(6), MT7615_RATE_RETRY - 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(7), MT7615_RATE_RETRY - 1));
+
+ mt76_clear(dev, MT_DMA_RCFR0(chain), MT_DMA_RCFR0_MCU_RX_TDLS);
+ if (!mt7615_firmware_offload(dev)) {
+ u32 mask, set;
+
+ mask = MT_DMA_RCFR0_MCU_RX_MGMT |
+ MT_DMA_RCFR0_MCU_RX_CTL_NON_BAR |
+ MT_DMA_RCFR0_MCU_RX_CTL_BAR |
+ MT_DMA_RCFR0_MCU_RX_BYPASS |
+ MT_DMA_RCFR0_RX_DROPPED_UCAST |
+ MT_DMA_RCFR0_RX_DROPPED_MCAST;
+ set = FIELD_PREP(MT_DMA_RCFR0_RX_DROPPED_UCAST, 2) |
+ FIELD_PREP(MT_DMA_RCFR0_RX_DROPPED_MCAST, 2);
+ mt76_rmw(dev, MT_DMA_RCFR0(chain), mask, set);
+ }
+}
+
+void mt7615_mac_init(struct mt7615_dev *dev)
+{
+ int i;
+
+ mt7615_init_mac_chain(dev, 0);
+
+ mt76_rmw_field(dev, MT_TMAC_CTCR0,
+ MT_TMAC_CTCR0_INS_DDLMT_REFTIME, 0x3f);
+ mt76_rmw_field(dev, MT_TMAC_CTCR0,
+ MT_TMAC_CTCR0_INS_DDLMT_DENSITY, 0x3);
+ mt76_rmw(dev, MT_TMAC_CTCR0,
+ MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN |
+ MT_TMAC_CTCR0_INS_DDLMT_EN,
+ MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN |
+ MT_TMAC_CTCR0_INS_DDLMT_EN);
+
+ mt7615_mcu_set_rts_thresh(&dev->phy, 0x92b);
+ mt7615_mac_set_scs(&dev->phy, true);
+
+ mt76_rmw(dev, MT_AGG_SCR, MT_AGG_SCR_NLNAV_MID_PTEC_DIS,
+ MT_AGG_SCR_NLNAV_MID_PTEC_DIS);
+
+ mt76_wr(dev, MT_AGG_ARCR,
+ FIELD_PREP(MT_AGG_ARCR_RTS_RATE_THR, 2) |
+ MT_AGG_ARCR_RATE_DOWN_RATIO_EN |
+ FIELD_PREP(MT_AGG_ARCR_RATE_DOWN_RATIO, 1) |
+ FIELD_PREP(MT_AGG_ARCR_RATE_UP_EXTRA_TH, 4));
+
+ for (i = 0; i < MT7615_WTBL_SIZE; i++)
+ mt7615_mac_wtbl_update(dev, i,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+
+ mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_EN);
+ mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0, MT_WF_RMAC_MIB_RXTIME_EN);
+
+ /* disable hdr translation and hw AMSDU */
+ mt76_wr(dev, MT_DMA_DCR0,
+ FIELD_PREP(MT_DMA_DCR0_MAX_RX_LEN, 3072) |
+ MT_DMA_DCR0_RX_VEC_DROP);
+ /* disable TDLS filtering */
+ mt76_clear(dev, MT_WF_PFCR, MT_WF_PFCR_TDLS_EN);
+ mt76_set(dev, MT_WF_MIB_SCR0, MT_MIB_SCR0_AGG_CNT_RANGE_EN);
+ if (is_mt7663(&dev->mt76)) {
+ mt76_wr(dev, MT_WF_AGG(0x160), 0x5c341c02);
+ mt76_wr(dev, MT_WF_AGG(0x164), 0x70708040);
+ } else {
+ mt7615_init_mac_chain(dev, 1);
+ }
+}
+EXPORT_SYMBOL_GPL(mt7615_mac_init);
+
+void mt7615_check_offload_capability(struct mt7615_dev *dev)
+{
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ struct wiphy *wiphy = hw->wiphy;
+
+ if (mt7615_firmware_offload(dev)) {
+ ieee80211_hw_set(hw, SUPPORTS_PS);
+ ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+
+ wiphy->max_remain_on_channel_duration = 5000;
+ wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+ wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
+ NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
+ NL80211_FEATURE_P2P_GO_CTWIN |
+ NL80211_FEATURE_P2P_GO_OPPPS;
+ } else {
+ dev->ops->hw_scan = NULL;
+ dev->ops->cancel_hw_scan = NULL;
+ dev->ops->sched_scan_start = NULL;
+ dev->ops->sched_scan_stop = NULL;
+ dev->ops->set_rekey_data = NULL;
+ dev->ops->remain_on_channel = NULL;
+ dev->ops->cancel_remain_on_channel = NULL;
+
+ wiphy->max_sched_scan_plan_interval = 0;
+ wiphy->max_sched_scan_ie_len = 0;
+ wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
+ wiphy->max_sched_scan_ssids = 0;
+ wiphy->max_match_sets = 0;
+ wiphy->max_sched_scan_reqs = 0;
+ }
+}
+EXPORT_SYMBOL_GPL(mt7615_check_offload_capability);
+
+bool mt7615_wait_for_mcu_init(struct mt7615_dev *dev)
+{
+ flush_work(&dev->mcu_work);
+
+ return test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
+}
+EXPORT_SYMBOL_GPL(mt7615_wait_for_mcu_init);
+
+#define CCK_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE, \
+ .hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx), \
+ .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + (_idx)), \
+}
+
+#define OFDM_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
+ .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
+}
+
+struct ieee80211_rate mt7615_rates[] = {
+ CCK_RATE(0, 10),
+ CCK_RATE(1, 20),
+ CCK_RATE(2, 55),
+ CCK_RATE(3, 110),
+ OFDM_RATE(11, 60),
+ OFDM_RATE(15, 90),
+ OFDM_RATE(10, 120),
+ OFDM_RATE(14, 180),
+ OFDM_RATE(9, 240),
+ OFDM_RATE(13, 360),
+ OFDM_RATE(8, 480),
+ OFDM_RATE(12, 540),
+};
+EXPORT_SYMBOL_GPL(mt7615_rates);
+
+static const struct ieee80211_iface_limit if_limits[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_ADHOC)
+ }, {
+ .max = MT7615_MAX_INTERFACES,
+ .types = BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_STATION)
+ }
+};
+
+static const struct ieee80211_iface_combination if_comb_radar[] = {
+ {
+ .limits = if_limits,
+ .n_limits = ARRAY_SIZE(if_limits),
+ .max_interfaces = 4,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_160) |
+ BIT(NL80211_CHAN_WIDTH_80P80),
+ }
+};
+
+static const struct ieee80211_iface_combination if_comb[] = {
+ {
+ .limits = if_limits,
+ .n_limits = ARRAY_SIZE(if_limits),
+ .max_interfaces = 4,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+ }
+};
+
+void mt7615_init_txpower(struct mt7615_dev *dev,
+ struct ieee80211_supported_band *sband)
+{
+ int i, n_chains = hweight8(dev->mphy.antenna_mask), target_chains;
+ int delta_idx, delta = mt76_tx_power_nss_delta(n_chains);
+ u8 *eep = (u8 *)dev->mt76.eeprom.data;
+ enum nl80211_band band = sband->band;
+ u8 rate_val;
+
+ delta_idx = mt7615_eeprom_get_power_delta_index(dev, band);
+ rate_val = eep[delta_idx];
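+ /* add the per-rate power offset only when both the enable and sign
+ * bits are set
+ */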
+ if ((rate_val & ~MT_EE_RATE_POWER_MASK) ==
+ (MT_EE_RATE_POWER_EN | MT_EE_RATE_POWER_SIGN))
+ delta += rate_val & MT_EE_RATE_POWER_MASK;
+
+ if (!is_mt7663(&dev->mt76) && mt7615_ext_pa_enabled(dev, band))
+ target_chains = 1;
+ else
+ target_chains = n_chains;
+
+ for (i = 0; i < sband->n_channels; i++) {
+ struct ieee80211_channel *chan = &sband->channels[i];
+ u8 target_power = 0;
+ int j;
+
+ for (j = 0; j < target_chains; j++) {
+ int index;
+
+ index = mt7615_eeprom_get_target_power_index(dev, chan, j);
+ if (index < 0)
+ continue;
+
+ target_power = max(target_power, eep[index]);
+ }
+
+ target_power = DIV_ROUND_UP(target_power + delta, 2);
+ chan->max_power = min_t(int, chan->max_reg_power,
+ target_power);
+ chan->orig_mpwr = target_power;
+ }
+}
+EXPORT_SYMBOL_GPL(mt7615_init_txpower);
+
+static void
+mt7615_regd_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt76_phy *mphy = hw->priv;
+ struct mt7615_phy *phy = mphy->priv;
+ struct cfg80211_chan_def *chandef = &mphy->chandef;
+
+ dev->mt76.region = request->dfs_region;
+
+ if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR))
+ return;
+
+ mt7615_mutex_acquire(dev);
+ mt7615_dfs_init_radar_detector(phy);
+ mt7615_mutex_release(dev);
+}
+
+static void
+mt7615_init_wiphy(struct ieee80211_hw *hw)
+{
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ struct wiphy *wiphy = hw->wiphy;
+
+ hw->queues = 4;
+ hw->max_rates = 3;
+ hw->max_report_rates = 7;
+ hw->max_rate_tries = 11;
+
+ phy->slottime = 9;
+
+ hw->sta_data_size = sizeof(struct mt7615_sta);
+ hw->vif_data_size = sizeof(struct mt7615_vif);
+
+ if (is_mt7663(&phy->dev->mt76)) {
+ wiphy->iface_combinations = if_comb;
+ wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+ } else {
+ wiphy->iface_combinations = if_comb_radar;
+ wiphy->n_iface_combinations = ARRAY_SIZE(if_comb_radar);
+ }
+ wiphy->reg_notifier = mt7615_regd_notifier;
+
+ wiphy->max_sched_scan_plan_interval = MT7615_MAX_SCHED_SCAN_INTERVAL;
+ wiphy->max_sched_scan_ie_len = IEEE80211_MAX_DATA_LEN;
+ wiphy->max_scan_ie_len = MT7615_SCAN_IE_LEN;
+ wiphy->max_sched_scan_ssids = MT7615_MAX_SCHED_SCAN_SSID;
+ wiphy->max_match_sets = MT7615_MAX_SCAN_MATCH;
+ wiphy->max_sched_scan_reqs = 1;
+ wiphy->max_scan_ssids = 4;
+
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SET_SCAN_DWELL);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
+
+ ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
+ ieee80211_hw_set(hw, TX_STATUS_NO_AMPDU_LEN);
+ ieee80211_hw_set(hw, WANT_MONITOR_VIF);
+
+ if (is_mt7615(&phy->dev->mt76))
+ hw->max_tx_fragments = MT_TXP_MAX_BUF_NUM;
+ else
+ hw->max_tx_fragments = MT_HW_TXP_MAX_BUF_NUM;
+}
+
+static void
+mt7615_cap_dbdc_enable(struct mt7615_dev *dev)
+{
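+ /* In DBDC mode the chains are split between the two bands: drop
+ * 160 MHz / 80+80 VHT support and shrink the primary PHY antenna mask
+ */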
+ dev->mphy.sband_5g.sband.vht_cap.cap &=
+ ~(IEEE80211_VHT_CAP_SHORT_GI_160 |
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ);
+ if (dev->chainmask == 0xf)
+ dev->mphy.antenna_mask = dev->chainmask >> 2;
+ else
+ dev->mphy.antenna_mask = dev->chainmask >> 1;
+ dev->phy.chainmask = dev->mphy.antenna_mask;
+ dev->mphy.hw->wiphy->available_antennas_rx = dev->phy.chainmask;
+ dev->mphy.hw->wiphy->available_antennas_tx = dev->phy.chainmask;
+ mt76_set_stream_caps(&dev->mphy, true);
+}
+
+static void
+mt7615_cap_dbdc_disable(struct mt7615_dev *dev)
+{
+ dev->mphy.sband_5g.sband.vht_cap.cap |=
+ IEEE80211_VHT_CAP_SHORT_GI_160 |
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
+ dev->mphy.antenna_mask = dev->chainmask;
+ dev->phy.chainmask = dev->chainmask;
+ dev->mphy.hw->wiphy->available_antennas_rx = dev->chainmask;
+ dev->mphy.hw->wiphy->available_antennas_tx = dev->chainmask;
+ mt76_set_stream_caps(&dev->mphy, true);
+}
+
+int mt7615_register_ext_phy(struct mt7615_dev *dev)
+{
+ struct mt7615_phy *phy = mt7615_ext_phy(dev);
+ struct mt76_phy *mphy;
+ int ret;
+
+ if (!is_mt7615(&dev->mt76))
+ return -EOPNOTSUPP;
+
+ if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
+ return -EINVAL;
+
+ if (phy)
+ return 0;
+
+ mt7615_cap_dbdc_enable(dev);
+ mphy = mt76_alloc_phy(&dev->mt76, sizeof(*phy), &mt7615_ops);
+ if (!mphy)
+ return -ENOMEM;
+
+ phy = mphy->priv;
+ phy->dev = dev;
+ phy->mt76 = mphy;
+ phy->chainmask = dev->chainmask & ~dev->phy.chainmask;
+ mphy->antenna_mask = BIT(hweight8(phy->chainmask)) - 1;
+ mt7615_init_wiphy(mphy->hw);
+
+ INIT_DELAYED_WORK(&phy->mac_work, mt7615_mac_work);
+ INIT_DELAYED_WORK(&phy->scan_work, mt7615_scan_work);
+ skb_queue_head_init(&phy->scan_event_list);
+
+ INIT_WORK(&phy->roc_work, mt7615_roc_work);
+ timer_setup(&phy->roc_timer, mt7615_roc_timer, 0);
+ init_waitqueue_head(&phy->roc_wait);
+
+ mt7615_mac_set_scs(phy, true);
+
+ /*
+ * Make the secondary PHY MAC address local without overlapping with
+ * the usual MAC address allocation scheme on multiple virtual interfaces
+ */
+ mphy->hw->wiphy->perm_addr[0] |= 2;
+ mphy->hw->wiphy->perm_addr[0] ^= BIT(7);
+
+ /* second phy can only handle 5 GHz */
+ mphy->sband_2g.sband.n_channels = 0;
+ mphy->hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
+
+ ret = mt76_register_phy(mphy);
+ if (ret)
+ ieee80211_free_hw(mphy->hw);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt7615_register_ext_phy);
+
+void mt7615_unregister_ext_phy(struct mt7615_dev *dev)
+{
+ struct mt7615_phy *phy = mt7615_ext_phy(dev);
+ struct mt76_phy *mphy = dev->mt76.phy2;
+
+ if (!phy)
+ return;
+
+ mt7615_cap_dbdc_disable(dev);
+ mt76_unregister_phy(mphy);
+ ieee80211_free_hw(mphy->hw);
+}
+EXPORT_SYMBOL_GPL(mt7615_unregister_ext_phy);
+
+void mt7615_init_device(struct mt7615_dev *dev)
+{
+ struct ieee80211_hw *hw = mt76_hw(dev);
+
+ dev->phy.dev = dev;
+ dev->phy.mt76 = &dev->mt76.phy;
+ dev->mt76.phy.priv = &dev->phy;
+
+ INIT_DELAYED_WORK(&dev->pm.ps_work, mt7615_pm_power_save_work);
+ INIT_WORK(&dev->pm.wake_work, mt7615_pm_wake_work);
+ init_completion(&dev->pm.wake_cmpl);
+ spin_lock_init(&dev->pm.txq_lock);
+ set_bit(MT76_STATE_PM, &dev->mphy.state);
+ INIT_DELAYED_WORK(&dev->phy.mac_work, mt7615_mac_work);
+ INIT_DELAYED_WORK(&dev->phy.scan_work, mt7615_scan_work);
+ skb_queue_head_init(&dev->phy.scan_event_list);
+ INIT_LIST_HEAD(&dev->sta_poll_list);
+ spin_lock_init(&dev->sta_poll_lock);
+ init_waitqueue_head(&dev->reset_wait);
+ init_waitqueue_head(&dev->phy.roc_wait);
+
+ INIT_WORK(&dev->reset_work, mt7615_mac_reset_work);
+ INIT_WORK(&dev->phy.roc_work, mt7615_roc_work);
+ timer_setup(&dev->phy.roc_timer, mt7615_roc_timer, 0);
+
+ mt7615_init_wiphy(hw);
+ dev->pm.idle_timeout = MT7615_PM_TIMEOUT;
+ dev->mphy.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+ dev->mphy.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+ dev->mphy.sband_5g.sband.vht_cap.cap |=
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+ mt7615_cap_dbdc_disable(dev);
+ dev->phy.dfs_state = -1;
+
+#ifdef CONFIG_NL80211_TESTMODE
+ dev->mt76.test_ops = &mt7615_testmode_ops;
+#endif
+}
+EXPORT_SYMBOL_GPL(mt7615_init_device);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
new file mode 100644
index 000000000..4364f73b5
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -0,0 +1,2336 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Ryder Lee <ryder.lee@mediatek.com>
+ * Roy Luo <royluo@google.com>
+ * Felix Fietkau <nbd@nbd.name>
+ * Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/timekeeping.h>
+#include "mt7615.h"
+#include "../trace.h"
+#include "../dma.h"
+#include "mt7615_trace.h"
+#include "mac.h"
+
+#define to_rssi(field, rxv) ((FIELD_GET(field, rxv) - 220) / 2)
+
+static const struct mt7615_dfs_radar_spec etsi_radar_specs = {
+ .pulse_th = { 40, -10, -80, 800, 3360, 128, 5200 },
+ .radar_pattern = {
+ [5] = { 1, 0, 6, 32, 28, 0, 17, 990, 5010, 1, 1 },
+ [6] = { 1, 0, 9, 32, 28, 0, 27, 615, 5010, 1, 1 },
+ [7] = { 1, 0, 15, 32, 28, 0, 27, 240, 445, 1, 1 },
+ [8] = { 1, 0, 12, 32, 28, 0, 42, 240, 510, 1, 1 },
+ [9] = { 1, 1, 0, 0, 0, 0, 14, 2490, 3343, 0, 0, 12, 32, 28 },
+ [10] = { 1, 1, 0, 0, 0, 0, 14, 2490, 3343, 0, 0, 15, 32, 24 },
+ [11] = { 1, 1, 0, 0, 0, 0, 14, 823, 2510, 0, 0, 18, 32, 28 },
+ [12] = { 1, 1, 0, 0, 0, 0, 14, 823, 2510, 0, 0, 27, 32, 24 },
+ },
+};
+
+static const struct mt7615_dfs_radar_spec fcc_radar_specs = {
+ .pulse_th = { 40, -10, -80, 800, 3360, 128, 5200 },
+ .radar_pattern = {
+ [0] = { 1, 0, 9, 32, 28, 0, 13, 508, 3076, 1, 1 },
+ [1] = { 1, 0, 12, 32, 28, 0, 17, 140, 240, 1, 1 },
+ [2] = { 1, 0, 8, 32, 28, 0, 22, 190, 510, 1, 1 },
+ [3] = { 1, 0, 6, 32, 28, 0, 32, 190, 510, 1, 1 },
+ [4] = { 1, 0, 9, 255, 28, 0, 13, 323, 343, 1, 32 },
+ },
+};
+
+static const struct mt7615_dfs_radar_spec jp_radar_specs = {
+ .pulse_th = { 40, -10, -80, 800, 3360, 128, 5200 },
+ .radar_pattern = {
+ [0] = { 1, 0, 8, 32, 28, 0, 13, 508, 3076, 1, 1 },
+ [1] = { 1, 0, 12, 32, 28, 0, 17, 140, 240, 1, 1 },
+ [2] = { 1, 0, 8, 32, 28, 0, 22, 190, 510, 1, 1 },
+ [3] = { 1, 0, 6, 32, 28, 0, 32, 190, 510, 1, 1 },
+ [4] = { 1, 0, 9, 32, 28, 0, 13, 323, 343, 1, 32 },
+ [13] = { 1, 0, 8, 32, 28, 0, 14, 3836, 3856, 1, 1 },
+ [14] = { 1, 0, 8, 32, 28, 0, 14, 3990, 4010, 1, 1 },
+ },
+};
+
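+/* Look up the wcid a received frame belongs to; multicast frames from a
+ * known station are attributed to the wcid of that station's vif.
+ */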
+static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev,
+ u8 idx, bool unicast)
+{
+ struct mt7615_sta *sta;
+ struct mt76_wcid *wcid;
+
+ if (idx >= MT7615_WTBL_SIZE)
+ return NULL;
+
+ wcid = rcu_dereference(dev->mt76.wcid[idx]);
+ if (unicast || !wcid)
+ return wcid;
+
+ if (!wcid->sta)
+ return NULL;
+
+ sta = container_of(wcid, struct mt7615_sta, wcid);
+ if (!sta->vif)
+ return NULL;
+
+ return &sta->vif->sta.wcid;
+}
+
+void mt7615_mac_reset_counters(struct mt7615_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ mt76_rr(dev, MT_TX_AGG_CNT(0, i));
+ mt76_rr(dev, MT_TX_AGG_CNT(1, i));
+ }
+
+ memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats));
+ dev->mt76.phy.survey_time = ktime_get_boottime();
+ if (dev->mt76.phy2)
+ dev->mt76.phy2->survey_time = ktime_get_boottime();
+
+ /* reset airtime counters */
+ mt76_rr(dev, MT_MIB_SDR9(0));
+ mt76_rr(dev, MT_MIB_SDR9(1));
+
+ mt76_rr(dev, MT_MIB_SDR36(0));
+ mt76_rr(dev, MT_MIB_SDR36(1));
+
+ mt76_rr(dev, MT_MIB_SDR37(0));
+ mt76_rr(dev, MT_MIB_SDR37(1));
+
+ mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
+ mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
+}
+
+void mt7615_mac_set_timing(struct mt7615_phy *phy)
+{
+ s16 coverage_class = phy->coverage_class;
+ struct mt7615_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+ u32 val, reg_offset;
+ u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
+ FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
+ u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
+ FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
+ int sifs, offset;
+ bool is_5ghz = phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ;
+
+ if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
+ return;
+
+ if (is_5ghz)
+ sifs = 16;
+ else
+ sifs = 10;
+
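+ /* temporarily disable TX/RX on this band while the timing registers
+ * are updated
+ */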
+ if (ext_phy) {
+ coverage_class = max_t(s16, dev->phy.coverage_class,
+ coverage_class);
+ mt76_set(dev, MT_ARB_SCR,
+ MT_ARB_SCR_TX1_DISABLE | MT_ARB_SCR_RX1_DISABLE);
+ } else {
+ struct mt7615_phy *phy_ext = mt7615_ext_phy(dev);
+
+ if (phy_ext)
+ coverage_class = max_t(s16, phy_ext->coverage_class,
+ coverage_class);
+ mt76_set(dev, MT_ARB_SCR,
+ MT_ARB_SCR_TX0_DISABLE | MT_ARB_SCR_RX0_DISABLE);
+ }
+ udelay(1);
+
+ offset = 3 * coverage_class;
+ reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
+ FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
+ mt76_wr(dev, MT_TMAC_CDTR, cck + reg_offset);
+ mt76_wr(dev, MT_TMAC_ODTR, ofdm + reg_offset);
+
+ mt76_wr(dev, MT_TMAC_ICR(ext_phy),
+ FIELD_PREP(MT_IFS_EIFS, 360) |
+ FIELD_PREP(MT_IFS_RIFS, 2) |
+ FIELD_PREP(MT_IFS_SIFS, sifs) |
+ FIELD_PREP(MT_IFS_SLOT, phy->slottime));
+
+ if (phy->slottime < 20 || is_5ghz)
+ val = MT7615_CFEND_RATE_DEFAULT;
+ else
+ val = MT7615_CFEND_RATE_11B;
+
+ mt76_rmw_field(dev, MT_AGG_ACR(ext_phy), MT_AGG_ACR_CFEND_RATE, val);
+ if (ext_phy)
+ mt76_clear(dev, MT_ARB_SCR,
+ MT_ARB_SCR_TX1_DISABLE | MT_ARB_SCR_RX1_DISABLE);
+ else
+ mt76_clear(dev, MT_ARB_SCR,
+ MT_ARB_SCR_TX0_DISABLE | MT_ARB_SCR_RX0_DISABLE);
+}
+
+static void
+mt7615_get_status_freq_info(struct mt7615_dev *dev, struct mt76_phy *mphy,
+ struct mt76_rx_status *status, u8 chfreq)
+{
+ if (!test_bit(MT76_HW_SCANNING, &mphy->state) &&
+ !test_bit(MT76_HW_SCHED_SCANNING, &mphy->state) &&
+ !test_bit(MT76_STATE_ROC, &mphy->state)) {
+ status->freq = mphy->chandef.chan->center_freq;
+ status->band = mphy->chandef.chan->band;
+ return;
+ }
+
+ status->band = chfreq <= 14 ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+ status->freq = ieee80211_channel_to_frequency(chfreq, status->band);
+}
+
+static void mt7615_mac_fill_tm_rx(struct mt7615_dev *dev, __le32 *rxv)
+{
+#ifdef CONFIG_NL80211_TESTMODE
+ u32 rxv1 = le32_to_cpu(rxv[0]);
+ u32 rxv3 = le32_to_cpu(rxv[2]);
+ u32 rxv4 = le32_to_cpu(rxv[3]);
+ u32 rxv5 = le32_to_cpu(rxv[4]);
+ u8 cbw = FIELD_GET(MT_RXV1_FRAME_MODE, rxv1);
+ u8 mode = FIELD_GET(MT_RXV1_TX_MODE, rxv1);
+ s16 foe = FIELD_GET(MT_RXV5_FOE, rxv5);
+ u32 foe_const = (BIT(cbw + 1) & 0xf) * 10000;
+
+ if (!mode) {
+ /* CCK */
+ foe &= ~BIT(11);
+ foe *= 1000;
+ foe >>= 11;
+ } else {
+ if (foe > 2048)
+ foe -= 4096;
+
+ foe = (foe * foe_const) >> 15;
+ }
+
+ dev->test.last_freq_offset = foe;
+ dev->test.last_rcpi[0] = FIELD_GET(MT_RXV4_RCPI0, rxv4);
+ dev->test.last_rcpi[1] = FIELD_GET(MT_RXV4_RCPI1, rxv4);
+ dev->test.last_rcpi[2] = FIELD_GET(MT_RXV4_RCPI2, rxv4);
+ dev->test.last_rcpi[3] = FIELD_GET(MT_RXV4_RCPI3, rxv4);
+ dev->test.last_ib_rssi = FIELD_GET(MT_RXV3_IB_RSSI, rxv3);
+ dev->test.last_wb_rssi = FIELD_GET(MT_RXV3_WB_RSSI, rxv3);
+#endif
+}
+
+static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt7615_phy *phy = &dev->phy;
+ struct mt7615_phy *phy2 = dev->mt76.phy2 ? dev->mt76.phy2->priv : NULL;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_hdr *hdr;
+ __le32 *rxd = (__le32 *)skb->data;
+ u32 rxd0 = le32_to_cpu(rxd[0]);
+ u32 rxd1 = le32_to_cpu(rxd[1]);
+ u32 rxd2 = le32_to_cpu(rxd[2]);
+ __le32 rxd12 = rxd[12];
+ bool unicast, remove_pad, insert_ccmp_hdr = false;
+ int phy_idx;
+ int i, idx;
+ u8 chfreq;
+
+ memset(status, 0, sizeof(*status));
+
+ chfreq = FIELD_GET(MT_RXD1_NORMAL_CH_FREQ, rxd1);
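+ /* Pick the receiving PHY from the RXD channel number; if both PHYs
+ * are on the same channel the decision is deferred until the
+ * per-chain RSSI is parsed below.
+ */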
+ if (!phy2)
+ phy_idx = 0;
+ else if (phy2->chfreq == phy->chfreq)
+ phy_idx = -1;
+ else if (phy->chfreq == chfreq)
+ phy_idx = 0;
+ else if (phy2->chfreq == chfreq)
+ phy_idx = 1;
+ else
+ phy_idx = -1;
+
+ unicast = (rxd1 & MT_RXD1_NORMAL_ADDR_TYPE) == MT_RXD1_NORMAL_U2M;
+ idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
+ status->wcid = mt7615_rx_get_wcid(dev, idx, unicast);
+
+ if (status->wcid) {
+ struct mt7615_sta *msta;
+
+ msta = container_of(status->wcid, struct mt7615_sta, wcid);
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (list_empty(&msta->poll_list))
+ list_add_tail(&msta->poll_list, &dev->sta_poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+ }
+
+ if (rxd2 & MT_RXD2_NORMAL_FCS_ERR)
+ status->flag |= RX_FLAG_FAILED_FCS_CRC;
+
+ if (rxd2 & MT_RXD2_NORMAL_TKIP_MIC_ERR)
+ status->flag |= RX_FLAG_MMIC_ERROR;
+
+ if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
+ !(rxd2 & (MT_RXD2_NORMAL_CLM | MT_RXD2_NORMAL_CM))) {
+ status->flag |= RX_FLAG_DECRYPTED;
+ status->flag |= RX_FLAG_IV_STRIPPED;
+ status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
+ }
+
+ remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;
+
+ if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
+ return -EINVAL;
+
+ rxd += 4;
+ if (rxd0 & MT_RXD0_NORMAL_GROUP_4) {
+ rxd += 4;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+ }
+
+ if (rxd0 & MT_RXD0_NORMAL_GROUP_1) {
+ u8 *data = (u8 *)rxd;
+
+ if (status->flag & RX_FLAG_DECRYPTED) {
+ status->iv[0] = data[5];
+ status->iv[1] = data[4];
+ status->iv[2] = data[3];
+ status->iv[3] = data[2];
+ status->iv[4] = data[1];
+ status->iv[5] = data[0];
+
+ insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
+ }
+ rxd += 4;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+ }
+
+ if (rxd0 & MT_RXD0_NORMAL_GROUP_2) {
+ rxd += 2;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+ }
+
+ if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
+ u32 rxdg5 = le32_to_cpu(rxd[5]);
+
+ /*
+ * If both PHYs are on the same channel and we don't have a WCID,
+ * we need to figure out which PHY this packet was received on.
+ * On the primary PHY, the noise value for the chains belonging to the
+ * second PHY will be set to the noise value of the last packet from
+ * that PHY.
+ */
+ if (phy_idx < 0) {
+ int first_chain = ffs(phy2->chainmask) - 1;
+
+ phy_idx = ((rxdg5 >> (first_chain * 8)) & 0xff) == 0;
+ }
+ }
+
+ if (phy_idx == 1 && phy2) {
+ mphy = dev->mt76.phy2;
+ phy = phy2;
+ status->ext_phy = true;
+ }
+
+ if (!mt7615_firmware_offload(dev) && chfreq != phy->chfreq)
+ return -EINVAL;
+
+ mt7615_get_status_freq_info(dev, mphy, status, chfreq);
+ if (status->band == NL80211_BAND_5GHZ)
+ sband = &mphy->sband_5g.sband;
+ else
+ sband = &mphy->sband_2g.sband;
+
+ if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
+ return -EINVAL;
+
+ if (!sband->channels)
+ return -EINVAL;
+
+ if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB |
+ MT_RXD2_NORMAL_NON_AMPDU))) {
+ status->flag |= RX_FLAG_AMPDU_DETAILS;
+
+ /* all subframes of an A-MPDU have the same timestamp */
+ if (phy->rx_ampdu_ts != rxd12) {
+ if (!++phy->ampdu_ref)
+ phy->ampdu_ref++;
+ }
+ phy->rx_ampdu_ts = rxd12;
+
+ status->ampdu_ref = phy->ampdu_ref;
+ }
+
+ if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
+ u32 rxdg0 = le32_to_cpu(rxd[0]);
+ u32 rxdg1 = le32_to_cpu(rxd[1]);
+ u32 rxdg3 = le32_to_cpu(rxd[3]);
+ u8 stbc = FIELD_GET(MT_RXV1_HT_STBC, rxdg0);
+ bool cck = false;
+
+ i = FIELD_GET(MT_RXV1_TX_RATE, rxdg0);
+ switch (FIELD_GET(MT_RXV1_TX_MODE, rxdg0)) {
+ case MT_PHY_TYPE_CCK:
+ cck = true;
+ fallthrough;
+ case MT_PHY_TYPE_OFDM:
+ i = mt76_get_rate(&dev->mt76, sband, i, cck);
+ break;
+ case MT_PHY_TYPE_HT_GF:
+ case MT_PHY_TYPE_HT:
+ status->encoding = RX_ENC_HT;
+ if (i > 31)
+ return -EINVAL;
+ break;
+ case MT_PHY_TYPE_VHT:
+ status->nss = FIELD_GET(MT_RXV2_NSTS, rxdg1) + 1;
+ status->encoding = RX_ENC_VHT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ status->rate_idx = i;
+
+ switch (FIELD_GET(MT_RXV1_FRAME_MODE, rxdg0)) {
+ case MT_PHY_BW_20:
+ break;
+ case MT_PHY_BW_40:
+ status->bw = RATE_INFO_BW_40;
+ break;
+ case MT_PHY_BW_80:
+ status->bw = RATE_INFO_BW_80;
+ break;
+ case MT_PHY_BW_160:
+ status->bw = RATE_INFO_BW_160;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (rxdg0 & MT_RXV1_HT_SHORT_GI)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ if (rxdg0 & MT_RXV1_HT_AD_CODE)
+ status->enc_flags |= RX_ENC_FLAG_LDPC;
+
+ status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
+
+ status->chains = mphy->antenna_mask;
+ status->chain_signal[0] = to_rssi(MT_RXV4_RCPI0, rxdg3);
+ status->chain_signal[1] = to_rssi(MT_RXV4_RCPI1, rxdg3);
+ status->chain_signal[2] = to_rssi(MT_RXV4_RCPI2, rxdg3);
+ status->chain_signal[3] = to_rssi(MT_RXV4_RCPI3, rxdg3);
+ status->signal = status->chain_signal[0];
+
+ for (i = 1; i < hweight8(mphy->antenna_mask); i++) {
+ if (!(status->chains & BIT(i)))
+ continue;
+
+ status->signal = max(status->signal,
+ status->chain_signal[i]);
+ }
+
+ mt7615_mac_fill_tm_rx(dev, rxd);
+
+ rxd += 6;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+ }
+
+ skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);
+
+ if (insert_ccmp_hdr) {
+ u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
+
+ mt76_insert_ccmp_hdr(skb, key_id);
+ }
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
+ return 0;
+
+ status->aggr = unicast &&
+ !ieee80211_is_qos_nullfunc(hdr->frame_control);
+ status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+ status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+
+ return 0;
+}
+
+void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
+{
+}
+EXPORT_SYMBOL_GPL(mt7615_sta_ps);
+
+static u16
+mt7615_mac_tx_rate_val(struct mt7615_dev *dev,
+ struct mt76_phy *mphy,
+ const struct ieee80211_tx_rate *rate,
+ bool stbc, u8 *bw)
+{
+ u8 phy, nss, rate_idx;
+ u16 rateval = 0;
+
+ *bw = 0;
+
+ if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+ rate_idx = ieee80211_rate_get_vht_mcs(rate);
+ nss = ieee80211_rate_get_vht_nss(rate);
+ phy = MT_PHY_TYPE_VHT;
+ if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ *bw = 1;
+ else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+ *bw = 2;
+ else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
+ *bw = 3;
+ } else if (rate->flags & IEEE80211_TX_RC_MCS) {
+ rate_idx = rate->idx;
+ nss = 1 + (rate->idx >> 3);
+ phy = MT_PHY_TYPE_HT;
+ if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
+ phy = MT_PHY_TYPE_HT_GF;
+ if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ *bw = 1;
+ } else {
+ const struct ieee80211_rate *r;
+ int band = mphy->chandef.chan->band;
+ u16 val;
+
+ nss = 1;
+ r = &mphy->hw->wiphy->bands[band]->bitrates[rate->idx];
+ if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+ val = r->hw_value_short;
+ else
+ val = r->hw_value;
+
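+ /* legacy hw_value encodes the PHY type in the high byte and the rate
+ * index in the low byte (see CCK_RATE/OFDM_RATE)
+ */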
+ phy = val >> 8;
+ rate_idx = val & 0xff;
+ }
+
+ if (stbc && nss == 1) {
+ nss++;
+ rateval |= MT_TX_RATE_STBC;
+ }
+
+ rateval |= (FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
+ FIELD_PREP(MT_TX_RATE_MODE, phy) |
+ FIELD_PREP(MT_TX_RATE_NSS, nss - 1));
+
+ return rateval;
+}
+
+int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta, int pid,
+ struct ieee80211_key_conf *key, bool beacon)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_rate *rate = &info->control.rates[0];
+ bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY;
+ bool multicast = is_multicast_ether_addr(hdr->addr1);
+ struct ieee80211_vif *vif = info->control.vif;
+ bool is_mmio = mt76_is_mmio(&dev->mt76);
+ u32 val, sz_txd = is_mmio ? MT_TXD_SIZE : MT_USB_TXD_SIZE;
+ struct mt76_phy *mphy = &dev->mphy;
+ __le16 fc = hdr->frame_control;
+ int tx_count = 8;
+ u16 seqno = 0;
+
+ if (vif) {
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+
+ omac_idx = mvif->omac_idx;
+ wmm_idx = mvif->wmm_idx;
+ }
+
+ if (sta) {
+ struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+
+ tx_count = msta->rate_count;
+ }
+
+ if (ext_phy && dev->mt76.phy2)
+ mphy = dev->mt76.phy2;
+
+ fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
+ fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;
+
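+ /* LMAC queue selection: beacons use the BCN queue, frames from the
+ * PSD/management queues use ALTX, data frames map to the per-WMM-set
+ * AC queues
+ */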
+ if (beacon) {
+ p_fmt = MT_TX_TYPE_FW;
+ q_idx = ext_phy ? MT_LMAC_BCN1 : MT_LMAC_BCN0;
+ } else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) {
+ p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
+ q_idx = ext_phy ? MT_LMAC_ALTX1 : MT_LMAC_ALTX0;
+ } else {
+ p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
+ q_idx = wmm_idx * MT7615_MAX_WMM_SETS +
+ mt7615_lmac_mapping(dev, skb_get_queue_mapping(skb));
+ }
+
+ val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) |
+ FIELD_PREP(MT_TXD0_P_IDX, MT_TX_PORT_IDX_LMAC) |
+ FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
+ txwi[0] = cpu_to_le32(val);
+
+ val = MT_TXD1_LONG_FORMAT |
+ FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
+ FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
+ FIELD_PREP(MT_TXD1_HDR_INFO,
+ ieee80211_get_hdrlen_from_skb(skb) / 2) |
+ FIELD_PREP(MT_TXD1_TID,
+ skb->priority & IEEE80211_QOS_CTL_TID_MASK) |
+ FIELD_PREP(MT_TXD1_PKT_FMT, p_fmt) |
+ FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);
+ txwi[1] = cpu_to_le32(val);
+
+ val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
+ FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) |
+ FIELD_PREP(MT_TXD2_MULTICAST, multicast);
+ if (key) {
+ if (multicast && ieee80211_is_robust_mgmt_frame(skb) &&
+ key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
+ val |= MT_TXD2_BIP;
+ txwi[3] = 0;
+ } else {
+ txwi[3] = cpu_to_le32(MT_TXD3_PROTECT_FRAME);
+ }
+ } else {
+ txwi[3] = 0;
+ }
+ txwi[2] = cpu_to_le32(val);
+
+ if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
+ txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);
+
+ txwi[4] = 0;
+ txwi[6] = 0;
+
+ if (rate->idx >= 0 && rate->count &&
+ !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
+ bool stbc = info->flags & IEEE80211_TX_CTL_STBC;
+ u8 bw;
+ u16 rateval = mt7615_mac_tx_rate_val(dev, mphy, rate, stbc,
+ &bw);
+
+ txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);
+
+ val = MT_TXD6_FIXED_BW |
+ FIELD_PREP(MT_TXD6_BW, bw) |
+ FIELD_PREP(MT_TXD6_TX_RATE, rateval);
+ txwi[6] |= cpu_to_le32(val);
+
+ if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+ txwi[6] |= cpu_to_le32(MT_TXD6_SGI);
+
+ if (info->flags & IEEE80211_TX_CTL_LDPC)
+ txwi[6] |= cpu_to_le32(MT_TXD6_LDPC);
+
+ if (!(rate->flags & (IEEE80211_TX_RC_MCS |
+ IEEE80211_TX_RC_VHT_MCS)))
+ txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);
+
+ tx_count = rate->count;
+ }
+
+ if (!ieee80211_is_beacon(fc)) {
+ struct ieee80211_hw *hw = mt76_hw(dev);
+
+ val = MT_TXD5_TX_STATUS_HOST | FIELD_PREP(MT_TXD5_PID, pid);
+ if (!ieee80211_hw_check(hw, SUPPORTS_PS))
+ val |= MT_TXD5_SW_POWER_MGMT;
+ txwi[5] = cpu_to_le32(val);
+ } else {
+ txwi[5] = 0;
+ /* use maximum tx count for beacons */
+ tx_count = 0x1f;
+ }
+
+ val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
+ if (info->flags & IEEE80211_TX_CTL_INJECTED) {
+ seqno = le16_to_cpu(hdr->seq_ctrl);
+
+ if (ieee80211_is_back_req(hdr->frame_control)) {
+ struct ieee80211_bar *bar;
+
+ bar = (struct ieee80211_bar *)skb->data;
+ seqno = le16_to_cpu(bar->start_seq_num);
+ }
+
+ val |= MT_TXD3_SN_VALID |
+ FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
+ }
+
+ txwi[3] |= cpu_to_le32(val);
+
+ if (info->flags & IEEE80211_TX_CTL_NO_ACK)
+ txwi[3] |= cpu_to_le32(MT_TXD3_NO_ACK);
+
+ val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
+ FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype) |
+ FIELD_PREP(MT_TXD7_SPE_IDX, 0x18);
+ txwi[7] = cpu_to_le32(val);
+ if (!is_mmio) {
+ val = FIELD_PREP(MT_TXD8_L_TYPE, fc_type) |
+ FIELD_PREP(MT_TXD8_L_SUB_TYPE, fc_stype);
+ txwi[8] = cpu_to_le32(val);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt7615_mac_write_txwi);
+
+static void
+mt7615_txp_skb_unmap_fw(struct mt76_dev *dev, struct mt7615_fw_txp *txp)
+{
+ int i;
+
+ for (i = 0; i < txp->nbuf; i++)
+ dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
+ le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
+}
+
+static void
+mt7615_txp_skb_unmap_hw(struct mt76_dev *dev, struct mt7615_hw_txp *txp)
+{
+ u32 last_mask;
+ int i;
+
+ last_mask = is_mt7663(dev) ? MT_TXD_LEN_LAST : MT_TXD_LEN_MSDU_LAST;
+
+ for (i = 0; i < ARRAY_SIZE(txp->ptr); i++) {
+ struct mt7615_txp_ptr *ptr = &txp->ptr[i];
+ bool last;
+ u16 len;
+
+ len = le16_to_cpu(ptr->len0);
+ last = len & last_mask;
+ len &= MT_TXD_LEN_MASK;
+ dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf0), len,
+ DMA_TO_DEVICE);
+ if (last)
+ break;
+
+ len = le16_to_cpu(ptr->len1);
+ last = len & last_mask;
+ len &= MT_TXD_LEN_MASK;
+ dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf1), len,
+ DMA_TO_DEVICE);
+ if (last)
+ break;
+ }
+}
+
+void mt7615_txp_skb_unmap(struct mt76_dev *dev,
+ struct mt76_txwi_cache *t)
+{
+ struct mt7615_txp_common *txp;
+
+ txp = mt7615_txwi_to_txp(dev, t);
+ if (is_mt7615(dev))
+ mt7615_txp_skb_unmap_fw(dev, &txp->fw);
+ else
+ mt7615_txp_skb_unmap_hw(dev, &txp->hw);
+}
+EXPORT_SYMBOL_GPL(mt7615_txp_skb_unmap);
+
+bool mt7615_mac_wtbl_update(struct mt7615_dev *dev, int idx, u32 mask)
+{
+ mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
+ FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);
+
+ return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
+ 0, 5000);
+}
+
+void mt7615_mac_sta_poll(struct mt7615_dev *dev)
+{
+ static const u8 ac_to_tid[4] = {
+ [IEEE80211_AC_BE] = 0,
+ [IEEE80211_AC_BK] = 1,
+ [IEEE80211_AC_VI] = 4,
+ [IEEE80211_AC_VO] = 6
+ };
+ static const u8 hw_queue_map[] = {
+ [IEEE80211_AC_BK] = 0,
+ [IEEE80211_AC_BE] = 1,
+ [IEEE80211_AC_VI] = 2,
+ [IEEE80211_AC_VO] = 3,
+ };
+ struct ieee80211_sta *sta;
+ struct mt7615_sta *msta;
+ u32 addr, tx_time[4], rx_time[4];
+ struct list_head sta_poll_list;
+ int i;
+
+ INIT_LIST_HEAD(&sta_poll_list);
+ spin_lock_bh(&dev->sta_poll_lock);
+ list_splice_init(&dev->sta_poll_list, &sta_poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+
+ while (!list_empty(&sta_poll_list)) {
+ bool clear = false;
+
+ msta = list_first_entry(&sta_poll_list, struct mt7615_sta,
+ poll_list);
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ list_del_init(&msta->poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+
+ addr = mt7615_mac_wtbl_addr(dev, msta->wcid.idx) + 19 * 4;
+
+ for (i = 0; i < 4; i++, addr += 8) {
+ u32 tx_last = msta->airtime_ac[i];
+ u32 rx_last = msta->airtime_ac[i + 4];
+
+ msta->airtime_ac[i] = mt76_rr(dev, addr);
+ msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
+ tx_time[i] = msta->airtime_ac[i] - tx_last;
+ rx_time[i] = msta->airtime_ac[i + 4] - rx_last;
+
+ if ((tx_last | rx_last) & BIT(30))
+ clear = true;
+ }
+
+ if (clear) {
+ mt7615_mac_wtbl_update(dev, msta->wcid.idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+ memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
+ }
+
+ if (!msta->wcid.sta)
+ continue;
+
+ sta = container_of((void *)msta, struct ieee80211_sta,
+ drv_priv);
+ for (i = 0; i < 4; i++) {
+ u32 tx_cur = tx_time[i];
+ u32 rx_cur = rx_time[hw_queue_map[i]];
+ u8 tid = ac_to_tid[i];
+
+ if (!tx_cur && !rx_cur)
+ continue;
+
+ ieee80211_sta_register_airtime(sta, tid, tx_cur,
+ rx_cur);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(mt7615_mac_sta_poll);
+
+static void
+mt7615_mac_update_rate_desc(struct mt7615_phy *phy, struct mt7615_sta *sta,
+ struct ieee80211_tx_rate *probe_rate,
+ struct ieee80211_tx_rate *rates,
+ struct mt7615_rate_desc *rd)
+{
+ struct mt7615_dev *dev = phy->dev;
+ struct mt76_phy *mphy = phy->mt76;
+ struct ieee80211_tx_rate *ref;
+ bool rateset, stbc = false;
+ int n_rates = sta->n_rates;
+ u8 bw, bw_prev;
+ int i, j;
+
+ for (i = n_rates; i < 4; i++)
+ rates[i] = rates[n_rates - 1];
+
+ rateset = !(sta->rate_set_tsf & BIT(0));
+ memcpy(sta->rateset[rateset].rates, rates,
+ sizeof(sta->rateset[rateset].rates));
+ if (probe_rate) {
+ sta->rateset[rateset].probe_rate = *probe_rate;
+ ref = &sta->rateset[rateset].probe_rate;
+ } else {
+ sta->rateset[rateset].probe_rate.idx = -1;
+ ref = &sta->rateset[rateset].rates[0];
+ }
+
+ rates = sta->rateset[rateset].rates;
+ for (i = 0; i < ARRAY_SIZE(sta->rateset[rateset].rates); i++) {
+ /*
+ * We don't support switching between short and long GI
+ * within the rate set. For accurate tx status reporting, we
+ * need to make sure that flags match.
+ * For improved performance, avoid duplicate entries by
+ * decrementing the MCS index if necessary
+ */
+ if ((ref->flags ^ rates[i].flags) & IEEE80211_TX_RC_SHORT_GI)
+ rates[i].flags ^= IEEE80211_TX_RC_SHORT_GI;
+
+ for (j = 0; j < i; j++) {
+ if (rates[i].idx != rates[j].idx)
+ continue;
+ if ((rates[i].flags ^ rates[j].flags) &
+ (IEEE80211_TX_RC_40_MHZ_WIDTH |
+ IEEE80211_TX_RC_80_MHZ_WIDTH |
+ IEEE80211_TX_RC_160_MHZ_WIDTH))
+ continue;
+
+ if (!rates[i].idx)
+ continue;
+
+ rates[i].idx--;
+ }
+ }
+
+ rd->val[0] = mt7615_mac_tx_rate_val(dev, mphy, &rates[0], stbc, &bw);
+ bw_prev = bw;
+
+ if (probe_rate) {
+ rd->probe_val = mt7615_mac_tx_rate_val(dev, mphy, probe_rate,
+ stbc, &bw);
+ if (bw)
+ rd->bw_idx = 1;
+ else
+ bw_prev = 0;
+ } else {
+ rd->probe_val = rd->val[0];
+ }
+
+ rd->val[1] = mt7615_mac_tx_rate_val(dev, mphy, &rates[1], stbc, &bw);
+ if (bw_prev) {
+ rd->bw_idx = 3;
+ bw_prev = bw;
+ }
+
+ rd->val[2] = mt7615_mac_tx_rate_val(dev, mphy, &rates[2], stbc, &bw);
+ if (bw_prev) {
+ rd->bw_idx = 5;
+ bw_prev = bw;
+ }
+
+ rd->val[3] = mt7615_mac_tx_rate_val(dev, mphy, &rates[3], stbc, &bw);
+ if (bw_prev)
+ rd->bw_idx = 7;
+
+ rd->rateset = rateset;
+ rd->bw = bw;
+}
+
+static int
+mt7615_mac_queue_rate_update(struct mt7615_phy *phy, struct mt7615_sta *sta,
+ struct ieee80211_tx_rate *probe_rate,
+ struct ieee80211_tx_rate *rates)
+{
+ struct mt7615_dev *dev = phy->dev;
+ struct mt7615_wtbl_desc *wd;
+
+ if (work_pending(&dev->wtbl_work))
+ return -EBUSY;
+
+ wd = kzalloc(sizeof(*wd), GFP_ATOMIC);
+ if (!wd)
+ return -ENOMEM;
+
+ wd->type = MT7615_WTBL_RATE_DESC;
+ wd->sta = sta;
+
+ mt7615_mac_update_rate_desc(phy, sta, probe_rate, rates,
+ &wd->rate);
+ list_add_tail(&wd->node, &dev->wd_head);
+ queue_work(dev->mt76.wq, &dev->wtbl_work);
+
+ return 0;
+}
+
+u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid)
+{
+ u32 addr, val, val2;
+ u8 offset;
+
+ addr = mt7615_mac_wtbl_addr(dev, wcid) + 11 * 4;
+
+ offset = tid * 12;
+ addr += 4 * (offset / 32);
+ offset %= 32;
+
+ val = mt76_rr(dev, addr);
+ val >>= offset;
+
+ if (offset > 20) {
+ addr += 4;
+ val2 = mt76_rr(dev, addr);
+ val |= val2 << (32 - offset);
+ }
+
+ return val & GENMASK(11, 0);
+}
+
+void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
+ struct ieee80211_tx_rate *probe_rate,
+ struct ieee80211_tx_rate *rates)
+{
+ int wcid = sta->wcid.idx, n_rates = sta->n_rates;
+ struct mt7615_dev *dev = phy->dev;
+ struct mt7615_rate_desc rd;
+ u32 w5, w27, addr;
+
+ if (!mt76_is_mmio(&dev->mt76)) {
+ mt7615_mac_queue_rate_update(phy, sta, probe_rate, rates);
+ return;
+ }
+
+ if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
+ return;
+
+ memset(&rd, 0, sizeof(struct mt7615_rate_desc));
+ mt7615_mac_update_rate_desc(phy, sta, probe_rate, rates, &rd);
+
+ addr = mt7615_mac_wtbl_addr(dev, wcid);
+ w27 = mt76_rr(dev, addr + 27 * 4);
+ w27 &= ~MT_WTBL_W27_CC_BW_SEL;
+ w27 |= FIELD_PREP(MT_WTBL_W27_CC_BW_SEL, rd.bw);
+
+ w5 = mt76_rr(dev, addr + 5 * 4);
+ w5 &= ~(MT_WTBL_W5_BW_CAP | MT_WTBL_W5_CHANGE_BW_RATE |
+ MT_WTBL_W5_MPDU_OK_COUNT |
+ MT_WTBL_W5_MPDU_FAIL_COUNT |
+ MT_WTBL_W5_RATE_IDX);
+ w5 |= FIELD_PREP(MT_WTBL_W5_BW_CAP, rd.bw) |
+ FIELD_PREP(MT_WTBL_W5_CHANGE_BW_RATE,
+ rd.bw_idx ? rd.bw_idx - 1 : 7);
+
+ mt76_wr(dev, MT_WTBL_RIUCR0, w5);
+
+ mt76_wr(dev, MT_WTBL_RIUCR1,
+ FIELD_PREP(MT_WTBL_RIUCR1_RATE0, rd.probe_val) |
+ FIELD_PREP(MT_WTBL_RIUCR1_RATE1, rd.val[0]) |
+ FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, rd.val[1]));
+
+ mt76_wr(dev, MT_WTBL_RIUCR2,
+ FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, rd.val[1] >> 8) |
+ FIELD_PREP(MT_WTBL_RIUCR2_RATE3, rd.val[1]) |
+ FIELD_PREP(MT_WTBL_RIUCR2_RATE4, rd.val[2]) |
+ FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, rd.val[2]));
+
+ mt76_wr(dev, MT_WTBL_RIUCR3,
+ FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, rd.val[2] >> 4) |
+ FIELD_PREP(MT_WTBL_RIUCR3_RATE6, rd.val[3]) |
+ FIELD_PREP(MT_WTBL_RIUCR3_RATE7, rd.val[3]));
+
+ mt76_wr(dev, MT_WTBL_UPDATE,
+ FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid) |
+ MT_WTBL_UPDATE_RATE_UPDATE |
+ MT_WTBL_UPDATE_TX_COUNT_CLEAR);
+
+ mt76_wr(dev, addr + 27 * 4, w27);
+
+ mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
+ sta->rate_set_tsf = mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0);
+ sta->rate_set_tsf |= rd.rateset;
+
+ if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET))
+ mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
+
+ sta->rate_count = 2 * MT7615_RATE_RETRY * n_rates;
+ sta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ sta->rate_probe = !!probe_rate;
+}
+EXPORT_SYMBOL_GPL(mt7615_mac_set_rates);
+
+int mt7615_mac_wtbl_update_key(struct mt7615_dev *dev,
+ struct mt76_wcid *wcid,
+ u8 *key, u8 keylen,
+ enum mt7615_cipher_type cipher,
+ enum set_key_cmd cmd)
+{
+ u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx) + 30 * 4;
+ u8 data[32] = {};
+
+ if (keylen > sizeof(data))
+ return -EINVAL;
+
+ mt76_rr_copy(dev, addr, data, sizeof(data));
+ if (cmd == SET_KEY) {
+ if (cipher == MT_CIPHER_TKIP) {
+ /* Rx/Tx MIC keys are swapped */
+ memcpy(data, key, 16);
+ memcpy(data + 16, key + 24, 8);
+ memcpy(data + 24, key + 16, 8);
+ } else {
+ if (cipher != MT_CIPHER_BIP_CMAC_128 && wcid->cipher)
+ memmove(data + 16, data, 16);
+ if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher)
+ memcpy(data, key, keylen);
+ else if (cipher == MT_CIPHER_BIP_CMAC_128)
+ memcpy(data + 16, key, 16);
+ }
+ } else {
+ if (wcid->cipher & ~BIT(cipher)) {
+ if (cipher != MT_CIPHER_BIP_CMAC_128)
+ memmove(data, data + 16, 16);
+ memset(data + 16, 0, 16);
+ } else {
+ memset(data, 0, sizeof(data));
+ }
+ }
+ mt76_wr_copy(dev, addr, data, sizeof(data));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt7615_mac_wtbl_update_key);
+
+int mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev,
+ struct mt76_wcid *wcid,
+ enum mt7615_cipher_type cipher,
+ int keyidx, enum set_key_cmd cmd)
+{
+ u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx), w0, w1;
+
+ if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
+ return -ETIMEDOUT;
+
+ w0 = mt76_rr(dev, addr);
+ w1 = mt76_rr(dev, addr + 4);
+ if (cmd == SET_KEY) {
+ w0 |= MT_WTBL_W0_RX_KEY_VALID |
+ FIELD_PREP(MT_WTBL_W0_RX_IK_VALID,
+ cipher == MT_CIPHER_BIP_CMAC_128);
+ if (cipher != MT_CIPHER_BIP_CMAC_128 ||
+ !wcid->cipher)
+ w0 |= FIELD_PREP(MT_WTBL_W0_KEY_IDX, keyidx);
+ } else {
+ if (!(wcid->cipher & ~BIT(cipher)))
+ w0 &= ~(MT_WTBL_W0_RX_KEY_VALID |
+ MT_WTBL_W0_KEY_IDX);
+ if (cipher == MT_CIPHER_BIP_CMAC_128)
+ w0 &= ~MT_WTBL_W0_RX_IK_VALID;
+ }
+ mt76_wr(dev, MT_WTBL_RICR0, w0);
+ mt76_wr(dev, MT_WTBL_RICR1, w1);
+
+ if (!mt7615_mac_wtbl_update(dev, wcid->idx,
+ MT_WTBL_UPDATE_RXINFO_UPDATE))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt7615_mac_wtbl_update_pk);
+
+void mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev,
+ struct mt76_wcid *wcid,
+ enum mt7615_cipher_type cipher,
+ enum set_key_cmd cmd)
+{
+ u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx);
+
+ if (cmd == SET_KEY) {
+ if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher)
+ mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE,
+ FIELD_PREP(MT_WTBL_W2_KEY_TYPE, cipher));
+ } else {
+ if (cipher != MT_CIPHER_BIP_CMAC_128 &&
+ wcid->cipher & BIT(MT_CIPHER_BIP_CMAC_128))
+ mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE,
+ FIELD_PREP(MT_WTBL_W2_KEY_TYPE,
+ MT_CIPHER_BIP_CMAC_128));
+ else if (!(wcid->cipher & ~BIT(cipher)))
+ mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE);
+ }
+}
+EXPORT_SYMBOL_GPL(mt7615_mac_wtbl_update_cipher);
+
+int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
+ struct mt76_wcid *wcid,
+ struct ieee80211_key_conf *key,
+ enum set_key_cmd cmd)
+{
+ enum mt7615_cipher_type cipher;
+ int err;
+
+ cipher = mt7615_mac_get_cipher(key->cipher);
+ if (cipher == MT_CIPHER_NONE)
+ return -EOPNOTSUPP;
+
+ spin_lock_bh(&dev->mt76.lock);
+
+ mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cmd);
+ err = mt7615_mac_wtbl_update_key(dev, wcid, key->key, key->keylen,
+ cipher, cmd);
+ if (err < 0)
+ goto out;
+
+ err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, key->keyidx,
+ cmd);
+ if (err < 0)
+ goto out;
+
+ if (cmd == SET_KEY)
+ wcid->cipher |= BIT(cipher);
+ else
+ wcid->cipher &= ~BIT(cipher);
+
+out:
+ spin_unlock_bh(&dev->mt76.lock);
+
+ return err;
+}
+
+static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
+ struct ieee80211_tx_info *info, __le32 *txs_data)
+{
+ struct ieee80211_supported_band *sband;
+ struct mt7615_rate_set *rs;
+ struct mt76_phy *mphy;
+ int first_idx = 0, last_idx;
+ int i, idx, count;
+ bool fixed_rate, ack_timeout;
+ bool ampdu, cck = false;
+ bool rs_idx;
+ u32 rate_set_tsf;
+ u32 final_rate, final_rate_flags, final_nss, txs;
+
+ txs = le32_to_cpu(txs_data[1]);
+ ampdu = txs & MT_TXS1_AMPDU;
+
+ txs = le32_to_cpu(txs_data[3]);
+ count = FIELD_GET(MT_TXS3_TX_COUNT, txs);
+ last_idx = FIELD_GET(MT_TXS3_LAST_TX_RATE, txs);
+
+ txs = le32_to_cpu(txs_data[0]);
+ fixed_rate = txs & MT_TXS0_FIXED_RATE;
+ final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
+ ack_timeout = txs & MT_TXS0_ACK_TIMEOUT;
+
+ if (!ampdu && (txs & MT_TXS0_RTS_TIMEOUT))
+ return false;
+
+ if (txs & MT_TXS0_QUEUE_TIMEOUT)
+ return false;
+
+ if (!ack_timeout)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ info->status.ampdu_len = 1;
+ info->status.ampdu_ack_len = !!(info->flags &
+ IEEE80211_TX_STAT_ACK);
+
+ if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU))
+ info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU;
+
+ first_idx = max_t(int, 0, last_idx - (count - 1) / MT7615_RATE_RETRY);
+
+ if (fixed_rate) {
+ info->status.rates[0].count = count;
+ i = 0;
+ goto out;
+ }
+
+ rate_set_tsf = READ_ONCE(sta->rate_set_tsf);
+ rs_idx = !((u32)(FIELD_GET(MT_TXS4_F0_TIMESTAMP, le32_to_cpu(txs_data[4])) -
+ rate_set_tsf) < 1000000);
+ rs_idx ^= rate_set_tsf & BIT(0);
+ rs = &sta->rateset[rs_idx];
+
+ if (!first_idx && rs->probe_rate.idx >= 0) {
+ info->status.rates[0] = rs->probe_rate;
+
+ spin_lock_bh(&dev->mt76.lock);
+ if (sta->rate_probe) {
+ struct mt7615_phy *phy = &dev->phy;
+
+ if (sta->wcid.ext_phy && dev->mt76.phy2)
+ phy = dev->mt76.phy2->priv;
+
+ mt7615_mac_set_rates(phy, sta, NULL, sta->rates);
+ }
+ spin_unlock_bh(&dev->mt76.lock);
+ } else {
+ info->status.rates[0] = rs->rates[first_idx / 2];
+ }
+ info->status.rates[0].count = 0;
+
+ for (i = 0, idx = first_idx; count && idx <= last_idx; idx++) {
+ struct ieee80211_tx_rate *cur_rate;
+ int cur_count;
+
+ cur_rate = &rs->rates[idx / 2];
+ cur_count = min_t(int, MT7615_RATE_RETRY, count);
+ count -= cur_count;
+
+ if (idx && (cur_rate->idx != info->status.rates[i].idx ||
+ cur_rate->flags != info->status.rates[i].flags)) {
+ i++;
+ if (i == ARRAY_SIZE(info->status.rates)) {
+ i--;
+ break;
+ }
+
+ info->status.rates[i] = *cur_rate;
+ info->status.rates[i].count = 0;
+ }
+
+ info->status.rates[i].count += cur_count;
+ }
+
+out:
+ final_rate_flags = info->status.rates[i].flags;
+
+ switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) {
+ case MT_PHY_TYPE_CCK:
+ cck = true;
+ fallthrough;
+ case MT_PHY_TYPE_OFDM:
+ mphy = &dev->mphy;
+ if (sta->wcid.ext_phy && dev->mt76.phy2)
+ mphy = dev->mt76.phy2;
+
+ if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
+ sband = &mphy->sband_5g.sband;
+ else
+ sband = &mphy->sband_2g.sband;
+ final_rate &= MT_TX_RATE_IDX;
+ final_rate = mt76_get_rate(&dev->mt76, sband, final_rate,
+ cck);
+ final_rate_flags = 0;
+ break;
+ case MT_PHY_TYPE_HT_GF:
+ case MT_PHY_TYPE_HT:
+ final_rate_flags |= IEEE80211_TX_RC_MCS;
+ final_rate &= MT_TX_RATE_IDX;
+ if (final_rate > 31)
+ return false;
+ break;
+ case MT_PHY_TYPE_VHT:
+ final_nss = FIELD_GET(MT_TX_RATE_NSS, final_rate);
+
+ if ((final_rate & MT_TX_RATE_STBC) && final_nss)
+ final_nss--;
+
+ final_rate_flags |= IEEE80211_TX_RC_VHT_MCS;
+ final_rate = (final_rate & MT_TX_RATE_IDX) | (final_nss << 4);
+ break;
+ default:
+ return false;
+ }
+
+ info->status.rates[i].idx = final_rate;
+ info->status.rates[i].flags = final_rate_flags;
+
+ return true;
+}
+
+static bool mt7615_mac_add_txs_skb(struct mt7615_dev *dev,
+ struct mt7615_sta *sta, int pid,
+ __le32 *txs_data)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ struct sk_buff_head list;
+ struct sk_buff *skb;
+
+ if (pid < MT_PACKET_ID_FIRST)
+ return false;
+
+ trace_mac_txdone(mdev, sta->wcid.idx, pid);
+
+ mt76_tx_status_lock(mdev, &list);
+ skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list);
+ if (skb) {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ if (!mt7615_fill_txs(dev, sta, info, txs_data)) {
+ ieee80211_tx_info_clear_status(info);
+ info->status.rates[0].idx = -1;
+ }
+
+ mt76_tx_status_skb_done(mdev, skb, &list);
+ }
+ mt76_tx_status_unlock(mdev, &list);
+
+ return !!skb;
+}
+
+static void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
+{
+ struct ieee80211_tx_info info = {};
+ struct ieee80211_sta *sta = NULL;
+ struct mt7615_sta *msta = NULL;
+ struct mt76_wcid *wcid;
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ __le32 *txs_data = data;
+ u32 txs;
+ u8 wcidx;
+ u8 pid;
+
+ txs = le32_to_cpu(txs_data[0]);
+ pid = FIELD_GET(MT_TXS0_PID, txs);
+ txs = le32_to_cpu(txs_data[2]);
+ wcidx = FIELD_GET(MT_TXS2_WCID, txs);
+
+ if (pid == MT_PACKET_ID_NO_ACK)
+ return;
+
+ if (wcidx >= MT7615_WTBL_SIZE)
+ return;
+
+ rcu_read_lock();
+
+ wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
+ if (!wcid)
+ goto out;
+
+ msta = container_of(wcid, struct mt7615_sta, wcid);
+ sta = wcid_to_sta(wcid);
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (list_empty(&msta->poll_list))
+ list_add_tail(&msta->poll_list, &dev->sta_poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+
+ if (mt7615_mac_add_txs_skb(dev, msta, pid, txs_data))
+ goto out;
+
+ if (wcidx >= MT7615_WTBL_STA || !sta)
+ goto out;
+
+ if (wcid->ext_phy && dev->mt76.phy2)
+ mphy = dev->mt76.phy2;
+
+ if (mt7615_fill_txs(dev, msta, &info, txs_data))
+ ieee80211_tx_status_noskb(mphy->hw, sta, &info);
+
+out:
+ rcu_read_unlock();
+}
+
+static void
+mt7615_mac_tx_free_token(struct mt7615_dev *dev, u16 token)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ struct mt76_txwi_cache *txwi;
+ __le32 *txwi_data;
+ u32 val;
+ u8 wcid;
+
+ trace_mac_tx_free(dev, token);
+
+ spin_lock_bh(&dev->token_lock);
+ txwi = idr_remove(&dev->token, token);
+ spin_unlock_bh(&dev->token_lock);
+
+ if (!txwi)
+ return;
+
+ txwi_data = (__le32 *)mt76_get_txwi_ptr(mdev, txwi);
+ val = le32_to_cpu(txwi_data[1]);
+ wcid = FIELD_GET(MT_TXD1_WLAN_IDX, val);
+
+ mt7615_txp_skb_unmap(mdev, txwi);
+ if (txwi->skb) {
+ mt76_tx_complete_skb(mdev, wcid, txwi->skb);
+ txwi->skb = NULL;
+ }
+
+ mt76_put_txwi(mdev, txwi);
+}
+
+static void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
+{
+ struct mt7615_tx_free *free = (struct mt7615_tx_free *)skb->data;
+ u8 i, count;
+
+ mt76_queue_tx_cleanup(dev, MT_TXQ_PSD, false);
+ if (is_mt7615(&dev->mt76)) {
+ mt76_queue_tx_cleanup(dev, MT_TXQ_BE, false);
+ } else {
+ for (i = 0; i < IEEE80211_NUM_ACS; i++)
+ mt76_queue_tx_cleanup(dev, i, false);
+ }
+
+ count = FIELD_GET(MT_TX_FREE_MSDU_ID_CNT, le16_to_cpu(free->ctrl));
+ if (is_mt7615(&dev->mt76)) {
+ __le16 *token = &free->token[0];
+
+ for (i = 0; i < count; i++)
+ mt7615_mac_tx_free_token(dev, le16_to_cpu(token[i]));
+ } else {
+ __le32 *token = (__le32 *)&free->token[0];
+
+ for (i = 0; i < count; i++)
+ mt7615_mac_tx_free_token(dev, le32_to_cpu(token[i]));
+ }
+
+ dev_kfree_skb(skb);
+
+ if (test_bit(MT76_STATE_PM, &dev->phy.mt76->state))
+ return;
+
+ rcu_read_lock();
+ mt7615_mac_sta_poll(dev);
+ rcu_read_unlock();
+
+ mt7615_pm_power_save_sched(dev);
+ mt76_worker_schedule(&dev->mt76.tx_worker);
+}
+
+void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ struct sk_buff *skb)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ __le32 *rxd = (__le32 *)skb->data;
+ __le32 *end = (__le32 *)&skb->data[skb->len];
+ enum rx_pkt_type type;
+ u16 flag;
+
+ type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
+ flag = FIELD_GET(MT_RXD0_PKT_FLAG, le32_to_cpu(rxd[0]));
+ if (type == PKT_TYPE_RX_EVENT && flag == 0x1)
+ type = PKT_TYPE_NORMAL_MCU;
+
+ switch (type) {
+ case PKT_TYPE_TXS:
+ for (rxd++; rxd + 7 <= end; rxd += 7)
+ mt7615_mac_add_txs(dev, rxd);
+ dev_kfree_skb(skb);
+ break;
+ case PKT_TYPE_TXRX_NOTIFY:
+ mt7615_mac_tx_free(dev, skb);
+ break;
+ case PKT_TYPE_RX_EVENT:
+ mt7615_mcu_rx_event(dev, skb);
+ break;
+ case PKT_TYPE_NORMAL_MCU:
+ case PKT_TYPE_NORMAL:
+ if (!mt7615_mac_fill_rx(dev, skb)) {
+ mt76_rx(&dev->mt76, q, skb);
+ return;
+ }
+ fallthrough;
+ default:
+ dev_kfree_skb(skb);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(mt7615_queue_rx_skb);
+
+static void
+mt7615_mac_set_sensitivity(struct mt7615_phy *phy, int val, bool ofdm)
+{
+ struct mt7615_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+
+ if (is_mt7663(&dev->mt76)) {
+ if (ofdm)
+ mt76_rmw(dev, MT7663_WF_PHY_MIN_PRI_PWR(ext_phy),
+ MT_WF_PHY_PD_OFDM_MASK(0),
+ MT_WF_PHY_PD_OFDM(0, val));
+ else
+ mt76_rmw(dev, MT7663_WF_PHY_RXTD_CCK_PD(ext_phy),
+ MT_WF_PHY_PD_CCK_MASK(ext_phy),
+ MT_WF_PHY_PD_CCK(ext_phy, val));
+ return;
+ }
+
+ if (ofdm)
+ mt76_rmw(dev, MT_WF_PHY_MIN_PRI_PWR(ext_phy),
+ MT_WF_PHY_PD_OFDM_MASK(ext_phy),
+ MT_WF_PHY_PD_OFDM(ext_phy, val));
+ else
+ mt76_rmw(dev, MT_WF_PHY_RXTD_CCK_PD(ext_phy),
+ MT_WF_PHY_PD_CCK_MASK(ext_phy),
+ MT_WF_PHY_PD_CCK(ext_phy, val));
+}
+
+static void
+mt7615_mac_set_default_sensitivity(struct mt7615_phy *phy)
+{
+ /* ofdm */
+ mt7615_mac_set_sensitivity(phy, 0x13c, true);
+ /* cck */
+ mt7615_mac_set_sensitivity(phy, 0x92, false);
+
+ phy->ofdm_sensitivity = -98;
+ phy->cck_sensitivity = -110;
+ phy->last_cca_adj = jiffies;
+}
+
+void mt7615_mac_set_scs(struct mt7615_phy *phy, bool enable)
+{
+ struct mt7615_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+ u32 reg, mask;
+
+ mt7615_mutex_acquire(dev);
+
+ if (phy->scs_en == enable)
+ goto out;
+
+ if (is_mt7663(&dev->mt76)) {
+ reg = MT7663_WF_PHY_MIN_PRI_PWR(ext_phy);
+ mask = MT_WF_PHY_PD_BLK(0);
+ } else {
+ reg = MT_WF_PHY_MIN_PRI_PWR(ext_phy);
+ mask = MT_WF_PHY_PD_BLK(ext_phy);
+ }
+
+ if (enable) {
+ mt76_set(dev, reg, mask);
+ if (is_mt7622(&dev->mt76)) {
+ mt76_set(dev, MT_MIB_M0_MISC_CR(0), 0x7 << 8);
+ mt76_set(dev, MT_MIB_M0_MISC_CR(0), 0x7);
+ }
+ } else {
+ mt76_clear(dev, reg, mask);
+ }
+
+ mt7615_mac_set_default_sensitivity(phy);
+ phy->scs_en = enable;
+
+out:
+ mt7615_mutex_release(dev);
+}
+
+void mt7615_mac_enable_nf(struct mt7615_dev *dev, bool ext_phy)
+{
+ u32 rxtd, reg;
+
+ if (is_mt7663(&dev->mt76))
+ reg = MT7663_WF_PHY_R0_PHYMUX_5;
+ else
+ reg = MT_WF_PHY_R0_PHYMUX_5(ext_phy);
+
+ if (ext_phy)
+ rxtd = MT_WF_PHY_RXTD2(10);
+ else
+ rxtd = MT_WF_PHY_RXTD(12);
+
+ mt76_set(dev, rxtd, BIT(18) | BIT(29));
+ mt76_set(dev, reg, 0x5 << 12);
+}
+
+void mt7615_mac_cca_stats_reset(struct mt7615_phy *phy)
+{
+ struct mt7615_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+ u32 reg;
+
+ if (is_mt7663(&dev->mt76))
+ reg = MT7663_WF_PHY_R0_PHYMUX_5;
+ else
+ reg = MT_WF_PHY_R0_PHYMUX_5(ext_phy);
+
+ /* reset PD and MDRDY counters */
+ mt76_clear(dev, reg, GENMASK(22, 20));
+ mt76_set(dev, reg, BIT(22) | BIT(20));
+}
+
+static void
+mt7615_mac_adjust_sensitivity(struct mt7615_phy *phy,
+ u32 rts_err_rate, bool ofdm)
+{
+ struct mt7615_dev *dev = phy->dev;
+ int false_cca = ofdm ? phy->false_cca_ofdm : phy->false_cca_cck;
+ bool ext_phy = phy != &dev->phy;
+ s16 def_th = ofdm ? -98 : -110;
+ bool update = false;
+ s8 *sensitivity;
+ int signal;
+
+ sensitivity = ofdm ? &phy->ofdm_sensitivity : &phy->cck_sensitivity;
+ signal = mt76_get_min_avg_rssi(&dev->mt76, ext_phy);
+ if (!signal) {
+ mt7615_mac_set_default_sensitivity(phy);
+ return;
+ }
+
+ signal = min(signal, -72);
+ if (false_cca > 500) {
+ if (rts_err_rate > MT_FRAC(40, 100))
+ return;
+
+ /* decrease coverage */
+ if (*sensitivity == def_th && signal > -90) {
+ *sensitivity = -90;
+ update = true;
+ } else if (*sensitivity + 2 < signal) {
+ *sensitivity += 2;
+ update = true;
+ }
+ } else if ((false_cca > 0 && false_cca < 50) ||
+ rts_err_rate > MT_FRAC(60, 100)) {
+ /* increase coverage */
+ if (*sensitivity - 2 >= def_th) {
+ *sensitivity -= 2;
+ update = true;
+ }
+ }
+
+ if (*sensitivity > signal) {
+ *sensitivity = signal;
+ update = true;
+ }
+
+ if (update) {
+ u16 val = ofdm ? *sensitivity * 2 + 512 : *sensitivity + 256;
+
+ mt7615_mac_set_sensitivity(phy, val, ofdm);
+ phy->last_cca_adj = jiffies;
+ }
+}
+
+static void
+mt7615_mac_scs_check(struct mt7615_phy *phy)
+{
+ struct mt7615_dev *dev = phy->dev;
+ struct mib_stats *mib = &phy->mib;
+ u32 val, rts_err_rate = 0;
+ u32 mdrdy_cck, mdrdy_ofdm, pd_cck, pd_ofdm;
+ bool ext_phy = phy != &dev->phy;
+
+ if (!phy->scs_en)
+ return;
+
+ if (is_mt7663(&dev->mt76))
+ val = mt76_rr(dev, MT7663_WF_PHY_R0_PHYCTRL_STS0(ext_phy));
+ else
+ val = mt76_rr(dev, MT_WF_PHY_R0_PHYCTRL_STS0(ext_phy));
+ pd_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_PD_CCK, val);
+ pd_ofdm = FIELD_GET(MT_WF_PHYCTRL_STAT_PD_OFDM, val);
+
+ if (is_mt7663(&dev->mt76))
+ val = mt76_rr(dev, MT7663_WF_PHY_R0_PHYCTRL_STS5(ext_phy));
+ else
+ val = mt76_rr(dev, MT_WF_PHY_R0_PHYCTRL_STS5(ext_phy));
+ mdrdy_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_CCK, val);
+ mdrdy_ofdm = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_OFDM, val);
+
+ phy->false_cca_ofdm = pd_ofdm - mdrdy_ofdm;
+ phy->false_cca_cck = pd_cck - mdrdy_cck;
+ mt7615_mac_cca_stats_reset(phy);
+
+ if (mib->rts_cnt + mib->rts_retries_cnt)
+ rts_err_rate = MT_FRAC(mib->rts_retries_cnt,
+ mib->rts_cnt + mib->rts_retries_cnt);
+
+ /* cck */
+ mt7615_mac_adjust_sensitivity(phy, rts_err_rate, false);
+ /* ofdm */
+ mt7615_mac_adjust_sensitivity(phy, rts_err_rate, true);
+
+ if (time_after(jiffies, phy->last_cca_adj + 10 * HZ))
+ mt7615_mac_set_default_sensitivity(phy);
+}
+
+static u8
+mt7615_phy_get_nf(struct mt7615_dev *dev, int idx)
+{
+ static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
+ u32 reg, val, sum = 0, n = 0;
+ int i;
+
+ if (is_mt7663(&dev->mt76))
+ reg = MT7663_WF_PHY_RXTD(20);
+ else
+ reg = idx ? MT_WF_PHY_RXTD2(17) : MT_WF_PHY_RXTD(20);
+
+ for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
+ val = mt76_rr(dev, reg);
+ sum += val * nf_power[i];
+ n += val;
+ }
+
+ if (!n)
+ return 0;
+
+ return sum / n;
+}
+
+static void
+mt7615_phy_update_channel(struct mt76_phy *mphy, int idx)
+{
+ struct mt7615_dev *dev = container_of(mphy->dev, struct mt7615_dev, mt76);
+ struct mt7615_phy *phy = mphy->priv;
+ struct mt76_channel_state *state;
+ u64 busy_time, tx_time, rx_time, obss_time;
+ u32 obss_reg = idx ? MT_WF_RMAC_MIB_TIME6 : MT_WF_RMAC_MIB_TIME5;
+ int nf;
+
+ busy_time = mt76_get_field(dev, MT_MIB_SDR9(idx),
+ MT_MIB_SDR9_BUSY_MASK);
+ tx_time = mt76_get_field(dev, MT_MIB_SDR36(idx),
+ MT_MIB_SDR36_TXTIME_MASK);
+ rx_time = mt76_get_field(dev, MT_MIB_SDR37(idx),
+ MT_MIB_SDR37_RXTIME_MASK);
+ obss_time = mt76_get_field(dev, obss_reg, MT_MIB_OBSSTIME_MASK);
+
+ nf = mt7615_phy_get_nf(dev, idx);
+ if (!phy->noise)
+ phy->noise = nf << 4;
+ else if (nf)
+ phy->noise += nf - (phy->noise >> 4);
+
+ state = mphy->chan_state;
+ state->cc_busy += busy_time;
+ state->cc_tx += tx_time;
+ state->cc_rx += rx_time + obss_time;
+ state->cc_bss_rx += rx_time;
+ state->noise = -(phy->noise >> 4);
+}
+
+static void __mt7615_update_channel(struct mt7615_dev *dev)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+
+ mt7615_phy_update_channel(&mdev->phy, 0);
+ if (mdev->phy2)
+ mt7615_phy_update_channel(mdev->phy2, 1);
+
+ /* reset obss airtime */
+ mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
+}
+
+void mt7615_update_channel(struct mt76_dev *mdev)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+
+ if (mt7615_pm_wake(dev))
+ return;
+
+ __mt7615_update_channel(dev);
+ mt7615_pm_power_save_sched(dev);
+}
+EXPORT_SYMBOL_GPL(mt7615_update_channel);
+
+static void mt7615_update_survey(struct mt7615_dev *dev)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ ktime_t cur_time;
+
+ __mt7615_update_channel(dev);
+ cur_time = ktime_get_boottime();
+
+ mt76_update_survey_active_time(&mdev->phy, cur_time);
+ if (mdev->phy2)
+ mt76_update_survey_active_time(mdev->phy2, cur_time);
+}
+
+static void
+mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
+{
+ struct mt7615_dev *dev = phy->dev;
+ struct mib_stats *mib = &phy->mib;
+ bool ext_phy = phy != &dev->phy;
+ int i, aggr;
+ u32 val, val2;
+
+ mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
+ MT_MIB_SDR3_FCS_ERR_MASK);
+
+ val = mt76_get_field(dev, MT_MIB_SDR14(ext_phy),
+ MT_MIB_AMPDU_MPDU_COUNT);
+ if (val) {
+ val2 = mt76_get_field(dev, MT_MIB_SDR15(ext_phy),
+ MT_MIB_AMPDU_ACK_COUNT);
+ mib->aggr_per = 1000 * (val - val2) / val;
+ }
+
+ aggr = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
+ for (i = 0; i < 4; i++) {
+ val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i));
+ mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
+ mib->ack_fail_cnt += FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK,
+ val);
+
+ val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i));
+ mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
+ mib->rts_retries_cnt += FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK,
+ val);
+
+ val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
+ dev->mt76.aggr_stats[aggr++] += val & 0xffff;
+ dev->mt76.aggr_stats[aggr++] += val >> 16;
+ }
+}
+
+void mt7615_pm_wake_work(struct work_struct *work)
+{
+ struct mt7615_dev *dev;
+ struct mt76_phy *mphy;
+ int i;
+
+ dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev,
+ pm.wake_work);
+ mphy = dev->phy.mt76;
+
+ if (mt7615_mcu_set_drv_ctrl(dev)) {
+ dev_err(mphy->dev->dev, "failed to wake device\n");
+ goto out;
+ }
+
+ spin_lock_bh(&dev->pm.txq_lock);
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ struct mt7615_sta *msta = dev->pm.tx_q[i].msta;
+ struct ieee80211_sta *sta = NULL;
+ struct mt76_wcid *wcid;
+
+ if (!dev->pm.tx_q[i].skb)
+ continue;
+
+ wcid = msta ? &msta->wcid : &dev->mt76.global_wcid;
+ if (msta && wcid->sta)
+ sta = container_of((void *)msta, struct ieee80211_sta,
+ drv_priv);
+
+ mt76_tx(mphy, sta, wcid, dev->pm.tx_q[i].skb);
+ dev->pm.tx_q[i].skb = NULL;
+ }
+ spin_unlock_bh(&dev->pm.txq_lock);
+
+ mt76_worker_schedule(&dev->mt76.tx_worker);
+
+out:
+ ieee80211_wake_queues(mphy->hw);
+ complete_all(&dev->pm.wake_cmpl);
+}
+
+int mt7615_pm_wake(struct mt7615_dev *dev)
+{
+ struct mt76_phy *mphy = dev->phy.mt76;
+
+ if (!mt7615_firmware_offload(dev))
+ return 0;
+
+ if (!mt76_is_mmio(mphy->dev))
+ return 0;
+
+ if (!test_bit(MT76_STATE_PM, &mphy->state))
+ return 0;
+
+ if (test_bit(MT76_HW_SCANNING, &mphy->state) ||
+ test_bit(MT76_HW_SCHED_SCANNING, &mphy->state))
+ return 0;
+
+ if (queue_work(dev->mt76.wq, &dev->pm.wake_work))
+ reinit_completion(&dev->pm.wake_cmpl);
+
+ if (!wait_for_completion_timeout(&dev->pm.wake_cmpl, 3 * HZ)) {
+ ieee80211_wake_queues(mphy->hw);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt7615_pm_wake);
+
+void mt7615_pm_power_save_sched(struct mt7615_dev *dev)
+{
+ struct mt76_phy *mphy = dev->phy.mt76;
+
+ if (!mt7615_firmware_offload(dev))
+ return;
+
+ if (!mt76_is_mmio(mphy->dev))
+ return;
+
+ if (!dev->pm.enable || !test_bit(MT76_STATE_RUNNING, &mphy->state))
+ return;
+
+ dev->pm.last_activity = jiffies;
+
+ if (test_bit(MT76_HW_SCANNING, &mphy->state) ||
+ test_bit(MT76_HW_SCHED_SCANNING, &mphy->state))
+ return;
+
+ if (!test_bit(MT76_STATE_PM, &mphy->state))
+ queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work,
+ dev->pm.idle_timeout);
+}
+EXPORT_SYMBOL_GPL(mt7615_pm_power_save_sched);
+
+void mt7615_pm_power_save_work(struct work_struct *work)
+{
+ struct mt7615_dev *dev;
+ unsigned long delta;
+
+ dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev,
+ pm.ps_work.work);
+
+ delta = dev->pm.idle_timeout;
+ if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
+ delta = dev->pm.last_activity + delta - jiffies;
+ goto out;
+ }
+
+ if (!mt7615_mcu_set_fw_ctrl(dev))
+ return;
+out:
+ queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta);
+}
+
+static void
+mt7615_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct mt7615_phy *phy = priv;
+ struct mt7615_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+
+ if (mt7615_mcu_set_bss_pm(dev, vif, dev->pm.enable))
+ return;
+
+ if (dev->pm.enable) {
+ vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
+ mt76_set(dev, MT_WF_RFCR(ext_phy),
+ MT_WF_RFCR_DROP_OTHER_BEACON);
+ } else {
+ vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
+ mt76_clear(dev, MT_WF_RFCR(ext_phy),
+ MT_WF_RFCR_DROP_OTHER_BEACON);
+ }
+}
+
+int mt7615_pm_set_enable(struct mt7615_dev *dev, bool enable)
+{
+ struct mt76_phy *mphy = dev->phy.mt76;
+
+ if (!mt7615_firmware_offload(dev) || !mt76_is_mmio(&dev->mt76))
+ return -EOPNOTSUPP;
+
+ mt7615_mutex_acquire(dev);
+
+ if (dev->pm.enable == enable)
+ goto out;
+
+ dev->pm.enable = enable;
+ ieee80211_iterate_active_interfaces(mphy->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7615_pm_interface_iter, mphy->priv);
+out:
+ mt7615_mutex_release(dev);
+
+ return 0;
+}
+
+void mt7615_mac_work(struct work_struct *work)
+{
+ struct mt7615_phy *phy;
+ struct mt76_dev *mdev;
+
+ phy = (struct mt7615_phy *)container_of(work, struct mt7615_phy,
+ mac_work.work);
+ mdev = &phy->dev->mt76;
+
+ mt7615_mutex_acquire(phy->dev);
+
+ mt7615_update_survey(phy->dev);
+ if (++phy->mac_work_count == 5) {
+ phy->mac_work_count = 0;
+
+ mt7615_mac_update_mib_stats(phy);
+ mt7615_mac_scs_check(phy);
+ }
+
+ mt7615_mutex_release(phy->dev);
+
+ mt76_tx_status_check(mdev, NULL, false);
+ ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mac_work,
+ MT7615_WATCHDOG_TIME);
+}
+
+static bool
+mt7615_wait_reset_state(struct mt7615_dev *dev, u32 state)
+{
+ bool ret;
+
+ ret = wait_event_timeout(dev->reset_wait,
+ (READ_ONCE(dev->reset_state) & state),
+ MT7615_RESET_TIMEOUT);
+ WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
+ return ret;
+}
+
+static void
+mt7615_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct ieee80211_hw *hw = priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+
+ mt7615_mcu_add_beacon(dev, hw, vif, vif->bss_conf.enable_beacon);
+}
+
+static void
+mt7615_update_beacons(struct mt7615_dev *dev)
+{
+ ieee80211_iterate_active_interfaces(dev->mt76.hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7615_update_vif_beacon, dev->mt76.hw);
+
+ if (!dev->mt76.phy2)
+ return;
+
+ ieee80211_iterate_active_interfaces(dev->mt76.phy2->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7615_update_vif_beacon, dev->mt76.phy2->hw);
+}
+
+void mt7615_dma_reset(struct mt7615_dev *dev)
+{
+ int i;
+
+ mt76_clear(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
+ MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
+ usleep_range(1000, 2000);
+
+ for (i = 0; i < __MT_TXQ_MAX; i++)
+ mt76_queue_tx_cleanup(dev, i, true);
+
+ mt76_for_each_q_rx(&dev->mt76, i) {
+ mt76_queue_rx_reset(dev, i);
+ }
+
+ mt76_set(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
+ MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
+}
+EXPORT_SYMBOL_GPL(mt7615_dma_reset);
+
+void mt7615_tx_token_put(struct mt7615_dev *dev)
+{
+ struct mt76_txwi_cache *txwi;
+ int id;
+
+ spin_lock_bh(&dev->token_lock);
+ idr_for_each_entry(&dev->token, txwi, id) {
+ mt7615_txp_skb_unmap(&dev->mt76, txwi);
+ if (txwi->skb) {
+ struct ieee80211_hw *hw;
+
+ hw = mt76_tx_status_get_hw(&dev->mt76, txwi->skb);
+ ieee80211_free_txskb(hw, txwi->skb);
+ }
+ mt76_put_txwi(&dev->mt76, txwi);
+ }
+ spin_unlock_bh(&dev->token_lock);
+ idr_destroy(&dev->token);
+}
+EXPORT_SYMBOL_GPL(mt7615_tx_token_put);
+
+void mt7615_mac_reset_work(struct work_struct *work)
+{
+ struct mt7615_phy *phy2;
+ struct mt76_phy *ext_phy;
+ struct mt7615_dev *dev;
+
+ dev = container_of(work, struct mt7615_dev, reset_work);
+ ext_phy = dev->mt76.phy2;
+ phy2 = ext_phy ? ext_phy->priv : NULL;
+
+ if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_PDMA))
+ return;
+
+ ieee80211_stop_queues(mt76_hw(dev));
+ if (ext_phy)
+ ieee80211_stop_queues(ext_phy->hw);
+
+ set_bit(MT76_RESET, &dev->mphy.state);
+ set_bit(MT76_MCU_RESET, &dev->mphy.state);
+ wake_up(&dev->mt76.mcu.wait);
+ cancel_delayed_work_sync(&dev->phy.mac_work);
+ del_timer_sync(&dev->phy.roc_timer);
+ cancel_work_sync(&dev->phy.roc_work);
+ if (phy2) {
+ cancel_delayed_work_sync(&phy2->mac_work);
+ del_timer_sync(&phy2->roc_timer);
+ cancel_work_sync(&phy2->roc_work);
+ }
+
+ /* lock/unlock all queues to ensure that no tx is pending */
+ mt76_txq_schedule_all(&dev->mphy);
+ if (ext_phy)
+ mt76_txq_schedule_all(ext_phy);
+
+ mt76_worker_disable(&dev->mt76.tx_worker);
+ napi_disable(&dev->mt76.napi[0]);
+ napi_disable(&dev->mt76.napi[1]);
+ napi_disable(&dev->mt76.tx_napi);
+
+ mt7615_mutex_acquire(dev);
+
+ mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_PDMA_STOPPED);
+
+ mt7615_tx_token_put(dev);
+ idr_init(&dev->token);
+
+ if (mt7615_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
+ mt7615_dma_reset(dev);
+
+ mt76_wr(dev, MT_WPDMA_MEM_RNG_ERR, 0);
+
+ mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_PDMA_INIT);
+ mt7615_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
+ }
+
+ clear_bit(MT76_MCU_RESET, &dev->mphy.state);
+ clear_bit(MT76_RESET, &dev->mphy.state);
+
+ mt76_worker_enable(&dev->mt76.tx_worker);
+ napi_enable(&dev->mt76.tx_napi);
+ napi_schedule(&dev->mt76.tx_napi);
+
+ napi_enable(&dev->mt76.napi[0]);
+ napi_schedule(&dev->mt76.napi[0]);
+
+ napi_enable(&dev->mt76.napi[1]);
+ napi_schedule(&dev->mt76.napi[1]);
+
+ ieee80211_wake_queues(mt76_hw(dev));
+ if (ext_phy)
+ ieee80211_wake_queues(ext_phy->hw);
+
+ mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
+ mt7615_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);
+
+ mt7615_update_beacons(dev);
+
+ mt7615_mutex_release(dev);
+
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->phy.mac_work,
+ MT7615_WATCHDOG_TIME);
+ if (phy2)
+ ieee80211_queue_delayed_work(ext_phy->hw, &phy2->mac_work,
+ MT7615_WATCHDOG_TIME);
+
+}
+
+static void mt7615_dfs_stop_radar_detector(struct mt7615_phy *phy)
+{
+ struct mt7615_dev *dev = phy->dev;
+
+ if (phy->rdd_state & BIT(0))
+ mt7615_mcu_rdd_cmd(dev, RDD_STOP, 0, MT_RX_SEL0, 0);
+ if (phy->rdd_state & BIT(1))
+ mt7615_mcu_rdd_cmd(dev, RDD_STOP, 1, MT_RX_SEL0, 0);
+}
+
+static int mt7615_dfs_start_rdd(struct mt7615_dev *dev, int chain)
+{
+ int err;
+
+ err = mt7615_mcu_rdd_cmd(dev, RDD_START, chain, MT_RX_SEL0, 0);
+ if (err < 0)
+ return err;
+
+ return mt7615_mcu_rdd_cmd(dev, RDD_DET_MODE, chain,
+ MT_RX_SEL0, 1);
+}
+
+static int mt7615_dfs_start_radar_detector(struct mt7615_phy *phy)
+{
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+ struct mt7615_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+ int err;
+
+ /* start CAC */
+ err = mt7615_mcu_rdd_cmd(dev, RDD_CAC_START, ext_phy, MT_RX_SEL0, 0);
+ if (err < 0)
+ return err;
+
+ err = mt7615_dfs_start_rdd(dev, ext_phy);
+ if (err < 0)
+ return err;
+
+ phy->rdd_state |= BIT(ext_phy);
+
+ if (chandef->width == NL80211_CHAN_WIDTH_160 ||
+ chandef->width == NL80211_CHAN_WIDTH_80P80) {
+ err = mt7615_dfs_start_rdd(dev, 1);
+ if (err < 0)
+ return err;
+
+ phy->rdd_state |= BIT(1);
+ }
+
+ return 0;
+}
+
+static int
+mt7615_dfs_init_radar_specs(struct mt7615_phy *phy)
+{
+ const struct mt7615_dfs_radar_spec *radar_specs;
+ struct mt7615_dev *dev = phy->dev;
+ int err, i;
+
+ switch (dev->mt76.region) {
+ case NL80211_DFS_FCC:
+ radar_specs = &fcc_radar_specs;
+ err = mt7615_mcu_set_fcc5_lpn(dev, 8);
+ if (err < 0)
+ return err;
+ break;
+ case NL80211_DFS_ETSI:
+ radar_specs = &etsi_radar_specs;
+ break;
+ case NL80211_DFS_JP:
+ radar_specs = &jp_radar_specs;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
+ err = mt7615_mcu_set_radar_th(dev, i,
+ &radar_specs->radar_pattern[i]);
+ if (err < 0)
+ return err;
+ }
+
+ return mt7615_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
+}
+
+int mt7615_dfs_init_radar_detector(struct mt7615_phy *phy)
+{
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+ struct mt7615_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+ int err;
+
+ if (is_mt7663(&dev->mt76))
+ return 0;
+
+ if (dev->mt76.region == NL80211_DFS_UNSET) {
+ phy->dfs_state = -1;
+ if (phy->rdd_state)
+ goto stop;
+
+ return 0;
+ }
+
+ if (test_bit(MT76_SCANNING, &phy->mt76->state))
+ return 0;
+
+ if (phy->dfs_state == chandef->chan->dfs_state)
+ return 0;
+
+ err = mt7615_dfs_init_radar_specs(phy);
+ if (err < 0) {
+ phy->dfs_state = -1;
+ goto stop;
+ }
+
+ phy->dfs_state = chandef->chan->dfs_state;
+
+ if (chandef->chan->flags & IEEE80211_CHAN_RADAR) {
+ if (chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
+ return mt7615_dfs_start_radar_detector(phy);
+
+ return mt7615_mcu_rdd_cmd(dev, RDD_CAC_END, ext_phy,
+ MT_RX_SEL0, 0);
+ }
+
+stop:
+ err = mt7615_mcu_rdd_cmd(dev, RDD_NORMAL_START, ext_phy, MT_RX_SEL0, 0);
+ if (err < 0)
+ return err;
+
+ mt7615_dfs_stop_radar_detector(phy);
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
new file mode 100644
index 000000000..169f4e17b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
@@ -0,0 +1,438 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2019 MediaTek Inc. */
+
+#ifndef __MT7615_MAC_H
+#define __MT7615_MAC_H
+
+#define MT_CT_PARSE_LEN 72
+#define MT_CT_DMA_BUF_NUM 2
+
+#define MT_RXD0_LENGTH GENMASK(15, 0)
+#define MT_RXD0_PKT_FLAG GENMASK(19, 16)
+#define MT_RXD0_PKT_TYPE GENMASK(31, 29)
+
+#define MT_RXD0_NORMAL_ETH_TYPE_OFS GENMASK(22, 16)
+#define MT_RXD0_NORMAL_IP_SUM BIT(23)
+#define MT_RXD0_NORMAL_UDP_TCP_SUM BIT(24)
+#define MT_RXD0_NORMAL_GROUP_1 BIT(25)
+#define MT_RXD0_NORMAL_GROUP_2 BIT(26)
+#define MT_RXD0_NORMAL_GROUP_3 BIT(27)
+#define MT_RXD0_NORMAL_GROUP_4 BIT(28)
+
+enum rx_pkt_type {
+ PKT_TYPE_TXS,
+ PKT_TYPE_TXRXV,
+ PKT_TYPE_NORMAL,
+ PKT_TYPE_RX_DUP_RFB,
+ PKT_TYPE_RX_TMR,
+ PKT_TYPE_RETRIEVE,
+ PKT_TYPE_TXRX_NOTIFY,
+ PKT_TYPE_RX_EVENT,
+ PKT_TYPE_NORMAL_MCU,
+};
+
+#define MT_RXD1_NORMAL_BSSID GENMASK(31, 26)
+#define MT_RXD1_NORMAL_PAYLOAD_FORMAT GENMASK(25, 24)
+#define MT_RXD1_NORMAL_HDR_TRANS BIT(23)
+#define MT_RXD1_NORMAL_HDR_OFFSET BIT(22)
+#define MT_RXD1_NORMAL_MAC_HDR_LEN GENMASK(21, 16)
+#define MT_RXD1_NORMAL_CH_FREQ GENMASK(15, 8)
+#define MT_RXD1_NORMAL_KEY_ID GENMASK(7, 6)
+#define MT_RXD1_NORMAL_BEACON_UC BIT(5)
+#define MT_RXD1_NORMAL_BEACON_MC BIT(4)
+#define MT_RXD1_NORMAL_BF_REPORT BIT(3)
+#define MT_RXD1_NORMAL_ADDR_TYPE GENMASK(2, 1)
+#define MT_RXD1_NORMAL_BCAST GENMASK(2, 1)
+#define MT_RXD1_NORMAL_MCAST BIT(2)
+#define MT_RXD1_NORMAL_U2M BIT(1)
+#define MT_RXD1_NORMAL_HTC_VLD BIT(0)
+
+#define MT_RXD2_NORMAL_NON_AMPDU BIT(31)
+#define MT_RXD2_NORMAL_NON_AMPDU_SUB BIT(30)
+#define MT_RXD2_NORMAL_NDATA BIT(29)
+#define MT_RXD2_NORMAL_NULL_FRAME BIT(28)
+#define MT_RXD2_NORMAL_FRAG BIT(27)
+#define MT_RXD2_NORMAL_INT_FRAME BIT(26)
+#define MT_RXD2_NORMAL_HDR_TRANS_ERROR BIT(25)
+#define MT_RXD2_NORMAL_MAX_LEN_ERROR BIT(24)
+#define MT_RXD2_NORMAL_AMSDU_ERR BIT(23)
+#define MT_RXD2_NORMAL_LEN_MISMATCH BIT(22)
+#define MT_RXD2_NORMAL_TKIP_MIC_ERR BIT(21)
+#define MT_RXD2_NORMAL_ICV_ERR BIT(20)
+#define MT_RXD2_NORMAL_CLM BIT(19)
+#define MT_RXD2_NORMAL_CM BIT(18)
+#define MT_RXD2_NORMAL_FCS_ERR BIT(17)
+#define MT_RXD2_NORMAL_SW_BIT BIT(16)
+#define MT_RXD2_NORMAL_SEC_MODE GENMASK(15, 12)
+#define MT_RXD2_NORMAL_TID GENMASK(11, 8)
+#define MT_RXD2_NORMAL_WLAN_IDX GENMASK(7, 0)
+
+#define MT_RXD3_NORMAL_PF_STS GENMASK(31, 30)
+#define MT_RXD3_NORMAL_PF_MODE BIT(29)
+#define MT_RXD3_NORMAL_CLS_BITMAP GENMASK(28, 19)
+#define MT_RXD3_NORMAL_WOL GENMASK(18, 14)
+#define MT_RXD3_NORMAL_MAGIC_PKT BIT(13)
+#define MT_RXD3_NORMAL_OFLD GENMASK(12, 11)
+#define MT_RXD3_NORMAL_CLS BIT(10)
+#define MT_RXD3_NORMAL_PATTERN_DROP BIT(9)
+#define MT_RXD3_NORMAL_TSF_COMPARE_LOSS BIT(8)
+#define MT_RXD3_NORMAL_RXV_SEQ GENMASK(7, 0)
+
+#define MT_RXV1_ACID_DET_H BIT(31)
+#define MT_RXV1_ACID_DET_L BIT(30)
+#define MT_RXV1_VHTA2_B8_B3 GENMASK(29, 24)
+#define MT_RXV1_NUM_RX GENMASK(23, 22)
+#define MT_RXV1_HT_NO_SOUND BIT(21)
+#define MT_RXV1_HT_SMOOTH BIT(20)
+#define MT_RXV1_HT_SHORT_GI BIT(19)
+#define MT_RXV1_HT_AGGR BIT(18)
+#define MT_RXV1_VHTA1_B22 BIT(17)
+#define MT_RXV1_FRAME_MODE GENMASK(16, 15)
+#define MT_RXV1_TX_MODE GENMASK(14, 12)
+#define MT_RXV1_HT_EXT_LTF GENMASK(11, 10)
+#define MT_RXV1_HT_AD_CODE BIT(9)
+#define MT_RXV1_HT_STBC GENMASK(8, 7)
+#define MT_RXV1_TX_RATE GENMASK(6, 0)
+
+#define MT_RXV2_SEL_ANT BIT(31)
+#define MT_RXV2_VALID_BIT BIT(30)
+#define MT_RXV2_NSTS GENMASK(29, 27)
+#define MT_RXV2_GROUP_ID GENMASK(26, 21)
+#define MT_RXV2_LENGTH GENMASK(20, 0)
+
+#define MT_RXV3_WB_RSSI GENMASK(31, 24)
+#define MT_RXV3_IB_RSSI GENMASK(23, 16)
+
+#define MT_RXV4_RCPI3 GENMASK(31, 24)
+#define MT_RXV4_RCPI2 GENMASK(23, 16)
+#define MT_RXV4_RCPI1 GENMASK(15, 8)
+#define MT_RXV4_RCPI0 GENMASK(7, 0)
+
+#define MT_RXV5_FOE GENMASK(11, 0)
+
+#define MT_RXV6_NF3 GENMASK(31, 24)
+#define MT_RXV6_NF2 GENMASK(23, 16)
+#define MT_RXV6_NF1 GENMASK(15, 8)
+#define MT_RXV6_NF0 GENMASK(7, 0)
+
+enum tx_header_format {
+ MT_HDR_FORMAT_802_3,
+ MT_HDR_FORMAT_CMD,
+ MT_HDR_FORMAT_802_11,
+ MT_HDR_FORMAT_802_11_EXT,
+};
+
+enum tx_pkt_type {
+ MT_TX_TYPE_CT,
+ MT_TX_TYPE_SF,
+ MT_TX_TYPE_CMD,
+ MT_TX_TYPE_FW,
+};
+
+enum tx_port_idx {
+ MT_TX_PORT_IDX_LMAC,
+ MT_TX_PORT_IDX_MCU
+};
+
+enum tx_mcu_port_q_idx {
+ MT_TX_MCU_PORT_RX_Q0 = 0,
+ MT_TX_MCU_PORT_RX_Q1,
+ MT_TX_MCU_PORT_RX_Q2,
+ MT_TX_MCU_PORT_RX_Q3,
+ MT_TX_MCU_PORT_RX_FWDL = 0x1e
+};
+
+enum tx_phy_bandwidth {
+ MT_PHY_BW_20,
+ MT_PHY_BW_40,
+ MT_PHY_BW_80,
+ MT_PHY_BW_160,
+};
+
+#define MT_CT_INFO_APPLY_TXD BIT(0)
+#define MT_CT_INFO_COPY_HOST_TXD_ALL BIT(1)
+#define MT_CT_INFO_MGMT_FRAME BIT(2)
+#define MT_CT_INFO_NONE_CIPHER_FRAME BIT(3)
+#define MT_CT_INFO_HSR2_TX BIT(4)
+
+#define MT_TXD_SIZE (8 * 4)
+
+#define MT_USB_TXD_SIZE (MT_TXD_SIZE + 8 * 4)
+#define MT_USB_HDR_SIZE 4
+#define MT_USB_TAIL_SIZE 4
+
+#define MT_TXD0_P_IDX BIT(31)
+#define MT_TXD0_Q_IDX GENMASK(30, 26)
+#define MT_TXD0_UDP_TCP_SUM BIT(24)
+#define MT_TXD0_IP_SUM BIT(23)
+#define MT_TXD0_ETH_TYPE_OFFSET GENMASK(22, 16)
+#define MT_TXD0_TX_BYTES GENMASK(15, 0)
+
+#define MT_TXD1_OWN_MAC GENMASK(31, 26)
+#define MT_TXD1_PKT_FMT GENMASK(25, 24)
+#define MT_TXD1_TID GENMASK(23, 21)
+#define MT_TXD1_AMSDU BIT(20)
+#define MT_TXD1_UNXV BIT(19)
+#define MT_TXD1_HDR_PAD GENMASK(18, 17)
+#define MT_TXD1_TXD_LEN BIT(16)
+#define MT_TXD1_LONG_FORMAT BIT(15)
+#define MT_TXD1_HDR_FORMAT GENMASK(14, 13)
+#define MT_TXD1_HDR_INFO GENMASK(12, 8)
+#define MT_TXD1_WLAN_IDX GENMASK(7, 0)
+
+#define MT_TXD2_FIX_RATE BIT(31)
+#define MT_TXD2_TIMING_MEASURE BIT(30)
+#define MT_TXD2_BA_DISABLE BIT(29)
+#define MT_TXD2_POWER_OFFSET GENMASK(28, 24)
+#define MT_TXD2_MAX_TX_TIME GENMASK(23, 16)
+#define MT_TXD2_FRAG GENMASK(15, 14)
+#define MT_TXD2_HTC_VLD BIT(13)
+#define MT_TXD2_DURATION BIT(12)
+#define MT_TXD2_BIP BIT(11)
+#define MT_TXD2_MULTICAST BIT(10)
+#define MT_TXD2_RTS BIT(9)
+#define MT_TXD2_SOUNDING BIT(8)
+#define MT_TXD2_NDPA BIT(7)
+#define MT_TXD2_NDP BIT(6)
+#define MT_TXD2_FRAME_TYPE GENMASK(5, 4)
+#define MT_TXD2_SUB_TYPE GENMASK(3, 0)
+
+#define MT_TXD3_SN_VALID BIT(31)
+#define MT_TXD3_PN_VALID BIT(30)
+#define MT_TXD3_SEQ GENMASK(27, 16)
+#define MT_TXD3_REM_TX_COUNT GENMASK(15, 11)
+#define MT_TXD3_TX_COUNT GENMASK(10, 6)
+#define MT_TXD3_PROTECT_FRAME BIT(1)
+#define MT_TXD3_NO_ACK BIT(0)
+
+#define MT_TXD4_PN_LOW GENMASK(31, 0)
+
+#define MT_TXD5_PN_HIGH GENMASK(31, 16)
+#define MT_TXD5_SW_POWER_MGMT BIT(13)
+#define MT_TXD5_DA_SELECT BIT(11)
+#define MT_TXD5_TX_STATUS_HOST BIT(10)
+#define MT_TXD5_TX_STATUS_MCU BIT(9)
+#define MT_TXD5_TX_STATUS_FMT BIT(8)
+#define MT_TXD5_PID GENMASK(7, 0)
+
+#define MT_TXD6_FIXED_RATE BIT(31)
+#define MT_TXD6_SGI BIT(30)
+#define MT_TXD6_LDPC BIT(29)
+#define MT_TXD6_TX_BF BIT(28)
+#define MT_TXD6_TX_RATE GENMASK(27, 16)
+#define MT_TXD6_ANT_ID GENMASK(15, 4)
+#define MT_TXD6_DYN_BW BIT(3)
+#define MT_TXD6_FIXED_BW BIT(2)
+#define MT_TXD6_BW GENMASK(1, 0)
+
+/* MT7663 DW7 HW-AMSDU */
+#define MT_TXD7_HW_AMSDU_CAP BIT(30)
+#define MT_TXD7_TYPE GENMASK(21, 20)
+#define MT_TXD7_SUB_TYPE GENMASK(19, 16)
+#define MT_TXD7_SPE_IDX GENMASK(15, 11)
+#define MT_TXD7_SPE_IDX_SLE BIT(10)
+
+#define MT_TXD8_L_TYPE GENMASK(5, 4)
+#define MT_TXD8_L_SUB_TYPE GENMASK(3, 0)
+
+#define MT_TX_RATE_STBC BIT(11)
+#define MT_TX_RATE_NSS GENMASK(10, 9)
+#define MT_TX_RATE_MODE GENMASK(8, 6)
+#define MT_TX_RATE_IDX GENMASK(5, 0)
+
+#define MT_TXP_MAX_BUF_NUM 6
+#define MT_HW_TXP_MAX_MSDU_NUM 4
+#define MT_HW_TXP_MAX_BUF_NUM 4
+
+#define MT_MSDU_ID_VALID BIT(15)
+
+#define MT_TXD_LEN_MASK GENMASK(11, 0)
+#define MT_TXD_LEN_MSDU_LAST BIT(14)
+#define MT_TXD_LEN_AMSDU_LAST BIT(15)
+/* mt7663 */
+#define MT_TXD_LEN_LAST BIT(15)
+
+struct mt7615_txp_ptr {
+ __le32 buf0;
+ __le16 len0;
+ __le16 len1;
+ __le32 buf1;
+} __packed __aligned(4);
+
+struct mt7615_hw_txp {
+ __le16 msdu_id[MT_HW_TXP_MAX_MSDU_NUM];
+ struct mt7615_txp_ptr ptr[MT_HW_TXP_MAX_BUF_NUM / 2];
+} __packed __aligned(4);
+
+struct mt7615_fw_txp {
+ __le16 flags;
+ __le16 token;
+ u8 bss_idx;
+ u8 rept_wds_wcid;
+ u8 rsv;
+ u8 nbuf;
+ __le32 buf[MT_TXP_MAX_BUF_NUM];
+ __le16 len[MT_TXP_MAX_BUF_NUM];
+} __packed __aligned(4);
+
+struct mt7615_txp_common {
+ union {
+ struct mt7615_fw_txp fw;
+ struct mt7615_hw_txp hw;
+ };
+};
+
+struct mt7615_tx_free {
+ __le16 rx_byte_cnt;
+ __le16 ctrl;
+ u8 txd_cnt;
+ u8 rsv[3];
+ __le16 token[];
+} __packed __aligned(4);
+
+#define MT_TX_FREE_MSDU_ID_CNT GENMASK(6, 0)
+
+#define MT_TXS0_PID GENMASK(31, 24)
+#define MT_TXS0_BA_ERROR BIT(22)
+#define MT_TXS0_PS_FLAG BIT(21)
+#define MT_TXS0_TXOP_TIMEOUT BIT(20)
+#define MT_TXS0_BIP_ERROR BIT(19)
+
+#define MT_TXS0_QUEUE_TIMEOUT BIT(18)
+#define MT_TXS0_RTS_TIMEOUT BIT(17)
+#define MT_TXS0_ACK_TIMEOUT BIT(16)
+#define MT_TXS0_ACK_ERROR_MASK GENMASK(18, 16)
+
+#define MT_TXS0_TX_STATUS_HOST BIT(15)
+#define MT_TXS0_TX_STATUS_MCU BIT(14)
+#define MT_TXS0_TXS_FORMAT BIT(13)
+#define MT_TXS0_FIXED_RATE BIT(12)
+#define MT_TXS0_TX_RATE GENMASK(11, 0)
+
+#define MT_TXS1_ANT_ID GENMASK(31, 20)
+#define MT_TXS1_RESP_RATE GENMASK(19, 16)
+#define MT_TXS1_BW GENMASK(15, 14)
+#define MT_TXS1_I_TXBF BIT(13)
+#define MT_TXS1_E_TXBF BIT(12)
+#define MT_TXS1_TID GENMASK(11, 9)
+#define MT_TXS1_AMPDU BIT(8)
+#define MT_TXS1_ACKED_MPDU BIT(7)
+#define MT_TXS1_TX_POWER_DBM GENMASK(6, 0)
+
+#define MT_TXS2_WCID GENMASK(31, 24)
+#define MT_TXS2_RXV_SEQNO GENMASK(23, 16)
+#define MT_TXS2_TX_DELAY GENMASK(15, 0)
+
+#define MT_TXS3_LAST_TX_RATE GENMASK(31, 29)
+#define MT_TXS3_TX_COUNT GENMASK(28, 24)
+#define MT_TXS3_F1_TSSI1 GENMASK(23, 12)
+#define MT_TXS3_F1_TSSI0 GENMASK(11, 0)
+#define MT_TXS3_F0_SEQNO GENMASK(11, 0)
+
+#define MT_TXS4_F0_TIMESTAMP GENMASK(31, 0)
+#define MT_TXS4_F1_TSSI3 GENMASK(23, 12)
+#define MT_TXS4_F1_TSSI2 GENMASK(11, 0)
+
+#define MT_TXS5_F0_FRONT_TIME GENMASK(24, 0)
+#define MT_TXS5_F1_NOISE_2 GENMASK(23, 16)
+#define MT_TXS5_F1_NOISE_1 GENMASK(15, 8)
+#define MT_TXS5_F1_NOISE_0 GENMASK(7, 0)
+
+#define MT_TXS6_F1_RCPI_3 GENMASK(31, 24)
+#define MT_TXS6_F1_RCPI_2 GENMASK(23, 16)
+#define MT_TXS6_F1_RCPI_1 GENMASK(15, 8)
+#define MT_TXS6_F1_RCPI_0 GENMASK(7, 0)
+
+struct mt7615_dfs_pulse {
+ u32 max_width; /* us */
+ int max_pwr; /* dbm */
+ int min_pwr; /* dbm */
+ u32 min_stgr_pri; /* us */
+ u32 max_stgr_pri; /* us */
+ u32 min_cr_pri; /* us */
+ u32 max_cr_pri; /* us */
+};
+
+struct mt7615_dfs_pattern {
+ u8 enb;
+ u8 stgr;
+ u8 min_crpn;
+ u8 max_crpn;
+ u8 min_crpr;
+ u8 min_pw;
+ u8 max_pw;
+ u32 min_pri;
+ u32 max_pri;
+ u8 min_crbn;
+ u8 max_crbn;
+ u8 min_stgpn;
+ u8 max_stgpn;
+ u8 min_stgpr;
+};
+
+struct mt7615_dfs_radar_spec {
+ struct mt7615_dfs_pulse pulse_th;
+ struct mt7615_dfs_pattern radar_pattern[16];
+};
+
+enum mt7615_cipher_type {
+ MT_CIPHER_NONE,
+ MT_CIPHER_WEP40,
+ MT_CIPHER_TKIP,
+ MT_CIPHER_TKIP_NO_MIC,
+ MT_CIPHER_AES_CCMP,
+ MT_CIPHER_WEP104,
+ MT_CIPHER_BIP_CMAC_128,
+ MT_CIPHER_WEP128,
+ MT_CIPHER_WAPI,
+ MT_CIPHER_CCMP_256 = 10,
+ MT_CIPHER_GCMP,
+ MT_CIPHER_GCMP_256,
+};
+
+static inline enum mt7615_cipher_type
+mt7615_mac_get_cipher(int cipher)
+{
+ switch (cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return MT_CIPHER_WEP40;
+ case WLAN_CIPHER_SUITE_WEP104:
+ return MT_CIPHER_WEP104;
+ case WLAN_CIPHER_SUITE_TKIP:
+ return MT_CIPHER_TKIP;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ return MT_CIPHER_BIP_CMAC_128;
+ case WLAN_CIPHER_SUITE_CCMP:
+ return MT_CIPHER_AES_CCMP;
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ return MT_CIPHER_CCMP_256;
+ case WLAN_CIPHER_SUITE_GCMP:
+ return MT_CIPHER_GCMP;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ return MT_CIPHER_GCMP_256;
+ case WLAN_CIPHER_SUITE_SMS4:
+ return MT_CIPHER_WAPI;
+ default:
+ return MT_CIPHER_NONE;
+ }
+}
+
+static inline struct mt7615_txp_common *
+mt7615_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+{
+ u8 *txwi;
+
+ if (!t)
+ return NULL;
+
+ txwi = mt76_get_txwi_ptr(dev, t);
+
+ return (struct mt7615_txp_common *)(txwi + MT_TXD_SIZE);
+}
+
+static inline u32 mt7615_mac_wtbl_addr(struct mt7615_dev *dev, int wcid)
+{
+ return MT_WTBL_BASE(dev) + wcid * MT_WTBL_ENTRY_SIZE;
+}
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
new file mode 100644
index 000000000..defa207f5
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -0,0 +1,1268 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Roy Luo <royluo@google.com>
+ * Ryder Lee <ryder.lee@mediatek.com>
+ * Felix Fietkau <nbd@nbd.name>
+ * Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include "mt7615.h"
+#include "mcu.h"
+
+static bool mt7615_dev_running(struct mt7615_dev *dev)
+{
+ struct mt7615_phy *phy;
+
+ if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
+ return true;
+
+ phy = mt7615_ext_phy(dev);
+
+ return phy && test_bit(MT76_STATE_RUNNING, &phy->mt76->state);
+}
+
+static void mt7615_free_pending_tx_skbs(struct mt7615_dev *dev,
+ struct mt7615_sta *msta)
+{
+ int i;
+
+ spin_lock_bh(&dev->pm.txq_lock);
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ if (msta && dev->pm.tx_q[i].msta != msta)
+ continue;
+
+ dev_kfree_skb(dev->pm.tx_q[i].skb);
+ dev->pm.tx_q[i].skb = NULL;
+ }
+ spin_unlock_bh(&dev->pm.txq_lock);
+}
+
+static int mt7615_start(struct ieee80211_hw *hw)
+{
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ bool running;
+
+ if (!mt7615_wait_for_mcu_init(dev))
+ return -EIO;
+
+ mt7615_mutex_acquire(dev);
+
+ running = mt7615_dev_running(dev);
+
+ if (!running) {
+ mt7615_mcu_set_pm(dev, 0, 0);
+ mt7615_mcu_set_mac_enable(dev, 0, true);
+ mt7615_mac_enable_nf(dev, 0);
+ }
+
+ if (phy != &dev->phy) {
+ mt7615_mcu_set_pm(dev, 1, 0);
+ mt7615_mcu_set_mac_enable(dev, 1, true);
+ mt7615_mac_enable_nf(dev, 1);
+ }
+
+ mt7615_mcu_set_channel_domain(phy);
+ mt7615_mcu_set_chan_info(phy, MCU_EXT_CMD_SET_RX_PATH);
+
+ set_bit(MT76_STATE_RUNNING, &phy->mt76->state);
+
+ ieee80211_queue_delayed_work(hw, &phy->mac_work,
+ MT7615_WATCHDOG_TIME);
+
+ if (!running)
+ mt7615_mac_reset_counters(dev);
+
+ mt7615_mutex_release(dev);
+
+ return 0;
+}
+
+static void mt7615_stop(struct ieee80211_hw *hw)
+{
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+
+ cancel_delayed_work_sync(&phy->mac_work);
+ del_timer_sync(&phy->roc_timer);
+ cancel_work_sync(&phy->roc_work);
+
+ cancel_delayed_work_sync(&dev->pm.ps_work);
+ cancel_work_sync(&dev->pm.wake_work);
+
+ mt7615_free_pending_tx_skbs(dev, NULL);
+
+ mt7615_mutex_acquire(dev);
+
+ mt76_testmode_reset(&dev->mt76, true);
+
+ clear_bit(MT76_STATE_RUNNING, &phy->mt76->state);
+ cancel_delayed_work_sync(&phy->scan_work);
+
+ if (phy != &dev->phy) {
+ mt7615_mcu_set_pm(dev, 1, 1);
+ mt7615_mcu_set_mac_enable(dev, 1, false);
+ }
+
+ if (!mt7615_dev_running(dev)) {
+ mt7615_mcu_set_pm(dev, 0, 1);
+ mt7615_mcu_set_mac_enable(dev, 0, false);
+ }
+
+ mt7615_mutex_release(dev);
+}
+
+static int get_omac_idx(enum nl80211_iftype type, u32 mask)
+{
+ int i;
+
+ switch (type) {
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_ADHOC:
+ /* ap use hw bssid 0 and ext bssid */
+ if (~mask & BIT(HW_BSSID_0))
+ return HW_BSSID_0;
+
+ for (i = EXT_BSSID_1; i < EXT_BSSID_END; i++)
+ if (~mask & BIT(i))
+ return i;
+
+ break;
+ case NL80211_IFTYPE_STATION:
+ /* sta use hw bssid other than 0 */
+ for (i = HW_BSSID_1; i < HW_BSSID_MAX; i++)
+ if (~mask & BIT(i))
+ return i;
+
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ return -1;
+}
+
+static int mt7615_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ struct mt76_txq *mtxq;
+ bool ext_phy = phy != &dev->phy;
+ int idx, ret = 0;
+
+ mt7615_mutex_acquire(dev);
+
+ mt76_testmode_reset(&dev->mt76, true);
+
+ if (vif->type == NL80211_IFTYPE_MONITOR &&
+ is_zero_ether_addr(vif->addr))
+ phy->monitor_vif = vif;
+
+ mvif->idx = ffs(~dev->mphy.vif_mask) - 1;
+ if (mvif->idx >= MT7615_MAX_INTERFACES) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ idx = get_omac_idx(vif->type, dev->omac_mask);
+ if (idx < 0) {
+ ret = -ENOSPC;
+ goto out;
+ }
+ mvif->omac_idx = idx;
+
+ mvif->band_idx = ext_phy;
+ if (mt7615_ext_phy(dev))
+ mvif->wmm_idx = ext_phy * (MT7615_MAX_WMM_SETS / 2) +
+ mvif->idx % (MT7615_MAX_WMM_SETS / 2);
+ else
+ mvif->wmm_idx = mvif->idx % MT7615_MAX_WMM_SETS;
+
+ dev->mphy.vif_mask |= BIT(mvif->idx);
+ dev->omac_mask |= BIT(mvif->omac_idx);
+ phy->omac_mask |= BIT(mvif->omac_idx);
+
+ mt7615_mcu_set_dbdc(dev);
+
+ idx = MT7615_WTBL_RESERVED - mvif->idx;
+
+ INIT_LIST_HEAD(&mvif->sta.poll_list);
+ mvif->sta.wcid.idx = idx;
+ mvif->sta.wcid.ext_phy = mvif->band_idx;
+ mvif->sta.wcid.hw_key_idx = -1;
+ mt7615_mac_wtbl_update(dev, idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+
+ rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
+ if (vif->txq) {
+ mtxq = (struct mt76_txq *)vif->txq->drv_priv;
+ mtxq->wcid = &mvif->sta.wcid;
+ }
+
+ ret = mt7615_mcu_add_dev_info(dev, vif, true);
+ if (ret)
+ goto out;
+
+ if (dev->pm.enable) {
+ ret = mt7615_mcu_set_bss_pm(dev, vif, true);
+ if (ret)
+ goto out;
+
+ vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
+ mt76_set(dev, MT_WF_RFCR(ext_phy),
+ MT_WF_RFCR_DROP_OTHER_BEACON);
+ }
+out:
+ mt7615_mutex_release(dev);
+
+ return ret;
+}
+
+static void mt7615_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt7615_sta *msta = &mvif->sta;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ int idx = msta->wcid.idx;
+
+ /* TODO: disable beacon for the bss */
+
+ mt7615_mutex_acquire(dev);
+
+ mt76_testmode_reset(&dev->mt76, true);
+ if (vif == phy->monitor_vif)
+ phy->monitor_vif = NULL;
+
+ mt7615_free_pending_tx_skbs(dev, msta);
+
+ if (dev->pm.enable) {
+ bool ext_phy = phy != &dev->phy;
+
+ mt7615_mcu_set_bss_pm(dev, vif, false);
+ mt76_clear(dev, MT_WF_RFCR(ext_phy),
+ MT_WF_RFCR_DROP_OTHER_BEACON);
+ }
+ mt7615_mcu_add_dev_info(dev, vif, false);
+
+ rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
+
+ dev->mphy.vif_mask &= ~BIT(mvif->idx);
+ dev->omac_mask &= ~BIT(mvif->omac_idx);
+ phy->omac_mask &= ~BIT(mvif->omac_idx);
+
+ mt7615_mutex_release(dev);
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (!list_empty(&msta->poll_list))
+ list_del_init(&msta->poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+}
+
+static void mt7615_init_dfs_state(struct mt7615_phy *phy)
+{
+ struct mt76_phy *mphy = phy->mt76;
+ struct ieee80211_hw *hw = mphy->hw;
+ struct cfg80211_chan_def *chandef = &hw->conf.chandef;
+
+ if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ return;
+
+ if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR))
+ return;
+
+ if (mphy->chandef.chan->center_freq == chandef->chan->center_freq &&
+ mphy->chandef.width == chandef->width)
+ return;
+
+ phy->dfs_state = -1;
+}
+
+int mt7615_set_channel(struct mt7615_phy *phy)
+{
+ struct mt7615_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+ int ret;
+
+ cancel_delayed_work_sync(&phy->mac_work);
+
+ mt7615_mutex_acquire(dev);
+
+ set_bit(MT76_RESET, &phy->mt76->state);
+
+ mt7615_init_dfs_state(phy);
+ mt76_set_channel(phy->mt76);
+
+ if (is_mt7615(&dev->mt76) && dev->flash_eeprom) {
+ mt7615_mcu_apply_rx_dcoc(phy);
+ mt7615_mcu_apply_tx_dpd(phy);
+ }
+
+ ret = mt7615_mcu_set_chan_info(phy, MCU_EXT_CMD_CHANNEL_SWITCH);
+ if (ret)
+ goto out;
+
+ mt7615_mac_set_timing(phy);
+ ret = mt7615_dfs_init_radar_detector(phy);
+ mt7615_mac_cca_stats_reset(phy);
+ mt7615_mcu_set_sku_en(phy, !mt76_testmode_enabled(&dev->mt76));
+
+ mt7615_mac_reset_counters(dev);
+ phy->noise = 0;
+ phy->chfreq = mt76_rr(dev, MT_CHFREQ(ext_phy));
+
+out:
+ clear_bit(MT76_RESET, &phy->mt76->state);
+
+ mt7615_mutex_release(dev);
+
+ mt76_txq_schedule_all(phy->mt76);
+
+ if (!mt76_testmode_enabled(&dev->mt76))
+ ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mac_work,
+ MT7615_WATCHDOG_TIME);
+
+ return ret;
+}
+
+static int
+mt7615_queue_key_update(struct mt7615_dev *dev, enum set_key_cmd cmd,
+ struct mt7615_sta *msta,
+ struct ieee80211_key_conf *key)
+{
+ struct mt7615_wtbl_desc *wd;
+
+ wd = kzalloc(sizeof(*wd), GFP_KERNEL);
+ if (!wd)
+ return -ENOMEM;
+
+ wd->type = MT7615_WTBL_KEY_DESC;
+ wd->sta = msta;
+
+ wd->key.key = kmemdup(key->key, key->keylen, GFP_KERNEL);
+ if (!wd->key.key) {
+ kfree(wd);
+ return -ENOMEM;
+ }
+ wd->key.cipher = key->cipher;
+ wd->key.keyidx = key->keyidx;
+ wd->key.keylen = key->keylen;
+ wd->key.cmd = cmd;
+
+ spin_lock_bh(&dev->mt76.lock);
+ list_add_tail(&wd->node, &dev->wd_head);
+ spin_unlock_bh(&dev->mt76.lock);
+
+ queue_work(dev->mt76.wq, &dev->wtbl_work);
+
+ return 0;
+}
+
+static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt7615_sta *msta = sta ? (struct mt7615_sta *)sta->drv_priv :
+ &mvif->sta;
+ struct mt76_wcid *wcid = &msta->wcid;
+ int idx = key->keyidx, err;
+
+ /* The hardware does not support per-STA RX GTK; fall back
+ * to software mode for these.
+ */
+ if ((vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) &&
+ (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
+ key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return -EOPNOTSUPP;
+
+ /* fall back to sw encryption for unsupported ciphers */
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ case WLAN_CIPHER_SUITE_SMS4:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ mt7615_mutex_acquire(dev);
+
+ if (cmd == SET_KEY) {
+ key->hw_key_idx = wcid->idx;
+ wcid->hw_key_idx = idx;
+ } else if (idx == wcid->hw_key_idx) {
+ wcid->hw_key_idx = -1;
+ }
+ mt76_wcid_key_setup(&dev->mt76, wcid,
+ cmd == SET_KEY ? key : NULL);
+
+ if (mt76_is_mmio(&dev->mt76))
+ err = mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
+ else
+ err = mt7615_queue_key_update(dev, cmd, msta, key);
+
+ mt7615_mutex_release(dev);
+
+ return err;
+}
+
+static int mt7615_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ bool band = phy != &dev->phy;
+ int ret = 0;
+
+ if (changed & (IEEE80211_CONF_CHANGE_CHANNEL |
+ IEEE80211_CONF_CHANGE_POWER)) {
+#ifdef CONFIG_NL80211_TESTMODE
+ if (dev->mt76.test.state != MT76_TM_STATE_OFF) {
+ mt7615_mutex_acquire(dev);
+ mt76_testmode_reset(&dev->mt76, false);
+ mt7615_mutex_release(dev);
+ }
+#endif
+ ieee80211_stop_queues(hw);
+ ret = mt7615_set_channel(phy);
+ ieee80211_wake_queues(hw);
+ }
+
+ mt7615_mutex_acquire(dev);
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ mt76_testmode_reset(&dev->mt76, true);
+
+ if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
+ phy->rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
+ else
+ phy->rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC;
+
+ mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter);
+ }
+
+ mt7615_mutex_release(dev);
+
+ return ret;
+}
+
+static int
+mt7615_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ int err;
+
+ mt7615_mutex_acquire(dev);
+
+ queue = mt7615_lmac_mapping(dev, queue);
+ queue += mvif->wmm_idx * MT7615_MAX_WMM_SETS;
+ err = mt7615_mcu_set_wmm(dev, queue, params);
+
+ mt7615_mutex_release(dev);
+
+ return err;
+}
+
+static void mt7615_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ bool band = phy != &dev->phy;
+
+ u32 ctl_flags = MT_WF_RFCR1_DROP_ACK |
+ MT_WF_RFCR1_DROP_BF_POLL |
+ MT_WF_RFCR1_DROP_BA |
+ MT_WF_RFCR1_DROP_CFEND |
+ MT_WF_RFCR1_DROP_CFACK;
+ u32 flags = 0;
+
+ mt7615_mutex_acquire(dev);
+
+#define MT76_FILTER(_flag, _hw) do { \
+ flags |= *total_flags & FIF_##_flag; \
+ phy->rxfilter &= ~(_hw); \
+ if (!mt76_testmode_enabled(&dev->mt76)) \
+ phy->rxfilter |= !(flags & FIF_##_flag) * (_hw);\
+ } while (0)
+
+ phy->rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
+ MT_WF_RFCR_DROP_OTHER_BEACON |
+ MT_WF_RFCR_DROP_FRAME_REPORT |
+ MT_WF_RFCR_DROP_PROBEREQ |
+ MT_WF_RFCR_DROP_MCAST_FILTERED |
+ MT_WF_RFCR_DROP_MCAST |
+ MT_WF_RFCR_DROP_BCAST |
+ MT_WF_RFCR_DROP_DUPLICATE |
+ MT_WF_RFCR_DROP_A2_BSSID |
+ MT_WF_RFCR_DROP_UNWANTED_CTL |
+ MT_WF_RFCR_DROP_STBC_MULTI);
+
+ MT76_FILTER(OTHER_BSS, MT_WF_RFCR_DROP_OTHER_TIM |
+ MT_WF_RFCR_DROP_A3_MAC |
+ MT_WF_RFCR_DROP_A3_BSSID);
+
+ MT76_FILTER(FCSFAIL, MT_WF_RFCR_DROP_FCSFAIL);
+
+ MT76_FILTER(CONTROL, MT_WF_RFCR_DROP_CTS |
+ MT_WF_RFCR_DROP_RTS |
+ MT_WF_RFCR_DROP_CTL_RSV |
+ MT_WF_RFCR_DROP_NDPA);
+
+ *total_flags = flags;
+ mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter);
+
+ if (*total_flags & FIF_CONTROL)
+ mt76_clear(dev, MT_WF_RFCR1(band), ctl_flags);
+ else
+ mt76_set(dev, MT_WF_RFCR1(band), ctl_flags);
+
+ mt7615_mutex_release(dev);
+}
+
+static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u32 changed)
+{
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+
+ mt7615_mutex_acquire(dev);
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ int slottime = info->use_short_slot ? 9 : 20;
+
+ if (slottime != phy->slottime) {
+ phy->slottime = slottime;
+ mt7615_mac_set_timing(phy);
+ }
+ }
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED) {
+ mt7615_mcu_add_bss_info(phy, vif, NULL, info->enable_beacon);
+ mt7615_mcu_sta_add(dev, vif, NULL, info->enable_beacon);
+
+ if (vif->p2p && info->enable_beacon)
+ mt7615_mcu_set_p2p_oppps(hw, vif);
+ }
+
+ if (changed & (BSS_CHANGED_BEACON |
+ BSS_CHANGED_BEACON_ENABLED))
+ mt7615_mcu_add_beacon(dev, hw, vif, info->enable_beacon);
+
+ if (changed & BSS_CHANGED_PS)
+ mt7615_mcu_set_vif_ps(dev, vif);
+
+ if (changed & BSS_CHANGED_ARP_FILTER)
+ mt7615_mcu_update_arp_filter(hw, vif, info);
+
+ mt7615_mutex_release(dev);
+}
+
+static void
+mt7615_channel_switch_beacon(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *chandef)
+{
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+
+ mt7615_mutex_acquire(dev);
+ mt7615_mcu_add_beacon(dev, hw, vif, true);
+ mt7615_mutex_release(dev);
+}
+
+int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ int idx, err;
+
+ idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7615_WTBL_STA - 1);
+ if (idx < 0)
+ return -ENOSPC;
+
+ INIT_LIST_HEAD(&msta->poll_list);
+ msta->vif = mvif;
+ msta->wcid.sta = 1;
+ msta->wcid.idx = idx;
+ msta->wcid.ext_phy = mvif->band_idx;
+
+ err = mt7615_pm_wake(dev);
+ if (err)
+ return err;
+
+ if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
+ struct mt7615_phy *phy;
+
+ phy = mvif->band_idx ? mt7615_ext_phy(dev) : &dev->phy;
+ mt7615_mcu_add_bss_info(phy, vif, sta, true);
+ }
+ mt7615_mac_wtbl_update(dev, idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+ mt7615_mcu_sta_add(dev, vif, sta, true);
+
+ mt7615_pm_power_save_sched(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt7615_mac_sta_add);
+
+void mt7615_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+
+ mt7615_free_pending_tx_skbs(dev, msta);
+ mt7615_pm_wake(dev);
+
+ mt7615_mcu_sta_add(dev, vif, sta, false);
+ mt7615_mac_wtbl_update(dev, msta->wcid.idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+ if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt7615_phy *phy;
+
+ phy = mvif->band_idx ? mt7615_ext_phy(dev) : &dev->phy;
+ mt7615_mcu_add_bss_info(phy, vif, sta, false);
+ }
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (!list_empty(&msta->poll_list))
+ list_del_init(&msta->poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+
+ mt7615_pm_power_save_sched(dev);
+}
+EXPORT_SYMBOL_GPL(mt7615_mac_sta_remove);
+
+static void mt7615_sta_rate_tbl_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+ struct ieee80211_sta_rates *sta_rates = rcu_dereference(sta->rates);
+ int i;
+
+ if (!sta_rates)
+ return;
+
+ spin_lock_bh(&dev->mt76.lock);
+ for (i = 0; i < ARRAY_SIZE(msta->rates); i++) {
+ msta->rates[i].idx = sta_rates->rate[i].idx;
+ msta->rates[i].count = sta_rates->rate[i].count;
+ msta->rates[i].flags = sta_rates->rate[i].flags;
+
+ if (msta->rates[i].idx < 0 || !msta->rates[i].count)
+ break;
+ }
+ msta->n_rates = i;
+ if (!test_bit(MT76_STATE_PM, &phy->mt76->state))
+ mt7615_mac_set_rates(phy, msta, NULL, msta->rates);
+ spin_unlock_bh(&dev->mt76.lock);
+}
+
+static void
+mt7615_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
+{
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ struct mt76_phy *mphy = phy->mt76;
+
+ if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
+ return;
+
+ if (test_bit(MT76_STATE_PM, &mphy->state)) {
+ queue_work(dev->mt76.wq, &dev->pm.wake_work);
+ return;
+ }
+
+ dev->pm.last_activity = jiffies;
+ mt76_worker_schedule(&dev->mt76.tx_worker);
+}
+
+static void mt7615_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt76_phy *mphy = hw->priv;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+ struct mt7615_sta *msta = NULL;
+ int qid;
+
+ if (control->sta) {
+ msta = (struct mt7615_sta *)control->sta->drv_priv;
+ wcid = &msta->wcid;
+ }
+
+ if (vif && !control->sta) {
+ struct mt7615_vif *mvif;
+
+ mvif = (struct mt7615_vif *)vif->drv_priv;
+ msta = &mvif->sta;
+ wcid = &msta->wcid;
+ }
+
+ if (!test_bit(MT76_STATE_PM, &mphy->state)) {
+ dev->pm.last_activity = jiffies;
+ mt76_tx(mphy, control->sta, wcid, skb);
+ return;
+ }
+
+ qid = skb_get_queue_mapping(skb);
+ if (qid >= MT_TXQ_PSD) {
+ qid = IEEE80211_AC_BE;
+ skb_set_queue_mapping(skb, qid);
+ }
+
+ spin_lock_bh(&dev->pm.txq_lock);
+ if (!dev->pm.tx_q[qid].skb) {
+ ieee80211_stop_queues(hw);
+ dev->pm.tx_q[qid].msta = msta;
+ dev->pm.tx_q[qid].skb = skb;
+ queue_work(dev->mt76.wq, &dev->pm.wake_work);
+ } else {
+ dev_kfree_skb(skb);
+ }
+ spin_unlock_bh(&dev->pm.txq_lock);
+}
+
+static int mt7615_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
+{
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+
+ mt7615_mutex_acquire(dev);
+ mt7615_mcu_set_rts_thresh(phy, val);
+ mt7615_mutex_release(dev);
+
+ return 0;
+}
+
+static int
+mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct ieee80211_sta *sta = params->sta;
+ struct ieee80211_txq *txq = sta->txq[params->tid];
+ struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+ u16 tid = params->tid;
+ u16 ssn = params->ssn;
+ struct mt76_txq *mtxq;
+ int ret = 0;
+
+ if (!txq)
+ return -EINVAL;
+
+ mtxq = (struct mt76_txq *)txq->drv_priv;
+
+ mt7615_mutex_acquire(dev);
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn,
+ params->buf_size);
+ mt7615_mcu_add_rx_ba(dev, params, true);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
+ mt7615_mcu_add_rx_ba(dev, params, false);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ mtxq->aggr = true;
+ mtxq->send_bar = false;
+ mt7615_mcu_add_tx_ba(dev, params, true);
+ ssn = mt7615_mac_get_sta_tid_sn(dev, msta->wcid.idx, tid);
+ ieee80211_send_bar(vif, sta->addr, tid,
+ IEEE80211_SN_TO_SEQ(ssn));
+ break;
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ mtxq->aggr = false;
+ mt7615_mcu_add_tx_ba(dev, params, false);
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ ssn = mt7615_mac_get_sta_tid_sn(dev, msta->wcid.idx, tid);
+ params->ssn = ssn;
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
+ break;
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ mtxq->aggr = false;
+ mt7615_mcu_add_tx_ba(dev, params, false);
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ }
+ mt7615_mutex_release(dev);
+
+ return ret;
+}
+
+static int
+mt7615_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NOTEXIST,
+ IEEE80211_STA_NONE);
+}
+
+static int
+mt7615_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NONE,
+ IEEE80211_STA_NOTEXIST);
+}
+
+static int
+mt7615_get_stats(struct ieee80211_hw *hw,
+ struct ieee80211_low_level_stats *stats)
+{
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ struct mib_stats *mib = &phy->mib;
+
+ mt7615_mutex_acquire(phy->dev);
+
+ stats->dot11RTSSuccessCount = mib->rts_cnt;
+ stats->dot11RTSFailureCount = mib->rts_retries_cnt;
+ stats->dot11FCSErrorCount = mib->fcs_err_cnt;
+ stats->dot11ACKFailureCount = mib->ack_fail_cnt;
+
+ memset(mib, 0, sizeof(*mib));
+
+ mt7615_mutex_release(phy->dev);
+
+ return 0;
+}
+
+static u64
+mt7615_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ union {
+ u64 t64;
+ u32 t32[2];
+ } tsf;
+
+ mt7615_mutex_acquire(dev);
+
+ mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
+ tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0);
+ tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1);
+
+ mt7615_mutex_release(dev);
+
+ return tsf.t64;
+}
+
+static void
+mt7615_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u64 timestamp)
+{
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ union {
+ u64 t64;
+ u32 t32[2];
+ } tsf = { .t64 = timestamp, };
+
+ mt7615_mutex_acquire(dev);
+
+ mt76_wr(dev, MT_LPON_UTTR0, tsf.t32[0]);
+ mt76_wr(dev, MT_LPON_UTTR1, tsf.t32[1]);
+ /* TSF software overwrite */
+ mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_WRITE);
+
+ mt7615_mutex_release(dev);
+}
+
+static void
+mt7615_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
+{
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ struct mt7615_dev *dev = phy->dev;
+
+ mt7615_mutex_acquire(dev);
+ phy->coverage_class = max_t(s16, coverage_class, 0);
+ mt7615_mac_set_timing(phy);
+ mt7615_mutex_release(dev);
+}
+
+static int
+mt7615_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+{
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ int max_nss = hweight8(hw->wiphy->available_antennas_tx);
+ bool ext_phy = phy != &dev->phy;
+
+ if (!tx_ant || tx_ant != rx_ant || ffs(tx_ant) > max_nss)
+ return -EINVAL;
+
+ if ((BIT(hweight8(tx_ant)) - 1) != tx_ant)
+ tx_ant = BIT(ffs(tx_ant) - 1) - 1;
+
+ mt7615_mutex_acquire(dev);
+
+ phy->mt76->antenna_mask = tx_ant;
+ if (ext_phy) {
+ if (dev->chainmask == 0xf)
+ tx_ant <<= 2;
+ else
+ tx_ant <<= 1;
+ }
+ phy->chainmask = tx_ant;
+
+ mt76_set_stream_caps(phy->mt76, true);
+
+ mt7615_mutex_release(dev);
+
+ return 0;
+}
+
+static void mt7615_roc_iter(void *priv, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct mt7615_phy *phy = priv;
+
+ mt7615_mcu_set_roc(phy, vif, NULL, 0);
+}
+
+void mt7615_roc_work(struct work_struct *work)
+{
+ struct mt7615_phy *phy;
+
+ phy = (struct mt7615_phy *)container_of(work, struct mt7615_phy,
+ roc_work);
+
+ if (!test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state))
+ return;
+
+ mt7615_mutex_acquire(phy->dev);
+ ieee80211_iterate_active_interfaces(phy->mt76->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7615_roc_iter, phy);
+ mt7615_mutex_release(phy->dev);
+ ieee80211_remain_on_channel_expired(phy->mt76->hw);
+}
+
+void mt7615_roc_timer(struct timer_list *timer)
+{
+ struct mt7615_phy *phy = from_timer(phy, timer, roc_timer);
+
+ ieee80211_queue_work(phy->mt76->hw, &phy->roc_work);
+}
+
+void mt7615_scan_work(struct work_struct *work)
+{
+ struct mt7615_phy *phy;
+
+ phy = (struct mt7615_phy *)container_of(work, struct mt7615_phy,
+ scan_work.work);
+
+ while (true) {
+ struct mt7615_mcu_rxd *rxd;
+ struct sk_buff *skb;
+
+ spin_lock_bh(&phy->dev->mt76.lock);
+ skb = __skb_dequeue(&phy->scan_event_list);
+ spin_unlock_bh(&phy->dev->mt76.lock);
+
+ if (!skb)
+ break;
+
+ rxd = (struct mt7615_mcu_rxd *)skb->data;
+ if (rxd->eid == MCU_EVENT_SCHED_SCAN_DONE) {
+ ieee80211_sched_scan_results(phy->mt76->hw);
+ } else if (test_and_clear_bit(MT76_HW_SCANNING,
+ &phy->mt76->state)) {
+ struct cfg80211_scan_info info = {
+ .aborted = false,
+ };
+
+ ieee80211_scan_completed(phy->mt76->hw, &info);
+ }
+ dev_kfree_skb(skb);
+ }
+}
+
+static int
+mt7615_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *req)
+{
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt76_phy *mphy = hw->priv;
+ int err;
+
+ mt7615_mutex_acquire(dev);
+ err = mt7615_mcu_hw_scan(mphy->priv, vif, req);
+ mt7615_mutex_release(dev);
+
+ return err;
+}
+
+static void
+mt7615_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt76_phy *mphy = hw->priv;
+
+ mt7615_mutex_acquire(dev);
+ mt7615_mcu_cancel_hw_scan(mphy->priv, vif);
+ mt7615_mutex_release(dev);
+}
+
+static int
+mt7615_start_sched_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_scan_ies *ies)
+{
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt76_phy *mphy = hw->priv;
+ int err;
+
+ mt7615_mutex_acquire(dev);
+
+ err = mt7615_mcu_sched_scan_req(mphy->priv, vif, req);
+ if (err < 0)
+ goto out;
+
+ err = mt7615_mcu_sched_scan_enable(mphy->priv, vif, true);
+out:
+ mt7615_mutex_release(dev);
+
+ return err;
+}
+
+static int
+mt7615_stop_sched_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt76_phy *mphy = hw->priv;
+ int err;
+
+ mt7615_mutex_acquire(dev);
+ err = mt7615_mcu_sched_scan_enable(mphy->priv, vif, false);
+ mt7615_mutex_release(dev);
+
+ return err;
+}
+
+static int mt7615_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel *chan,
+ int duration,
+ enum ieee80211_roc_type type)
+{
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ int err;
+
+ if (test_and_set_bit(MT76_STATE_ROC, &phy->mt76->state))
+ return 0;
+
+ mt7615_mutex_acquire(phy->dev);
+
+ err = mt7615_mcu_set_roc(phy, vif, chan, duration);
+ if (err < 0) {
+ clear_bit(MT76_STATE_ROC, &phy->mt76->state);
+ goto out;
+ }
+
+ if (!wait_event_timeout(phy->roc_wait, phy->roc_grant, HZ)) {
+ mt7615_mcu_set_roc(phy, vif, NULL, 0);
+ clear_bit(MT76_STATE_ROC, &phy->mt76->state);
+ err = -ETIMEDOUT;
+ }
+
+out:
+ mt7615_mutex_release(phy->dev);
+
+ return err;
+}
+
+static int mt7615_cancel_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+
+ if (!test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state))
+ return 0;
+
+ del_timer_sync(&phy->roc_timer);
+ cancel_work_sync(&phy->roc_work);
+
+ mt7615_mutex_acquire(phy->dev);
+ mt7615_mcu_set_roc(phy, vif, NULL, 0);
+ mt7615_mutex_release(phy->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int mt7615_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ bool ext_phy = phy != &dev->phy;
+ int err = 0;
+
+ cancel_delayed_work_sync(&dev->pm.ps_work);
+ mt7615_free_pending_tx_skbs(dev, NULL);
+
+ mt7615_mutex_acquire(dev);
+
+ clear_bit(MT76_STATE_RUNNING, &phy->mt76->state);
+ cancel_delayed_work_sync(&phy->scan_work);
+ cancel_delayed_work_sync(&phy->mac_work);
+
+ mt76_set(dev, MT_WF_RFCR(ext_phy), MT_WF_RFCR_DROP_OTHER_BEACON);
+
+ set_bit(MT76_STATE_SUSPEND, &phy->mt76->state);
+ ieee80211_iterate_active_interfaces(hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7615_mcu_set_suspend_iter, phy);
+
+ if (!mt7615_dev_running(dev))
+ err = mt7615_mcu_set_hif_suspend(dev, true);
+
+ mt7615_mutex_release(dev);
+
+ return err;
+}
+
+static int mt7615_resume(struct ieee80211_hw *hw)
+{
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ bool running, ext_phy = phy != &dev->phy;
+
+ mt7615_mutex_acquire(dev);
+
+ running = mt7615_dev_running(dev);
+ set_bit(MT76_STATE_RUNNING, &phy->mt76->state);
+
+ if (!running) {
+ int err;
+
+ err = mt7615_mcu_set_hif_suspend(dev, false);
+ if (err < 0) {
+ mt7615_mutex_release(dev);
+ return err;
+ }
+ }
+
+ clear_bit(MT76_STATE_SUSPEND, &phy->mt76->state);
+ ieee80211_iterate_active_interfaces(hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7615_mcu_set_suspend_iter, phy);
+
+ ieee80211_queue_delayed_work(hw, &phy->mac_work,
+ MT7615_WATCHDOG_TIME);
+ mt76_clear(dev, MT_WF_RFCR(ext_phy), MT_WF_RFCR_DROP_OTHER_BEACON);
+
+ mt7615_mutex_release(dev);
+
+ return 0;
+}
+
+static void mt7615_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt76_dev *mdev = &dev->mt76;
+
+ device_set_wakeup_enable(mdev->dev, enabled);
+}
+
+static void mt7615_set_rekey_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *data)
+{
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+
+ mt7615_mutex_acquire(dev);
+ mt7615_mcu_update_gtk_rekey(hw, vif, data);
+ mt7615_mutex_release(dev);
+}
+#endif /* CONFIG_PM */
+
+const struct ieee80211_ops mt7615_ops = {
+ .tx = mt7615_tx,
+ .start = mt7615_start,
+ .stop = mt7615_stop,
+ .add_interface = mt7615_add_interface,
+ .remove_interface = mt7615_remove_interface,
+ .config = mt7615_config,
+ .conf_tx = mt7615_conf_tx,
+ .configure_filter = mt7615_configure_filter,
+ .bss_info_changed = mt7615_bss_info_changed,
+ .sta_add = mt7615_sta_add,
+ .sta_remove = mt7615_sta_remove,
+ .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
+ .set_key = mt7615_set_key,
+ .ampdu_action = mt7615_ampdu_action,
+ .set_rts_threshold = mt7615_set_rts_threshold,
+ .wake_tx_queue = mt7615_wake_tx_queue,
+ .sta_rate_tbl_update = mt7615_sta_rate_tbl_update,
+ .sw_scan_start = mt76_sw_scan,
+ .sw_scan_complete = mt76_sw_scan_complete,
+ .release_buffered_frames = mt76_release_buffered_frames,
+ .get_txpower = mt76_get_txpower,
+ .channel_switch_beacon = mt7615_channel_switch_beacon,
+ .get_stats = mt7615_get_stats,
+ .get_tsf = mt7615_get_tsf,
+ .set_tsf = mt7615_set_tsf,
+ .get_survey = mt76_get_survey,
+ .get_antenna = mt76_get_antenna,
+ .set_antenna = mt7615_set_antenna,
+ .set_coverage_class = mt7615_set_coverage_class,
+ .hw_scan = mt7615_hw_scan,
+ .cancel_hw_scan = mt7615_cancel_hw_scan,
+ .sched_scan_start = mt7615_start_sched_scan,
+ .sched_scan_stop = mt7615_stop_sched_scan,
+ .remain_on_channel = mt7615_remain_on_channel,
+ .cancel_remain_on_channel = mt7615_cancel_remain_on_channel,
+ CFG80211_TESTMODE_CMD(mt76_testmode_cmd)
+ CFG80211_TESTMODE_DUMP(mt76_testmode_dump)
+#ifdef CONFIG_PM
+ .suspend = mt7615_suspend,
+ .resume = mt7615_resume,
+ .set_wakeup = mt7615_set_wakeup,
+ .set_rekey_data = mt7615_set_rekey_data,
+#endif /* CONFIG_PM */
+};
+EXPORT_SYMBOL_GPL(mt7615_ops);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
new file mode 100644
index 000000000..62a971660
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -0,0 +1,3997 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Roy Luo <royluo@google.com>
+ * Ryder Lee <ryder.lee@mediatek.com>
+ */
+
+#include <linux/firmware.h>
+#include "mt7615.h"
+#include "mcu.h"
+#include "mac.h"
+#include "eeprom.h"
+
+static bool prefer_offload_fw = true;
+module_param(prefer_offload_fw, bool, 0644);
+MODULE_PARM_DESC(prefer_offload_fw,
+ "Prefer client mode offload firmware (MT7663)");
+
+struct mt7615_patch_hdr {
+ char build_date[16];
+ char platform[4];
+ __be32 hw_sw_ver;
+ __be32 patch_ver;
+ __be16 checksum;
+} __packed;
+
+struct mt7615_fw_trailer {
+ __le32 addr;
+ u8 chip_id;
+ u8 feature_set;
+ u8 eco_code;
+ char fw_ver[10];
+ char build_date[15];
+ __le32 len;
+} __packed;
+
+#define FW_V3_COMMON_TAILER_SIZE 36
+#define FW_V3_REGION_TAILER_SIZE 40
+#define FW_START_OVERRIDE BIT(0)
+#define FW_START_DLYCAL BIT(1)
+#define FW_START_WORKING_PDA_CR4 BIT(2)
+
+struct mt7663_fw_trailer {
+ u8 chip_id;
+ u8 eco_code;
+ u8 n_region;
+ u8 format_ver;
+ u8 format_flag;
+ u8 reserv[2];
+ char fw_ver[10];
+ char build_date[15];
+ __le32 crc;
+} __packed;
+
+struct mt7663_fw_buf {
+ __le32 crc;
+ __le32 d_img_size;
+ __le32 block_size;
+ u8 rsv[4];
+ __le32 img_dest_addr;
+ __le32 img_size;
+ u8 feature_set;
+};
+
+#define MT7615_PATCH_ADDRESS 0x80000
+#define MT7622_PATCH_ADDRESS 0x9c000
+#define MT7663_PATCH_ADDRESS 0xdc000
+
+#define N9_REGION_NUM 2
+#define CR4_REGION_NUM 1
+
+#define IMG_CRC_LEN 4
+
+#define FW_FEATURE_SET_ENCRYPT BIT(0)
+#define FW_FEATURE_SET_KEY_IDX GENMASK(2, 1)
+
+#define DL_MODE_ENCRYPT BIT(0)
+#define DL_MODE_KEY_IDX GENMASK(2, 1)
+#define DL_MODE_RESET_SEC_IV BIT(3)
+#define DL_MODE_WORKING_PDA_CR4 BIT(4)
+#define DL_MODE_VALID_RAM_ENTRY BIT(5)
+#define DL_MODE_NEED_RSP BIT(31)
+
+#define FW_START_OVERRIDE BIT(0)
+#define FW_START_WORKING_PDA_CR4 BIT(2)
+
+void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb,
+ int cmd, int *wait_seq)
+{
+ int txd_len, mcu_cmd = cmd & MCU_CMD_MASK;
+ struct mt7615_uni_txd *uni_txd;
+ struct mt7615_mcu_txd *mcu_txd;
+ u8 seq, q_idx, pkt_fmt;
+ __le32 *txd;
+ u32 val;
+
+ seq = ++dev->mt76.mcu.msg_seq & 0xf;
+ if (!seq)
+ seq = ++dev->mt76.mcu.msg_seq & 0xf;
+ if (wait_seq)
+ *wait_seq = seq;
+
+ txd_len = cmd & MCU_UNI_PREFIX ? sizeof(*uni_txd) : sizeof(*mcu_txd);
+ txd = (__le32 *)skb_push(skb, txd_len);
+
+ if (cmd != MCU_CMD_FW_SCATTER) {
+ q_idx = MT_TX_MCU_PORT_RX_Q0;
+ pkt_fmt = MT_TX_TYPE_CMD;
+ } else {
+ q_idx = MT_TX_MCU_PORT_RX_FWDL;
+ pkt_fmt = MT_TX_TYPE_FW;
+ }
+
+ val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len) |
+ FIELD_PREP(MT_TXD0_P_IDX, MT_TX_PORT_IDX_MCU) |
+ FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
+ txd[0] = cpu_to_le32(val);
+
+ val = MT_TXD1_LONG_FORMAT |
+ FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_CMD) |
+ FIELD_PREP(MT_TXD1_PKT_FMT, pkt_fmt);
+ txd[1] = cpu_to_le32(val);
+
+ if (cmd & MCU_UNI_PREFIX) {
+ uni_txd = (struct mt7615_uni_txd *)txd;
+ uni_txd->len = cpu_to_le16(skb->len - sizeof(uni_txd->txd));
+ uni_txd->option = MCU_CMD_UNI_EXT_ACK;
+ uni_txd->cid = cpu_to_le16(mcu_cmd);
+ uni_txd->s2d_index = MCU_S2D_H2N;
+ uni_txd->pkt_type = MCU_PKT_ID;
+ uni_txd->seq = seq;
+
+ return;
+ }
+
+ mcu_txd = (struct mt7615_mcu_txd *)txd;
+ mcu_txd->len = cpu_to_le16(skb->len - sizeof(mcu_txd->txd));
+ mcu_txd->pq_id = cpu_to_le16(MCU_PQ_ID(MT_TX_PORT_IDX_MCU, q_idx));
+ mcu_txd->s2d_index = MCU_S2D_H2N;
+ mcu_txd->pkt_type = MCU_PKT_ID;
+ mcu_txd->seq = seq;
+
+ switch (cmd & ~MCU_CMD_MASK) {
+ case MCU_FW_PREFIX:
+ mcu_txd->set_query = MCU_Q_NA;
+ mcu_txd->cid = mcu_cmd;
+ break;
+ case MCU_CE_PREFIX:
+ if (cmd & MCU_QUERY_MASK)
+ mcu_txd->set_query = MCU_Q_QUERY;
+ else
+ mcu_txd->set_query = MCU_Q_SET;
+ mcu_txd->cid = mcu_cmd;
+ break;
+ default:
+ mcu_txd->cid = MCU_CMD_EXT_CID;
+ if (cmd & MCU_QUERY_PREFIX)
+ mcu_txd->set_query = MCU_Q_QUERY;
+ else
+ mcu_txd->set_query = MCU_Q_SET;
+ mcu_txd->ext_cid = mcu_cmd;
+ mcu_txd->ext_cid_ack = 1;
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(mt7615_mcu_fill_msg);
+
+static int __mt7615_mcu_msg_send(struct mt7615_dev *dev, struct sk_buff *skb,
+ int cmd, int *wait_seq)
+{
+ enum mt76_txq_id qid;
+
+ mt7615_mcu_fill_msg(dev, skb, cmd, wait_seq);
+ if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state))
+ qid = MT_TXQ_MCU;
+ else
+ qid = MT_TXQ_FWDL;
+
+ return mt76_tx_queue_skb_raw(dev, qid, skb, 0);
+}
+
+static int
+mt7615_mcu_parse_response(struct mt7615_dev *dev, int cmd,
+ struct sk_buff *skb, int seq)
+{
+ struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data;
+ int ret = 0;
+
+ if (seq != rxd->seq) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ switch (cmd) {
+ case MCU_CMD_PATCH_SEM_CONTROL:
+ skb_pull(skb, sizeof(*rxd) - 4);
+ ret = *skb->data;
+ break;
+ case MCU_EXT_CMD_GET_TEMP:
+ skb_pull(skb, sizeof(*rxd));
+ ret = le32_to_cpu(*(__le32 *)skb->data);
+ break;
+ case MCU_EXT_CMD_RF_REG_ACCESS | MCU_QUERY_PREFIX:
+ skb_pull(skb, sizeof(*rxd));
+ ret = le32_to_cpu(*(__le32 *)&skb->data[8]);
+ break;
+ case MCU_UNI_CMD_DEV_INFO_UPDATE:
+ case MCU_UNI_CMD_BSS_INFO_UPDATE:
+ case MCU_UNI_CMD_STA_REC_UPDATE:
+ case MCU_UNI_CMD_HIF_CTRL:
+ case MCU_UNI_CMD_OFFLOAD:
+ case MCU_UNI_CMD_SUSPEND: {
+ struct mt7615_mcu_uni_event *event;
+
+ skb_pull(skb, sizeof(*rxd));
+ event = (struct mt7615_mcu_uni_event *)skb->data;
+ ret = le32_to_cpu(event->status);
+ break;
+ }
+ case MCU_CMD_REG_READ: {
+ struct mt7615_mcu_reg_event *event;
+
+ skb_pull(skb, sizeof(*rxd));
+ event = (struct mt7615_mcu_reg_event *)skb->data;
+ ret = (int)le32_to_cpu(event->val);
+ break;
+ }
+ default:
+ break;
+ }
+out:
+ dev_kfree_skb(skb);
+
+ return ret;
+}
+
+int mt7615_mcu_wait_response(struct mt7615_dev *dev, int cmd, int seq)
+{
+ unsigned long expires = jiffies + 20 * HZ;
+ struct sk_buff *skb;
+ int ret = 0;
+
+ while (true) {
+ skb = mt76_mcu_get_response(&dev->mt76, expires);
+ if (!skb) {
+ dev_err(dev->mt76.dev, "Message %ld (seq %d) timeout\n",
+ cmd & MCU_CMD_MASK, seq);
+ return -ETIMEDOUT;
+ }
+
+ ret = mt7615_mcu_parse_response(dev, cmd, skb, seq);
+ if (ret != -EAGAIN)
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt7615_mcu_wait_response);
+
+static int
+mt7615_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
+ int cmd, bool wait_resp)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ int ret, seq;
+
+ mutex_lock(&mdev->mcu.mutex);
+
+ ret = __mt7615_mcu_msg_send(dev, skb, cmd, &seq);
+ if (ret)
+ goto out;
+
+ if (wait_resp)
+ ret = mt7615_mcu_wait_response(dev, cmd, seq);
+
+out:
+ mutex_unlock(&mdev->mcu.mutex);
+
+ return ret;
+}
+
+int mt7615_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
+ int len, bool wait_resp)
+{
+ struct sk_buff *skb;
+
+ skb = mt76_mcu_msg_alloc(mdev, data, len);
+ if (!skb)
+ return -ENOMEM;
+
+ return __mt76_mcu_skb_send_msg(mdev, skb, cmd, wait_resp);
+}
+EXPORT_SYMBOL_GPL(mt7615_mcu_msg_send);
+
+u32 mt7615_rf_rr(struct mt7615_dev *dev, u32 wf, u32 reg)
+{
+ struct {
+ __le32 wifi_stream;
+ __le32 address;
+ __le32 data;
+ } req = {
+ .wifi_stream = cpu_to_le32(wf),
+ .address = cpu_to_le32(reg),
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76,
+ MCU_EXT_CMD_RF_REG_ACCESS | MCU_QUERY_PREFIX,
+ &req, sizeof(req), true);
+}
+
+int mt7615_rf_wr(struct mt7615_dev *dev, u32 wf, u32 reg, u32 val)
+{
+ struct {
+ __le32 wifi_stream;
+ __le32 address;
+ __le32 data;
+ } req = {
+ .wifi_stream = cpu_to_le32(wf),
+ .address = cpu_to_le32(reg),
+ .data = cpu_to_le32(val),
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_RF_REG_ACCESS, &req,
+ sizeof(req), false);
+}
+
+static void mt7622_trigger_hif_int(struct mt7615_dev *dev, bool en)
+{
+ if (!is_mt7622(&dev->mt76))
+ return;
+
+ regmap_update_bits(dev->infracfg, MT_INFRACFG_MISC,
+ MT_INFRACFG_MISC_AP2CONN_WAKE,
+ !en * MT_INFRACFG_MISC_AP2CONN_WAKE);
+}
+
+static int mt7615_mcu_drv_pmctrl(struct mt7615_dev *dev)
+{
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt76_dev *mdev = &dev->mt76;
+ u32 addr;
+ int err;
+
+ if (is_mt7663(mdev)) {
+ /* Clear firmware own via N9 eint */
+ mt76_wr(dev, MT_PCIE_DOORBELL_PUSH, MT_CFG_LPCR_HOST_DRV_OWN);
+ mt76_poll(dev, MT_CONN_ON_MISC, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000);
+
+ addr = MT_CONN_HIF_ON_LPCTL;
+ } else {
+ addr = MT_CFG_LPCR_HOST;
+ }
+
+ mt76_wr(dev, addr, MT_CFG_LPCR_HOST_DRV_OWN);
+
+ mt7622_trigger_hif_int(dev, true);
+
+ err = !mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000);
+
+ mt7622_trigger_hif_int(dev, false);
+
+ if (err) {
+ dev_err(mdev->dev, "driver own failed\n");
+ return -ETIMEDOUT;
+ }
+
+ clear_bit(MT76_STATE_PM, &mphy->state);
+
+ return 0;
+}
+
+static int mt7615_mcu_lp_drv_pmctrl(struct mt7615_dev *dev)
+{
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ int i;
+
+ if (!test_and_clear_bit(MT76_STATE_PM, &mphy->state))
+ goto out;
+
+ for (i = 0; i < MT7615_DRV_OWN_RETRY_COUNT; i++) {
+ mt76_wr(dev, MT_PCIE_DOORBELL_PUSH, MT_CFG_LPCR_HOST_DRV_OWN);
+ if (mt76_poll_msec(dev, MT_CONN_HIF_ON_LPCTL,
+ MT_CFG_LPCR_HOST_FW_OWN, 0, 50))
+ break;
+ }
+
+ if (i == MT7615_DRV_OWN_RETRY_COUNT) {
+ dev_err(dev->mt76.dev, "driver own failed\n");
+ set_bit(MT76_STATE_PM, &mphy->state);
+ return -EIO;
+ }
+
+out:
+ dev->pm.last_activity = jiffies;
+
+ return 0;
+}
+
+static int mt7615_mcu_fw_pmctrl(struct mt7615_dev *dev)
+{
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ int err = 0;
+ u32 addr;
+
+ if (test_and_set_bit(MT76_STATE_PM, &mphy->state))
+ return 0;
+
+ mt7622_trigger_hif_int(dev, true);
+
+ addr = is_mt7663(&dev->mt76) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
+ mt76_wr(dev, addr, MT_CFG_LPCR_HOST_FW_OWN);
+
+ if (is_mt7622(&dev->mt76) &&
+ !mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN,
+ MT_CFG_LPCR_HOST_FW_OWN, 3000)) {
+ dev_err(dev->mt76.dev, "Timeout for firmware own\n");
+ clear_bit(MT76_STATE_PM, &mphy->state);
+ err = -EIO;
+ }
+
+ mt7622_trigger_hif_int(dev, false);
+
+ return err;
+}
+
+static void
+mt7615_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ if (vif->csa_active)
+ ieee80211_csa_finish(vif);
+}
+
+static void
+mt7615_mcu_rx_radar_detected(struct mt7615_dev *dev, struct sk_buff *skb)
+{
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt7615_mcu_rdd_report *r;
+
+ r = (struct mt7615_mcu_rdd_report *)skb->data;
+
+ if (r->idx && dev->mt76.phy2)
+ mphy = dev->mt76.phy2;
+
+ ieee80211_radar_detected(mphy->hw);
+ dev->hw_pattern++;
+}
+
+static void
+mt7615_mcu_rx_log_message(struct mt7615_dev *dev, struct sk_buff *skb)
+{
+ struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data;
+ const char *data = (char *)&rxd[1];
+ const char *type;
+
+ switch (rxd->s2d_index) {
+ case 0:
+ type = "N9";
+ break;
+ case 2:
+ type = "CR4";
+ break;
+ default:
+ type = "unknown";
+ break;
+ }
+
+ wiphy_info(mt76_hw(dev)->wiphy, "%s: %s", type, data);
+}
+
+static void
+mt7615_mcu_rx_ext_event(struct mt7615_dev *dev, struct sk_buff *skb)
+{
+ struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data;
+
+ switch (rxd->ext_eid) {
+ case MCU_EXT_EVENT_RDD_REPORT:
+ mt7615_mcu_rx_radar_detected(dev, skb);
+ break;
+ case MCU_EXT_EVENT_CSA_NOTIFY:
+ ieee80211_iterate_active_interfaces_atomic(dev->mt76.hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7615_mcu_csa_finish, dev);
+ break;
+ case MCU_EXT_EVENT_FW_LOG_2_HOST:
+ mt7615_mcu_rx_log_message(dev, skb);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+mt7615_mcu_scan_event(struct mt7615_dev *dev, struct sk_buff *skb)
+{
+ u8 *seq_num = skb->data + sizeof(struct mt7615_mcu_rxd);
+ struct mt7615_phy *phy;
+ struct mt76_phy *mphy;
+
+ if (*seq_num & BIT(7) && dev->mt76.phy2)
+ mphy = dev->mt76.phy2;
+ else
+ mphy = &dev->mt76.phy;
+
+ phy = (struct mt7615_phy *)mphy->priv;
+
+ spin_lock_bh(&dev->mt76.lock);
+ __skb_queue_tail(&phy->scan_event_list, skb);
+ spin_unlock_bh(&dev->mt76.lock);
+
+ ieee80211_queue_delayed_work(mphy->hw, &phy->scan_work,
+ MT7615_HW_SCAN_TIMEOUT);
+}
+
+static void
+mt7615_mcu_roc_event(struct mt7615_dev *dev, struct sk_buff *skb)
+{
+ struct mt7615_roc_tlv *event;
+ struct mt7615_phy *phy;
+ struct mt76_phy *mphy;
+ int duration;
+
+ skb_pull(skb, sizeof(struct mt7615_mcu_rxd));
+ event = (struct mt7615_roc_tlv *)skb->data;
+
+ if (event->dbdc_band && dev->mt76.phy2)
+ mphy = dev->mt76.phy2;
+ else
+ mphy = &dev->mt76.phy;
+
+ ieee80211_ready_on_channel(mphy->hw);
+
+ phy = (struct mt7615_phy *)mphy->priv;
+ phy->roc_grant = true;
+ wake_up(&phy->roc_wait);
+
+ duration = le32_to_cpu(event->max_interval);
+ mod_timer(&phy->roc_timer,
+ round_jiffies_up(jiffies + msecs_to_jiffies(duration)));
+}
+
+static void
+mt7615_mcu_beacon_loss_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt7615_beacon_loss_event *event = priv;
+
+ if (mvif->idx != event->bss_idx)
+ return;
+
+ if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
+ return;
+
+ ieee80211_beacon_loss(vif);
+}
+
+static void
+mt7615_mcu_beacon_loss_event(struct mt7615_dev *dev, struct sk_buff *skb)
+{
+ struct mt7615_beacon_loss_event *event;
+ struct mt76_phy *mphy;
+ u8 band_idx = 0; /* DBDC support */
+
+ skb_pull(skb, sizeof(struct mt7615_mcu_rxd));
+ event = (struct mt7615_beacon_loss_event *)skb->data;
+ if (band_idx && dev->mt76.phy2)
+ mphy = dev->mt76.phy2;
+ else
+ mphy = &dev->mt76.phy;
+
+ ieee80211_iterate_active_interfaces_atomic(mphy->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7615_mcu_beacon_loss_iter, event);
+}
+
+static void
+mt7615_mcu_bss_event(struct mt7615_dev *dev, struct sk_buff *skb)
+{
+ struct mt7615_mcu_bss_event *event;
+ struct mt76_phy *mphy;
+ u8 band_idx = 0; /* DBDC support */
+
+ event = (struct mt7615_mcu_bss_event *)(skb->data +
+ sizeof(struct mt7615_mcu_rxd));
+
+ if (band_idx && dev->mt76.phy2)
+ mphy = dev->mt76.phy2;
+ else
+ mphy = &dev->mt76.phy;
+
+ if (event->is_absent)
+ ieee80211_stop_queues(mphy->hw);
+ else
+ ieee80211_wake_queues(mphy->hw);
+}
+
+static void
+mt7615_mcu_rx_unsolicited_event(struct mt7615_dev *dev, struct sk_buff *skb)
+{
+ struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data;
+
+ switch (rxd->eid) {
+ case MCU_EVENT_EXT:
+ mt7615_mcu_rx_ext_event(dev, skb);
+ break;
+ case MCU_EVENT_BSS_BEACON_LOSS:
+ mt7615_mcu_beacon_loss_event(dev, skb);
+ break;
+ case MCU_EVENT_ROC:
+ mt7615_mcu_roc_event(dev, skb);
+ break;
+ case MCU_EVENT_SCHED_SCAN_DONE:
+ case MCU_EVENT_SCAN_DONE:
+ mt7615_mcu_scan_event(dev, skb);
+ return;
+ case MCU_EVENT_BSS_ABSENCE:
+ mt7615_mcu_bss_event(dev, skb);
+ break;
+ default:
+ break;
+ }
+ dev_kfree_skb(skb);
+}
+
+void mt7615_mcu_rx_event(struct mt7615_dev *dev, struct sk_buff *skb)
+{
+ struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data;
+
+ if (rxd->ext_eid == MCU_EXT_EVENT_THERMAL_PROTECT ||
+ rxd->ext_eid == MCU_EXT_EVENT_FW_LOG_2_HOST ||
+ rxd->ext_eid == MCU_EXT_EVENT_ASSERT_DUMP ||
+ rxd->ext_eid == MCU_EXT_EVENT_PS_SYNC ||
+ rxd->eid == MCU_EVENT_BSS_BEACON_LOSS ||
+ rxd->eid == MCU_EVENT_SCHED_SCAN_DONE ||
+ rxd->eid == MCU_EVENT_BSS_ABSENCE ||
+ rxd->eid == MCU_EVENT_SCAN_DONE ||
+ rxd->eid == MCU_EVENT_ROC ||
+ !rxd->seq)
+ mt7615_mcu_rx_unsolicited_event(dev, skb);
+ else
+ mt76_mcu_rx_event(&dev->mt76, skb);
+}
+
+static int mt7615_mcu_init_download(struct mt7615_dev *dev, u32 addr,
+ u32 len, u32 mode)
+{
+ struct {
+ __le32 addr;
+ __le32 len;
+ __le32 mode;
+ } req = {
+ .addr = cpu_to_le32(addr),
+ .len = cpu_to_le32(len),
+ .mode = cpu_to_le32(mode),
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_TARGET_ADDRESS_LEN_REQ,
+ &req, sizeof(req), true);
+}
+
+static int
+mt7615_mcu_add_dev(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ bool enable)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct {
+ struct req_hdr {
+ u8 omac_idx;
+ u8 band_idx;
+ __le16 tlv_num;
+ u8 is_tlv_append;
+ u8 rsv[3];
+ } __packed hdr;
+ struct req_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 active;
+ u8 band_idx;
+ u8 omac_addr[ETH_ALEN];
+ } __packed tlv;
+ } data = {
+ .hdr = {
+ .omac_idx = mvif->omac_idx,
+ .band_idx = mvif->band_idx,
+ .tlv_num = cpu_to_le16(1),
+ .is_tlv_append = 1,
+ },
+ .tlv = {
+ .tag = cpu_to_le16(DEV_INFO_ACTIVE),
+ .len = cpu_to_le16(sizeof(struct req_tlv)),
+ .active = enable,
+ .band_idx = mvif->band_idx,
+ },
+ };
+
+ memcpy(data.tlv.omac_addr, vif->addr, ETH_ALEN);
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_DEV_INFO_UPDATE,
+ &data, sizeof(data), true);
+}
+
+static int
+mt7615_mcu_add_beacon_offload(struct mt7615_dev *dev,
+ struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, bool enable)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+ struct ieee80211_mutable_offsets offs;
+ struct ieee80211_tx_info *info;
+ struct req {
+ u8 omac_idx;
+ u8 enable;
+ u8 wlan_idx;
+ u8 band_idx;
+ u8 pkt_type;
+ u8 need_pre_tbtt_int;
+ __le16 csa_ie_pos;
+ __le16 pkt_len;
+ __le16 tim_ie_pos;
+ u8 pkt[512];
+ u8 csa_cnt;
+ /* bss color change */
+ u8 bcc_cnt;
+ __le16 bcc_ie_pos;
+ } __packed req = {
+ .omac_idx = mvif->omac_idx,
+ .enable = enable,
+ .wlan_idx = wcid->idx,
+ .band_idx = mvif->band_idx,
+ };
+ struct sk_buff *skb;
+
+ skb = ieee80211_beacon_get_template(hw, vif, &offs);
+ if (!skb)
+ return -EINVAL;
+
+ if (skb->len > 512 - MT_TXD_SIZE) {
+ dev_err(dev->mt76.dev, "Bcn size limit exceed\n");
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ if (mvif->band_idx) {
+ info = IEEE80211_SKB_CB(skb);
+ info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
+ }
+
+ mt7615_mac_write_txwi(dev, (__le32 *)(req.pkt), skb, wcid, NULL,
+ 0, NULL, true);
+ memcpy(req.pkt + MT_TXD_SIZE, skb->data, skb->len);
+ req.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
+ req.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + offs.tim_offset);
+ if (offs.cntdwn_counter_offs[0]) {
+ u16 csa_offs;
+
+ csa_offs = MT_TXD_SIZE + offs.cntdwn_counter_offs[0] - 4;
+ req.csa_ie_pos = cpu_to_le16(csa_offs);
+ req.csa_cnt = skb->data[offs.cntdwn_counter_offs[0]];
+ }
+ dev_kfree_skb(skb);
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_BCN_OFFLOAD,
+ &req, sizeof(req), true);
+}
+
+static int
+mt7615_mcu_ctrl_pm_state(struct mt7615_dev *dev, int band, int state)
+{
+#define ENTER_PM_STATE 1
+#define EXIT_PM_STATE 2
+ struct {
+ u8 pm_number;
+ u8 pm_state;
+ u8 bssid[ETH_ALEN];
+ u8 dtim_period;
+ u8 wlan_idx;
+ __le16 bcn_interval;
+ __le32 aid;
+ __le32 rx_filter;
+ u8 band_idx;
+ u8 rsv[3];
+ __le32 feature;
+ u8 omac_idx;
+ u8 wmm_idx;
+ u8 bcn_loss_cnt;
+ u8 bcn_sp_duration;
+ } __packed req = {
+ .pm_number = 5,
+ .pm_state = state ? ENTER_PM_STATE : EXIT_PM_STATE,
+ .band_idx = band,
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_PM_STATE_CTRL,
+ &req, sizeof(req), true);
+}
+
+static struct sk_buff *
+mt7615_mcu_alloc_sta_req(struct mt7615_dev *dev, struct mt7615_vif *mvif,
+ struct mt7615_sta *msta)
+{
+ struct sta_req_hdr hdr = {
+ .bss_idx = mvif->idx,
+ .wlan_idx = msta ? msta->wcid.idx : 0,
+ .muar_idx = msta ? mvif->omac_idx : 0,
+ .is_tlv_append = 1,
+ };
+ struct sk_buff *skb;
+
+ skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, MT7615_STA_UPDATE_MAX_SIZE);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ skb_put_data(skb, &hdr, sizeof(hdr));
+
+ return skb;
+}
+
+static struct wtbl_req_hdr *
+mt7615_mcu_alloc_wtbl_req(struct mt7615_dev *dev, struct mt7615_sta *msta,
+ int cmd, void *sta_wtbl, struct sk_buff **skb)
+{
+ struct tlv *sta_hdr = sta_wtbl;
+ struct wtbl_req_hdr hdr = {
+ .wlan_idx = msta->wcid.idx,
+ .operation = cmd,
+ };
+ struct sk_buff *nskb = *skb;
+
+ if (!nskb) {
+ nskb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
+ MT7615_WTBL_UPDATE_BA_SIZE);
+ if (!nskb)
+ return ERR_PTR(-ENOMEM);
+
+ *skb = nskb;
+ }
+
+ if (sta_hdr)
+ sta_hdr->len = cpu_to_le16(sizeof(hdr));
+
+ return skb_put_data(nskb, &hdr, sizeof(hdr));
+}
+
+static struct tlv *
+mt7615_mcu_add_nested_tlv(struct sk_buff *skb, int tag, int len,
+ void *sta_ntlv, void *sta_wtbl)
+{
+ struct sta_ntlv_hdr *ntlv_hdr = sta_ntlv;
+ struct tlv *sta_hdr = sta_wtbl;
+ struct tlv *ptlv, tlv = {
+ .tag = cpu_to_le16(tag),
+ .len = cpu_to_le16(len),
+ };
+ u16 ntlv;
+
+ ptlv = skb_put(skb, len);
+ memcpy(ptlv, &tlv, sizeof(tlv));
+
+ ntlv = le16_to_cpu(ntlv_hdr->tlv_num);
+ ntlv_hdr->tlv_num = cpu_to_le16(ntlv + 1);
+
+ if (sta_hdr) {
+ u16 size = le16_to_cpu(sta_hdr->len);
+
+ sta_hdr->len = cpu_to_le16(size + len);
+ }
+
+ return ptlv;
+}
+
+static struct tlv *
+mt7615_mcu_add_tlv(struct sk_buff *skb, int tag, int len)
+{
+ return mt7615_mcu_add_nested_tlv(skb, tag, len, skb->data, NULL);
+}
+
+static int
+mt7615_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ u32 type = vif->p2p ? NETWORK_P2P : NETWORK_INFRA;
+ struct bss_info_basic *bss;
+ u8 wlan_idx = mvif->sta.wcid.idx;
+ struct tlv *tlv;
+
+ tlv = mt7615_mcu_add_tlv(skb, BSS_INFO_BASIC, sizeof(*bss));
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_AP:
+ break;
+ case NL80211_IFTYPE_STATION:
+ /* TODO: enable BSS_INFO_UAPSD & BSS_INFO_PM */
+ if (enable && sta) {
+ struct mt7615_sta *msta;
+
+ msta = (struct mt7615_sta *)sta->drv_priv;
+ wlan_idx = msta->wcid.idx;
+ }
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ type = NETWORK_IBSS;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ bss = (struct bss_info_basic *)tlv;
+ memcpy(bss->bssid, vif->bss_conf.bssid, ETH_ALEN);
+ bss->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
+ bss->network_type = cpu_to_le32(type);
+ bss->dtim_period = vif->bss_conf.dtim_period;
+ bss->bmc_tx_wlan_idx = wlan_idx;
+ bss->wmm_idx = mvif->wmm_idx;
+ bss->active = enable;
+
+ return 0;
+}
+
+static void
+mt7615_mcu_bss_omac_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct bss_info_omac *omac;
+ struct tlv *tlv;
+ u32 type = 0;
+ u8 idx;
+
+ tlv = mt7615_mcu_add_tlv(skb, BSS_INFO_OMAC, sizeof(*omac));
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_AP:
+ if (vif->p2p)
+ type = CONNECTION_P2P_GO;
+ else
+ type = CONNECTION_INFRA_AP;
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (vif->p2p)
+ type = CONNECTION_P2P_GC;
+ else
+ type = CONNECTION_INFRA_STA;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ type = CONNECTION_IBSS_ADHOC;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ omac = (struct bss_info_omac *)tlv;
+ idx = mvif->omac_idx > EXT_BSSID_START ? HW_BSSID_0 : mvif->omac_idx;
+ omac->conn_type = cpu_to_le32(type);
+ omac->omac_idx = mvif->omac_idx;
+ omac->band_idx = mvif->band_idx;
+ omac->hw_bss_idx = idx;
+}
+
+/* SIFS 20us + 512 byte beacon transmitted by 1Mbps (3906us) */
+#define BCN_TX_ESTIMATE_TIME (4096 + 20)
+static void
+mt7615_mcu_bss_ext_tlv(struct sk_buff *skb, struct mt7615_vif *mvif)
+{
+ struct bss_info_ext_bss *ext;
+ int ext_bss_idx, tsf_offset;
+ struct tlv *tlv;
+
+ ext_bss_idx = mvif->omac_idx - EXT_BSSID_START;
+ if (ext_bss_idx < 0)
+ return;
+
+ tlv = mt7615_mcu_add_tlv(skb, BSS_INFO_EXT_BSS, sizeof(*ext));
+
+ ext = (struct bss_info_ext_bss *)tlv;
+ tsf_offset = ext_bss_idx * BCN_TX_ESTIMATE_TIME;
+ ext->mbss_tsf_offset = cpu_to_le32(tsf_offset);
+}
+
+static void
+mt7615_mcu_sta_ba_tlv(struct sk_buff *skb,
+ struct ieee80211_ampdu_params *params,
+ bool enable, bool tx)
+{
+ struct sta_rec_ba *ba;
+ struct tlv *tlv;
+
+ tlv = mt7615_mcu_add_tlv(skb, STA_REC_BA, sizeof(*ba));
+
+ ba = (struct sta_rec_ba *)tlv;
+ ba->ba_type = tx ? MT_BA_TYPE_ORIGINATOR : MT_BA_TYPE_RECIPIENT;
+ ba->winsize = cpu_to_le16(params->buf_size);
+ ba->ssn = cpu_to_le16(params->ssn);
+ ba->ba_en = enable << params->tid;
+ ba->amsdu = params->amsdu;
+ ba->tid = params->tid;
+}
+
+static void
+mt7615_mcu_sta_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable)
+{
+ struct sta_rec_basic *basic;
+ struct tlv *tlv;
+ int conn_type;
+
+ tlv = mt7615_mcu_add_tlv(skb, STA_REC_BASIC, sizeof(*basic));
+
+ basic = (struct sta_rec_basic *)tlv;
+ basic->extra_info = cpu_to_le16(EXTRA_INFO_VER);
+
+ if (enable) {
+ basic->extra_info |= cpu_to_le16(EXTRA_INFO_NEW);
+ basic->conn_state = CONN_STATE_PORT_SECURE;
+ } else {
+ basic->conn_state = CONN_STATE_DISCONNECT;
+ }
+
+ if (!sta) {
+ basic->conn_type = cpu_to_le32(CONNECTION_INFRA_BC);
+ eth_broadcast_addr(basic->peer_addr);
+ return;
+ }
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_AP:
+ if (vif->p2p)
+ conn_type = CONNECTION_P2P_GC;
+ else
+ conn_type = CONNECTION_INFRA_STA;
+ basic->conn_type = cpu_to_le32(conn_type);
+ basic->aid = cpu_to_le16(sta->aid);
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (vif->p2p)
+ conn_type = CONNECTION_P2P_GO;
+ else
+ conn_type = CONNECTION_INFRA_AP;
+ basic->conn_type = cpu_to_le32(conn_type);
+ basic->aid = cpu_to_le16(vif->bss_conf.aid);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ basic->conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC);
+ basic->aid = cpu_to_le16(sta->aid);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ memcpy(basic->peer_addr, sta->addr, ETH_ALEN);
+ basic->qos = sta->wme;
+}
+
+static void
+mt7615_mcu_sta_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+{
+ struct tlv *tlv;
+
+ if (sta->ht_cap.ht_supported) {
+ struct sta_rec_ht *ht;
+
+ tlv = mt7615_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht));
+ ht = (struct sta_rec_ht *)tlv;
+ ht->ht_cap = cpu_to_le16(sta->ht_cap.cap);
+ }
+ if (sta->vht_cap.vht_supported) {
+ struct sta_rec_vht *vht;
+
+ tlv = mt7615_mcu_add_tlv(skb, STA_REC_VHT, sizeof(*vht));
+ vht = (struct sta_rec_vht *)tlv;
+ vht->vht_rx_mcs_map = sta->vht_cap.vht_mcs.rx_mcs_map;
+ vht->vht_tx_mcs_map = sta->vht_cap.vht_mcs.tx_mcs_map;
+ vht->vht_cap = cpu_to_le32(sta->vht_cap.cap);
+ }
+}
+
+static void
+mt7615_mcu_sta_uapsd(struct sk_buff *skb, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct sta_rec_uapsd *uapsd;
+ struct tlv *tlv;
+
+ if (vif->type != NL80211_IFTYPE_AP || !sta->wme)
+ return;
+
+ tlv = mt7615_mcu_add_tlv(skb, STA_REC_APPS, sizeof(*uapsd));
+ uapsd = (struct sta_rec_uapsd *)tlv;
+
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) {
+ uapsd->dac_map |= BIT(3);
+ uapsd->tac_map |= BIT(3);
+ }
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) {
+ uapsd->dac_map |= BIT(2);
+ uapsd->tac_map |= BIT(2);
+ }
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) {
+ uapsd->dac_map |= BIT(1);
+ uapsd->tac_map |= BIT(1);
+ }
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) {
+ uapsd->dac_map |= BIT(0);
+ uapsd->tac_map |= BIT(0);
+ }
+ uapsd->max_sp = sta->max_sp;
+}
+
+static void
+mt7615_mcu_wtbl_ba_tlv(struct sk_buff *skb,
+ struct ieee80211_ampdu_params *params,
+ bool enable, bool tx, void *sta_wtbl,
+ void *wtbl_tlv)
+{
+ struct wtbl_ba *ba;
+ struct tlv *tlv;
+
+ tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_BA, sizeof(*ba),
+ wtbl_tlv, sta_wtbl);
+
+ ba = (struct wtbl_ba *)tlv;
+ ba->tid = params->tid;
+
+ if (tx) {
+ ba->ba_type = MT_BA_TYPE_ORIGINATOR;
+ ba->sn = enable ? cpu_to_le16(params->ssn) : 0;
+ ba->ba_winsize = cpu_to_le16(params->buf_size);
+ ba->ba_en = enable;
+ } else {
+ memcpy(ba->peer_addr, params->sta->addr, ETH_ALEN);
+ ba->ba_type = MT_BA_TYPE_RECIPIENT;
+ ba->rst_ba_tid = params->tid;
+ ba->rst_ba_sel = RST_BA_MAC_TID_MATCH;
+ ba->rst_ba_sb = 1;
+ }
+
+ if (enable && tx) {
+ u8 ba_range[] = { 4, 8, 12, 24, 36, 48, 54, 64 };
+ int i;
+
+ for (i = 7; i > 0; i--) {
+ if (params->buf_size >= ba_range[i])
+ break;
+ }
+ ba->ba_winsize_idx = i;
+ }
+}
+
+static void
+mt7615_mcu_wtbl_generic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, void *sta_wtbl,
+ void *wtbl_tlv)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct wtbl_generic *generic;
+ struct wtbl_rx *rx;
+ struct wtbl_spe *spe;
+ struct tlv *tlv;
+
+ tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_GENERIC, sizeof(*generic),
+ wtbl_tlv, sta_wtbl);
+
+ generic = (struct wtbl_generic *)tlv;
+
+ if (sta) {
+ if (vif->type == NL80211_IFTYPE_STATION)
+ generic->partial_aid = cpu_to_le16(vif->bss_conf.aid);
+ else
+ generic->partial_aid = cpu_to_le16(sta->aid);
+ memcpy(generic->peer_addr, sta->addr, ETH_ALEN);
+ generic->muar_idx = mvif->omac_idx;
+ generic->qos = sta->wme;
+ } else {
+ eth_broadcast_addr(generic->peer_addr);
+ generic->muar_idx = 0xe;
+ }
+
+ tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_RX, sizeof(*rx),
+ wtbl_tlv, sta_wtbl);
+
+ rx = (struct wtbl_rx *)tlv;
+ rx->rca1 = sta ? vif->type != NL80211_IFTYPE_AP : 1;
+ rx->rca2 = 1;
+ rx->rv = 1;
+
+ tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_SPE, sizeof(*spe),
+ wtbl_tlv, sta_wtbl);
+ spe = (struct wtbl_spe *)tlv;
+ spe->spe_idx = 24;
+}
+
+static void
+mt7615_mcu_wtbl_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
+ void *sta_wtbl, void *wtbl_tlv)
+{
+ struct tlv *tlv;
+ struct wtbl_ht *ht = NULL;
+ u32 flags = 0;
+
+ if (sta->ht_cap.ht_supported) {
+ tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_HT, sizeof(*ht),
+ wtbl_tlv, sta_wtbl);
+ ht = (struct wtbl_ht *)tlv;
+ ht->ldpc = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING);
+ ht->af = sta->ht_cap.ampdu_factor;
+ ht->mm = sta->ht_cap.ampdu_density;
+ ht->ht = 1;
+
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
+ flags |= MT_WTBL_W5_SHORT_GI_20;
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
+ flags |= MT_WTBL_W5_SHORT_GI_40;
+ }
+
+ if (sta->vht_cap.vht_supported) {
+ struct wtbl_vht *vht;
+ u8 af;
+
+ tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_VHT, sizeof(*vht),
+ wtbl_tlv, sta_wtbl);
+ vht = (struct wtbl_vht *)tlv;
+ vht->ldpc = !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
+ vht->vht = 1;
+
+ af = (sta->vht_cap.cap &
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
+
+ if (ht)
+ ht->af = max(ht->af, af);
+
+ if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
+ flags |= MT_WTBL_W5_SHORT_GI_80;
+ if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160)
+ flags |= MT_WTBL_W5_SHORT_GI_160;
+ }
+
+ /* wtbl smps */
+ if (sta->smps_mode == IEEE80211_SMPS_DYNAMIC) {
+ struct wtbl_smps *smps;
+
+ tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_SMPS, sizeof(*smps),
+ wtbl_tlv, sta_wtbl);
+ smps = (struct wtbl_smps *)tlv;
+ smps->smps = 1;
+ }
+
+ if (sta->ht_cap.ht_supported) {
+ /* sgi */
+ u32 msk = MT_WTBL_W5_SHORT_GI_20 | MT_WTBL_W5_SHORT_GI_40 |
+ MT_WTBL_W5_SHORT_GI_80 | MT_WTBL_W5_SHORT_GI_160;
+ struct wtbl_raw *raw;
+
+ tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_RAW_DATA,
+ sizeof(*raw), wtbl_tlv,
+ sta_wtbl);
+ raw = (struct wtbl_raw *)tlv;
+ raw->val = cpu_to_le32(flags);
+ raw->msk = cpu_to_le32(~msk);
+ raw->wtbl_idx = 1;
+ raw->dw = 5;
+ }
+}
+
+static int
+mt7615_mcu_add_bss(struct mt7615_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt7615_dev *dev = phy->dev;
+ struct sk_buff *skb;
+
+ skb = mt7615_mcu_alloc_sta_req(dev, mvif, NULL);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ if (enable)
+ mt7615_mcu_bss_omac_tlv(skb, vif);
+
+ mt7615_mcu_bss_basic_tlv(skb, vif, sta, enable);
+
+ if (enable && mvif->omac_idx > EXT_BSSID_START)
+ mt7615_mcu_bss_ext_tlv(skb, mvif);
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_BSS_INFO_UPDATE, true);
+}
+
+static int
+mt7615_mcu_wtbl_tx_ba(struct mt7615_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable)
+{
+ struct mt7615_sta *msta = (struct mt7615_sta *)params->sta->drv_priv;
+ struct mt7615_vif *mvif = msta->vif;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct sk_buff *skb = NULL;
+ int err;
+
+ wtbl_hdr = mt7615_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, NULL, &skb);
+ if (IS_ERR(wtbl_hdr))
+ return PTR_ERR(wtbl_hdr);
+
+ mt7615_mcu_wtbl_ba_tlv(skb, params, enable, true, NULL, wtbl_hdr);
+
+ err = __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_WTBL_UPDATE, true);
+ if (err < 0)
+ return err;
+
+ skb = mt7615_mcu_alloc_sta_req(dev, mvif, msta);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ mt7615_mcu_sta_ba_tlv(skb, params, enable, true);
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_STA_REC_UPDATE, true);
+}
+
+static int
+mt7615_mcu_wtbl_rx_ba(struct mt7615_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable)
+{
+ struct mt7615_sta *msta = (struct mt7615_sta *)params->sta->drv_priv;
+ struct mt7615_vif *mvif = msta->vif;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct sk_buff *skb;
+ int err;
+
+ skb = mt7615_mcu_alloc_sta_req(dev, mvif, msta);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ mt7615_mcu_sta_ba_tlv(skb, params, enable, false);
+
+ err = __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_STA_REC_UPDATE, true);
+ if (err < 0 || !enable)
+ return err;
+
+ skb = NULL;
+ wtbl_hdr = mt7615_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, NULL, &skb);
+ if (IS_ERR(wtbl_hdr))
+ return PTR_ERR(wtbl_hdr);
+
+ mt7615_mcu_wtbl_ba_tlv(skb, params, enable, false, NULL, wtbl_hdr);
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_WTBL_UPDATE, true);
+}
+
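+/* v1 (WTBL-based) firmware: station state is pushed with two separate MCU
+ * messages, one STA record and one WTBL update. When enabling, the WTBL
+ * update is sent first and the STA record second; the order is reversed
+ * when the station is removed.
+ */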
+static int
+mt7615_mcu_wtbl_sta_add(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct sk_buff *skb, *sskb, *wskb = NULL;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct mt7615_sta *msta;
+ int cmd, err;
+
+ msta = sta ? (struct mt7615_sta *)sta->drv_priv : &mvif->sta;
+
+ sskb = mt7615_mcu_alloc_sta_req(dev, mvif, msta);
+ if (IS_ERR(sskb))
+ return PTR_ERR(sskb);
+
+ mt7615_mcu_sta_basic_tlv(sskb, vif, sta, enable);
+ if (enable && sta) {
+ mt7615_mcu_sta_ht_tlv(sskb, sta);
+ mt7615_mcu_sta_uapsd(sskb, vif, sta);
+ }
+
+ wtbl_hdr = mt7615_mcu_alloc_wtbl_req(dev, msta, WTBL_RESET_AND_SET,
+ NULL, &wskb);
+ if (IS_ERR(wtbl_hdr))
+ return PTR_ERR(wtbl_hdr);
+
+ if (enable) {
+ mt7615_mcu_wtbl_generic_tlv(wskb, vif, sta, NULL, wtbl_hdr);
+ if (sta)
+ mt7615_mcu_wtbl_ht_tlv(wskb, sta, NULL, wtbl_hdr);
+ }
+
+ cmd = enable ? MCU_EXT_CMD_WTBL_UPDATE : MCU_EXT_CMD_STA_REC_UPDATE;
+ skb = enable ? wskb : sskb;
+
+ err = __mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, true);
+ if (err < 0) {
+ skb = enable ? sskb : wskb;
+ dev_kfree_skb(skb);
+
+ return err;
+ }
+
+ cmd = enable ? MCU_EXT_CMD_STA_REC_UPDATE : MCU_EXT_CMD_WTBL_UPDATE;
+ skb = enable ? sskb : wskb;
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, true);
+}
+
+static const struct mt7615_mcu_ops wtbl_update_ops = {
+ .add_beacon_offload = mt7615_mcu_add_beacon_offload,
+ .set_pm_state = mt7615_mcu_ctrl_pm_state,
+ .add_dev_info = mt7615_mcu_add_dev,
+ .add_bss_info = mt7615_mcu_add_bss,
+ .add_tx_ba = mt7615_mcu_wtbl_tx_ba,
+ .add_rx_ba = mt7615_mcu_wtbl_rx_ba,
+ .sta_add = mt7615_mcu_wtbl_sta_add,
+ .set_drv_ctrl = mt7615_mcu_drv_pmctrl,
+ .set_fw_ctrl = mt7615_mcu_fw_pmctrl,
+};
+
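+/* v2 (STA-record based) firmware: the WTBL BA update is nested inside the
+ * STA record message through a STA_REC_WTBL container TLV, so a single
+ * MCU command updates both.
+ */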
+static int
+mt7615_mcu_sta_ba(struct mt7615_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable, bool tx)
+{
+ struct mt7615_sta *msta = (struct mt7615_sta *)params->sta->drv_priv;
+ struct mt7615_vif *mvif = msta->vif;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct tlv *sta_wtbl;
+ struct sk_buff *skb;
+
+ skb = mt7615_mcu_alloc_sta_req(dev, mvif, msta);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ mt7615_mcu_sta_ba_tlv(skb, params, enable, tx);
+
+ sta_wtbl = mt7615_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
+
+ wtbl_hdr = mt7615_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl,
+ &skb);
+ mt7615_mcu_wtbl_ba_tlv(skb, params, enable, tx, sta_wtbl, wtbl_hdr);
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_STA_REC_UPDATE, true);
+}
+
+static int
+mt7615_mcu_sta_tx_ba(struct mt7615_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable)
+{
+ return mt7615_mcu_sta_ba(dev, params, enable, true);
+}
+
+static int
+mt7615_mcu_sta_rx_ba(struct mt7615_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable)
+{
+ return mt7615_mcu_sta_ba(dev, params, enable, false);
+}
+
+static int
+mt7615_mcu_add_sta_cmd(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable, int cmd)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct mt7615_sta *msta;
+ struct tlv *sta_wtbl;
+ struct sk_buff *skb;
+
+ msta = sta ? (struct mt7615_sta *)sta->drv_priv : &mvif->sta;
+
+ skb = mt7615_mcu_alloc_sta_req(dev, mvif, msta);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ mt7615_mcu_sta_basic_tlv(skb, vif, sta, enable);
+ if (enable && sta) {
+ mt7615_mcu_sta_ht_tlv(skb, sta);
+ mt7615_mcu_sta_uapsd(skb, vif, sta);
+ }
+
+ sta_wtbl = mt7615_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
+
+ wtbl_hdr = mt7615_mcu_alloc_wtbl_req(dev, msta, WTBL_RESET_AND_SET,
+ sta_wtbl, &skb);
+ if (enable) {
+ mt7615_mcu_wtbl_generic_tlv(skb, vif, sta, sta_wtbl, wtbl_hdr);
+ if (sta)
+ mt7615_mcu_wtbl_ht_tlv(skb, sta, sta_wtbl, wtbl_hdr);
+ }
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, true);
+}
+
+static int
+mt7615_mcu_add_sta(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable)
+{
+ return mt7615_mcu_add_sta_cmd(dev, vif, sta, enable,
+ MCU_EXT_CMD_STA_REC_UPDATE);
+}
+
+static const struct mt7615_mcu_ops sta_update_ops = {
+ .add_beacon_offload = mt7615_mcu_add_beacon_offload,
+ .set_pm_state = mt7615_mcu_ctrl_pm_state,
+ .add_dev_info = mt7615_mcu_add_dev,
+ .add_bss_info = mt7615_mcu_add_bss,
+ .add_tx_ba = mt7615_mcu_sta_tx_ba,
+ .add_rx_ba = mt7615_mcu_sta_rx_ba,
+ .sta_add = mt7615_mcu_add_sta,
+ .set_drv_ctrl = mt7615_mcu_drv_pmctrl,
+ .set_fw_ctrl = mt7615_mcu_fw_pmctrl,
+};
+
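+/* Unified-command firmware: bring up (or tear down) the per-vif device
+ * entry and the basic BSS entry with two fixed-layout requests. On enable
+ * the device info is sent before the BSS info; on disable the order is
+ * reversed.
+ */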
+static int
+mt7615_mcu_uni_add_dev(struct mt7615_dev *dev,
+ struct ieee80211_vif *vif, bool enable)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct {
+ struct {
+ u8 omac_idx;
+ u8 band_idx;
+ __le16 pad;
+ } __packed hdr;
+ struct req_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 active;
+ u8 pad;
+ u8 omac_addr[ETH_ALEN];
+ } __packed tlv;
+ } dev_req = {
+ .hdr = {
+ .omac_idx = mvif->omac_idx,
+ .band_idx = mvif->band_idx,
+ },
+ .tlv = {
+ .tag = cpu_to_le16(DEV_INFO_ACTIVE),
+ .len = cpu_to_le16(sizeof(struct req_tlv)),
+ .active = enable,
+ },
+ };
+ struct {
+ struct {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct mt7615_bss_basic_tlv basic;
+ } basic_req = {
+ .hdr = {
+ .bss_idx = mvif->idx,
+ },
+ .basic = {
+ .tag = cpu_to_le16(UNI_BSS_INFO_BASIC),
+ .len = cpu_to_le16(sizeof(struct mt7615_bss_basic_tlv)),
+ .omac_idx = mvif->omac_idx,
+ .band_idx = mvif->band_idx,
+ .wmm_idx = mvif->wmm_idx,
+ .active = enable,
+ .bmc_tx_wlan_idx = cpu_to_le16(mvif->sta.wcid.idx),
+ .sta_idx = cpu_to_le16(mvif->sta.wcid.idx),
+ .conn_state = 1,
+ },
+ };
+ int err, idx, cmd, len;
+ void *data;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_AP:
+ basic_req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_AP);
+ break;
+ case NL80211_IFTYPE_STATION:
+ basic_req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_STA);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ basic_req.basic.conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ idx = mvif->omac_idx > EXT_BSSID_START ? HW_BSSID_0 : mvif->omac_idx;
+ basic_req.basic.hw_bss_idx = idx;
+
+ memcpy(dev_req.tlv.omac_addr, vif->addr, ETH_ALEN);
+
+ cmd = enable ? MCU_UNI_CMD_DEV_INFO_UPDATE : MCU_UNI_CMD_BSS_INFO_UPDATE;
+ data = enable ? (void *)&dev_req : (void *)&basic_req;
+ len = enable ? sizeof(dev_req) : sizeof(basic_req);
+
+ err = __mt76_mcu_send_msg(&dev->mt76, cmd, data, len, true);
+ if (err < 0)
+ return err;
+
+ cmd = enable ? MCU_UNI_CMD_BSS_INFO_UPDATE : MCU_UNI_CMD_DEV_INFO_UPDATE;
+ data = enable ? (void *)&basic_req : (void *)&dev_req;
+ len = enable ? sizeof(basic_req) : sizeof(dev_req);
+
+ return __mt76_mcu_send_msg(&dev->mt76, cmd, data, len, true);
+}
+
+static int
+mt7615_mcu_uni_ctrl_pm_state(struct mt7615_dev *dev, int band, int state)
+{
+ return 0;
+}
+
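+/* Unified-command BSS setup: a first message carries the basic and QoS
+ * TLVs, a second one the RLM TLV describing the operating channel
+ * (control/center channels, bandwidth and secondary channel offset).
+ */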
+static int
+mt7615_mcu_uni_add_bss(struct mt7615_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+ int freq1 = chandef->center_freq1, freq2 = chandef->center_freq2;
+ struct mt7615_dev *dev = phy->dev;
+ struct {
+ struct {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct mt7615_bss_basic_tlv basic;
+ struct mt7615_bss_qos_tlv qos;
+ } basic_req = {
+ .hdr = {
+ .bss_idx = mvif->idx,
+ },
+ .basic = {
+ .tag = cpu_to_le16(UNI_BSS_INFO_BASIC),
+ .len = cpu_to_le16(sizeof(struct mt7615_bss_basic_tlv)),
+ .bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int),
+ .dtim_period = vif->bss_conf.dtim_period,
+ .omac_idx = mvif->omac_idx,
+ .band_idx = mvif->band_idx,
+ .wmm_idx = mvif->wmm_idx,
+ .active = true, /* keep bss deactivated */
+ .phymode = 0x38,
+ },
+ .qos = {
+ .tag = cpu_to_le16(UNI_BSS_INFO_QBSS),
+ .len = cpu_to_le16(sizeof(struct mt7615_bss_qos_tlv)),
+ .qos = vif->bss_conf.qos,
+ },
+ };
+ struct {
+ struct {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct rlm_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 control_channel;
+ u8 center_chan;
+ u8 center_chan2;
+ u8 bw;
+ u8 tx_streams;
+ u8 rx_streams;
+ u8 short_st;
+ u8 ht_op_info;
+ u8 sco;
+ u8 pad[3];
+ } __packed rlm;
+ } __packed rlm_req = {
+ .hdr = {
+ .bss_idx = mvif->idx,
+ },
+ .rlm = {
+ .tag = cpu_to_le16(UNI_BSS_INFO_RLM),
+ .len = cpu_to_le16(sizeof(struct rlm_tlv)),
+ .control_channel = chandef->chan->hw_value,
+ .center_chan = ieee80211_frequency_to_channel(freq1),
+ .center_chan2 = ieee80211_frequency_to_channel(freq2),
+ .tx_streams = hweight8(phy->mt76->antenna_mask),
+ .rx_streams = phy->chainmask,
+ .short_st = true,
+ },
+ };
+ int err, conn_type;
+ u8 idx;
+
+ idx = mvif->omac_idx > EXT_BSSID_START ? HW_BSSID_0 : mvif->omac_idx;
+ basic_req.basic.hw_bss_idx = idx;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_AP:
+ if (vif->p2p)
+ conn_type = CONNECTION_P2P_GO;
+ else
+ conn_type = CONNECTION_INFRA_AP;
+ basic_req.basic.conn_type = cpu_to_le32(conn_type);
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (vif->p2p)
+ conn_type = CONNECTION_P2P_GC;
+ else
+ conn_type = CONNECTION_INFRA_STA;
+ basic_req.basic.conn_type = cpu_to_le32(conn_type);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ basic_req.basic.conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ memcpy(basic_req.basic.bssid, vif->bss_conf.bssid, ETH_ALEN);
+ basic_req.basic.bmc_tx_wlan_idx = cpu_to_le16(mvif->sta.wcid.idx);
+ basic_req.basic.sta_idx = cpu_to_le16(mvif->sta.wcid.idx);
+ basic_req.basic.conn_state = !enable;
+
+ err = __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_BSS_INFO_UPDATE,
+ &basic_req, sizeof(basic_req), true);
+ if (err < 0)
+ return err;
+
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_40:
+ rlm_req.rlm.bw = CMD_CBW_40MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ rlm_req.rlm.bw = CMD_CBW_80MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
+ rlm_req.rlm.bw = CMD_CBW_8080MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ rlm_req.rlm.bw = CMD_CBW_160MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_5:
+ rlm_req.rlm.bw = CMD_CBW_5MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_10:
+ rlm_req.rlm.bw = CMD_CBW_10MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ default:
+ rlm_req.rlm.bw = CMD_CBW_20MHZ;
+ break;
+ }
+
+ if (rlm_req.rlm.control_channel < rlm_req.rlm.center_chan)
+ rlm_req.rlm.sco = 1; /* SCA */
+ else if (rlm_req.rlm.control_channel > rlm_req.rlm.center_chan)
+ rlm_req.rlm.sco = 3; /* SCB */
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_BSS_INFO_UPDATE,
+ &rlm_req, sizeof(rlm_req), true);
+}
+
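+/* Offload beaconing to the firmware: grab a beacon template from mac80211,
+ * prepend the hardware TXD, record the TIM/CSA IE offsets and reject
+ * templates that do not fit in the 512-byte packet buffer.
+ */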
+static int
+mt7615_mcu_uni_add_beacon_offload(struct mt7615_dev *dev,
+ struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ bool enable)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+ struct ieee80211_mutable_offsets offs;
+ struct {
+ struct req_hdr {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct bcn_content_tlv {
+ __le16 tag;
+ __le16 len;
+ __le16 tim_ie_pos;
+ __le16 csa_ie_pos;
+ __le16 bcc_ie_pos;
+ /* 0: enable beacon offload
+ * 1: disable beacon offload
+ * 2: update probe response offload
+ */
+ u8 enable;
+ /* 0: legacy format (TXD + payload)
+ * 1: only cap field IE
+ */
+ u8 type;
+ __le16 pkt_len;
+ u8 pkt[512];
+ } __packed beacon_tlv;
+ } req = {
+ .hdr = {
+ .bss_idx = mvif->idx,
+ },
+ .beacon_tlv = {
+ .tag = cpu_to_le16(UNI_BSS_INFO_BCN_CONTENT),
+ .len = cpu_to_le16(sizeof(struct bcn_content_tlv)),
+ .enable = enable,
+ },
+ };
+ struct sk_buff *skb;
+
+ skb = ieee80211_beacon_get_template(mt76_hw(dev), vif, &offs);
+ if (!skb)
+ return -EINVAL;
+
+ if (skb->len > 512 - MT_TXD_SIZE) {
+ dev_err(dev->mt76.dev, "beacon size limit exceeded\n");
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ mt7615_mac_write_txwi(dev, (__le32 *)(req.beacon_tlv.pkt), skb,
+ wcid, NULL, 0, NULL, true);
+ memcpy(req.beacon_tlv.pkt + MT_TXD_SIZE, skb->data, skb->len);
+ req.beacon_tlv.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
+ req.beacon_tlv.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + offs.tim_offset);
+
+ if (offs.cntdwn_counter_offs[0]) {
+ u16 csa_offs;
+
+ csa_offs = MT_TXD_SIZE + offs.cntdwn_counter_offs[0] - 4;
+ req.beacon_tlv.csa_ie_pos = cpu_to_le16(csa_offs);
+ }
+ dev_kfree_skb(skb);
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_BSS_INFO_UPDATE,
+ &req, sizeof(req), true);
+}
+
+static int
+mt7615_mcu_uni_tx_ba(struct mt7615_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable)
+{
+ struct mt7615_sta *msta = (struct mt7615_sta *)params->sta->drv_priv;
+ struct mt7615_vif *mvif = msta->vif;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct tlv *sta_wtbl;
+ struct sk_buff *skb;
+ int err;
+
+ skb = mt7615_mcu_alloc_sta_req(dev, mvif, msta);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ sta_wtbl = mt7615_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
+
+ wtbl_hdr = mt7615_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl,
+ &skb);
+ if (IS_ERR(wtbl_hdr))
+ return PTR_ERR(wtbl_hdr);
+
+ mt7615_mcu_wtbl_ba_tlv(skb, params, enable, true, sta_wtbl,
+ wtbl_hdr);
+
+ err = __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_UNI_CMD_STA_REC_UPDATE, true);
+ if (err < 0)
+ return err;
+
+ skb = mt7615_mcu_alloc_sta_req(dev, mvif, msta);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ mt7615_mcu_sta_ba_tlv(skb, params, enable, true);
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_UNI_CMD_STA_REC_UPDATE, true);
+}
+
+static int
+mt7615_mcu_uni_rx_ba(struct mt7615_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable)
+{
+ struct mt7615_sta *msta = (struct mt7615_sta *)params->sta->drv_priv;
+ struct mt7615_vif *mvif = msta->vif;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct tlv *sta_wtbl;
+ struct sk_buff *skb;
+ int err;
+
+ skb = mt7615_mcu_alloc_sta_req(dev, mvif, msta);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ mt7615_mcu_sta_ba_tlv(skb, params, enable, false);
+
+ err = __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_UNI_CMD_STA_REC_UPDATE, true);
+ if (err < 0 || !enable)
+ return err;
+
+ skb = mt7615_mcu_alloc_sta_req(dev, mvif, msta);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ sta_wtbl = mt7615_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
+
+ wtbl_hdr = mt7615_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl,
+ &skb);
+ if (IS_ERR(wtbl_hdr))
+ return PTR_ERR(wtbl_hdr);
+
+ mt7615_mcu_wtbl_ba_tlv(skb, params, enable, false, sta_wtbl,
+ wtbl_hdr);
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_UNI_CMD_STA_REC_UPDATE, true);
+}
+
+static int
+mt7615_mcu_uni_add_sta(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable)
+{
+ return mt7615_mcu_add_sta_cmd(dev, vif, sta, enable,
+ MCU_UNI_CMD_STA_REC_UPDATE);
+}
+
+static const struct mt7615_mcu_ops uni_update_ops = {
+ .add_beacon_offload = mt7615_mcu_uni_add_beacon_offload,
+ .set_pm_state = mt7615_mcu_uni_ctrl_pm_state,
+ .add_dev_info = mt7615_mcu_uni_add_dev,
+ .add_bss_info = mt7615_mcu_uni_add_bss,
+ .add_tx_ba = mt7615_mcu_uni_tx_ba,
+ .add_rx_ba = mt7615_mcu_uni_rx_ba,
+ .sta_add = mt7615_mcu_uni_add_sta,
+ .set_drv_ctrl = mt7615_mcu_lp_drv_pmctrl,
+ .set_fw_ctrl = mt7615_mcu_fw_pmctrl,
+};
+
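+/* Stream a firmware image to the MCU in FW_SCATTER chunks sized to fit a
+ * 4096-byte message minus the MCU txd headroom; on MMIO devices the FWDL
+ * queue is reclaimed after each chunk.
+ */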
+static int mt7615_mcu_send_firmware(struct mt7615_dev *dev, const void *data,
+ int len)
+{
+ int ret = 0, cur_len;
+
+ while (len > 0) {
+ cur_len = min_t(int, 4096 - dev->mt76.mcu_ops->headroom, len);
+
+ ret = __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_FW_SCATTER,
+ data, cur_len, false);
+ if (ret)
+ break;
+
+ data += cur_len;
+ len -= cur_len;
+
+ if (mt76_is_mmio(&dev->mt76))
+ mt76_queue_tx_cleanup(dev, MT_TXQ_FWDL, false);
+ }
+
+ return ret;
+}
+
+static int mt7615_mcu_start_firmware(struct mt7615_dev *dev, u32 addr,
+ u32 option)
+{
+ struct {
+ __le32 option;
+ __le32 addr;
+ } req = {
+ .option = cpu_to_le32(option),
+ .addr = cpu_to_le32(addr),
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_FW_START_REQ,
+ &req, sizeof(req), true);
+}
+
+int mt7615_mcu_restart(struct mt76_dev *dev)
+{
+ return __mt76_mcu_send_msg(dev, MCU_CMD_RESTART_DL_REQ, NULL,
+ 0, true);
+}
+EXPORT_SYMBOL_GPL(mt7615_mcu_restart);
+
+static int mt7615_mcu_patch_sem_ctrl(struct mt7615_dev *dev, bool get)
+{
+ struct {
+ __le32 op;
+ } req = {
+ .op = cpu_to_le32(get ? PATCH_SEM_GET : PATCH_SEM_RELEASE),
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_PATCH_SEM_CONTROL,
+ &req, sizeof(req), true);
+}
+
+static int mt7615_mcu_start_patch(struct mt7615_dev *dev)
+{
+ struct {
+ u8 check_crc;
+ u8 reserved[3];
+ } req = {
+ .check_crc = 0,
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_PATCH_FINISH_REQ,
+ &req, sizeof(req), true);
+}
+
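+/* ROM patch download: take the patch semaphore (bailing out if the patch
+ * is already loaded), push the image after an init-download handshake,
+ * start the patch and finally release the semaphore.
+ */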
+static int mt7615_load_patch(struct mt7615_dev *dev, u32 addr, const char *name)
+{
+ const struct mt7615_patch_hdr *hdr;
+ const struct firmware *fw = NULL;
+ int len, ret, sem;
+
+ sem = mt7615_mcu_patch_sem_ctrl(dev, 1);
+ switch (sem) {
+ case PATCH_IS_DL:
+ return 0;
+ case PATCH_NOT_DL_SEM_SUCCESS:
+ break;
+ default:
+ dev_err(dev->mt76.dev, "Failed to get patch semaphore\n");
+ return -EAGAIN;
+ }
+
+ ret = firmware_request_nowarn(&fw, name, dev->mt76.dev);
+ if (ret)
+ goto out;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+ dev_err(dev->mt76.dev, "Invalid firmware\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ hdr = (const struct mt7615_patch_hdr *)(fw->data);
+
+ dev_info(dev->mt76.dev, "HW/SW Version: 0x%x, Build Time: %.16s\n",
+ be32_to_cpu(hdr->hw_sw_ver), hdr->build_date);
+
+ len = fw->size - sizeof(*hdr);
+
+ ret = mt7615_mcu_init_download(dev, addr, len, DL_MODE_NEED_RSP);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Download request failed\n");
+ goto out;
+ }
+
+ ret = mt7615_mcu_send_firmware(dev, fw->data + sizeof(*hdr), len);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Failed to send firmware to device\n");
+ goto out;
+ }
+
+ ret = mt7615_mcu_start_patch(dev);
+ if (ret)
+ dev_err(dev->mt76.dev, "Failed to start patch\n");
+
+out:
+ release_firmware(fw);
+
+ sem = mt7615_mcu_patch_sem_ctrl(dev, 0);
+ switch (sem) {
+ case PATCH_REL_SEM_SUCCESS:
+ break;
+ default:
+ ret = -EAGAIN;
+ dev_err(dev->mt76.dev, "Failed to release patch semaphore\n");
+ break;
+ }
+
+ return ret;
+}
+
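+/* Translate the per-region feature bits from the firmware trailer into
+ * DL_MODE flags (encryption, key index, CR4 as the working PDA).
+ */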
+static u32 mt7615_mcu_gen_dl_mode(u8 feature_set, bool is_cr4)
+{
+ u32 ret = 0;
+
+ ret |= (feature_set & FW_FEATURE_SET_ENCRYPT) ?
+ (DL_MODE_ENCRYPT | DL_MODE_RESET_SEC_IV) : 0;
+ ret |= FIELD_PREP(DL_MODE_KEY_IDX,
+ FIELD_GET(FW_FEATURE_SET_KEY_IDX, feature_set));
+ ret |= DL_MODE_NEED_RSP;
+ ret |= is_cr4 ? DL_MODE_WORKING_PDA_CR4 : 0;
+
+ return ret;
+}
+
+static int
+mt7615_mcu_send_ram_firmware(struct mt7615_dev *dev,
+ const struct mt7615_fw_trailer *hdr,
+ const u8 *data, bool is_cr4)
+{
+ int n_region = is_cr4 ? CR4_REGION_NUM : N9_REGION_NUM;
+ int err, i, offset = 0;
+ u32 len, addr, mode;
+
+ for (i = 0; i < n_region; i++) {
+ mode = mt7615_mcu_gen_dl_mode(hdr[i].feature_set, is_cr4);
+ len = le32_to_cpu(hdr[i].len) + IMG_CRC_LEN;
+ addr = le32_to_cpu(hdr[i].addr);
+
+ err = mt7615_mcu_init_download(dev, addr, len, mode);
+ if (err) {
+ dev_err(dev->mt76.dev, "Download request failed\n");
+ return err;
+ }
+
+ err = mt7615_mcu_send_firmware(dev, data + offset, len);
+ if (err) {
+ dev_err(dev->mt76.dev, "Failed to send firmware to device\n");
+ return err;
+ }
+
+ offset += len;
+ }
+
+ return 0;
+}
+
+static const struct wiphy_wowlan_support mt7615_wowlan_support = {
+ .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | WIPHY_WOWLAN_NET_DETECT,
+ .n_patterns = 1,
+ .pattern_min_len = 1,
+ .pattern_max_len = MT7615_WOW_PATTEN_MAX_LEN,
+ .max_nd_match_sets = 10,
+};
+
+static int mt7615_load_n9(struct mt7615_dev *dev, const char *name)
+{
+ const struct mt7615_fw_trailer *hdr;
+ const struct firmware *fw;
+ int ret;
+
+ ret = request_firmware(&fw, name, dev->mt76.dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data || fw->size < N9_REGION_NUM * sizeof(*hdr)) {
+ dev_err(dev->mt76.dev, "Invalid firmware\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ hdr = (const struct mt7615_fw_trailer *)(fw->data + fw->size -
+ N9_REGION_NUM * sizeof(*hdr));
+
+ dev_info(dev->mt76.dev, "N9 Firmware Version: %.10s, Build Time: %.15s\n",
+ hdr->fw_ver, hdr->build_date);
+
+ ret = mt7615_mcu_send_ram_firmware(dev, hdr, fw->data, false);
+ if (ret)
+ goto out;
+
+ ret = mt7615_mcu_start_firmware(dev, le32_to_cpu(hdr->addr),
+ FW_START_OVERRIDE);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Failed to start N9 firmware\n");
+ goto out;
+ }
+
+ snprintf(dev->mt76.hw->wiphy->fw_version,
+ sizeof(dev->mt76.hw->wiphy->fw_version),
+ "%.10s-%.15s", hdr->fw_ver, hdr->build_date);
+
+ if (!is_mt7615(&dev->mt76) &&
+ !strncmp(hdr->fw_ver, "2.0", sizeof(hdr->fw_ver))) {
+ dev->fw_ver = MT7615_FIRMWARE_V2;
+ dev->mcu_ops = &sta_update_ops;
+ } else {
+ dev->fw_ver = MT7615_FIRMWARE_V1;
+ dev->mcu_ops = &wtbl_update_ops;
+ }
+
+out:
+ release_firmware(fw);
+ return ret;
+}
+
+static int mt7615_load_cr4(struct mt7615_dev *dev, const char *name)
+{
+ const struct mt7615_fw_trailer *hdr;
+ const struct firmware *fw;
+ int ret;
+
+ ret = request_firmware(&fw, name, dev->mt76.dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data || fw->size < CR4_REGION_NUM * sizeof(*hdr)) {
+ dev_err(dev->mt76.dev, "Invalid firmware\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ hdr = (const struct mt7615_fw_trailer *)(fw->data + fw->size -
+ CR4_REGION_NUM * sizeof(*hdr));
+
+ dev_info(dev->mt76.dev, "CR4 Firmware Version: %.10s, Build Time: %.15s\n",
+ hdr->fw_ver, hdr->build_date);
+
+ ret = mt7615_mcu_send_ram_firmware(dev, hdr, fw->data, true);
+ if (ret)
+ goto out;
+
+ ret = mt7615_mcu_start_firmware(dev, 0, FW_START_WORKING_PDA_CR4);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Failed to start CR4 firmware\n");
+ goto out;
+ }
+
+out:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static int mt7615_load_ram(struct mt7615_dev *dev)
+{
+ int ret;
+
+ ret = mt7615_load_n9(dev, MT7615_FIRMWARE_N9);
+ if (ret)
+ return ret;
+
+ return mt7615_load_cr4(dev, MT7615_FIRMWARE_CR4);
+}
+
+static int mt7615_load_firmware(struct mt7615_dev *dev)
+{
+ int ret;
+ u32 val;
+
+ val = mt76_get_field(dev, MT_TOP_MISC2, MT_TOP_MISC2_FW_STATE);
+
+ if (val != FW_STATE_FW_DOWNLOAD) {
+ dev_err(dev->mt76.dev, "Firmware is not ready for download\n");
+ return -EIO;
+ }
+
+ ret = mt7615_load_patch(dev, MT7615_PATCH_ADDRESS, MT7615_ROM_PATCH);
+ if (ret)
+ return ret;
+
+ ret = mt7615_load_ram(dev);
+ if (ret)
+ return ret;
+
+ if (!mt76_poll_msec(dev, MT_TOP_MISC2, MT_TOP_MISC2_FW_STATE,
+ FIELD_PREP(MT_TOP_MISC2_FW_STATE,
+ FW_STATE_CR4_RDY), 500)) {
+ dev_err(dev->mt76.dev, "Timeout for initializing firmware\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int mt7622_load_firmware(struct mt7615_dev *dev)
+{
+ int ret;
+ u32 val;
+
+ mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_BYPASS_TX_SCH);
+
+ val = mt76_get_field(dev, MT_TOP_OFF_RSV, MT_TOP_OFF_RSV_FW_STATE);
+ if (val != FW_STATE_FW_DOWNLOAD) {
+ dev_err(dev->mt76.dev, "Firmware is not ready for download\n");
+ return -EIO;
+ }
+
+ ret = mt7615_load_patch(dev, MT7622_PATCH_ADDRESS, MT7622_ROM_PATCH);
+ if (ret)
+ return ret;
+
+ ret = mt7615_load_n9(dev, MT7622_FIRMWARE_N9);
+ if (ret)
+ return ret;
+
+ if (!mt76_poll_msec(dev, MT_TOP_OFF_RSV, MT_TOP_OFF_RSV_FW_STATE,
+ FIELD_PREP(MT_TOP_OFF_RSV_FW_STATE,
+ FW_STATE_NORMAL_TRX), 1500)) {
+ dev_err(dev->mt76.dev, "Timeout for initializing firmware\n");
+ return -EIO;
+ }
+
+ mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_BYPASS_TX_SCH);
+
+ return 0;
+}
+
+int mt7615_mcu_fw_log_2_host(struct mt7615_dev *dev, u8 ctrl)
+{
+ struct {
+ u8 ctrl_val;
+ u8 pad[3];
+ } data = {
+ .ctrl_val = ctrl
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_FW_LOG_2_HOST,
+ &data, sizeof(data), true);
+}
+
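+/* MT7663 RAM firmware carries per-region descriptors placed right before
+ * the common trailer. Each region is downloaded to its destination
+ * address; the region flagged with DL_MODE_VALID_RAM_ENTRY provides the
+ * override (start) address.
+ */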
+static int mt7663_load_n9(struct mt7615_dev *dev, const char *name)
+{
+ u32 offset = 0, override_addr = 0, flag = FW_START_DLYCAL;
+ const struct mt7663_fw_trailer *hdr;
+ const struct mt7663_fw_buf *buf;
+ const struct firmware *fw;
+ const u8 *base_addr;
+ int i, ret;
+
+ ret = request_firmware(&fw, name, dev->mt76.dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data || fw->size < FW_V3_COMMON_TAILER_SIZE) {
+ dev_err(dev->mt76.dev, "Invalid firmware\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ hdr = (const struct mt7663_fw_trailer *)(fw->data + fw->size -
+ FW_V3_COMMON_TAILER_SIZE);
+
+ dev_info(dev->mt76.dev, "N9 Firmware Version: %.10s, Build Time: %.15s\n",
+ hdr->fw_ver, hdr->build_date);
+ dev_info(dev->mt76.dev, "Region number: 0x%x\n", hdr->n_region);
+
+ base_addr = fw->data + fw->size - FW_V3_COMMON_TAILER_SIZE;
+ for (i = 0; i < hdr->n_region; i++) {
+ u32 shift = (hdr->n_region - i) * FW_V3_REGION_TAILER_SIZE;
+ u32 len, addr, mode;
+
+ dev_info(dev->mt76.dev, "Parsing trailer region: %d\n", i);
+
+ buf = (const struct mt7663_fw_buf *)(base_addr - shift);
+ mode = mt7615_mcu_gen_dl_mode(buf->feature_set, false);
+ addr = le32_to_cpu(buf->img_dest_addr);
+ len = le32_to_cpu(buf->img_size);
+
+ ret = mt7615_mcu_init_download(dev, addr, len, mode);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Download request failed\n");
+ goto out;
+ }
+
+ ret = mt7615_mcu_send_firmware(dev, fw->data + offset, len);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Failed to send firmware\n");
+ goto out;
+ }
+
+ offset += le32_to_cpu(buf->img_size);
+ if (buf->feature_set & DL_MODE_VALID_RAM_ENTRY) {
+ override_addr = le32_to_cpu(buf->img_dest_addr);
+ dev_info(dev->mt76.dev, "Region %d, override_addr = 0x%08x\n",
+ i, override_addr);
+ }
+ }
+
+ if (override_addr)
+ flag |= FW_START_OVERRIDE;
+
+ dev_info(dev->mt76.dev, "override_addr = 0x%08x, option = %d\n",
+ override_addr, flag);
+
+ ret = mt7615_mcu_start_firmware(dev, override_addr, flag);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Failed to start N9 firmware\n");
+ goto out;
+ }
+
+ snprintf(dev->mt76.hw->wiphy->fw_version,
+ sizeof(dev->mt76.hw->wiphy->fw_version),
+ "%.10s-%.15s", hdr->fw_ver, hdr->build_date);
+
+out:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static int
+mt7663_load_rom_patch(struct mt7615_dev *dev, const char **n9_firmware)
+{
+ const char *selected_rom, *secondary_rom = MT7663_ROM_PATCH;
+ const char *primary_rom = MT7663_OFFLOAD_ROM_PATCH;
+ int ret;
+
+ if (!prefer_offload_fw) {
+ secondary_rom = MT7663_OFFLOAD_ROM_PATCH;
+ primary_rom = MT7663_ROM_PATCH;
+ }
+ selected_rom = primary_rom;
+
+ ret = mt7615_load_patch(dev, MT7663_PATCH_ADDRESS, primary_rom);
+ if (ret) {
+ dev_info(dev->mt76.dev, "%s not found, switching to %s\n",
+ primary_rom, secondary_rom);
+ ret = mt7615_load_patch(dev, MT7663_PATCH_ADDRESS,
+ secondary_rom);
+ if (ret) {
+ dev_err(dev->mt76.dev, "failed to load %s\n",
+ secondary_rom);
+ return ret;
+ }
+ selected_rom = secondary_rom;
+ }
+
+ if (!strcmp(selected_rom, MT7663_OFFLOAD_ROM_PATCH)) {
+ *n9_firmware = MT7663_OFFLOAD_FIRMWARE_N9;
+ dev->fw_ver = MT7615_FIRMWARE_V3;
+ dev->mcu_ops = &uni_update_ops;
+ } else {
+ *n9_firmware = MT7663_FIRMWARE_N9;
+ dev->fw_ver = MT7615_FIRMWARE_V2;
+ dev->mcu_ops = &sta_update_ops;
+ }
+
+ return 0;
+}
+
+int __mt7663_load_firmware(struct mt7615_dev *dev)
+{
+ const char *n9_firmware;
+ int ret;
+
+ ret = mt76_get_field(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY);
+ if (ret) {
+ dev_dbg(dev->mt76.dev, "Firmware is already downloaded\n");
+ return -EIO;
+ }
+
+ ret = mt7663_load_rom_patch(dev, &n9_firmware);
+ if (ret)
+ return ret;
+
+ ret = mt7663_load_n9(dev, n9_firmware);
+ if (ret)
+ return ret;
+
+ if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY,
+ MT_TOP_MISC2_FW_N9_RDY, 1500)) {
+ ret = mt76_get_field(dev, MT_CONN_ON_MISC,
+ MT7663_TOP_MISC2_FW_STATE);
+ dev_err(dev->mt76.dev, "Timeout for initializing firmware\n");
+ return -EIO;
+ }
+
+#ifdef CONFIG_PM
+ if (mt7615_firmware_offload(dev))
+ dev->mt76.hw->wiphy->wowlan = &mt7615_wowlan_support;
+#endif /* CONFIG_PM */
+
+ dev_dbg(dev->mt76.dev, "Firmware init done\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__mt7663_load_firmware);
+
+static int mt7663_load_firmware(struct mt7615_dev *dev)
+{
+ int ret;
+
+ mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_BYPASS_TX_SCH);
+
+ ret = __mt7663_load_firmware(dev);
+ if (ret)
+ return ret;
+
+ mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_BYPASS_TX_SCH);
+
+ return 0;
+}
+
+int mt7615_mcu_init(struct mt7615_dev *dev)
+{
+ static const struct mt76_mcu_ops mt7615_mcu_ops = {
+ .headroom = sizeof(struct mt7615_mcu_txd),
+ .mcu_skb_send_msg = mt7615_mcu_send_message,
+ .mcu_send_msg = mt7615_mcu_msg_send,
+ .mcu_restart = mt7615_mcu_restart,
+ };
+ int ret;
+
+ dev->mt76.mcu_ops = &mt7615_mcu_ops;
+
+ ret = mt7615_mcu_drv_pmctrl(dev);
+ if (ret)
+ return ret;
+
+ switch (mt76_chip(&dev->mt76)) {
+ case 0x7622:
+ ret = mt7622_load_firmware(dev);
+ break;
+ case 0x7663:
+ ret = mt7663_load_firmware(dev);
+ break;
+ default:
+ ret = mt7615_load_firmware(dev);
+ break;
+ }
+ if (ret)
+ return ret;
+
+ mt76_queue_tx_cleanup(dev, MT_TXQ_FWDL, false);
+ dev_dbg(dev->mt76.dev, "Firmware init done\n");
+ set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
+ mt7615_mcu_fw_log_2_host(dev, 0);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt7615_mcu_init);
+
+void mt7615_mcu_exit(struct mt7615_dev *dev)
+{
+ __mt76_mcu_restart(&dev->mt76);
+ mt7615_mcu_set_fw_ctrl(dev);
+ skb_queue_purge(&dev->mt76.mcu.res_q);
+}
+EXPORT_SYMBOL_GPL(mt7615_mcu_exit);
+
+int mt7615_mcu_set_eeprom(struct mt7615_dev *dev)
+{
+ struct {
+ u8 buffer_mode;
+ u8 content_format;
+ __le16 len;
+ } __packed req_hdr = {
+ .buffer_mode = 1,
+ };
+ u8 *eep = (u8 *)dev->mt76.eeprom.data;
+ struct sk_buff *skb;
+ int eep_len, offset;
+
+ switch (mt76_chip(&dev->mt76)) {
+ case 0x7622:
+ eep_len = MT7622_EE_MAX - MT_EE_NIC_CONF_0;
+ offset = MT_EE_NIC_CONF_0;
+ break;
+ case 0x7663:
+ eep_len = MT7663_EE_MAX - MT_EE_CHIP_ID;
+ req_hdr.content_format = 1;
+ offset = MT_EE_CHIP_ID;
+ break;
+ default:
+ eep_len = MT7615_EE_MAX - MT_EE_NIC_CONF_0;
+ offset = MT_EE_NIC_CONF_0;
+ break;
+ }
+
+ req_hdr.len = cpu_to_le16(eep_len);
+
+ skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, sizeof(req_hdr) + eep_len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put_data(skb, &req_hdr, sizeof(req_hdr));
+ skb_put_data(skb, eep + offset, eep_len);
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_EFUSE_BUFFER_MODE, true);
+}
+EXPORT_SYMBOL_GPL(mt7615_mcu_set_eeprom);
+
+int mt7615_mcu_set_mac_enable(struct mt7615_dev *dev, int band, bool enable)
+{
+ struct {
+ u8 enable;
+ u8 band;
+ u8 rsv[2];
+ } __packed req = {
+ .enable = enable,
+ .band = band,
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_MAC_INIT_CTRL,
+ &req, sizeof(req), true);
+}
+
+int mt7615_mcu_set_rts_thresh(struct mt7615_phy *phy, u32 val)
+{
+ struct mt7615_dev *dev = phy->dev;
+ struct {
+ u8 prot_idx;
+ u8 band;
+ u8 rsv[2];
+ __le32 len_thresh;
+ __le32 pkt_thresh;
+ } __packed req = {
+ .prot_idx = 1,
+ .band = phy != &dev->phy,
+ .len_thresh = cpu_to_le32(val),
+ .pkt_thresh = cpu_to_le32(0x2),
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_PROTECT_CTRL,
+ &req, sizeof(req), true);
+}
+
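+/* Push EDCA parameters for one hardware queue. cw_min/cw_max are passed
+ * to the firmware as exponents (fls() of the mac80211 value), with 5/10
+ * used as defaults when the supplied value is zero.
+ */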
+int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+#define WMM_AIFS_SET BIT(0)
+#define WMM_CW_MIN_SET BIT(1)
+#define WMM_CW_MAX_SET BIT(2)
+#define WMM_TXOP_SET BIT(3)
+#define WMM_PARAM_SET (WMM_AIFS_SET | WMM_CW_MIN_SET | \
+ WMM_CW_MAX_SET | WMM_TXOP_SET)
+ struct req_data {
+ u8 number;
+ u8 rsv[3];
+ u8 queue;
+ u8 valid;
+ u8 aifs;
+ u8 cw_min;
+ __le16 cw_max;
+ __le16 txop;
+ } __packed req = {
+ .number = 1,
+ .queue = queue,
+ .valid = WMM_PARAM_SET,
+ .aifs = params->aifs,
+ .cw_min = 5,
+ .cw_max = cpu_to_le16(10),
+ .txop = cpu_to_le16(params->txop),
+ };
+
+ if (params->cw_min)
+ req.cw_min = fls(params->cw_min);
+ if (params->cw_max)
+ req.cw_max = cpu_to_le16(fls(params->cw_max));
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EDCA_UPDATE,
+ &req, sizeof(req), true);
+}
+
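+/* Program the DBDC mapping table. Entries are generated only when a
+ * second phy exists; BSS and MBSS slots follow the ext phy omac mask,
+ * while MU/BF/WMM/MGMT resources use a fixed band split.
+ */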
+int mt7615_mcu_set_dbdc(struct mt7615_dev *dev)
+{
+ struct mt7615_phy *ext_phy = mt7615_ext_phy(dev);
+ struct dbdc_entry {
+ u8 type;
+ u8 index;
+ u8 band;
+ u8 _rsv;
+ };
+ struct {
+ u8 enable;
+ u8 num;
+ u8 _rsv[2];
+ struct dbdc_entry entry[64];
+ } req = {
+ .enable = !!ext_phy,
+ };
+ int i;
+
+ if (!ext_phy)
+ goto out;
+
+#define ADD_DBDC_ENTRY(_type, _idx, _band) \
+ do { \
+ req.entry[req.num].type = _type; \
+ req.entry[req.num].index = _idx; \
+ req.entry[req.num++].band = _band; \
+ } while (0)
+
+ for (i = 0; i < 4; i++) {
+ bool band = !!(ext_phy->omac_mask & BIT(i));
+
+ ADD_DBDC_ENTRY(DBDC_TYPE_BSS, i, band);
+ }
+
+ for (i = 0; i < 14; i++) {
+ bool band = !!(ext_phy->omac_mask & BIT(0x11 + i));
+
+ ADD_DBDC_ENTRY(DBDC_TYPE_MBSS, i, band);
+ }
+
+ ADD_DBDC_ENTRY(DBDC_TYPE_MU, 0, 1);
+
+ for (i = 0; i < 3; i++)
+ ADD_DBDC_ENTRY(DBDC_TYPE_BF, i, 1);
+
+ ADD_DBDC_ENTRY(DBDC_TYPE_WMM, 0, 0);
+ ADD_DBDC_ENTRY(DBDC_TYPE_WMM, 1, 0);
+ ADD_DBDC_ENTRY(DBDC_TYPE_WMM, 2, 1);
+ ADD_DBDC_ENTRY(DBDC_TYPE_WMM, 3, 1);
+
+ ADD_DBDC_ENTRY(DBDC_TYPE_MGMT, 0, 0);
+ ADD_DBDC_ENTRY(DBDC_TYPE_MGMT, 1, 1);
+
+out:
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_DBDC_CTRL,
+ &req, sizeof(req), true);
+}
+
+int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev)
+{
+ struct wtbl_req_hdr req = {
+ .operation = WTBL_RESET_ALL,
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
+ &req, sizeof(req), true);
+}
+EXPORT_SYMBOL_GPL(mt7615_mcu_del_wtbl_all);
+
+int mt7615_mcu_rdd_cmd(struct mt7615_dev *dev,
+ enum mt7615_rdd_cmd cmd, u8 index,
+ u8 rx_sel, u8 val)
+{
+ struct {
+ u8 ctrl;
+ u8 rdd_idx;
+ u8 rdd_rx_sel;
+ u8 val;
+ u8 rsv[4];
+ } req = {
+ .ctrl = cmd,
+ .rdd_idx = index,
+ .rdd_rx_sel = rx_sel,
+ .val = val,
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_CTRL,
+ &req, sizeof(req), true);
+}
+
+int mt7615_mcu_set_fcc5_lpn(struct mt7615_dev *dev, int val)
+{
+ struct {
+ __le16 tag;
+ __le16 min_lpn;
+ } req = {
+ .tag = cpu_to_le16(0x1),
+ .min_lpn = cpu_to_le16(val),
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH,
+ &req, sizeof(req), true);
+}
+
+int mt7615_mcu_set_pulse_th(struct mt7615_dev *dev,
+ const struct mt7615_dfs_pulse *pulse)
+{
+ struct {
+ __le16 tag;
+ __le32 max_width; /* us */
+ __le32 max_pwr; /* dbm */
+ __le32 min_pwr; /* dbm */
+ __le32 min_stgr_pri; /* us */
+ __le32 max_stgr_pri; /* us */
+ __le32 min_cr_pri; /* us */
+ __le32 max_cr_pri; /* us */
+ } req = {
+ .tag = cpu_to_le16(0x3),
+#define __req_field(field) .field = cpu_to_le32(pulse->field)
+ __req_field(max_width),
+ __req_field(max_pwr),
+ __req_field(min_pwr),
+ __req_field(min_stgr_pri),
+ __req_field(max_stgr_pri),
+ __req_field(min_cr_pri),
+ __req_field(max_cr_pri),
+#undef __req_field
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH,
+ &req, sizeof(req), true);
+}
+
+int mt7615_mcu_set_radar_th(struct mt7615_dev *dev, int index,
+ const struct mt7615_dfs_pattern *pattern)
+{
+ struct {
+ __le16 tag;
+ __le16 radar_type;
+ u8 enb;
+ u8 stgr;
+ u8 min_crpn;
+ u8 max_crpn;
+ u8 min_crpr;
+ u8 min_pw;
+ u8 max_pw;
+ __le32 min_pri;
+ __le32 max_pri;
+ u8 min_crbn;
+ u8 max_crbn;
+ u8 min_stgpn;
+ u8 max_stgpn;
+ u8 min_stgpr;
+ } req = {
+ .tag = cpu_to_le16(0x2),
+ .radar_type = cpu_to_le16(index),
+#define __req_field_u8(field) .field = pattern->field
+#define __req_field_u32(field) .field = cpu_to_le32(pattern->field)
+ __req_field_u8(enb),
+ __req_field_u8(stgr),
+ __req_field_u8(min_crpn),
+ __req_field_u8(max_crpn),
+ __req_field_u8(min_crpr),
+ __req_field_u8(min_pw),
+ __req_field_u8(max_pw),
+ __req_field_u32(min_pri),
+ __req_field_u32(max_pri),
+ __req_field_u8(min_crbn),
+ __req_field_u8(max_crbn),
+ __req_field_u8(min_stgpn),
+ __req_field_u8(max_stgpn),
+ __req_field_u8(min_stgpr),
+#undef __req_field_u8
+#undef __req_field_u32
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH,
+ &req, sizeof(req), true);
+}
+
+int mt7615_mcu_rdd_send_pattern(struct mt7615_dev *dev)
+{
+ struct {
+ u8 pulse_num;
+ u8 rsv[3];
+ struct {
+ __le32 start_time;
+ __le16 width;
+ __le16 power;
+ } pattern[32];
+ } req = {
+ .pulse_num = dev->radar_pattern.n_pulses,
+ };
+ u32 start_time = ktime_to_ms(ktime_get_boottime());
+ int i;
+
+ if (dev->radar_pattern.n_pulses > ARRAY_SIZE(req.pattern))
+ return -EINVAL;
+
+ /* TODO: add some noise here */
+ for (i = 0; i < dev->radar_pattern.n_pulses; i++) {
+ u32 ts = start_time + i * dev->radar_pattern.period;
+
+ req.pattern[i].width = cpu_to_le16(dev->radar_pattern.width);
+ req.pattern[i].power = cpu_to_le16(dev->radar_pattern.power);
+ req.pattern[i].start_time = cpu_to_le32(ts);
+ }
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_PATTERN,
+ &req, sizeof(req), false);
+}
+
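+/* Fill the per-rate SKU table: the base entries carry the configured
+ * power level in 0.5 dB units minus the multi-chain delta, followed by
+ * the per-stream delta entries.
+ */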
+static void mt7615_mcu_set_txpower_sku(struct mt7615_phy *phy, u8 *sku)
+{
+ struct mt76_phy *mphy = phy->mt76;
+ struct ieee80211_hw *hw = mphy->hw;
+ int n_chains = hweight8(mphy->antenna_mask);
+ int tx_power;
+ int i;
+
+ tx_power = hw->conf.power_level * 2 -
+ mt76_tx_power_nss_delta(n_chains);
+ mphy->txpower_cur = tx_power;
+
+ for (i = 0; i < MT_SKU_1SS_DELTA; i++)
+ sku[i] = tx_power;
+
+ for (i = 0; i < 4; i++) {
+ int delta = 0;
+
+ if (i < n_chains - 1)
+ delta = mt76_tx_power_nss_delta(n_chains) -
+ mt76_tx_power_nss_delta(i + 1);
+ sku[MT_SKU_1SS_DELTA + i] = delta;
+ }
+}
+
+static u8 mt7615_mcu_chan_bw(struct cfg80211_chan_def *chandef)
+{
+ static const u8 width_to_bw[] = {
+ [NL80211_CHAN_WIDTH_40] = CMD_CBW_40MHZ,
+ [NL80211_CHAN_WIDTH_80] = CMD_CBW_80MHZ,
+ [NL80211_CHAN_WIDTH_80P80] = CMD_CBW_8080MHZ,
+ [NL80211_CHAN_WIDTH_160] = CMD_CBW_160MHZ,
+ [NL80211_CHAN_WIDTH_5] = CMD_CBW_5MHZ,
+ [NL80211_CHAN_WIDTH_10] = CMD_CBW_10MHZ,
+ [NL80211_CHAN_WIDTH_20] = CMD_CBW_20MHZ,
+ [NL80211_CHAN_WIDTH_20_NOHT] = CMD_CBW_20MHZ,
+ };
+
+ if (chandef->width >= ARRAY_SIZE(width_to_bw))
+ return 0;
+
+ return width_to_bw[chandef->width];
+}
+
+int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd)
+{
+ struct mt7615_dev *dev = phy->dev;
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+ int freq1 = chandef->center_freq1, freq2 = chandef->center_freq2;
+ struct {
+ u8 control_chan;
+ u8 center_chan;
+ u8 bw;
+ u8 tx_streams;
+ u8 rx_streams_mask;
+ u8 switch_reason;
+ u8 band_idx;
+ /* for 80+80 only */
+ u8 center_chan2;
+ __le16 cac_case;
+ u8 channel_band;
+ u8 rsv0;
+ __le32 outband_freq;
+ u8 txpower_drop;
+ u8 rsv1[3];
+ u8 txpower_sku[53];
+ u8 rsv2[3];
+ } req = {
+ .control_chan = chandef->chan->hw_value,
+ .center_chan = ieee80211_frequency_to_channel(freq1),
+ .tx_streams = hweight8(phy->mt76->antenna_mask),
+ .rx_streams_mask = phy->chainmask,
+ .center_chan2 = ieee80211_frequency_to_channel(freq2),
+ };
+
+ if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
+ else if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
+ chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
+ req.switch_reason = CH_SWITCH_DFS;
+ else
+ req.switch_reason = CH_SWITCH_NORMAL;
+
+ req.band_idx = phy != &dev->phy;
+ req.bw = mt7615_mcu_chan_bw(chandef);
+
+ if (mt76_testmode_enabled(&dev->mt76))
+ memset(req.txpower_sku, 0x3f, 49);
+ else
+ mt7615_mcu_set_txpower_sku(phy, req.txpower_sku);
+
+ return __mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), true);
+}
+
+int mt7615_mcu_get_temperature(struct mt7615_dev *dev, int index)
+{
+ struct {
+ u8 action;
+ u8 rsv[3];
+ } req = {
+ .action = index,
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_GET_TEMP, &req,
+ sizeof(req), true);
+}
+
+int mt7615_mcu_set_test_param(struct mt7615_dev *dev, u8 param, bool test_mode,
+ u32 val)
+{
+ struct {
+ u8 test_mode_en;
+ u8 param_idx;
+ u8 _rsv[2];
+
+ __le32 value;
+
+ u8 pad[8];
+ } req = {
+ .test_mode_en = test_mode,
+ .param_idx = param,
+ .value = cpu_to_le32(val),
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_ATE_CTRL, &req,
+ sizeof(req), false);
+}
+
+int mt7615_mcu_set_sku_en(struct mt7615_phy *phy, bool enable)
+{
+ struct mt7615_dev *dev = phy->dev;
+ struct {
+ u8 format_id;
+ u8 sku_enable;
+ u8 band_idx;
+ u8 rsv;
+ } req = {
+ .format_id = 0,
+ .band_idx = phy != &dev->phy,
+ .sku_enable = enable,
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_TX_POWER_FEATURE_CTRL, &req,
+ sizeof(req), true);
+}
+
+int mt7615_mcu_set_vif_ps(struct mt7615_dev *dev, struct ieee80211_vif *vif)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct {
+ u8 bss_idx;
+ u8 ps_state; /* 0: device awake
+ * 1: static power save
+ * 2: dynamic power saving
+ */
+ } req = {
+ .bss_idx = mvif->idx,
+ .ps_state = vif->bss_conf.ps ? 2 : 0,
+ };
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return -ENOTSUPP;
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_PS_PROFILE,
+ &req, sizeof(req), false);
+}
+
+int mt7615_mcu_set_channel_domain(struct mt7615_phy *phy)
+{
+ struct mt76_phy *mphy = phy->mt76;
+ struct mt7615_dev *dev = phy->dev;
+ struct mt7615_mcu_channel_domain {
+ __le32 country_code; /* regulatory_request.alpha2 */
+ u8 bw_2g; /* BW_20_40M 0
+ * BW_20M 1
+ * BW_20_40_80M 2
+ * BW_20_40_80_160M 3
+ * BW_20_40_80_8080M 4
+ */
+ u8 bw_5g;
+ __le16 pad;
+ u8 n_2ch;
+ u8 n_5ch;
+ __le16 pad2;
+ } __packed hdr = {
+ .bw_2g = 0,
+ .bw_5g = 3,
+ .n_2ch = mphy->sband_2g.sband.n_channels,
+ .n_5ch = mphy->sband_5g.sband.n_channels,
+ };
+ struct mt7615_mcu_chan {
+ __le16 hw_value;
+ __le16 pad;
+ __le32 flags;
+ } __packed;
+ int i, n_channels = hdr.n_2ch + hdr.n_5ch;
+ int len = sizeof(hdr) + n_channels * sizeof(struct mt7615_mcu_chan);
+ struct sk_buff *skb;
+
+ if (!mt7615_firmware_offload(dev))
+ return 0;
+
+ skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put_data(skb, &hdr, sizeof(hdr));
+
+ for (i = 0; i < n_channels; i++) {
+ struct ieee80211_channel *chan;
+ struct mt7615_mcu_chan channel;
+
+ if (i < hdr.n_2ch)
+ chan = &mphy->sband_2g.sband.channels[i];
+ else
+ chan = &mphy->sband_5g.sband.channels[i - hdr.n_2ch];
+
+ channel.hw_value = cpu_to_le16(chan->hw_value);
+ channel.flags = cpu_to_le32(chan->flags);
+ channel.pad = 0;
+
+ skb_put_data(skb, &channel, sizeof(channel));
+ }
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_CMD_SET_CHAN_DOMAIN, false);
+}
+
+#define MT7615_SCAN_CHANNEL_TIME 60
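+/* Firmware-offloaded scan: returns 1 so the caller can fall back to
+ * software scanning when offload is not supported. Up to 32 channels go
+ * in the main list and up to 32 more in the extended list; dwell time is
+ * doubled for passive (no-SSID) scans.
+ */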
+int mt7615_mcu_hw_scan(struct mt7615_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *scan_req)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct cfg80211_scan_request *sreq = &scan_req->req;
+ int n_ssids = 0, err, i, duration = MT7615_SCAN_CHANNEL_TIME;
+ int ext_channels_num = max_t(int, sreq->n_channels - 32, 0);
+ struct ieee80211_channel **scan_list = sreq->channels;
+ struct mt7615_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+ struct mt7615_mcu_scan_channel *chan;
+ struct mt7615_hw_scan_req *req;
+ struct sk_buff *skb;
+
+ /* fall-back to sw-scan */
+ if (!mt7615_firmware_offload(dev))
+ return 1;
+
+ skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, sizeof(*req));
+ if (!skb)
+ return -ENOMEM;
+
+ set_bit(MT76_HW_SCANNING, &phy->mt76->state);
+ mvif->scan_seq_num = (mvif->scan_seq_num + 1) & 0x7f;
+
+ req = (struct mt7615_hw_scan_req *)skb_put(skb, sizeof(*req));
+
+ req->seq_num = mvif->scan_seq_num | ext_phy << 7;
+ req->bss_idx = mvif->idx;
+ req->scan_type = sreq->n_ssids ? 1 : 0;
+ req->probe_req_num = sreq->n_ssids ? 2 : 0;
+ req->version = 1;
+
+ for (i = 0; i < sreq->n_ssids; i++) {
+ if (!sreq->ssids[i].ssid_len)
+ continue;
+
+ req->ssids[i].ssid_len = cpu_to_le32(sreq->ssids[i].ssid_len);
+ memcpy(req->ssids[i].ssid, sreq->ssids[i].ssid,
+ sreq->ssids[i].ssid_len);
+ n_ssids++;
+ }
+ req->ssid_type = n_ssids ? BIT(2) : BIT(0);
+ req->ssid_type_ext = n_ssids ? BIT(0) : 0;
+ req->ssids_num = n_ssids;
+
+ /* increase channel time for passive scan */
+ if (!sreq->n_ssids)
+ duration *= 2;
+ req->timeout_value = cpu_to_le16(sreq->n_channels * duration);
+ req->channel_min_dwell_time = cpu_to_le16(duration);
+ req->channel_dwell_time = cpu_to_le16(duration);
+
+ req->channels_num = min_t(u8, sreq->n_channels, 32);
+ req->ext_channels_num = min_t(u8, ext_channels_num, 32);
+ for (i = 0; i < req->channels_num + req->ext_channels_num; i++) {
+ if (i >= 32)
+ chan = &req->ext_channels[i - 32];
+ else
+ chan = &req->channels[i];
+
+ chan->band = scan_list[i]->band == NL80211_BAND_2GHZ ? 1 : 2;
+ chan->channel_num = scan_list[i]->hw_value;
+ }
+ req->channel_type = sreq->n_channels ? 4 : 0;
+
+ if (sreq->ie_len > 0) {
+ memcpy(req->ies, sreq->ie, sreq->ie_len);
+ req->ies_len = cpu_to_le16(sreq->ie_len);
+ }
+
+ memcpy(req->bssid, sreq->bssid, ETH_ALEN);
+ if (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ get_random_mask_addr(req->random_mac, sreq->mac_addr,
+ sreq->mac_addr_mask);
+ req->scan_func = 1;
+ }
+
+ err = __mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_CMD_START_HW_SCAN,
+ false);
+ if (err < 0)
+ clear_bit(MT76_HW_SCANNING, &phy->mt76->state);
+
+ return err;
+}
+
+int mt7615_mcu_cancel_hw_scan(struct mt7615_phy *phy,
+ struct ieee80211_vif *vif)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt7615_dev *dev = phy->dev;
+ struct {
+ u8 seq_num;
+ u8 is_ext_channel;
+ u8 rsv[2];
+ } __packed req = {
+ .seq_num = mvif->scan_seq_num,
+ };
+
+ if (test_and_clear_bit(MT76_HW_SCANNING, &phy->mt76->state)) {
+ struct cfg80211_scan_info info = {
+ .aborted = true,
+ };
+
+ ieee80211_scan_completed(phy->mt76->hw, &info);
+ }
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_CANCEL_HW_SCAN, &req,
+ sizeof(req), false);
+}
+
+int mt7615_mcu_sched_scan_req(struct mt7615_phy *phy,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *sreq)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct ieee80211_channel **scan_list = sreq->channels;
+ struct mt7615_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+ struct mt7615_mcu_scan_channel *chan;
+ struct mt7615_sched_scan_req *req;
+ struct cfg80211_match_set *match;
+ struct cfg80211_ssid *ssid;
+ struct sk_buff *skb;
+ int i;
+
+ if (!mt7615_firmware_offload(dev))
+ return -ENOTSUPP;
+
+ skb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
+ sizeof(*req) + sreq->ie_len);
+ if (!skb)
+ return -ENOMEM;
+
+ mvif->scan_seq_num = (mvif->scan_seq_num + 1) & 0x7f;
+
+ req = (struct mt7615_sched_scan_req *)skb_put(skb, sizeof(*req));
+ req->version = 1;
+ req->seq_num = mvif->scan_seq_num | ext_phy << 7;
+
+ if (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ get_random_mask_addr(req->random_mac, sreq->mac_addr,
+ sreq->mac_addr_mask);
+ req->scan_func = 1;
+ }
+
+ req->ssids_num = sreq->n_ssids;
+ for (i = 0; i < req->ssids_num; i++) {
+ ssid = &sreq->ssids[i];
+ memcpy(req->ssids[i].ssid, ssid->ssid, ssid->ssid_len);
+ req->ssids[i].ssid_len = cpu_to_le32(ssid->ssid_len);
+ }
+
+ req->match_num = sreq->n_match_sets;
+ for (i = 0; i < req->match_num; i++) {
+ match = &sreq->match_sets[i];
+ memcpy(req->match[i].ssid, match->ssid.ssid,
+ match->ssid.ssid_len);
+ req->match[i].rssi_th = cpu_to_le32(match->rssi_thold);
+ req->match[i].ssid_len = match->ssid.ssid_len;
+ }
+
+ req->channel_type = sreq->n_channels ? 4 : 0;
+ req->channels_num = min_t(u8, sreq->n_channels, 64);
+ for (i = 0; i < req->channels_num; i++) {
+ chan = &req->channels[i];
+ chan->band = scan_list[i]->band == NL80211_BAND_2GHZ ? 1 : 2;
+ chan->channel_num = scan_list[i]->hw_value;
+ }
+
+ req->intervals_num = sreq->n_scan_plans;
+ for (i = 0; i < req->intervals_num; i++)
+ req->intervals[i] = cpu_to_le16(sreq->scan_plans[i].interval);
+
+ if (sreq->ie_len > 0) {
+ req->ie_len = cpu_to_le16(sreq->ie_len);
+ memcpy(skb_put(skb, sreq->ie_len), sreq->ie, sreq->ie_len);
+ }
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_CMD_SCHED_SCAN_REQ, false);
+}
+
+int mt7615_mcu_sched_scan_enable(struct mt7615_phy *phy,
+ struct ieee80211_vif *vif,
+ bool enable)
+{
+ struct mt7615_dev *dev = phy->dev;
+ struct {
+ u8 active; /* 0: enabled 1: disabled */
+ u8 rsv[3];
+ } __packed req = {
+ .active = !enable,
+ };
+
+ if (!mt7615_firmware_offload(dev))
+ return -ENOTSUPP;
+
+ if (enable)
+ set_bit(MT76_HW_SCHED_SCANNING, &phy->mt76->state);
+ else
+ clear_bit(MT76_HW_SCHED_SCANNING, &phy->mt76->state);
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SCHED_SCAN_ENABLE,
+ &req, sizeof(req), false);
+}
+
+static int mt7615_find_freq_idx(const u16 *freqs, int n_freqs, u16 cur)
+{
+ int i;
+
+ for (i = 0; i < n_freqs; i++)
+ if (cur == freqs[i])
+ return i;
+
+ return -1;
+}
+
+static int mt7615_dcoc_freq_idx(u16 freq, u8 bw)
+{
+ static const u16 freq_list[] = {
+ 4980, 5805, 5905, 5190,
+ 5230, 5270, 5310, 5350,
+ 5390, 5430, 5470, 5510,
+ 5550, 5590, 5630, 5670,
+ 5710, 5755, 5795, 5835,
+ 5875, 5210, 5290, 5370,
+ 5450, 5530, 5610, 5690,
+ 5775, 5855
+ };
+ static const u16 freq_bw40[] = {
+ 5190, 5230, 5270, 5310,
+ 5350, 5390, 5430, 5470,
+ 5510, 5550, 5590, 5630,
+ 5670, 5710, 5755, 5795,
+ 5835, 5875
+ };
+ int offset_2g = ARRAY_SIZE(freq_list);
+ int idx;
+
+ if (freq < 4000) {
+ if (freq < 2427)
+ return offset_2g;
+ if (freq < 2442)
+ return offset_2g + 1;
+ if (freq < 2457)
+ return offset_2g + 2;
+
+ return offset_2g + 3;
+ }
+
+ switch (bw) {
+ case NL80211_CHAN_WIDTH_80:
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ break;
+ default:
+ idx = mt7615_find_freq_idx(freq_bw40, ARRAY_SIZE(freq_bw40),
+ freq + 10);
+ if (idx >= 0) {
+ freq = freq_bw40[idx];
+ break;
+ }
+
+ idx = mt7615_find_freq_idx(freq_bw40, ARRAY_SIZE(freq_bw40),
+ freq - 10);
+ if (idx >= 0) {
+ freq = freq_bw40[idx];
+ break;
+ }
+ fallthrough;
+ case NL80211_CHAN_WIDTH_40:
+ idx = mt7615_find_freq_idx(freq_bw40, ARRAY_SIZE(freq_bw40),
+ freq);
+ if (idx >= 0)
+ break;
+
+ return -1;
+ }
+
+ return mt7615_find_freq_idx(freq_list, ARRAY_SIZE(freq_list), freq);
+}
+
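+/* Apply RX DCOC calibration data from the EEPROM when the flash
+ * calibration flag is set; frequencies without stored data fall back to
+ * runtime calibration. 160 MHz channels are split into two 80 MHz halves
+ * and programmed in two passes.
+ */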
+int mt7615_mcu_apply_rx_dcoc(struct mt7615_phy *phy)
+{
+ struct mt7615_dev *dev = phy->dev;
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+ int freq2 = chandef->center_freq2;
+ int ret;
+ struct {
+ u8 direction;
+ u8 runtime_calibration;
+ u8 _rsv[2];
+
+ __le16 center_freq;
+ u8 bw;
+ u8 band;
+ u8 is_freq2;
+ u8 success;
+ u8 dbdc_en;
+
+ u8 _rsv2;
+
+ struct {
+ __le32 sx0_i_lna[4];
+ __le32 sx0_q_lna[4];
+
+ __le32 sx2_i_lna[4];
+ __le32 sx2_q_lna[4];
+ } dcoc_data[4];
+ } req = {
+ .direction = 1,
+
+ .bw = mt7615_mcu_chan_bw(chandef),
+ .band = chandef->center_freq1 > 4000,
+ .dbdc_en = !!dev->mt76.phy2,
+ };
+ u16 center_freq = chandef->center_freq1;
+ int freq_idx;
+ u8 *eep = dev->mt76.eeprom.data;
+
+ if (!(eep[MT_EE_CALDATA_FLASH] & MT_EE_CALDATA_FLASH_RX_CAL))
+ return 0;
+
+ if (chandef->width == NL80211_CHAN_WIDTH_160) {
+ freq2 = center_freq + 40;
+ center_freq -= 40;
+ }
+
+again:
+ req.runtime_calibration = 1;
+ freq_idx = mt7615_dcoc_freq_idx(center_freq, chandef->width);
+ if (freq_idx < 0)
+ goto out;
+
+ memcpy(req.dcoc_data, eep + MT7615_EEPROM_DCOC_OFFSET +
+ freq_idx * MT7615_EEPROM_DCOC_SIZE,
+ sizeof(req.dcoc_data));
+ req.runtime_calibration = 0;
+
+out:
+ req.center_freq = cpu_to_le16(center_freq);
+ ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_RXDCOC_CAL, &req,
+ sizeof(req), true);
+
+ if ((chandef->width == NL80211_CHAN_WIDTH_80P80 ||
+ chandef->width == NL80211_CHAN_WIDTH_160) && !req.is_freq2) {
+ req.is_freq2 = true;
+ center_freq = freq2;
+ goto again;
+ }
+
+ return ret;
+}
+
+static int mt7615_dpd_freq_idx(u16 freq, u8 bw)
+{
+ static const u16 freq_list[] = {
+ 4920, 4940, 4960, 4980,
+ 5040, 5060, 5080, 5180,
+ 5200, 5220, 5240, 5260,
+ 5280, 5300, 5320, 5340,
+ 5360, 5380, 5400, 5420,
+ 5440, 5460, 5480, 5500,
+ 5520, 5540, 5560, 5580,
+ 5600, 5620, 5640, 5660,
+ 5680, 5700, 5720, 5745,
+ 5765, 5785, 5805, 5825,
+ 5845, 5865, 5885, 5905
+ };
+ int offset_2g = ARRAY_SIZE(freq_list);
+ int idx;
+
+ if (freq < 4000) {
+ if (freq < 2432)
+ return offset_2g;
+ if (freq < 2457)
+ return offset_2g + 1;
+
+ return offset_2g + 2;
+ }
+
+ if (bw != NL80211_CHAN_WIDTH_20) {
+ idx = mt7615_find_freq_idx(freq_list, ARRAY_SIZE(freq_list),
+ freq + 10);
+ if (idx >= 0)
+ return idx;
+
+ idx = mt7615_find_freq_idx(freq_list, ARRAY_SIZE(freq_list),
+ freq - 10);
+ if (idx >= 0)
+ return idx;
+ }
+
+ return mt7615_find_freq_idx(freq_list, ARRAY_SIZE(freq_list), freq);
+}
+
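+/* Apply TX DPD calibration data from the EEPROM, mirroring the RX DCOC
+ * handling above: per-frequency data when available, runtime calibration
+ * otherwise, with two passes for 80+80 and 160 MHz channels.
+ */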
+int mt7615_mcu_apply_tx_dpd(struct mt7615_phy *phy)
+{
+ struct mt7615_dev *dev = phy->dev;
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+ int freq2 = chandef->center_freq2;
+ int ret;
+ struct {
+ u8 direction;
+ u8 runtime_calibration;
+ u8 _rsv[2];
+
+ __le16 center_freq;
+ u8 bw;
+ u8 band;
+ u8 is_freq2;
+ u8 success;
+ u8 dbdc_en;
+
+ u8 _rsv2;
+
+ struct {
+ struct {
+ u32 dpd_g0;
+ u8 data[32];
+ } wf0, wf1;
+
+ struct {
+ u32 dpd_g0_prim;
+ u32 dpd_g0_sec;
+ u8 data_prim[32];
+ u8 data_sec[32];
+ } wf2, wf3;
+ } dpd_data;
+ } req = {
+ .direction = 1,
+
+ .bw = mt7615_mcu_chan_bw(chandef),
+ .band = chandef->center_freq1 > 4000,
+ .dbdc_en = !!dev->mt76.phy2,
+ };
+ u16 center_freq = chandef->center_freq1;
+ int freq_idx;
+ u8 *eep = dev->mt76.eeprom.data;
+
+ if (!(eep[MT_EE_CALDATA_FLASH] & MT_EE_CALDATA_FLASH_TX_DPD))
+ return 0;
+
+ if (chandef->width == NL80211_CHAN_WIDTH_160) {
+ freq2 = center_freq + 40;
+ center_freq -= 40;
+ }
+
+again:
+ req.runtime_calibration = 1;
+ freq_idx = mt7615_dpd_freq_idx(center_freq, chandef->width);
+ if (freq_idx < 0)
+ goto out;
+
+ memcpy(&req.dpd_data, eep + MT7615_EEPROM_TXDPD_OFFSET +
+ freq_idx * MT7615_EEPROM_TXDPD_SIZE,
+ sizeof(req.dpd_data));
+ req.runtime_calibration = 0;
+
+out:
+ req.center_freq = cpu_to_le16(center_freq);
+ ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_TXDPD_CAL, &req,
+ sizeof(req), true);
+
+ if ((chandef->width == NL80211_CHAN_WIDTH_80P80 ||
+ chandef->width == NL80211_CHAN_WIDTH_160) && !req.is_freq2) {
+ req.is_freq2 = true;
+ center_freq = freq2;
+ goto again;
+ }
+
+ return ret;
+}
+
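+/* Configure BSS power-save bookkeeping in the firmware (station interfaces
+ * with firmware offload only): the BSS is first aborted and, when enabling,
+ * re-announced as connected with the current AID, DTIM period and beacon
+ * interval.
+ */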
+int mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ bool enable)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct {
+ u8 bss_idx;
+ u8 dtim_period;
+ __le16 aid;
+ __le16 bcn_interval;
+ __le16 atim_window;
+ u8 uapsd;
+ u8 bmc_delivered_ac;
+ u8 bmc_triggered_ac;
+ u8 pad;
+ } req = {
+ .bss_idx = mvif->idx,
+ .aid = cpu_to_le16(vif->bss_conf.aid),
+ .dtim_period = vif->bss_conf.dtim_period,
+ .bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int),
+ };
+ struct {
+ u8 bss_idx;
+ u8 pad[3];
+ } req_hdr = {
+ .bss_idx = mvif->idx,
+ };
+ int err;
+
+ if (vif->type != NL80211_IFTYPE_STATION ||
+ !mt7615_firmware_offload(dev))
+ return -ENOTSUPP;
+
+ err = __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_BSS_ABORT,
+ &req_hdr, sizeof(req_hdr), false);
+ if (err < 0 || !enable)
+ return err;
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_BSS_CONNECTED,
+ &req, sizeof(req), false);
+}
+
+#ifdef CONFIG_PM
+int mt7615_mcu_set_hif_suspend(struct mt7615_dev *dev, bool suspend)
+{
+ struct {
+ struct {
+ u8 hif_type; /* 0x0: HIF_SDIO
+ * 0x1: HIF_USB
+ * 0x2: HIF_PCIE
+ */
+ u8 pad[3];
+ } __packed hdr;
+ struct hif_suspend_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 suspend;
+ } __packed hif_suspend;
+ } req = {
+ .hif_suspend = {
+ .tag = cpu_to_le16(0), /* 0: UNI_HIF_CTRL_BASIC */
+ .len = cpu_to_le16(sizeof(struct hif_suspend_tlv)),
+ .suspend = suspend,
+ },
+ };
+
+ if (mt76_is_mmio(&dev->mt76))
+ req.hdr.hif_type = 2;
+ else if (mt76_is_usb(&dev->mt76))
+ req.hdr.hif_type = 1;
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_HIF_CTRL,
+ &req, sizeof(req), true);
+}
+EXPORT_SYMBOL_GPL(mt7615_mcu_set_hif_suspend);
+
+static int
+mt7615_mcu_set_wow_ctrl(struct mt7615_phy *phy, struct ieee80211_vif *vif,
+ bool suspend, struct cfg80211_wowlan *wowlan)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt7615_dev *dev = phy->dev;
+ struct {
+ struct {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct mt7615_wow_ctrl_tlv wow_ctrl_tlv;
+ } req = {
+ .hdr = {
+ .bss_idx = mvif->idx,
+ },
+ .wow_ctrl_tlv = {
+ .tag = cpu_to_le16(UNI_SUSPEND_WOW_CTRL),
+ .len = cpu_to_le16(sizeof(struct mt7615_wow_ctrl_tlv)),
+ .cmd = suspend ? 1 : 2,
+ },
+ };
+
+ if (wowlan->magic_pkt)
+ req.wow_ctrl_tlv.trigger |= BIT(0);
+ if (wowlan->disconnect)
+ req.wow_ctrl_tlv.trigger |= BIT(2);
+ if (wowlan->nd_config) {
+ mt7615_mcu_sched_scan_req(phy, vif, wowlan->nd_config);
+ req.wow_ctrl_tlv.trigger |= BIT(5);
+ mt7615_mcu_sched_scan_enable(phy, vif, suspend);
+ }
+
+ if (mt76_is_mmio(&dev->mt76))
+ req.wow_ctrl_tlv.wakeup_hif = 2;
+ else if (mt76_is_usb(&dev->mt76))
+ req.wow_ctrl_tlv.wakeup_hif = 1;
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_SUSPEND,
+ &req, sizeof(req), true);
+}
+
+static int
+mt7615_mcu_set_wow_pattern(struct mt7615_dev *dev,
+ struct ieee80211_vif *vif,
+ u8 index, bool enable,
+ struct cfg80211_pkt_pattern *pattern)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt7615_wow_pattern_tlv *ptlv;
+ struct sk_buff *skb;
+ struct req_hdr {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr = {
+ .bss_idx = mvif->idx,
+ };
+
+ skb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
+ sizeof(hdr) + sizeof(*ptlv));
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put_data(skb, &hdr, sizeof(hdr));
+ ptlv = (struct mt7615_wow_pattern_tlv *)skb_put(skb, sizeof(*ptlv));
+ ptlv->tag = cpu_to_le16(UNI_SUSPEND_WOW_PATTERN);
+ ptlv->len = cpu_to_le16(sizeof(*ptlv));
+ ptlv->data_len = pattern->pattern_len;
+ ptlv->enable = enable;
+ ptlv->index = index;
+
+ memcpy(ptlv->pattern, pattern->pattern, pattern->pattern_len);
+	/* one mask bit per pattern byte, so round the mask length up */
+	memcpy(ptlv->mask, pattern->mask,
+	       DIV_ROUND_UP(pattern->pattern_len, 8));
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_UNI_CMD_SUSPEND, true);
+}
+
+static int
+mt7615_mcu_set_suspend_mode(struct mt7615_dev *dev,
+ struct ieee80211_vif *vif,
+ bool enable, u8 mdtim, bool wow_suspend)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct {
+ struct {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct mt7615_suspend_tlv suspend_tlv;
+ } req = {
+ .hdr = {
+ .bss_idx = mvif->idx,
+ },
+ .suspend_tlv = {
+ .tag = cpu_to_le16(UNI_SUSPEND_MODE_SETTING),
+ .len = cpu_to_le16(sizeof(struct mt7615_suspend_tlv)),
+ .enable = enable,
+ .mdtim = mdtim,
+ .wow_suspend = wow_suspend,
+ },
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_SUSPEND,
+ &req, sizeof(req), true);
+}
+
+static int
+mt7615_mcu_set_gtk_rekey(struct mt7615_dev *dev,
+ struct ieee80211_vif *vif,
+ bool suspend)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct {
+ struct {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct mt7615_gtk_rekey_tlv gtk_tlv;
+ } __packed req = {
+ .hdr = {
+ .bss_idx = mvif->idx,
+ },
+ .gtk_tlv = {
+ .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_GTK_REKEY),
+ .len = cpu_to_le16(sizeof(struct mt7615_gtk_rekey_tlv)),
+ .rekey_mode = !suspend,
+ },
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_OFFLOAD,
+ &req, sizeof(req), true);
+}
+
+static int
+mt7615_mcu_set_arp_filter(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ bool suspend)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct {
+ struct {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct mt7615_arpns_tlv arpns;
+ } req = {
+ .hdr = {
+ .bss_idx = mvif->idx,
+ },
+ .arpns = {
+ .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ARP),
+ .len = cpu_to_le16(sizeof(struct mt7615_arpns_tlv)),
+ .mode = suspend,
+ },
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_OFFLOAD,
+ &req, sizeof(req), true);
+}
+
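+/* Per-vif suspend/resume hook, driven by the MT76_STATE_SUSPEND phy state:
+ * reprograms BSS power management, GTK rekey and ARP offload, the firmware
+ * suspend mode, every configured WoW pattern and finally the WoW control
+ * block from the wiphy wowlan configuration.
+ */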
+void mt7615_mcu_set_suspend_iter(void *priv, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct mt7615_phy *phy = priv;
+ bool suspend = test_bit(MT76_STATE_SUSPEND, &phy->mt76->state);
+ struct ieee80211_hw *hw = phy->mt76->hw;
+ struct cfg80211_wowlan *wowlan = hw->wiphy->wowlan_config;
+ int i;
+
+ mt7615_mcu_set_bss_pm(phy->dev, vif, suspend);
+
+ mt7615_mcu_set_gtk_rekey(phy->dev, vif, suspend);
+ mt7615_mcu_set_arp_filter(phy->dev, vif, suspend);
+
+ mt7615_mcu_set_suspend_mode(phy->dev, vif, suspend, 1, true);
+
+ for (i = 0; i < wowlan->n_patterns; i++)
+ mt7615_mcu_set_wow_pattern(phy->dev, vif, i, suspend,
+ &wowlan->patterns[i]);
+ mt7615_mcu_set_wow_ctrl(phy, vif, suspend, wowlan);
+}
+
+static void
+mt7615_mcu_key_iter(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct ieee80211_key_conf *key,
+ void *data)
+{
+ struct mt7615_gtk_rekey_tlv *gtk_tlv = data;
+ u32 cipher;
+
+ if (key->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
+ key->cipher != WLAN_CIPHER_SUITE_CCMP &&
+ key->cipher != WLAN_CIPHER_SUITE_TKIP)
+ return;
+
+ if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
+ gtk_tlv->proto = cpu_to_le32(NL80211_WPA_VERSION_1);
+ cipher = BIT(3);
+ } else {
+ gtk_tlv->proto = cpu_to_le32(NL80211_WPA_VERSION_2);
+ cipher = BIT(4);
+ }
+
+	/* we assume here that a single pairwise key is configured */
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
+ gtk_tlv->pairwise_cipher = cpu_to_le32(cipher);
+ gtk_tlv->group_cipher = cpu_to_le32(cipher);
+ gtk_tlv->keyid = key->keyidx;
+ }
+}
+
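+/* Hand the supplicant-provided KEK/KCK and replay counter to the firmware so
+ * it can perform GTK rekeying on its own while the host is asleep; the WPA
+ * version and cipher fields are derived from the currently installed keys
+ * via mt7615_mcu_key_iter().
+ */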
+int mt7615_mcu_update_gtk_rekey(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *key)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_gtk_rekey_tlv *gtk_tlv;
+ struct sk_buff *skb;
+ struct {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr = {
+ .bss_idx = mvif->idx,
+ };
+
+ skb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
+ sizeof(hdr) + sizeof(*gtk_tlv));
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put_data(skb, &hdr, sizeof(hdr));
+ gtk_tlv = (struct mt7615_gtk_rekey_tlv *)skb_put(skb,
+ sizeof(*gtk_tlv));
+ gtk_tlv->tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_GTK_REKEY);
+ gtk_tlv->len = cpu_to_le16(sizeof(*gtk_tlv));
+ gtk_tlv->rekey_mode = 2;
+ gtk_tlv->option = 1;
+
+ rcu_read_lock();
+ ieee80211_iter_keys_rcu(hw, vif, mt7615_mcu_key_iter, gtk_tlv);
+ rcu_read_unlock();
+
+ memcpy(gtk_tlv->kek, key->kek, NL80211_KEK_LEN);
+ memcpy(gtk_tlv->kck, key->kck, NL80211_KCK_LEN);
+ memcpy(gtk_tlv->replay_ctr, key->replay_ctr, NL80211_REPLAY_CTR_LEN);
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_UNI_CMD_OFFLOAD, true);
+}
+#endif /* CONFIG_PM */
+
+int mt7615_mcu_set_roc(struct mt7615_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_channel *chan, int duration)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt7615_dev *dev = phy->dev;
+ struct mt7615_roc_tlv req = {
+ .bss_idx = mvif->idx,
+ .active = !chan,
+ .max_interval = cpu_to_le32(duration),
+ .primary_chan = chan ? chan->hw_value : 0,
+ .band = chan ? chan->band : 0,
+ .req_type = 2,
+ };
+
+ phy->roc_grant = false;
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_ROC, &req,
+ sizeof(req), false);
+}
+
+int mt7615_mcu_update_arp_filter(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct sk_buff *skb;
+ int i, len = min_t(int, info->arp_addr_cnt,
+ IEEE80211_BSS_ARP_ADDR_LIST_LEN);
+ struct {
+ struct {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct mt7615_arpns_tlv arp;
+ } req_hdr = {
+ .hdr = {
+ .bss_idx = mvif->idx,
+ },
+ .arp = {
+ .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ARP),
+ .len = cpu_to_le16(sizeof(struct mt7615_arpns_tlv)),
+ .ips_num = len,
+ .mode = 2, /* update */
+ .option = 1,
+ },
+ };
+
+ if (!mt7615_firmware_offload(dev))
+ return 0;
+
+ skb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
+ sizeof(req_hdr) + len * sizeof(__be32));
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put_data(skb, &req_hdr, sizeof(req_hdr));
+ for (i = 0; i < len; i++) {
+ u8 *addr = (u8 *)skb_put(skb, sizeof(__be32));
+
+ memcpy(addr, &info->arp_addr_list[i], sizeof(__be32));
+ }
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_UNI_CMD_OFFLOAD, true);
+}
+
+int mt7615_mcu_set_p2p_oppps(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ int ct_window = vif->bss_conf.p2p_noa_attr.oppps_ctwindow;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct {
+ __le32 ct_win;
+ u8 bss_idx;
+ u8 rsv[3];
+ } __packed req = {
+ .ct_win = cpu_to_le32(ct_window),
+ .bss_idx = mvif->idx,
+ };
+
+ if (!mt7615_firmware_offload(dev))
+ return -ENOTSUPP;
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_P2P_OPPPS,
+ &req, sizeof(req), false);
+}
+
+u32 mt7615_mcu_reg_rr(struct mt76_dev *dev, u32 offset)
+{
+ struct {
+ __le32 addr;
+ __le32 val;
+ } __packed req = {
+ .addr = cpu_to_le32(offset),
+ };
+
+ return __mt76_mcu_send_msg(dev, MCU_CMD_REG_READ,
+ &req, sizeof(req), true);
+}
+EXPORT_SYMBOL_GPL(mt7615_mcu_reg_rr);
+
+void mt7615_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val)
+{
+ struct {
+ __le32 addr;
+ __le32 val;
+ } __packed req = {
+ .addr = cpu_to_le32(offset),
+ .val = cpu_to_le32(val),
+ };
+
+ __mt76_mcu_send_msg(dev, MCU_CMD_REG_WRITE,
+ &req, sizeof(req), false);
+}
+EXPORT_SYMBOL_GPL(mt7615_mcu_reg_wr);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
new file mode 100644
index 000000000..7b856e9ee
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
@@ -0,0 +1,1060 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2019 MediaTek Inc. */
+
+#ifndef __MT7615_MCU_H
+#define __MT7615_MCU_H
+
+struct mt7615_mcu_txd {
+ __le32 txd[8];
+
+ __le16 len;
+ __le16 pq_id;
+
+ u8 cid;
+ u8 pkt_type;
+	u8 set_query; /* FW doesn't care */
+ u8 seq;
+
+ u8 uc_d2b0_rev;
+ u8 ext_cid;
+ u8 s2d_index;
+ u8 ext_cid_ack;
+
+ u32 reserved[5];
+} __packed __aligned(4);
+
+/**
+ * struct mt7615_uni_txd - mcu command descriptor for firmware v3
+ * @txd: hardware descriptor
+ * @len: total length not including txd
+ * @cid: command identifier
+ * @pkt_type: must be 0xa0 (cmd packet by long format)
+ * @frag_n: fragment number
+ * @seq: sequence number
+ * @checksum: 0 means there is no checksum
+ * @s2d_index: index for command source and destination
+ * Definition | value | note
+ * CMD_S2D_IDX_H2N | 0x00 | command from HOST to WM
+ * CMD_S2D_IDX_C2N | 0x01 | command from WA to WM
+ * CMD_S2D_IDX_H2C | 0x02 | command from HOST to WA
+ * CMD_S2D_IDX_H2N_AND_H2C | 0x03 | command from HOST to WA and WM
+ *
+ * @option: command option
+ * BIT[0]: UNI_CMD_OPT_BIT_ACK
+ * set to 1 to request a fw reply
+ * if UNI_CMD_OPT_BIT_0_ACK is set and UNI_CMD_OPT_BIT_2_SET_QUERY
+ * is set, mcu firmware will send response event EID = 0x01
+ * (UNI_EVENT_ID_CMD_RESULT) to the host.
+ * BIT[1]: UNI_CMD_OPT_BIT_UNI_CMD
+ * 0: original command
+ * 1: unified command
+ * BIT[2]: UNI_CMD_OPT_BIT_SET_QUERY
+ * 0: QUERY command
+ * 1: SET command
+ */
+struct mt7615_uni_txd {
+ __le32 txd[8];
+
+ /* DW1 */
+ __le16 len;
+ __le16 cid;
+
+ /* DW2 */
+ u8 reserved;
+ u8 pkt_type;
+ u8 frag_n;
+ u8 seq;
+
+ /* DW3 */
+ __le16 checksum;
+ u8 s2d_index;
+ u8 option;
+
+ /* DW4 */
+ u8 reserved2[4];
+} __packed __aligned(4);
+
+/* event table */
+enum {
+ MCU_EVENT_TARGET_ADDRESS_LEN = 0x01,
+ MCU_EVENT_FW_START = 0x01,
+ MCU_EVENT_GENERIC = 0x01,
+ MCU_EVENT_ACCESS_REG = 0x02,
+ MCU_EVENT_MT_PATCH_SEM = 0x04,
+ MCU_EVENT_REG_ACCESS = 0x05,
+ MCU_EVENT_SCAN_DONE = 0x0d,
+ MCU_EVENT_ROC = 0x10,
+ MCU_EVENT_BSS_ABSENCE = 0x11,
+ MCU_EVENT_BSS_BEACON_LOSS = 0x13,
+ MCU_EVENT_CH_PRIVILEGE = 0x18,
+ MCU_EVENT_SCHED_SCAN_DONE = 0x23,
+ MCU_EVENT_EXT = 0xed,
+ MCU_EVENT_RESTART_DL = 0xef,
+};
+
+/* ext event table */
+enum {
+ MCU_EXT_EVENT_PS_SYNC = 0x5,
+ MCU_EXT_EVENT_FW_LOG_2_HOST = 0x13,
+ MCU_EXT_EVENT_THERMAL_PROTECT = 0x22,
+ MCU_EXT_EVENT_ASSERT_DUMP = 0x23,
+ MCU_EXT_EVENT_RDD_REPORT = 0x3a,
+ MCU_EXT_EVENT_CSA_NOTIFY = 0x4f,
+};
+
+enum {
+ MT_SKU_CCK_1_2 = 0,
+ MT_SKU_CCK_55_11,
+ MT_SKU_OFDM_6_9,
+ MT_SKU_OFDM_12_18,
+ MT_SKU_OFDM_24_36,
+ MT_SKU_OFDM_48,
+ MT_SKU_OFDM_54,
+ MT_SKU_HT20_0_8,
+ MT_SKU_HT20_32,
+ MT_SKU_HT20_1_2_9_10,
+ MT_SKU_HT20_3_4_11_12,
+ MT_SKU_HT20_5_13,
+ MT_SKU_HT20_6_14,
+ MT_SKU_HT20_7_15,
+ MT_SKU_HT40_0_8,
+ MT_SKU_HT40_32,
+ MT_SKU_HT40_1_2_9_10,
+ MT_SKU_HT40_3_4_11_12,
+ MT_SKU_HT40_5_13,
+ MT_SKU_HT40_6_14,
+ MT_SKU_HT40_7_15,
+ MT_SKU_VHT20_0,
+ MT_SKU_VHT20_1_2,
+ MT_SKU_VHT20_3_4,
+ MT_SKU_VHT20_5_6,
+ MT_SKU_VHT20_7,
+ MT_SKU_VHT20_8,
+ MT_SKU_VHT20_9,
+ MT_SKU_VHT40_0,
+ MT_SKU_VHT40_1_2,
+ MT_SKU_VHT40_3_4,
+ MT_SKU_VHT40_5_6,
+ MT_SKU_VHT40_7,
+ MT_SKU_VHT40_8,
+ MT_SKU_VHT40_9,
+ MT_SKU_VHT80_0,
+ MT_SKU_VHT80_1_2,
+ MT_SKU_VHT80_3_4,
+ MT_SKU_VHT80_5_6,
+ MT_SKU_VHT80_7,
+ MT_SKU_VHT80_8,
+ MT_SKU_VHT80_9,
+ MT_SKU_VHT160_0,
+ MT_SKU_VHT160_1_2,
+ MT_SKU_VHT160_3_4,
+ MT_SKU_VHT160_5_6,
+ MT_SKU_VHT160_7,
+ MT_SKU_VHT160_8,
+ MT_SKU_VHT160_9,
+ MT_SKU_1SS_DELTA,
+ MT_SKU_2SS_DELTA,
+ MT_SKU_3SS_DELTA,
+ MT_SKU_4SS_DELTA,
+};
+
+struct mt7615_mcu_rxd {
+ __le32 rxd[4];
+
+ __le16 len;
+ __le16 pkt_type_id;
+
+ u8 eid;
+ u8 seq;
+ __le16 __rsv;
+
+ u8 ext_eid;
+ u8 __rsv1[2];
+ u8 s2d_index;
+};
+
+struct mt7615_mcu_rdd_report {
+ struct mt7615_mcu_rxd rxd;
+
+ u8 idx;
+ u8 long_detected;
+ u8 constant_prf_detected;
+ u8 staggered_prf_detected;
+ u8 radar_type_idx;
+ u8 periodic_pulse_num;
+ u8 long_pulse_num;
+ u8 hw_pulse_num;
+
+ u8 out_lpn;
+ u8 out_spn;
+ u8 out_crpn;
+ u8 out_crpw;
+ u8 out_crbn;
+ u8 out_stgpn;
+ u8 out_stgpw;
+
+ u8 _rsv[2];
+
+ __le32 out_pri_const;
+ __le32 out_pri_stg[3];
+
+ struct {
+ __le32 start;
+ __le16 pulse_width;
+ __le16 pulse_power;
+ } long_pulse[32];
+
+ struct {
+ __le32 start;
+ __le16 pulse_width;
+ __le16 pulse_power;
+ } periodic_pulse[32];
+
+ struct {
+ __le32 start;
+ __le16 pulse_width;
+ __le16 pulse_power;
+ u8 sc_pass;
+ u8 sw_reset;
+ } hw_pulse[32];
+};
+
+#define MCU_PQ_ID(p, q) (((p) << 15) | ((q) << 10))
+#define MCU_PKT_ID 0xa0
+
+enum {
+ MCU_Q_QUERY,
+ MCU_Q_SET,
+ MCU_Q_RESERVED,
+ MCU_Q_NA
+};
+
+enum {
+ MCU_S2D_H2N,
+ MCU_S2D_C2N,
+ MCU_S2D_H2C,
+ MCU_S2D_H2CN
+};
+
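+/* The high bits of a command id are host-side routing hints (firmware
+ * download, unified, offload/CE, query); MCU_CMD_MASK recovers the plain
+ * command id that is actually handed to the firmware.
+ */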
+#define MCU_FW_PREFIX BIT(31)
+#define MCU_UNI_PREFIX BIT(30)
+#define MCU_CE_PREFIX BIT(29)
+#define MCU_QUERY_PREFIX BIT(28)
+#define MCU_CMD_MASK ~(MCU_FW_PREFIX | MCU_UNI_PREFIX | \
+ MCU_CE_PREFIX | MCU_QUERY_PREFIX)
+
+#define MCU_QUERY_MASK BIT(16)
+
+enum {
+ MCU_CMD_TARGET_ADDRESS_LEN_REQ = MCU_FW_PREFIX | 0x01,
+ MCU_CMD_FW_START_REQ = MCU_FW_PREFIX | 0x02,
+ MCU_CMD_INIT_ACCESS_REG = 0x3,
+ MCU_CMD_PATCH_START_REQ = 0x05,
+ MCU_CMD_PATCH_FINISH_REQ = MCU_FW_PREFIX | 0x07,
+ MCU_CMD_PATCH_SEM_CONTROL = MCU_FW_PREFIX | 0x10,
+ MCU_CMD_EXT_CID = 0xED,
+ MCU_CMD_FW_SCATTER = MCU_FW_PREFIX | 0xEE,
+ MCU_CMD_RESTART_DL_REQ = MCU_FW_PREFIX | 0xEF,
+};
+
+enum {
+ MCU_EXT_CMD_RF_REG_ACCESS = 0x02,
+ MCU_EXT_CMD_PM_STATE_CTRL = 0x07,
+ MCU_EXT_CMD_CHANNEL_SWITCH = 0x08,
+ MCU_EXT_CMD_SET_TX_POWER_CTRL = 0x11,
+ MCU_EXT_CMD_FW_LOG_2_HOST = 0x13,
+ MCU_EXT_CMD_EFUSE_BUFFER_MODE = 0x21,
+ MCU_EXT_CMD_STA_REC_UPDATE = 0x25,
+ MCU_EXT_CMD_BSS_INFO_UPDATE = 0x26,
+ MCU_EXT_CMD_EDCA_UPDATE = 0x27,
+ MCU_EXT_CMD_DEV_INFO_UPDATE = 0x2A,
+ MCU_EXT_CMD_GET_TEMP = 0x2c,
+ MCU_EXT_CMD_WTBL_UPDATE = 0x32,
+ MCU_EXT_CMD_SET_RDD_CTRL = 0x3a,
+ MCU_EXT_CMD_ATE_CTRL = 0x3d,
+ MCU_EXT_CMD_PROTECT_CTRL = 0x3e,
+ MCU_EXT_CMD_DBDC_CTRL = 0x45,
+ MCU_EXT_CMD_MAC_INIT_CTRL = 0x46,
+ MCU_EXT_CMD_BCN_OFFLOAD = 0x49,
+ MCU_EXT_CMD_SET_RX_PATH = 0x4e,
+ MCU_EXT_CMD_TX_POWER_FEATURE_CTRL = 0x58,
+ MCU_EXT_CMD_RXDCOC_CAL = 0x59,
+ MCU_EXT_CMD_TXDPD_CAL = 0x60,
+ MCU_EXT_CMD_SET_RDD_TH = 0x7c,
+ MCU_EXT_CMD_SET_RDD_PATTERN = 0x7d,
+};
+
+enum {
+ MCU_UNI_CMD_DEV_INFO_UPDATE = MCU_UNI_PREFIX | 0x01,
+ MCU_UNI_CMD_BSS_INFO_UPDATE = MCU_UNI_PREFIX | 0x02,
+ MCU_UNI_CMD_STA_REC_UPDATE = MCU_UNI_PREFIX | 0x03,
+ MCU_UNI_CMD_SUSPEND = MCU_UNI_PREFIX | 0x05,
+ MCU_UNI_CMD_OFFLOAD = MCU_UNI_PREFIX | 0x06,
+ MCU_UNI_CMD_HIF_CTRL = MCU_UNI_PREFIX | 0x07,
+};
+
+enum {
+ MCU_ATE_SET_FREQ_OFFSET = 0xa,
+ MCU_ATE_SET_TX_POWER_CONTROL = 0x15,
+};
+
+struct mt7615_mcu_uni_event {
+ u8 cid;
+ u8 pad[3];
+ __le32 status; /* 0: success, others: fail */
+} __packed;
+
+struct mt7615_beacon_loss_event {
+ u8 bss_idx;
+ u8 reason;
+ u8 pad[2];
+} __packed;
+
+struct mt7615_mcu_scan_ssid {
+ __le32 ssid_len;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+} __packed;
+
+struct mt7615_mcu_scan_channel {
+ u8 band; /* 1: 2.4GHz
+ * 2: 5.0GHz
+ * Others: Reserved
+ */
+ u8 channel_num;
+} __packed;
+
+struct mt7615_mcu_scan_match {
+ __le32 rssi_th;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ u8 ssid_len;
+ u8 rsv[3];
+} __packed;
+
+struct mt7615_hw_scan_req {
+ u8 seq_num;
+ u8 bss_idx;
+ u8 scan_type; /* 0: PASSIVE SCAN
+ * 1: ACTIVE SCAN
+ */
+ u8 ssid_type; /* BIT(0) wildcard SSID
+ * BIT(1) P2P wildcard SSID
+ * BIT(2) specified SSID + wildcard SSID
+ * BIT(2) + ssid_type_ext BIT(0) specified SSID only
+ */
+ u8 ssids_num;
+	u8 probe_req_num; /* Number of probe requests for each SSID */
+ u8 scan_func; /* BIT(0) Enable random MAC scan
+ * BIT(1) Disable DBDC scan type 1~3.
+ * BIT(2) Use DBDC scan type 3 (dedicated one RF to scan).
+ */
+ u8 version; /* 0: Not support fields after ies.
+ * 1: Support fields after ies.
+ */
+ struct mt7615_mcu_scan_ssid ssids[4];
+ __le16 probe_delay_time;
+ __le16 channel_dwell_time; /* channel Dwell interval */
+ __le16 timeout_value;
+ u8 channel_type; /* 0: Full channels
+ * 1: Only 2.4GHz channels
+ * 2: Only 5GHz channels
+ * 3: P2P social channel only (channel #1, #6 and #11)
+ * 4: Specified channels
+ * Others: Reserved
+ */
+ u8 channels_num; /* valid when channel_type is 4 */
+ /* valid when channels_num is set */
+ struct mt7615_mcu_scan_channel channels[32];
+ __le16 ies_len;
+ u8 ies[MT7615_SCAN_IE_LEN];
+ /* following fields are valid if version > 0 */
+ u8 ext_channels_num;
+ u8 ext_ssids_num;
+ __le16 channel_min_dwell_time;
+ struct mt7615_mcu_scan_channel ext_channels[32];
+ struct mt7615_mcu_scan_ssid ext_ssids[6];
+ u8 bssid[ETH_ALEN];
+ u8 random_mac[ETH_ALEN]; /* valid when BIT(1) in scan_func is set. */
+ u8 pad[63];
+ u8 ssid_type_ext;
+} __packed;
+
+#define SCAN_DONE_EVENT_MAX_CHANNEL_NUM 64
+struct mt7615_hw_scan_done {
+ u8 seq_num;
+ u8 sparse_channel_num;
+ struct mt7615_mcu_scan_channel sparse_channel;
+ u8 complete_channel_num;
+ u8 current_state;
+ u8 version;
+ u8 pad;
+ __le32 beacon_scan_num;
+ u8 pno_enabled;
+ u8 pad2[3];
+ u8 sparse_channel_valid_num;
+ u8 pad3[3];
+ u8 channel_num[SCAN_DONE_EVENT_MAX_CHANNEL_NUM];
+ /* idle format for channel_idle_time
+	 * 0: first byte: idle time (ms), 2nd byte: dwell time (ms)
+	 * 1: first byte: idle time (8ms), 2nd byte: dwell time (8ms)
+ * 2: dwell time (16us)
+ */
+ __le16 channel_idle_time[SCAN_DONE_EVENT_MAX_CHANNEL_NUM];
+ /* beacon and probe response count */
+ u8 beacon_probe_num[SCAN_DONE_EVENT_MAX_CHANNEL_NUM];
+ u8 mdrdy_count[SCAN_DONE_EVENT_MAX_CHANNEL_NUM];
+ __le32 beacon_2g_num;
+ __le32 beacon_5g_num;
+} __packed;
+
+struct mt7615_sched_scan_req {
+ u8 version;
+ u8 seq_num;
+ u8 stop_on_match;
+ u8 ssids_num;
+ u8 match_num;
+ u8 pad;
+ __le16 ie_len;
+ struct mt7615_mcu_scan_ssid ssids[MT7615_MAX_SCHED_SCAN_SSID];
+ struct mt7615_mcu_scan_match match[MT7615_MAX_SCAN_MATCH];
+ u8 channel_type;
+ u8 channels_num;
+ u8 intervals_num;
+	u8 scan_func; /* BIT(0) enable random mac address */
+ struct mt7615_mcu_scan_channel channels[64];
+ __le16 intervals[MT7615_MAX_SCHED_SCAN_INTERVAL];
+ u8 random_mac[ETH_ALEN]; /* valid when BIT(0) in scan_func is set */
+ u8 pad2[58];
+} __packed;
+
+struct nt7615_sched_scan_done {
+ u8 seq_num;
+ u8 status; /* 0: ssid found */
+ __le16 pad;
+} __packed;
+
+struct mt7615_mcu_reg_event {
+ __le32 reg;
+ __le32 val;
+} __packed;
+
+struct mt7615_mcu_bss_event {
+ u8 bss_idx;
+ u8 is_absent;
+ u8 free_quota;
+ u8 pad;
+} __packed;
+
+struct mt7615_bss_basic_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 active;
+ u8 omac_idx;
+ u8 hw_bss_idx;
+ u8 band_idx;
+ __le32 conn_type;
+ u8 conn_state;
+ u8 wmm_idx;
+ u8 bssid[ETH_ALEN];
+ __le16 bmc_tx_wlan_idx;
+ __le16 bcn_interval;
+ u8 dtim_period;
+ u8 phymode; /* bit(0): A
+ * bit(1): B
+ * bit(2): G
+ * bit(3): GN
+ * bit(4): AN
+ * bit(5): AC
+ */
+ __le16 sta_idx;
+ u8 nonht_basic_phy;
+ u8 pad[3];
+} __packed;
+
+struct mt7615_bss_qos_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 qos;
+ u8 pad[3];
+} __packed;
+
+struct mt7615_wow_ctrl_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 cmd; /* 0x1: PM_WOWLAN_REQ_START
+ * 0x2: PM_WOWLAN_REQ_STOP
+ * 0x3: PM_WOWLAN_PARAM_CLEAR
+ */
+ u8 trigger; /* 0: NONE
+ * BIT(0): NL80211_WOWLAN_TRIG_MAGIC_PKT
+ * BIT(1): NL80211_WOWLAN_TRIG_ANY
+ * BIT(2): NL80211_WOWLAN_TRIG_DISCONNECT
+ * BIT(3): NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE
+ * BIT(4): BEACON_LOST
+ * BIT(5): NL80211_WOWLAN_TRIG_NET_DETECT
+ */
+ u8 wakeup_hif; /* 0x0: HIF_SDIO
+ * 0x1: HIF_USB
+ * 0x2: HIF_PCIE
+ * 0x3: HIF_GPIO
+ */
+ u8 pad;
+ u8 rsv[4];
+} __packed;
+
+#define MT7615_WOW_MASK_MAX_LEN 16
+#define MT7615_WOW_PATTEN_MAX_LEN 128
+struct mt7615_wow_pattern_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 index; /* pattern index */
+ u8 enable; /* 0: disable
+ * 1: enable
+ */
+ u8 data_len; /* pattern length */
+ u8 pad;
+ u8 mask[MT7615_WOW_MASK_MAX_LEN];
+ u8 pattern[MT7615_WOW_PATTEN_MAX_LEN];
+ u8 rsv[4];
+} __packed;
+
+struct mt7615_suspend_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 enable; /* 0: suspend mode disabled
+ * 1: suspend mode enabled
+ */
+ u8 mdtim; /* LP parameter */
+ u8 wow_suspend; /* 0: update by origin policy
+ * 1: update by wow dtim
+ */
+ u8 pad[5];
+} __packed;
+
+struct mt7615_gtk_rekey_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 kek[NL80211_KEK_LEN];
+ u8 kck[NL80211_KCK_LEN];
+ u8 replay_ctr[NL80211_REPLAY_CTR_LEN];
+ u8 rekey_mode; /* 0: rekey offload enable
+ * 1: rekey offload disable
+ * 2: rekey update
+ */
+ u8 keyid;
+ u8 pad[2];
+ __le32 proto; /* WPA-RSN-WAPI-OPSN */
+ __le32 pairwise_cipher;
+ __le32 group_cipher;
+ __le32 key_mgmt; /* NONE-PSK-IEEE802.1X */
+ __le32 mgmt_group_cipher;
+ u8 option; /* 1: rekey data update without enabling offload */
+ u8 reserverd[3];
+} __packed;
+
+struct mt7615_roc_tlv {
+ u8 bss_idx;
+ u8 token;
+ u8 active;
+ u8 primary_chan;
+ u8 sco;
+ u8 band;
+ u8 width; /* To support 80/160MHz bandwidth */
+ u8 freq_seg1; /* To support 80/160MHz bandwidth */
+ u8 freq_seg2; /* To support 80/160MHz bandwidth */
+ u8 req_type;
+ u8 dbdc_band;
+ u8 rsv0;
+ __le32 max_interval; /* ms */
+ u8 rsv1[8];
+} __packed;
+
+struct mt7615_arpns_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 mode;
+ u8 ips_num;
+ u8 option;
+ u8 pad[1];
+} __packed;
+
+/* offload mcu commands */
+enum {
+ MCU_CMD_START_HW_SCAN = MCU_CE_PREFIX | 0x03,
+ MCU_CMD_SET_PS_PROFILE = MCU_CE_PREFIX | 0x05,
+ MCU_CMD_SET_CHAN_DOMAIN = MCU_CE_PREFIX | 0x0f,
+ MCU_CMD_SET_BSS_CONNECTED = MCU_CE_PREFIX | 0x16,
+ MCU_CMD_SET_BSS_ABORT = MCU_CE_PREFIX | 0x17,
+ MCU_CMD_CANCEL_HW_SCAN = MCU_CE_PREFIX | 0x1b,
+ MCU_CMD_SET_ROC = MCU_CE_PREFIX | 0x1c,
+ MCU_CMD_SET_P2P_OPPPS = MCU_CE_PREFIX | 0x33,
+ MCU_CMD_SCHED_SCAN_ENABLE = MCU_CE_PREFIX | 0x61,
+ MCU_CMD_SCHED_SCAN_REQ = MCU_CE_PREFIX | 0x62,
+ MCU_CMD_REG_WRITE = MCU_CE_PREFIX | 0xc0,
+ MCU_CMD_REG_READ = MCU_CE_PREFIX | MCU_QUERY_MASK | 0xc0,
+};
+
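+/* These flags correspond to the option bits documented for struct
+ * mt7615_uni_txd: BIT(0) requests an ack event, BIT(1) marks the unified
+ * command format, BIT(2) is the set/query selector.
+ */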
+#define MCU_CMD_ACK BIT(0)
+#define MCU_CMD_UNI BIT(1)
+#define MCU_CMD_QUERY BIT(2)
+
+#define MCU_CMD_UNI_EXT_ACK (MCU_CMD_ACK | MCU_CMD_UNI | MCU_CMD_QUERY)
+
+enum {
+ UNI_BSS_INFO_BASIC = 0,
+ UNI_BSS_INFO_RLM = 2,
+ UNI_BSS_INFO_BCN_CONTENT = 7,
+ UNI_BSS_INFO_QBSS = 15,
+ UNI_BSS_INFO_UAPSD = 19,
+};
+
+enum {
+ UNI_SUSPEND_MODE_SETTING,
+ UNI_SUSPEND_WOW_CTRL,
+ UNI_SUSPEND_WOW_GPIO_PARAM,
+ UNI_SUSPEND_WOW_WAKEUP_PORT,
+ UNI_SUSPEND_WOW_PATTERN,
+};
+
+enum {
+ UNI_OFFLOAD_OFFLOAD_ARP,
+ UNI_OFFLOAD_OFFLOAD_ND,
+ UNI_OFFLOAD_OFFLOAD_GTK_REKEY,
+ UNI_OFFLOAD_OFFLOAD_BMC_RPY_DETECT,
+};
+
+enum {
+ PATCH_SEM_RELEASE = 0x0,
+ PATCH_SEM_GET = 0x1
+};
+
+enum {
+ PATCH_NOT_DL_SEM_FAIL = 0x0,
+ PATCH_IS_DL = 0x1,
+ PATCH_NOT_DL_SEM_SUCCESS = 0x2,
+ PATCH_REL_SEM_SUCCESS = 0x3
+};
+
+enum {
+ FW_STATE_INITIAL = 0,
+ FW_STATE_FW_DOWNLOAD = 1,
+ FW_STATE_NORMAL_OPERATION = 2,
+ FW_STATE_NORMAL_TRX = 3,
+ FW_STATE_CR4_RDY = 7
+};
+
+enum {
+ FW_STATE_PWR_ON = 1,
+ FW_STATE_N9_RDY = 2,
+};
+
+#define STA_TYPE_STA BIT(0)
+#define STA_TYPE_AP BIT(1)
+#define STA_TYPE_ADHOC BIT(2)
+#define STA_TYPE_WDS BIT(4)
+#define STA_TYPE_BC BIT(5)
+
+#define NETWORK_INFRA BIT(16)
+#define NETWORK_P2P BIT(17)
+#define NETWORK_IBSS BIT(18)
+#define NETWORK_WDS BIT(21)
+
+#define CONNECTION_INFRA_STA (STA_TYPE_STA | NETWORK_INFRA)
+#define CONNECTION_INFRA_AP (STA_TYPE_AP | NETWORK_INFRA)
+#define CONNECTION_P2P_GC (STA_TYPE_STA | NETWORK_P2P)
+#define CONNECTION_P2P_GO (STA_TYPE_AP | NETWORK_P2P)
+#define CONNECTION_IBSS_ADHOC (STA_TYPE_ADHOC | NETWORK_IBSS)
+#define CONNECTION_WDS (STA_TYPE_WDS | NETWORK_WDS)
+#define CONNECTION_INFRA_BC (STA_TYPE_BC | NETWORK_INFRA)
+
+#define CONN_STATE_DISCONNECT 0
+#define CONN_STATE_CONNECT 1
+#define CONN_STATE_PORT_SECURE 2
+
+enum {
+ DEV_INFO_ACTIVE,
+ DEV_INFO_MAX_NUM
+};
+
+enum {
+ DBDC_TYPE_WMM,
+ DBDC_TYPE_MGMT,
+ DBDC_TYPE_BSS,
+ DBDC_TYPE_MBSS,
+ DBDC_TYPE_REPEATER,
+ DBDC_TYPE_MU,
+ DBDC_TYPE_BF,
+ DBDC_TYPE_PTA,
+ __DBDC_TYPE_MAX,
+};
+
+struct tlv {
+ __le16 tag;
+ __le16 len;
+} __packed;
+
+struct bss_info_omac {
+ __le16 tag;
+ __le16 len;
+ u8 hw_bss_idx;
+ u8 omac_idx;
+ u8 band_idx;
+ u8 rsv0;
+ __le32 conn_type;
+ u32 rsv1;
+} __packed;
+
+struct bss_info_basic {
+ __le16 tag;
+ __le16 len;
+ __le32 network_type;
+ u8 active;
+ u8 rsv0;
+ __le16 bcn_interval;
+ u8 bssid[ETH_ALEN];
+ u8 wmm_idx;
+ u8 dtim_period;
+ u8 bmc_tx_wlan_idx;
+ u8 cipher; /* not used */
+ u8 phymode; /* not used */
+ u8 rsv1[5];
+} __packed;
+
+struct bss_info_rf_ch {
+ __le16 tag;
+ __le16 len;
+ u8 pri_ch;
+ u8 central_ch0;
+ u8 central_ch1;
+ u8 bw;
+} __packed;
+
+struct bss_info_ext_bss {
+ __le16 tag;
+ __le16 len;
+ __le32 mbss_tsf_offset; /* in unit of us */
+ u8 rsv[8];
+} __packed;
+
+enum {
+ BSS_INFO_OMAC,
+ BSS_INFO_BASIC,
+ BSS_INFO_RF_CH, /* optional, for BT/LTE coex */
+ BSS_INFO_PM, /* sta only */
+ BSS_INFO_UAPSD, /* sta only */
+ BSS_INFO_ROAM_DETECTION, /* obsoleted */
+ BSS_INFO_LQ_RM, /* obsoleted */
+ BSS_INFO_EXT_BSS,
+ BSS_INFO_BMC_INFO, /* for bmc rate control in CR4 */
+ BSS_INFO_SYNC_MODE, /* obsoleted */
+ BSS_INFO_RA,
+ BSS_INFO_MAX_NUM
+};
+
+enum {
+ WTBL_RESET_AND_SET = 1,
+ WTBL_SET,
+ WTBL_QUERY,
+ WTBL_RESET_ALL
+};
+
+struct wtbl_req_hdr {
+ u8 wlan_idx;
+ u8 operation;
+ __le16 tlv_num;
+ u8 rsv[4];
+} __packed;
+
+struct wtbl_generic {
+ __le16 tag;
+ __le16 len;
+ u8 peer_addr[ETH_ALEN];
+ u8 muar_idx;
+ u8 skip_tx;
+ u8 cf_ack;
+ u8 qos;
+ u8 mesh;
+ u8 adm;
+ __le16 partial_aid;
+ u8 baf_en;
+ u8 aad_om;
+} __packed;
+
+struct wtbl_rx {
+ __le16 tag;
+ __le16 len;
+ u8 rcid;
+ u8 rca1;
+ u8 rca2;
+ u8 rv;
+ u8 rsv[4];
+} __packed;
+
+struct wtbl_ht {
+ __le16 tag;
+ __le16 len;
+ u8 ht;
+ u8 ldpc;
+ u8 af;
+ u8 mm;
+ u8 rsv[4];
+} __packed;
+
+struct wtbl_vht {
+ __le16 tag;
+ __le16 len;
+ u8 ldpc;
+ u8 dyn_bw;
+ u8 vht;
+ u8 txop_ps;
+ u8 rsv[4];
+} __packed;
+
+struct wtbl_tx_ps {
+ __le16 tag;
+ __le16 len;
+ u8 txps;
+ u8 rsv[3];
+} __packed;
+
+struct wtbl_hdr_trans {
+ __le16 tag;
+ __le16 len;
+ u8 to_ds;
+ u8 from_ds;
+ u8 disable_rx_trans;
+ u8 rsv;
+} __packed;
+
+enum {
+ MT_BA_TYPE_INVALID,
+ MT_BA_TYPE_ORIGINATOR,
+ MT_BA_TYPE_RECIPIENT
+};
+
+enum {
+ RST_BA_MAC_TID_MATCH,
+ RST_BA_MAC_MATCH,
+ RST_BA_NO_MATCH
+};
+
+struct wtbl_ba {
+ __le16 tag;
+ __le16 len;
+ /* common */
+ u8 tid;
+ u8 ba_type;
+ u8 rsv0[2];
+ /* originator only */
+ __le16 sn;
+ u8 ba_en;
+ u8 ba_winsize_idx;
+ __le16 ba_winsize;
+ /* recipient only */
+ u8 peer_addr[ETH_ALEN];
+ u8 rst_ba_tid;
+ u8 rst_ba_sel;
+ u8 rst_ba_sb;
+ u8 band_idx;
+ u8 rsv1[4];
+} __packed;
+
+struct wtbl_bf {
+ __le16 tag;
+ __le16 len;
+ u8 ibf;
+ u8 ebf;
+ u8 ibf_vht;
+ u8 ebf_vht;
+ u8 gid;
+ u8 pfmu_idx;
+ u8 rsv[2];
+} __packed;
+
+struct wtbl_smps {
+ __le16 tag;
+ __le16 len;
+ u8 smps;
+ u8 rsv[3];
+} __packed;
+
+struct wtbl_pn {
+ __le16 tag;
+ __le16 len;
+ u8 pn[6];
+ u8 rsv[2];
+} __packed;
+
+struct wtbl_spe {
+ __le16 tag;
+ __le16 len;
+ u8 spe_idx;
+ u8 rsv[3];
+} __packed;
+
+struct wtbl_raw {
+ __le16 tag;
+ __le16 len;
+ u8 wtbl_idx;
+ u8 dw;
+ u8 rsv[2];
+ __le32 msk;
+ __le32 val;
+} __packed;
+
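+/* Worst-case message sizes: the skb for a WTBL or STA record update is
+ * allocated with these so that every TLV that may be appended still fits in
+ * a single MCU command.
+ */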
+#define MT7615_WTBL_UPDATE_MAX_SIZE (sizeof(struct wtbl_req_hdr) + \
+ sizeof(struct wtbl_generic) + \
+ sizeof(struct wtbl_rx) + \
+ sizeof(struct wtbl_ht) + \
+ sizeof(struct wtbl_vht) + \
+ sizeof(struct wtbl_tx_ps) + \
+ sizeof(struct wtbl_hdr_trans) +\
+ sizeof(struct wtbl_ba) + \
+ sizeof(struct wtbl_bf) + \
+ sizeof(struct wtbl_smps) + \
+ sizeof(struct wtbl_pn) + \
+ sizeof(struct wtbl_spe))
+
+#define MT7615_STA_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \
+ sizeof(struct sta_rec_basic) + \
+ sizeof(struct sta_rec_ht) + \
+ sizeof(struct sta_rec_vht) + \
+ sizeof(struct sta_rec_uapsd) + \
+ sizeof(struct tlv) + \
+ MT7615_WTBL_UPDATE_MAX_SIZE)
+
+#define MT7615_WTBL_UPDATE_BA_SIZE (sizeof(struct wtbl_req_hdr) + \
+ sizeof(struct wtbl_ba))
+
+enum {
+ WTBL_GENERIC,
+ WTBL_RX,
+ WTBL_HT,
+ WTBL_VHT,
+ WTBL_PEER_PS, /* not used */
+ WTBL_TX_PS,
+ WTBL_HDR_TRANS,
+ WTBL_SEC_KEY,
+ WTBL_BA,
+ WTBL_RDG, /* obsoleted */
+ WTBL_PROTECT, /* not used */
+ WTBL_CLEAR, /* not used */
+ WTBL_BF,
+ WTBL_SMPS,
+ WTBL_RAW_DATA, /* debug only */
+ WTBL_PN,
+ WTBL_SPE,
+ WTBL_MAX_NUM
+};
+
+struct sta_ntlv_hdr {
+ u8 rsv[2];
+ __le16 tlv_num;
+} __packed;
+
+struct sta_req_hdr {
+ u8 bss_idx;
+ u8 wlan_idx;
+ __le16 tlv_num;
+ u8 is_tlv_append;
+ u8 muar_idx;
+ u8 rsv[2];
+} __packed;
+
+struct sta_rec_state {
+ __le16 tag;
+ __le16 len;
+ u8 state;
+ __le32 flags;
+ u8 vhtop;
+ u8 pad[2];
+} __packed;
+
+struct sta_rec_basic {
+ __le16 tag;
+ __le16 len;
+ __le32 conn_type;
+ u8 conn_state;
+ u8 qos;
+ __le16 aid;
+ u8 peer_addr[ETH_ALEN];
+#define EXTRA_INFO_VER BIT(0)
+#define EXTRA_INFO_NEW BIT(1)
+ __le16 extra_info;
+} __packed;
+
+struct sta_rec_ht {
+ __le16 tag;
+ __le16 len;
+ __le16 ht_cap;
+ u16 rsv;
+} __packed;
+
+struct sta_rec_vht {
+ __le16 tag;
+ __le16 len;
+ __le32 vht_cap;
+ __le16 vht_rx_mcs_map;
+ __le16 vht_tx_mcs_map;
+} __packed;
+
+struct sta_rec_ba {
+ __le16 tag;
+ __le16 len;
+ u8 tid;
+ u8 ba_type;
+ u8 amsdu;
+ u8 ba_en;
+ __le16 ssn;
+ __le16 winsize;
+} __packed;
+
+struct sta_rec_uapsd {
+ __le16 tag;
+ __le16 len;
+ u8 dac_map;
+ u8 tac_map;
+ u8 max_sp;
+ u8 rsv0;
+ __le16 listen_interval;
+ u8 rsv1[2];
+} __packed;
+
+enum {
+ STA_REC_BASIC,
+ STA_REC_RA,
+ STA_REC_RA_CMM_INFO,
+ STA_REC_RA_UPDATE,
+ STA_REC_BF,
+ STA_REC_AMSDU, /* for CR4 */
+ STA_REC_BA,
+ STA_REC_STATE,
+ STA_REC_TX_PROC, /* for hdr trans and CSO in CR4 */
+ STA_REC_HT,
+ STA_REC_VHT,
+ STA_REC_APPS,
+ STA_REC_WTBL = 13,
+ STA_REC_MAX_NUM
+};
+
+enum {
+ CMD_CBW_20MHZ,
+ CMD_CBW_40MHZ,
+ CMD_CBW_80MHZ,
+ CMD_CBW_160MHZ,
+ CMD_CBW_10MHZ,
+ CMD_CBW_5MHZ,
+ CMD_CBW_8080MHZ
+};
+
+enum {
+ CH_SWITCH_NORMAL = 0,
+ CH_SWITCH_SCAN = 3,
+ CH_SWITCH_MCC = 4,
+ CH_SWITCH_DFS = 5,
+ CH_SWITCH_BACKGROUND_SCAN_START = 6,
+ CH_SWITCH_BACKGROUND_SCAN_RUNNING = 7,
+ CH_SWITCH_BACKGROUND_SCAN_STOP = 8,
+ CH_SWITCH_SCAN_BYPASS_DPD = 9
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
new file mode 100644
index 000000000..9b191307e
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
@@ -0,0 +1,274 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+
+#include "mt7615.h"
+#include "regs.h"
+#include "mac.h"
+#include "../trace.h"
+
+const u32 mt7615e_reg_map[] = {
+ [MT_TOP_CFG_BASE] = 0x01000,
+ [MT_HW_BASE] = 0x01000,
+ [MT_PCIE_REMAP_2] = 0x02504,
+ [MT_ARB_BASE] = 0x20c00,
+ [MT_HIF_BASE] = 0x04000,
+ [MT_CSR_BASE] = 0x07000,
+ [MT_PLE_BASE] = 0x08000,
+ [MT_PSE_BASE] = 0x0c000,
+ [MT_CFG_BASE] = 0x20200,
+ [MT_AGG_BASE] = 0x20a00,
+ [MT_TMAC_BASE] = 0x21000,
+ [MT_RMAC_BASE] = 0x21200,
+ [MT_DMA_BASE] = 0x21800,
+ [MT_PF_BASE] = 0x22000,
+ [MT_WTBL_BASE_ON] = 0x23000,
+ [MT_WTBL_BASE_OFF] = 0x23400,
+ [MT_LPON_BASE] = 0x24200,
+ [MT_MIB_BASE] = 0x24800,
+ [MT_WTBL_BASE_ADDR] = 0x30000,
+ [MT_PCIE_REMAP_BASE2] = 0x80000,
+ [MT_TOP_MISC_BASE] = 0xc0000,
+ [MT_EFUSE_ADDR_BASE] = 0x81070000,
+};
+
+const u32 mt7663e_reg_map[] = {
+ [MT_TOP_CFG_BASE] = 0x01000,
+ [MT_HW_BASE] = 0x02000,
+ [MT_DMA_SHDL_BASE] = 0x06000,
+ [MT_PCIE_REMAP_2] = 0x0700c,
+ [MT_ARB_BASE] = 0x20c00,
+ [MT_HIF_BASE] = 0x04000,
+ [MT_CSR_BASE] = 0x07000,
+ [MT_PLE_BASE] = 0x08000,
+ [MT_PSE_BASE] = 0x0c000,
+ [MT_PP_BASE] = 0x0e000,
+ [MT_CFG_BASE] = 0x20000,
+ [MT_AGG_BASE] = 0x22000,
+ [MT_TMAC_BASE] = 0x24000,
+ [MT_RMAC_BASE] = 0x25000,
+ [MT_DMA_BASE] = 0x27000,
+ [MT_PF_BASE] = 0x28000,
+ [MT_WTBL_BASE_ON] = 0x29000,
+ [MT_WTBL_BASE_OFF] = 0x29800,
+ [MT_LPON_BASE] = 0x2b000,
+ [MT_MIB_BASE] = 0x2d000,
+ [MT_WTBL_BASE_ADDR] = 0x30000,
+ [MT_PCIE_REMAP_BASE2] = 0x90000,
+ [MT_TOP_MISC_BASE] = 0xc0000,
+ [MT_EFUSE_ADDR_BASE] = 0x78011000,
+};
+
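+/* Make a register outside the directly mapped range reachable: latch the
+ * high address bits in the MT_PCIE_REMAP_2 register and return the offset to
+ * use inside the MT_PCIE_REMAP_BASE_2 window.
+ */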
+u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr)
+{
+ u32 base, offset;
+
+ if (is_mt7663(&dev->mt76)) {
+ base = addr & MT7663_MCU_PCIE_REMAP_2_BASE;
+ offset = addr & MT7663_MCU_PCIE_REMAP_2_OFFSET;
+ } else {
+ base = addr & MT_MCU_PCIE_REMAP_2_BASE;
+ offset = addr & MT_MCU_PCIE_REMAP_2_OFFSET;
+ }
+ mt76_wr(dev, MT_MCU_PCIE_REMAP_2, base);
+
+ return MT_PCIE_REMAP_BASE_2 + offset;
+}
+
+static void
+mt7615_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+
+ mt7615_irq_enable(dev, MT_INT_RX_DONE(q));
+}
+
+static irqreturn_t mt7615_irq_handler(int irq, void *dev_instance)
+{
+ struct mt7615_dev *dev = dev_instance;
+
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
+ return IRQ_NONE;
+
+ tasklet_schedule(&dev->irq_tasklet);
+
+ return IRQ_HANDLED;
+}
+
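+/* The hard irq handler above only masks the chip interrupts and schedules
+ * this tasklet, which acknowledges the pending sources, masks the ones that
+ * are handed off to NAPI (they are re-armed from the poll-complete paths)
+ * and schedules the RX / TX-MCU NAPI contexts; MCU error indications kick
+ * the reset worker.
+ */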
+static void mt7615_irq_tasklet(unsigned long data)
+{
+ struct mt7615_dev *dev = (struct mt7615_dev *)data;
+ u32 intr, mask = 0, tx_mcu_mask = mt7615_tx_mcu_int_mask(dev);
+
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+
+ intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+ intr &= dev->mt76.mmio.irqmask;
+ mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
+
+ trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
+
+ mask |= intr & MT_INT_RX_DONE_ALL;
+ if (intr & tx_mcu_mask)
+ mask |= tx_mcu_mask;
+ mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
+
+ if (intr & tx_mcu_mask)
+ napi_schedule(&dev->mt76.tx_napi);
+
+ if (intr & MT_INT_RX_DONE(0))
+ napi_schedule(&dev->mt76.napi[0]);
+
+ if (intr & MT_INT_RX_DONE(1))
+ napi_schedule(&dev->mt76.napi[1]);
+
+ if (intr & MT_INT_MCU_CMD) {
+ u32 val = mt76_rr(dev, MT_MCU_CMD);
+
+ if (val & MT_MCU_CMD_ERROR_MASK) {
+ dev->reset_state = val;
+ ieee80211_queue_work(mt76_hw(dev), &dev->reset_work);
+ wake_up(&dev->reset_wait);
+ }
+ }
+}
+
+static u32 __mt7615_reg_addr(struct mt7615_dev *dev, u32 addr)
+{
+ if (addr < 0x100000)
+ return addr;
+
+ return mt7615_reg_map(dev, addr);
+}
+
+static u32 mt7615_rr(struct mt76_dev *mdev, u32 offset)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ u32 addr = __mt7615_reg_addr(dev, offset);
+
+ return dev->bus_ops->rr(mdev, addr);
+}
+
+static void mt7615_wr(struct mt76_dev *mdev, u32 offset, u32 val)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ u32 addr = __mt7615_reg_addr(dev, offset);
+
+ dev->bus_ops->wr(mdev, addr, val);
+}
+
+static u32 mt7615_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ u32 addr = __mt7615_reg_addr(dev, offset);
+
+ return dev->bus_ops->rmw(mdev, addr, mask, val);
+}
+
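+/* Common probe path shared by the PCIe and SoC (MT7622) glue: wraps the bus
+ * ops so that high register offsets go through mt7615_reg_map(), requests
+ * the (shared) interrupt line and registers the device with mac80211.
+ */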
+int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
+ int irq, const u32 *map)
+{
+ static const struct mt76_driver_ops drv_ops = {
+ /* txwi_size = txd size + txp size */
+ .txwi_size = MT_TXD_SIZE + sizeof(struct mt7615_txp_common),
+ .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ,
+ .survey_flags = SURVEY_INFO_TIME_TX |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_BSS_RX,
+ .tx_prepare_skb = mt7615_tx_prepare_skb,
+ .tx_complete_skb = mt7615_tx_complete_skb,
+ .rx_skb = mt7615_queue_rx_skb,
+ .rx_poll_complete = mt7615_rx_poll_complete,
+ .sta_ps = mt7615_sta_ps,
+ .sta_add = mt7615_mac_sta_add,
+ .sta_remove = mt7615_mac_sta_remove,
+ .update_survey = mt7615_update_channel,
+ };
+ struct mt76_bus_ops *bus_ops;
+ struct ieee80211_ops *ops;
+ struct mt7615_dev *dev;
+ struct mt76_dev *mdev;
+ int ret;
+
+ ops = devm_kmemdup(pdev, &mt7615_ops, sizeof(mt7615_ops), GFP_KERNEL);
+ if (!ops)
+ return -ENOMEM;
+
+ mdev = mt76_alloc_device(pdev, sizeof(*dev), ops, &drv_ops);
+ if (!mdev)
+ return -ENOMEM;
+
+ dev = container_of(mdev, struct mt7615_dev, mt76);
+ mt76_mmio_init(&dev->mt76, mem_base);
+ tasklet_init(&dev->irq_tasklet, mt7615_irq_tasklet, (unsigned long)dev);
+
+ dev->reg_map = map;
+ dev->ops = ops;
+ mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
+ (mt76_rr(dev, MT_HW_REV) & 0xff);
+ dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+
+ dev->bus_ops = dev->mt76.bus;
+ bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
+ GFP_KERNEL);
+ if (!bus_ops) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ bus_ops->rr = mt7615_rr;
+ bus_ops->wr = mt7615_wr;
+ bus_ops->rmw = mt7615_rmw;
+ dev->mt76.bus = bus_ops;
+
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+
+ ret = devm_request_irq(mdev->dev, irq, mt7615_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+ if (ret)
+ goto error;
+
+ if (is_mt7663(mdev))
+ mt76_wr(dev, MT_PCIE_IRQ_ENABLE, 1);
+
+ ret = mt7615_register_device(dev);
+ if (ret)
+ goto error;
+
+ return 0;
+error:
+ mt76_free_device(&dev->mt76);
+
+ return ret;
+}
+
+static int __init mt7615_init(void)
+{
+ int ret;
+
+ ret = pci_register_driver(&mt7615_pci_driver);
+ if (ret)
+ return ret;
+
+ if (IS_ENABLED(CONFIG_MT7622_WMAC)) {
+ ret = platform_driver_register(&mt7622_wmac_driver);
+ if (ret)
+ pci_unregister_driver(&mt7615_pci_driver);
+ }
+
+ return ret;
+}
+
+static void __exit mt7615_exit(void)
+{
+ if (IS_ENABLED(CONFIG_MT7622_WMAC))
+ platform_driver_unregister(&mt7622_wmac_driver);
+ pci_unregister_driver(&mt7615_pci_driver);
+}
+
+module_init(mt7615_init);
+module_exit(mt7615_exit);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
new file mode 100644
index 000000000..4cee76691
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
@@ -0,0 +1,688 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2019 MediaTek Inc. */
+
+#ifndef __MT7615_H
+#define __MT7615_H
+
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/ktime.h>
+#include <linux/regmap.h>
+#include "../mt76.h"
+#include "regs.h"
+
+#define MT7615_MAX_INTERFACES 4
+#define MT7615_MAX_WMM_SETS 4
+#define MT7663_WTBL_SIZE 32
+#define MT7615_WTBL_SIZE 128
+#define MT7615_WTBL_RESERVED (mt7615_wtbl_size(dev) - 1)
+#define MT7615_WTBL_STA (MT7615_WTBL_RESERVED - \
+ MT7615_MAX_INTERFACES)
+
+#define MT7615_PM_TIMEOUT (HZ / 12)
+#define MT7615_WATCHDOG_TIME (HZ / 10)
+#define MT7615_HW_SCAN_TIMEOUT (HZ / 10)
+#define MT7615_RESET_TIMEOUT (30 * HZ)
+#define MT7615_RATE_RETRY 2
+
+#define MT7615_TX_RING_SIZE 1024
+#define MT7615_TX_MGMT_RING_SIZE 128
+#define MT7615_TX_MCU_RING_SIZE 128
+#define MT7615_TX_FWDL_RING_SIZE 128
+
+#define MT7615_RX_RING_SIZE 1024
+#define MT7615_RX_MCU_RING_SIZE 512
+
+#define MT7615_DRV_OWN_RETRY_COUNT 10
+
+#define MT7615_FIRMWARE_CR4 "mediatek/mt7615_cr4.bin"
+#define MT7615_FIRMWARE_N9 "mediatek/mt7615_n9.bin"
+#define MT7615_ROM_PATCH "mediatek/mt7615_rom_patch.bin"
+
+#define MT7622_FIRMWARE_N9 "mediatek/mt7622_n9.bin"
+#define MT7622_ROM_PATCH "mediatek/mt7622_rom_patch.bin"
+
+#define MT7615_FIRMWARE_V1 1
+#define MT7615_FIRMWARE_V2 2
+#define MT7615_FIRMWARE_V3 3
+
+#define MT7663_OFFLOAD_ROM_PATCH "mediatek/mt7663pr2h.bin"
+#define MT7663_OFFLOAD_FIRMWARE_N9 "mediatek/mt7663_n9_v3.bin"
+#define MT7663_ROM_PATCH "mediatek/mt7663pr2h_rebb.bin"
+#define MT7663_FIRMWARE_N9 "mediatek/mt7663_n9_rebb.bin"
+
+#define MT7615_EEPROM_SIZE 1024
+#define MT7615_TOKEN_SIZE 4096
+
+#define MT_FRAC_SCALE 12
+#define MT_FRAC(val, div) (((val) << MT_FRAC_SCALE) / (div))
+
+#define MT_CHFREQ_VALID BIT(7)
+#define MT_CHFREQ_DBDC_IDX BIT(6)
+#define MT_CHFREQ_SEQ GENMASK(5, 0)
+
+#define MT7615_BAR_RATE_DEFAULT 0x4b /* OFDM 6M */
+#define MT7615_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */
+#define MT7615_CFEND_RATE_11B 0x03 /* 11B LP, 11M */
+
+#define MT7615_SCAN_IE_LEN 600
+#define MT7615_MAX_SCHED_SCAN_INTERVAL 10
+#define MT7615_MAX_SCHED_SCAN_SSID 10
+#define MT7615_MAX_SCAN_MATCH 16
+
+struct mt7615_vif;
+struct mt7615_sta;
+struct mt7615_dfs_pulse;
+struct mt7615_dfs_pattern;
+enum mt7615_cipher_type;
+
+enum mt7615_hw_txq_id {
+ MT7615_TXQ_MAIN,
+ MT7615_TXQ_EXT,
+ MT7615_TXQ_MCU,
+ MT7615_TXQ_FWDL,
+};
+
+enum mt7622_hw_txq_id {
+ MT7622_TXQ_AC0,
+ MT7622_TXQ_AC1,
+ MT7622_TXQ_AC2,
+ MT7622_TXQ_FWDL = MT7615_TXQ_FWDL,
+ MT7622_TXQ_AC3,
+ MT7622_TXQ_MGMT,
+ MT7622_TXQ_MCU = 15,
+};
+
+struct mt7615_rate_set {
+ struct ieee80211_tx_rate probe_rate;
+ struct ieee80211_tx_rate rates[4];
+};
+
+struct mt7615_rate_desc {
+ bool rateset;
+ u16 probe_val;
+ u16 val[4];
+ u8 bw_idx;
+ u8 bw;
+};
+
+enum mt7615_wtbl_desc_type {
+ MT7615_WTBL_RATE_DESC,
+ MT7615_WTBL_KEY_DESC
+};
+
+struct mt7615_key_desc {
+ enum set_key_cmd cmd;
+ u32 cipher;
+ s8 keyidx;
+ u8 keylen;
+ u8 *key;
+};
+
+struct mt7615_wtbl_desc {
+ struct list_head node;
+
+ enum mt7615_wtbl_desc_type type;
+ struct mt7615_sta *sta;
+
+ union {
+ struct mt7615_rate_desc rate;
+ struct mt7615_key_desc key;
+ };
+};
+
+struct mt7615_sta {
+ struct mt76_wcid wcid; /* must be first */
+
+ struct mt7615_vif *vif;
+
+ struct list_head poll_list;
+ u32 airtime_ac[8];
+
+ struct ieee80211_tx_rate rates[4];
+
+ struct mt7615_rate_set rateset[2];
+ u32 rate_set_tsf;
+
+ u8 rate_count;
+ u8 n_rates;
+
+ u8 rate_probe;
+};
+
+struct mt7615_vif {
+ u8 idx;
+ u8 omac_idx;
+ u8 band_idx;
+ u8 wmm_idx;
+ u8 scan_seq_num;
+
+ struct mt7615_sta sta;
+};
+
+struct mib_stats {
+ u32 ack_fail_cnt;
+ u32 fcs_err_cnt;
+ u32 rts_cnt;
+ u32 rts_retries_cnt;
+ u32 ba_miss_cnt;
+ unsigned long aggr_per;
+};
+
+struct mt7615_phy {
+ struct mt76_phy *mt76;
+ struct mt7615_dev *dev;
+
+ struct ieee80211_vif *monitor_vif;
+
+ u32 rxfilter;
+ u32 omac_mask;
+
+ u16 noise;
+
+ bool scs_en;
+
+ unsigned long last_cca_adj;
+ int false_cca_ofdm, false_cca_cck;
+ s8 ofdm_sensitivity;
+ s8 cck_sensitivity;
+
+ u16 chainmask;
+
+ s16 coverage_class;
+ u8 slottime;
+
+ u8 chfreq;
+ u8 rdd_state;
+ int dfs_state;
+
+ __le32 rx_ampdu_ts;
+ u32 ampdu_ref;
+
+ struct mib_stats mib;
+
+ struct delayed_work mac_work;
+ u8 mac_work_count;
+
+ struct sk_buff_head scan_event_list;
+ struct delayed_work scan_work;
+
+ struct work_struct roc_work;
+ struct timer_list roc_timer;
+ wait_queue_head_t roc_wait;
+ bool roc_grant;
+};
+
+#define mt7615_mcu_add_tx_ba(dev, ...) (dev)->mcu_ops->add_tx_ba((dev), __VA_ARGS__)
+#define mt7615_mcu_add_rx_ba(dev, ...) (dev)->mcu_ops->add_rx_ba((dev), __VA_ARGS__)
+#define mt7615_mcu_sta_add(dev, ...) (dev)->mcu_ops->sta_add((dev), __VA_ARGS__)
+#define mt7615_mcu_add_dev_info(dev, ...) (dev)->mcu_ops->add_dev_info((dev), __VA_ARGS__)
+#define mt7615_mcu_add_bss_info(phy, ...) (phy->dev)->mcu_ops->add_bss_info((phy), __VA_ARGS__)
+#define mt7615_mcu_add_beacon(dev, ...) (dev)->mcu_ops->add_beacon_offload((dev), __VA_ARGS__)
+#define mt7615_mcu_set_pm(dev, ...) (dev)->mcu_ops->set_pm_state((dev), __VA_ARGS__)
+#define mt7615_mcu_set_drv_ctrl(dev) (dev)->mcu_ops->set_drv_ctrl((dev))
+#define mt7615_mcu_set_fw_ctrl(dev) (dev)->mcu_ops->set_fw_ctrl((dev))
+struct mt7615_mcu_ops {
+ int (*add_tx_ba)(struct mt7615_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable);
+ int (*add_rx_ba)(struct mt7615_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable);
+ int (*sta_add)(struct mt7615_dev *dev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable);
+ int (*add_dev_info)(struct mt7615_dev *dev,
+ struct ieee80211_vif *vif, bool enable);
+ int (*add_bss_info)(struct mt7615_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable);
+ int (*add_beacon_offload)(struct mt7615_dev *dev,
+ struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, bool enable);
+ int (*set_pm_state)(struct mt7615_dev *dev, int band, int state);
+ int (*set_drv_ctrl)(struct mt7615_dev *dev);
+ int (*set_fw_ctrl)(struct mt7615_dev *dev);
+};
+
+struct mt7615_dev {
+ union { /* must be first */
+ struct mt76_dev mt76;
+ struct mt76_phy mphy;
+ };
+
+ const struct mt76_bus_ops *bus_ops;
+ struct tasklet_struct irq_tasklet;
+
+ struct mt7615_phy phy;
+ u32 omac_mask;
+
+ u16 chainmask;
+
+ struct ieee80211_ops *ops;
+ const struct mt7615_mcu_ops *mcu_ops;
+ struct regmap *infracfg;
+ const u32 *reg_map;
+
+ struct work_struct mcu_work;
+
+ struct work_struct reset_work;
+ wait_queue_head_t reset_wait;
+ u32 reset_state;
+
+ struct list_head sta_poll_list;
+ spinlock_t sta_poll_lock;
+
+ struct {
+ u8 n_pulses;
+ u32 period;
+ u16 width;
+ s16 power;
+ } radar_pattern;
+ u32 hw_pattern;
+
+ bool fw_debug;
+ bool flash_eeprom;
+ bool dbdc_support;
+
+ spinlock_t token_lock;
+ struct idr token;
+
+ u8 fw_ver;
+
+ struct work_struct wtbl_work;
+ struct list_head wd_head;
+
+ u32 debugfs_rf_wf;
+ u32 debugfs_rf_reg;
+
+#ifdef CONFIG_NL80211_TESTMODE
+ struct {
+ u32 *reg_backup;
+
+ s16 last_freq_offset;
+ u8 last_rcpi[4];
+ s8 last_ib_rssi;
+ s8 last_wb_rssi;
+ } test;
+#endif
+
+ struct {
+ bool enable;
+
+ spinlock_t txq_lock;
+ struct {
+ struct mt7615_sta *msta;
+ struct sk_buff *skb;
+ } tx_q[IEEE80211_NUM_ACS];
+
+ struct work_struct wake_work;
+ struct completion wake_cmpl;
+
+ struct delayed_work ps_work;
+ unsigned long last_activity;
+ unsigned long idle_timeout;
+ } pm;
+};
+
+enum tx_pkt_queue_idx {
+ MT_LMAC_AC00,
+ MT_LMAC_AC01,
+ MT_LMAC_AC02,
+ MT_LMAC_AC03,
+ MT_LMAC_ALTX0 = 0x10,
+ MT_LMAC_BMC0,
+ MT_LMAC_BCN0,
+ MT_LMAC_PSMP0,
+ MT_LMAC_ALTX1,
+ MT_LMAC_BMC1,
+ MT_LMAC_BCN1,
+ MT_LMAC_PSMP1,
+};
+
+enum {
+ HW_BSSID_0 = 0x0,
+ HW_BSSID_1,
+ HW_BSSID_2,
+ HW_BSSID_3,
+ HW_BSSID_MAX,
+ EXT_BSSID_START = 0x10,
+ EXT_BSSID_1,
+ EXT_BSSID_2,
+ EXT_BSSID_3,
+ EXT_BSSID_4,
+ EXT_BSSID_5,
+ EXT_BSSID_6,
+ EXT_BSSID_7,
+ EXT_BSSID_8,
+ EXT_BSSID_9,
+ EXT_BSSID_10,
+ EXT_BSSID_11,
+ EXT_BSSID_12,
+ EXT_BSSID_13,
+ EXT_BSSID_14,
+ EXT_BSSID_15,
+ EXT_BSSID_END
+};
+
+enum {
+ MT_RX_SEL0,
+ MT_RX_SEL1,
+};
+
+enum mt7615_rdd_cmd {
+ RDD_STOP,
+ RDD_START,
+ RDD_DET_MODE,
+ RDD_DET_STOP,
+ RDD_CAC_START,
+ RDD_CAC_END,
+ RDD_NORMAL_START,
+ RDD_DISABLE_DFS_CAL,
+ RDD_PULSE_DBG,
+ RDD_READ_PULSE,
+ RDD_RESUME_BF,
+};
+
+static inline struct mt7615_phy *
+mt7615_hw_phy(struct ieee80211_hw *hw)
+{
+ struct mt76_phy *phy = hw->priv;
+
+ return phy->priv;
+}
+
+static inline struct mt7615_dev *
+mt7615_hw_dev(struct ieee80211_hw *hw)
+{
+ struct mt76_phy *phy = hw->priv;
+
+ return container_of(phy->dev, struct mt7615_dev, mt76);
+}
+
+static inline struct mt7615_phy *
+mt7615_ext_phy(struct mt7615_dev *dev)
+{
+ struct mt76_phy *phy = dev->mt76.phy2;
+
+ if (!phy)
+ return NULL;
+
+ return phy->priv;
+}
+
+extern struct ieee80211_rate mt7615_rates[12];
+extern const struct ieee80211_ops mt7615_ops;
+extern const u32 mt7615e_reg_map[__MT_BASE_MAX];
+extern const u32 mt7663e_reg_map[__MT_BASE_MAX];
+extern const u32 mt7663_usb_sdio_reg_map[__MT_BASE_MAX];
+extern struct pci_driver mt7615_pci_driver;
+extern struct platform_driver mt7622_wmac_driver;
+extern const struct mt76_testmode_ops mt7615_testmode_ops;
+
+#ifdef CONFIG_MT7622_WMAC
+int mt7622_wmac_init(struct mt7615_dev *dev);
+#else
+static inline int mt7622_wmac_init(struct mt7615_dev *dev)
+{
+ return 0;
+}
+#endif
+
+int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
+ int irq, const u32 *map);
+u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr);
+
+void mt7615_check_offload_capability(struct mt7615_dev *dev);
+void mt7615_init_device(struct mt7615_dev *dev);
+int mt7615_register_device(struct mt7615_dev *dev);
+void mt7615_unregister_device(struct mt7615_dev *dev);
+int mt7615_register_ext_phy(struct mt7615_dev *dev);
+void mt7615_unregister_ext_phy(struct mt7615_dev *dev);
+int mt7615_eeprom_init(struct mt7615_dev *dev, u32 addr);
+int mt7615_eeprom_get_target_power_index(struct mt7615_dev *dev,
+ struct ieee80211_channel *chan,
+ u8 chain_idx);
+int mt7615_eeprom_get_power_delta_index(struct mt7615_dev *dev,
+ enum nl80211_band band);
+int mt7615_wait_pdma_busy(struct mt7615_dev *dev);
+int mt7615_dma_init(struct mt7615_dev *dev);
+void mt7615_dma_cleanup(struct mt7615_dev *dev);
+int mt7615_mcu_init(struct mt7615_dev *dev);
+bool mt7615_wait_for_mcu_init(struct mt7615_dev *dev);
+void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
+ struct ieee80211_tx_rate *probe_rate,
+ struct ieee80211_tx_rate *rates);
+int mt7615_pm_set_enable(struct mt7615_dev *dev, bool enable);
+void mt7615_pm_wake_work(struct work_struct *work);
+int mt7615_pm_wake(struct mt7615_dev *dev);
+void mt7615_pm_power_save_sched(struct mt7615_dev *dev);
+void mt7615_pm_power_save_work(struct work_struct *work);
+int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev);
+int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd);
+int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue,
+ const struct ieee80211_tx_queue_params *params);
+void mt7615_mcu_rx_event(struct mt7615_dev *dev, struct sk_buff *skb);
+int mt7615_mcu_rdd_cmd(struct mt7615_dev *dev,
+ enum mt7615_rdd_cmd cmd, u8 index,
+ u8 rx_sel, u8 val);
+int mt7615_mcu_rdd_send_pattern(struct mt7615_dev *dev);
+int mt7615_mcu_fw_log_2_host(struct mt7615_dev *dev, u8 ctrl);
+
+static inline bool is_mt7622(struct mt76_dev *dev)
+{
+ if (!IS_ENABLED(CONFIG_MT7622_WMAC))
+ return false;
+
+ return mt76_chip(dev) == 0x7622;
+}
+
+static inline bool is_mt7615(struct mt76_dev *dev)
+{
+ return mt76_chip(dev) == 0x7615 || mt76_chip(dev) == 0x7611;
+}
+
+static inline bool is_mt7663(struct mt76_dev *dev)
+{
+ return mt76_chip(dev) == 0x7663;
+}
+
+static inline bool is_mt7611(struct mt76_dev *dev)
+{
+ return mt76_chip(dev) == 0x7611;
+}
+
+static inline void mt7615_irq_enable(struct mt7615_dev *dev, u32 mask)
+{
+ mt76_set_irq_mask(&dev->mt76, 0, 0, mask);
+
+ tasklet_schedule(&dev->irq_tasklet);
+}
+
+static inline bool mt7615_firmware_offload(struct mt7615_dev *dev)
+{
+ return dev->fw_ver > MT7615_FIRMWARE_V2;
+}
+
+static inline u16 mt7615_wtbl_size(struct mt7615_dev *dev)
+{
+ if (is_mt7663(&dev->mt76) && mt7615_firmware_offload(dev))
+ return MT7663_WTBL_SIZE;
+ else
+ return MT7615_WTBL_SIZE;
+}
+
+static inline void mt7615_mutex_acquire(struct mt7615_dev *dev)
+ __acquires(&dev->mt76.mutex)
+{
+ mutex_lock(&dev->mt76.mutex);
+ mt7615_pm_wake(dev);
+}
+
+static inline void mt7615_mutex_release(struct mt7615_dev *dev)
+ __releases(&dev->mt76.mutex)
+{
+ mt7615_pm_power_save_sched(dev);
+ mutex_unlock(&dev->mt76.mutex);
+}
+
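+/* mac80211 numbers the ACs VO(0)..BK(3) while the LMAC TX queues are ordered
+ * the other way round, so the index is mirrored here, e.g. IEEE80211_AC_VO
+ * maps to MT_LMAC_AC03 and IEEE80211_AC_BK to MT_LMAC_AC00.
+ */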
+static inline u8 mt7615_lmac_mapping(struct mt7615_dev *dev, u8 ac)
+{
+ static const u8 lmac_queue_map[] = {
+ [IEEE80211_AC_BK] = MT_LMAC_AC00,
+ [IEEE80211_AC_BE] = MT_LMAC_AC01,
+ [IEEE80211_AC_VI] = MT_LMAC_AC02,
+ [IEEE80211_AC_VO] = MT_LMAC_AC03,
+ };
+
+ if (WARN_ON_ONCE(ac >= ARRAY_SIZE(lmac_queue_map)))
+ return MT_LMAC_AC01; /* BE */
+
+ return lmac_queue_map[ac];
+}
+
+static inline u32 mt7615_tx_mcu_int_mask(struct mt7615_dev *dev)
+{
+ return MT_INT_TX_DONE(dev->mt76.q_tx[MT_TXQ_MCU]->hw_idx);
+}
+
+void mt7615_dma_reset(struct mt7615_dev *dev);
+void mt7615_scan_work(struct work_struct *work);
+void mt7615_roc_work(struct work_struct *work);
+void mt7615_roc_timer(struct timer_list *timer);
+void mt7615_init_txpower(struct mt7615_dev *dev,
+ struct ieee80211_supported_band *sband);
+void mt7615_phy_init(struct mt7615_dev *dev);
+void mt7615_mac_init(struct mt7615_dev *dev);
+int mt7615_set_channel(struct mt7615_phy *phy);
+
+int mt7615_mcu_restart(struct mt76_dev *dev);
+void mt7615_update_channel(struct mt76_dev *mdev);
+bool mt7615_mac_wtbl_update(struct mt7615_dev *dev, int idx, u32 mask);
+void mt7615_mac_reset_counters(struct mt7615_dev *dev);
+void mt7615_mac_cca_stats_reset(struct mt7615_phy *phy);
+void mt7615_mac_set_scs(struct mt7615_phy *phy, bool enable);
+void mt7615_mac_enable_nf(struct mt7615_dev *dev, bool ext_phy);
+void mt7615_mac_sta_poll(struct mt7615_dev *dev);
+int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta, int pid,
+ struct ieee80211_key_conf *key, bool beacon);
+void mt7615_mac_set_timing(struct mt7615_phy *phy);
+int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+ struct ieee80211_key_conf *key,
+ enum set_key_cmd cmd);
+int mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev,
+ struct mt76_wcid *wcid,
+ enum mt7615_cipher_type cipher,
+ int keyidx, enum set_key_cmd cmd);
+void mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev,
+ struct mt76_wcid *wcid,
+ enum mt7615_cipher_type cipher,
+ enum set_key_cmd cmd);
+int mt7615_mac_wtbl_update_key(struct mt7615_dev *dev,
+ struct mt76_wcid *wcid,
+ u8 *key, u8 keylen,
+ enum mt7615_cipher_type cipher,
+ enum set_key_cmd cmd);
+void mt7615_mac_reset_work(struct work_struct *work);
+u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid);
+
+int mt7615_mcu_wait_response(struct mt7615_dev *dev, int cmd, int seq);
+int mt7615_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
+ int len, bool wait_resp);
+u32 mt7615_rf_rr(struct mt7615_dev *dev, u32 wf, u32 reg);
+int mt7615_rf_wr(struct mt7615_dev *dev, u32 wf, u32 reg, u32 val);
+int mt7615_mcu_set_dbdc(struct mt7615_dev *dev);
+int mt7615_mcu_set_eeprom(struct mt7615_dev *dev);
+int mt7615_mcu_set_mac_enable(struct mt7615_dev *dev, int band, bool enable);
+int mt7615_mcu_set_rts_thresh(struct mt7615_phy *phy, u32 val);
+int mt7615_mcu_get_temperature(struct mt7615_dev *dev, int index);
+int mt7615_mcu_set_tx_power(struct mt7615_phy *phy);
+void mt7615_mcu_exit(struct mt7615_dev *dev);
+void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb,
+ int cmd, int *wait_seq);
+int mt7615_mcu_set_channel_domain(struct mt7615_phy *phy);
+int mt7615_mcu_hw_scan(struct mt7615_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *scan_req);
+int mt7615_mcu_cancel_hw_scan(struct mt7615_phy *phy,
+ struct ieee80211_vif *vif);
+int mt7615_mcu_sched_scan_req(struct mt7615_phy *phy,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *sreq);
+int mt7615_mcu_sched_scan_enable(struct mt7615_phy *phy,
+ struct ieee80211_vif *vif,
+ bool enable);
+
+int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info);
+
+void mt7615_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
+void mt7615_tx_token_put(struct mt7615_dev *dev);
+void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ struct sk_buff *skb);
+void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
+int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void mt7615_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void mt7615_mac_work(struct work_struct *work);
+void mt7615_txp_skb_unmap(struct mt76_dev *dev,
+ struct mt76_txwi_cache *txwi);
+int mt7615_mcu_set_fcc5_lpn(struct mt7615_dev *dev, int val);
+int mt7615_mcu_set_pulse_th(struct mt7615_dev *dev,
+ const struct mt7615_dfs_pulse *pulse);
+int mt7615_mcu_set_radar_th(struct mt7615_dev *dev, int index,
+ const struct mt7615_dfs_pattern *pattern);
+int mt7615_mcu_set_test_param(struct mt7615_dev *dev, u8 param, bool test_mode,
+ u32 val);
+int mt7615_mcu_set_sku_en(struct mt7615_phy *phy, bool enable);
+int mt7615_mcu_apply_rx_dcoc(struct mt7615_phy *phy);
+int mt7615_mcu_apply_tx_dpd(struct mt7615_phy *phy);
+int mt7615_mcu_set_vif_ps(struct mt7615_dev *dev, struct ieee80211_vif *vif);
+int mt7615_dfs_init_radar_detector(struct mt7615_phy *phy);
+
+int mt7615_mcu_set_p2p_oppps(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+int mt7615_mcu_set_roc(struct mt7615_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_channel *chan, int duration);
+
+int mt7615_init_debugfs(struct mt7615_dev *dev);
+
+int mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ bool enable);
+int mt7615_mcu_set_hif_suspend(struct mt7615_dev *dev, bool suspend);
+void mt7615_mcu_set_suspend_iter(void *priv, u8 *mac,
+ struct ieee80211_vif *vif);
+int mt7615_mcu_update_gtk_rekey(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *key);
+int mt7615_mcu_update_arp_filter(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info);
+int __mt7663_load_firmware(struct mt7615_dev *dev);
+u32 mt7615_mcu_reg_rr(struct mt76_dev *dev, u32 offset);
+void mt7615_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val);
+
+/* usb */
+int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info);
+bool mt7663_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update);
+void mt7663_usb_sdio_tx_complete_skb(struct mt76_dev *mdev,
+ struct mt76_queue_entry *e);
+void mt7663_usb_sdio_wtbl_work(struct work_struct *work);
+int mt7663_usb_sdio_register_device(struct mt7615_dev *dev);
+int mt7663u_mcu_init(struct mt7615_dev *dev);
+
+/* sdio */
+u32 mt7663s_read_pcr(struct mt7615_dev *dev);
+int mt7663s_mcu_init(struct mt7615_dev *dev);
+void mt7663s_tx_work(struct work_struct *work);
+void mt7663s_rx_work(struct work_struct *work);
+void mt7663s_sdio_irq(struct sdio_func *func);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615_trace.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615_trace.h
new file mode 100644
index 000000000..d3eb49d83
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615_trace.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2019 Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#if !defined(__MT7615_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MT7615_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "mt7615.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mt7615
+
+#define MAXNAME 32
+#define DEV_ENTRY __array(char, wiphy_name, 32)
+#define DEV_ASSIGN strlcpy(__entry->wiphy_name, \
+ wiphy_name(mt76_hw(dev)->wiphy), MAXNAME)
+#define DEV_PR_FMT "%s"
+#define DEV_PR_ARG __entry->wiphy_name
+
+#define TOKEN_ENTRY __field(u16, token)
+#define TOKEN_ASSIGN __entry->token = token
+#define TOKEN_PR_FMT " %d"
+#define TOKEN_PR_ARG __entry->token
+
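+/* Event class logging the wiphy name and a TX DMA token */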
+DECLARE_EVENT_CLASS(dev_token,
+ TP_PROTO(struct mt7615_dev *dev, u16 token),
+ TP_ARGS(dev, token),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ TOKEN_ENTRY
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ TOKEN_ASSIGN;
+ ),
+ TP_printk(
+ DEV_PR_FMT TOKEN_PR_FMT,
+ DEV_PR_ARG, TOKEN_PR_ARG
+ )
+);
+
+DEFINE_EVENT(dev_token, mac_tx_free,
+ TP_PROTO(struct mt7615_dev *dev, u16 token),
+ TP_ARGS(dev, token)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE mt7615_trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
new file mode 100644
index 000000000..dbd29d897
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Ryder Lee <ryder.lee@mediatek.com>
+ * Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "mt7615.h"
+
+static const struct pci_device_id mt7615_pci_device_table[] = {
+ { PCI_DEVICE(0x14c3, 0x7615) },
+ { PCI_DEVICE(0x14c3, 0x7663) },
+ { PCI_DEVICE(0x14c3, 0x7611) },
+ { },
+};
+
+static int mt7615_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ const u32 *map;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0)
+ return ret;
+
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret)
+ goto error;
+
+ mt76_pci_disable_aspm(pdev);
+
+ map = id->device == 0x7663 ? mt7663e_reg_map : mt7615e_reg_map;
+ ret = mt7615_mmio_probe(&pdev->dev, pcim_iomap_table(pdev)[0],
+ pdev->irq, map);
+ if (ret)
+ goto error;
+
+ return 0;
+error:
+ pci_free_irq_vectors(pdev);
+
+ return ret;
+}
+
+static void mt7615_pci_remove(struct pci_dev *pdev)
+{
+ struct mt76_dev *mdev = pci_get_drvdata(pdev);
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+
+ mt7615_unregister_device(dev);
+ devm_free_irq(&pdev->dev, pdev->irq, dev);
+ pci_free_irq_vectors(pdev);
+}
+
+#ifdef CONFIG_PM
+static int mt7615_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct mt76_dev *mdev = pci_get_drvdata(pdev);
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ bool hif_suspend;
+ int i, err;
+
+ err = mt7615_pm_wake(dev);
+ if (err < 0)
+ return err;
+
+ hif_suspend = !test_bit(MT76_STATE_SUSPEND, &dev->mphy.state) &&
+ mt7615_firmware_offload(dev);
+ if (hif_suspend) {
+ err = mt7615_mcu_set_hif_suspend(dev, true);
+ if (err)
+ return err;
+ }
+
+ napi_disable(&mdev->tx_napi);
+ mt76_worker_disable(&mdev->tx_worker);
+
+ mt76_for_each_q_rx(mdev, i) {
+ napi_disable(&mdev->napi[i]);
+ }
+ tasklet_kill(&dev->irq_tasklet);
+
+ mt7615_dma_reset(dev);
+
+ err = mt7615_wait_pdma_busy(dev);
+ if (err)
+ goto restore;
+
+ if (is_mt7663(mdev)) {
+ mt76_set(dev, MT_PDMA_SLP_PROT, MT_PDMA_AXI_SLPPROT_ENABLE);
+ if (!mt76_poll_msec(dev, MT_PDMA_SLP_PROT,
+ MT_PDMA_AXI_SLPPROT_RDY,
+ MT_PDMA_AXI_SLPPROT_RDY, 1000)) {
+ dev_err(mdev->dev, "PDMA sleep protection failed\n");
+ err = -EIO;
+ goto restore;
+ }
+ }
+
+ pci_enable_wake(pdev, pci_choose_state(pdev, state), true);
+ pci_save_state(pdev);
+ err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ if (err)
+ goto restore;
+
+ err = mt7615_mcu_set_fw_ctrl(dev);
+ if (err)
+ goto restore;
+
+ return 0;
+
+restore:
+ mt76_for_each_q_rx(mdev, i) {
+ napi_enable(&mdev->napi[i]);
+ }
+ napi_enable(&mdev->tx_napi);
+ if (hif_suspend)
+ mt7615_mcu_set_hif_suspend(dev, false);
+
+ return err;
+}
+
+static int mt7615_pci_resume(struct pci_dev *pdev)
+{
+ struct mt76_dev *mdev = pci_get_drvdata(pdev);
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ bool pdma_reset;
+ int i, err;
+
+ err = mt7615_mcu_set_drv_ctrl(dev);
+ if (err < 0)
+ return err;
+
+ err = pci_set_power_state(pdev, PCI_D0);
+ if (err)
+ return err;
+
+ pci_restore_state(pdev);
+
+ if (is_mt7663(&dev->mt76)) {
+ mt76_clear(dev, MT_PDMA_SLP_PROT, MT_PDMA_AXI_SLPPROT_ENABLE);
+ mt76_wr(dev, MT_PCIE_IRQ_ENABLE, 1);
+ }
+
+ pdma_reset = !mt76_rr(dev, MT_WPDMA_TX_RING0_CTRL0) &&
+ !mt76_rr(dev, MT_WPDMA_TX_RING0_CTRL1);
+ if (pdma_reset)
+ dev_err(mdev->dev, "PDMA engine must be reinitialized\n");
+
+ mt76_worker_enable(&mdev->tx_worker);
+ mt76_for_each_q_rx(mdev, i) {
+ napi_enable(&mdev->napi[i]);
+ napi_schedule(&mdev->napi[i]);
+ }
+ napi_enable(&mdev->tx_napi);
+ napi_schedule(&mdev->tx_napi);
+
+ if (!test_bit(MT76_STATE_SUSPEND, &dev->mphy.state) &&
+ mt7615_firmware_offload(dev))
+ err = mt7615_mcu_set_hif_suspend(dev, false);
+
+ return err;
+}
+#endif /* CONFIG_PM */
+
+struct pci_driver mt7615_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mt7615_pci_device_table,
+ .probe = mt7615_pci_probe,
+ .remove = mt7615_pci_remove,
+#ifdef CONFIG_PM
+ .suspend = mt7615_pci_suspend,
+ .resume = mt7615_pci_resume,
+#endif /* CONFIG_PM */
+};
+
+MODULE_DEVICE_TABLE(pci, mt7615_pci_device_table);
+MODULE_FIRMWARE(MT7615_FIRMWARE_CR4);
+MODULE_FIRMWARE(MT7615_FIRMWARE_N9);
+MODULE_FIRMWARE(MT7615_ROM_PATCH);
+MODULE_FIRMWARE(MT7663_OFFLOAD_FIRMWARE_N9);
+MODULE_FIRMWARE(MT7663_OFFLOAD_ROM_PATCH);
+MODULE_FIRMWARE(MT7663_FIRMWARE_N9);
+MODULE_FIRMWARE(MT7663_ROM_PATCH);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
new file mode 100644
index 000000000..726e4781d
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Roy Luo <royluo@google.com>
+ * Ryder Lee <ryder.lee@mediatek.com>
+ * Felix Fietkau <nbd@nbd.name>
+ * Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/etherdevice.h>
+#include "mt7615.h"
+#include "mac.h"
+#include "eeprom.h"
+
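+/* Deferred bringup: MCU init, then MAC/PHY setup runs from a work item */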
+static void mt7615_init_work(struct work_struct *work)
+{
+ struct mt7615_dev *dev = container_of(work, struct mt7615_dev,
+ mcu_work);
+
+ if (mt7615_mcu_init(dev))
+ return;
+
+ mt7615_mcu_set_eeprom(dev);
+ mt7615_mac_init(dev);
+ mt7615_phy_init(dev);
+ mt7615_mcu_del_wtbl_all(dev);
+ mt7615_check_offload_capability(dev);
+
+ if (dev->dbdc_support)
+ mt7615_register_ext_phy(dev);
+}
+
+static int mt7615_init_hardware(struct mt7615_dev *dev)
+{
+ u32 addr = mt7615_reg_map(dev, MT_EFUSE_BASE);
+ int ret, idx;
+
+ mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
+
+ INIT_WORK(&dev->mcu_work, mt7615_init_work);
+ spin_lock_init(&dev->token_lock);
+ idr_init(&dev->token);
+
+ ret = mt7615_eeprom_init(dev, addr);
+ if (ret < 0)
+ return ret;
+
+ ret = mt7615_dma_init(dev);
+ if (ret)
+ return ret;
+
+ set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
+
+ /* Beacon and mgmt frames should occupy wcid 0 */
+ idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7615_WTBL_STA - 1);
+ if (idx)
+ return -ENOSPC;
+
+ dev->mt76.global_wcid.idx = idx;
+ dev->mt76.global_wcid.hw_key_idx = -1;
+ rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid);
+
+ return 0;
+}
+
+static void
+mt7615_led_set_config(struct led_classdev *led_cdev,
+ u8 delay_on, u8 delay_off)
+{
+ struct mt7615_dev *dev;
+ struct mt76_dev *mt76;
+ u32 val, addr;
+
+ mt76 = container_of(led_cdev, struct mt76_dev, led_cdev);
+ dev = container_of(mt76, struct mt7615_dev, mt76);
+
+ if (test_bit(MT76_STATE_PM, &mt76->phy.state))
+ return;
+
+ val = FIELD_PREP(MT_LED_STATUS_DURATION, 0xffff) |
+ FIELD_PREP(MT_LED_STATUS_OFF, delay_off) |
+ FIELD_PREP(MT_LED_STATUS_ON, delay_on);
+
+ addr = mt7615_reg_map(dev, MT_LED_STATUS_0(mt76->led_pin));
+ mt76_wr(dev, addr, val);
+ addr = mt7615_reg_map(dev, MT_LED_STATUS_1(mt76->led_pin));
+ mt76_wr(dev, addr, val);
+
+ val = MT_LED_CTRL_REPLAY(mt76->led_pin) |
+ MT_LED_CTRL_KICK(mt76->led_pin);
+ if (mt76->led_al)
+ val |= MT_LED_CTRL_POLARITY(mt76->led_pin);
+ addr = mt7615_reg_map(dev, MT_LED_CTRL);
+ mt76_wr(dev, addr, val);
+}
+
+static int
+mt7615_led_set_blink(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ u8 delta_on, delta_off;
+
+ delta_off = max_t(u8, *delay_off / 10, 1);
+ delta_on = max_t(u8, *delay_on / 10, 1);
+
+ mt7615_led_set_config(led_cdev, delta_on, delta_off);
+
+ return 0;
+}
+
+static void
+mt7615_led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ if (!brightness)
+ mt7615_led_set_config(led_cdev, 0, 0xff);
+ else
+ mt7615_led_set_config(led_cdev, 0xff, 0);
+}
+
+int mt7615_register_device(struct mt7615_dev *dev)
+{
+ int ret;
+
+ mt7615_init_device(dev);
+
+ /* init led callbacks */
+ if (IS_ENABLED(CONFIG_MT76_LEDS)) {
+ dev->mt76.led_cdev.brightness_set = mt7615_led_set_brightness;
+ dev->mt76.led_cdev.blink_set = mt7615_led_set_blink;
+ }
+
+ ret = mt7622_wmac_init(dev);
+ if (ret)
+ return ret;
+
+ ret = mt7615_init_hardware(dev);
+ if (ret)
+ return ret;
+
+ ret = mt76_register_device(&dev->mt76, true, mt7615_rates,
+ ARRAY_SIZE(mt7615_rates));
+ if (ret)
+ return ret;
+
+ ieee80211_queue_work(mt76_hw(dev), &dev->mcu_work);
+ mt7615_init_txpower(dev, &dev->mphy.sband_2g.sband);
+ mt7615_init_txpower(dev, &dev->mphy.sband_5g.sband);
+
+ return mt7615_init_debugfs(dev);
+}
+
+void mt7615_unregister_device(struct mt7615_dev *dev)
+{
+ bool mcu_running;
+
+ mcu_running = mt7615_wait_for_mcu_init(dev);
+
+ mt7615_unregister_ext_phy(dev);
+ mt76_unregister_device(&dev->mt76);
+ if (mcu_running)
+ mt7615_mcu_exit(dev);
+
+ mt7615_tx_token_put(dev);
+ mt7615_dma_cleanup(dev);
+ tasklet_disable(&dev->irq_tasklet);
+
+ mt76_free_device(&dev->mt76);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
new file mode 100644
index 000000000..490d55651
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc.
+ *
+ * Author: Ryder Lee <ryder.lee@mediatek.com>
+ * Roy Luo <royluo@google.com>
+ * Felix Fietkau <nbd@nbd.name>
+ * Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/timekeeping.h>
+
+#include "mt7615.h"
+#include "../dma.h"
+#include "mac.h"
+
+void mt7615_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
+{
+ if (!e->txwi) {
+ dev_kfree_skb_any(e->skb);
+ return;
+ }
+
+ /* error path */
+ if (e->skb == DMA_DUMMY_DATA) {
+ struct mt76_txwi_cache *t;
+ struct mt7615_dev *dev;
+ struct mt7615_txp_common *txp;
+ u16 token;
+
+ dev = container_of(mdev, struct mt7615_dev, mt76);
+ txp = mt7615_txwi_to_txp(mdev, e->txwi);
+
+ if (is_mt7615(&dev->mt76))
+ token = le16_to_cpu(txp->fw.token);
+ else
+ token = le16_to_cpu(txp->hw.msdu_id[0]) &
+ ~MT_MSDU_ID_VALID;
+
+ spin_lock_bh(&dev->token_lock);
+ t = idr_remove(&dev->token, token);
+ spin_unlock_bh(&dev->token_lock);
+ e->skb = t ? t->skb : NULL;
+ }
+
+ if (e->skb)
+ mt76_tx_complete_skb(mdev, e->wcid, e->skb);
+}
+
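+/* HW TXP: pack fragments into buf0/buf1 pairs and flag the last one */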
+static void
+mt7615_write_hw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info,
+ void *txp_ptr, u32 id)
+{
+ struct mt7615_hw_txp *txp = txp_ptr;
+ struct mt7615_txp_ptr *ptr = &txp->ptr[0];
+ int i, nbuf = tx_info->nbuf - 1;
+ u32 last_mask;
+
+ tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
+ tx_info->nbuf = 1;
+
+ txp->msdu_id[0] = cpu_to_le16(id | MT_MSDU_ID_VALID);
+
+ if (is_mt7663(&dev->mt76))
+ last_mask = MT_TXD_LEN_LAST;
+ else
+ last_mask = MT_TXD_LEN_AMSDU_LAST |
+ MT_TXD_LEN_MSDU_LAST;
+
+ for (i = 0; i < nbuf; i++) {
+ u16 len = tx_info->buf[i + 1].len & MT_TXD_LEN_MASK;
+ u32 addr = tx_info->buf[i + 1].addr;
+
+ if (i == nbuf - 1)
+ len |= last_mask;
+
+ if (i & 1) {
+ ptr->buf1 = cpu_to_le32(addr);
+ ptr->len1 = cpu_to_le16(len);
+ ptr++;
+ } else {
+ ptr->buf0 = cpu_to_le32(addr);
+ ptr->len0 = cpu_to_le16(len);
+ }
+ }
+}
+
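+/* FW TXP: pass the fragment list, per-frame flags and the DMA token
+ * used to reclaim the skb on completion.
+ */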
+static void
+mt7615_write_fw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info,
+ void *txp_ptr, u32 id)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
+ struct ieee80211_key_conf *key = info->control.hw_key;
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt7615_fw_txp *txp = txp_ptr;
+ int nbuf = tx_info->nbuf - 1;
+ int i;
+
+ for (i = 0; i < nbuf; i++) {
+ txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
+ txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
+ }
+ txp->nbuf = nbuf;
+
+ /* pass partial skb header to fw */
+ tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
+ tx_info->buf[1].len = MT_CT_PARSE_LEN;
+ tx_info->buf[1].skip_unmap = true;
+ tx_info->nbuf = MT_CT_DMA_BUF_NUM;
+
+ txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD);
+
+ if (!key)
+ txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);
+
+ if (ieee80211_is_mgmt(hdr->frame_control))
+ txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);
+
+ if (vif) {
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+
+ txp->bss_idx = mvif->idx;
+ }
+
+ txp->token = cpu_to_le16(id);
+ txp->rept_wds_wcid = 0xff;
+}
+
+int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
+ struct ieee80211_key_conf *key = info->control.hw_key;
+ int pid, id;
+ u8 *txwi = (u8 *)txwi_ptr;
+ struct mt76_txwi_cache *t;
+ struct mt7615_sta *msta;
+ void *txp;
+
+ msta = wcid ? container_of(wcid, struct mt7615_sta, wcid) : NULL;
+ if (!wcid)
+ wcid = &dev->mt76.global_wcid;
+
+ pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
+
+ if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && msta) {
+ struct mt7615_phy *phy = &dev->phy;
+
+ if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && mdev->phy2)
+ phy = mdev->phy2->priv;
+
+ spin_lock_bh(&dev->mt76.lock);
+ mt7615_mac_set_rates(phy, msta, &info->control.rates[0],
+ msta->rates);
+ spin_unlock_bh(&dev->mt76.lock);
+ }
+
+ t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
+ t->skb = tx_info->skb;
+
+ spin_lock_bh(&dev->token_lock);
+ id = idr_alloc(&dev->token, t, 0, MT7615_TOKEN_SIZE, GFP_ATOMIC);
+ spin_unlock_bh(&dev->token_lock);
+ if (id < 0)
+ return id;
+
+ mt7615_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, sta,
+ pid, key, false);
+
+ txp = txwi + MT_TXD_SIZE;
+ memset(txp, 0, sizeof(struct mt7615_txp_common));
+ if (is_mt7615(&dev->mt76))
+ mt7615_write_fw_txp(dev, tx_info, txp, id);
+ else
+ mt7615_write_hw_txp(dev, tx_info, txp, id);
+
+ tx_info->skb = DMA_DUMMY_DATA;
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
new file mode 100644
index 000000000..61623f480
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
@@ -0,0 +1,582 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2019 MediaTek Inc. */
+
+#ifndef __MT7615_REGS_H
+#define __MT7615_REGS_H
+
+enum mt7615_reg_base {
+ MT_TOP_CFG_BASE,
+ MT_HW_BASE,
+ MT_DMA_SHDL_BASE,
+ MT_PCIE_REMAP_2,
+ MT_ARB_BASE,
+ MT_HIF_BASE,
+ MT_CSR_BASE,
+ MT_PLE_BASE,
+ MT_PSE_BASE,
+ MT_CFG_BASE,
+ MT_AGG_BASE,
+ MT_TMAC_BASE,
+ MT_RMAC_BASE,
+ MT_DMA_BASE,
+ MT_PF_BASE,
+ MT_WTBL_BASE_ON,
+ MT_WTBL_BASE_OFF,
+ MT_LPON_BASE,
+ MT_MIB_BASE,
+ MT_WTBL_BASE_ADDR,
+ MT_PCIE_REMAP_BASE2,
+ MT_TOP_MISC_BASE,
+ MT_EFUSE_ADDR_BASE,
+ MT_PP_BASE,
+ __MT_BASE_MAX,
+};
+
+#define MT_HW_INFO_BASE ((dev)->reg_map[MT_HW_BASE])
+#define MT_HW_INFO(ofs) (MT_HW_INFO_BASE + (ofs))
+#define MT_HW_REV MT_HW_INFO(0x000)
+#define MT_HW_CHIPID MT_HW_INFO(0x008)
+#define MT_TOP_STRAP_STA MT_HW_INFO(0x010)
+#define MT_TOP_3NSS BIT(24)
+
+#define MT_TOP_OFF_RSV 0x1128
+#define MT_TOP_OFF_RSV_FW_STATE GENMASK(18, 16)
+
+#define MT_TOP_MISC2 ((dev)->reg_map[MT_TOP_CFG_BASE] + 0x134)
+#define MT_TOP_MISC2_FW_STATE GENMASK(2, 0)
+
+#define MT7663_TOP_MISC2_FW_STATE GENMASK(3, 1)
+#define MT_TOP_MISC2_FW_PWR_ON BIT(1)
+
+#define MT_MCU_BASE 0x2000
+#define MT_MCU(ofs) (MT_MCU_BASE + (ofs))
+
+#define MT_MCU_PCIE_REMAP_1 MT_MCU(0x500)
+#define MT_MCU_PCIE_REMAP_1_OFFSET GENMASK(17, 0)
+#define MT_MCU_PCIE_REMAP_1_BASE GENMASK(31, 18)
+#define MT_PCIE_REMAP_BASE_1 0x40000
+
+#define MT_MCU_PCIE_REMAP_2 ((dev)->reg_map[MT_PCIE_REMAP_2])
+#define MT_MCU_PCIE_REMAP_2_OFFSET GENMASK(18, 0)
+#define MT_MCU_PCIE_REMAP_2_BASE GENMASK(31, 19)
+#define MT_PCIE_REMAP_BASE_2 ((dev)->reg_map[MT_PCIE_REMAP_BASE2])
+
+#define MT_HIF(ofs) ((dev)->reg_map[MT_HIF_BASE] + (ofs))
+#define MT_HIF_RST MT_HIF(0x100)
+#define MT_HIF_LOGIC_RST_N BIT(4)
+
+#define MT_PDMA_SLP_PROT MT_HIF(0x154)
+#define MT_PDMA_AXI_SLPPROT_ENABLE BIT(0)
+#define MT_PDMA_AXI_SLPPROT_RDY BIT(16)
+
+#define MT_PDMA_BUSY_STATUS MT_HIF(0x168)
+#define MT_PDMA_TX_IDX_BUSY BIT(2)
+#define MT_PDMA_BUSY_IDX BIT(31)
+
+#define MT_WPDMA_TX_RING0_CTRL0 MT_HIF(0x300)
+#define MT_WPDMA_TX_RING0_CTRL1 MT_HIF(0x304)
+
+#define MT7663_MCU_PCIE_REMAP_2_OFFSET GENMASK(15, 0)
+#define MT7663_MCU_PCIE_REMAP_2_BASE GENMASK(31, 16)
+
+#define MT_HIF2_BASE 0xf0000
+#define MT_HIF2(ofs) (MT_HIF2_BASE + (ofs))
+#define MT_PCIE_IRQ_ENABLE MT_HIF2(0x188)
+#define MT_PCIE_DOORBELL_PUSH MT_HIF2(0x1484)
+
+#define MT_CFG_LPCR_HOST MT_HIF(0x1f0)
+#define MT_CFG_LPCR_HOST_FW_OWN BIT(0)
+#define MT_CFG_LPCR_HOST_DRV_OWN BIT(1)
+
+#define MT_MCU_INT_EVENT MT_HIF(0x1f8)
+#define MT_MCU_INT_EVENT_PDMA_STOPPED BIT(0)
+#define MT_MCU_INT_EVENT_PDMA_INIT BIT(1)
+#define MT_MCU_INT_EVENT_SER_TRIGGER BIT(2)
+#define MT_MCU_INT_EVENT_RESET_DONE BIT(3)
+
+#define MT_INT_SOURCE_CSR MT_HIF(0x200)
+#define MT_INT_MASK_CSR MT_HIF(0x204)
+#define MT_DELAY_INT_CFG MT_HIF(0x210)
+
+#define MT_INT_RX_DONE(_n) BIT(_n)
+#define MT_INT_RX_DONE_ALL GENMASK(1, 0)
+#define MT_INT_TX_DONE_ALL GENMASK(19, 4)
+#define MT_INT_TX_DONE(_n) BIT((_n) + 4)
+#define MT_INT_MCU_CMD BIT(30)
+
+#define MT_WPDMA_GLO_CFG MT_HIF(0x208)
+#define MT_WPDMA_GLO_CFG_TX_DMA_EN BIT(0)
+#define MT_WPDMA_GLO_CFG_TX_DMA_BUSY BIT(1)
+#define MT_WPDMA_GLO_CFG_RX_DMA_EN BIT(2)
+#define MT_WPDMA_GLO_CFG_RX_DMA_BUSY BIT(3)
+#define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE GENMASK(5, 4)
+#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE BIT(6)
+#define MT_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7)
+#define MT_WPDMA_GLO_CFG_TX_BT_SIZE_BIT0 BIT(9)
+#define MT_WPDMA_GLO_CFG_BYPASS_TX_SCH BIT(9) /* MT7622 */
+#define MT_WPDMA_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10)
+#define MT_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
+#define MT_WPDMA_GLO_CFG_TX_BT_SIZE_BIT21 GENMASK(23, 22)
+#define MT_WPDMA_GLO_CFG_SW_RESET BIT(24)
+#define MT_WPDMA_GLO_CFG_FIRST_TOKEN_ONLY BIT(26)
+#define MT_WPDMA_GLO_CFG_OMIT_TX_INFO BIT(28)
+
+#define MT_WPDMA_RST_IDX MT_HIF(0x20c)
+
+#define MT_WPDMA_MEM_RNG_ERR MT_HIF(0x224)
+
+#define MT_MCU_CMD MT_HIF(0x234)
+#define MT_MCU_CMD_CLEAR_FW_OWN BIT(0)
+#define MT_MCU_CMD_STOP_PDMA_FW_RELOAD BIT(1)
+#define MT_MCU_CMD_STOP_PDMA BIT(2)
+#define MT_MCU_CMD_RESET_DONE BIT(3)
+#define MT_MCU_CMD_RECOVERY_DONE BIT(4)
+#define MT_MCU_CMD_NORMAL_STATE BIT(5)
+#define MT_MCU_CMD_LMAC_ERROR BIT(24)
+#define MT_MCU_CMD_PSE_ERROR BIT(25)
+#define MT_MCU_CMD_PLE_ERROR BIT(26)
+#define MT_MCU_CMD_PDMA_ERROR BIT(27)
+#define MT_MCU_CMD_PCIE_ERROR BIT(28)
+#define MT_MCU_CMD_ERROR_MASK (GENMASK(5, 1) | GENMASK(28, 24))
+
+#define MT_TX_RING_BASE MT_HIF(0x300)
+#define MT_RX_RING_BASE MT_HIF(0x400)
+
+#define MT_WPDMA_GLO_CFG1 MT_HIF(0x500)
+#define MT_WPDMA_TX_PRE_CFG MT_HIF(0x510)
+#define MT_WPDMA_RX_PRE_CFG MT_HIF(0x520)
+#define MT_WPDMA_ABT_CFG MT_HIF(0x530)
+#define MT_WPDMA_ABT_CFG1 MT_HIF(0x534)
+
+#define MT_CSR(ofs) ((dev)->reg_map[MT_CSR_BASE] + (ofs))
+#define MT_CONN_HIF_ON_LPCTL MT_CSR(0x000)
+
+#define MT_PLE(ofs) ((dev)->reg_map[MT_PLE_BASE] + (ofs))
+
+#define MT_PLE_PG_HIF0_GROUP MT_PLE(0x110)
+#define MT_HIF0_MIN_QUOTA GENMASK(11, 0)
+#define MT_PLE_FL_Q0_CTRL MT_PLE(0x1b0)
+#define MT_PLE_FL_Q1_CTRL MT_PLE(0x1b4)
+#define MT_PLE_FL_Q2_CTRL MT_PLE(0x1b8)
+#define MT_PLE_FL_Q3_CTRL MT_PLE(0x1bc)
+
+#define MT_PLE_AC_QEMPTY(ac, n) MT_PLE(0x300 + 0x10 * (ac) + \
+ ((n) << 2))
+
+#define MT_PSE(ofs) ((dev)->reg_map[MT_PSE_BASE] + (ofs))
+#define MT_PSE_PG_HIF0_GROUP MT_PSE(0x110)
+#define MT_HIF0_MIN_QUOTA GENMASK(11, 0)
+#define MT_PSE_PG_HIF1_GROUP MT_PSE(0x118)
+#define MT_HIF1_MIN_QUOTA GENMASK(11, 0)
+#define MT_PSE_QUEUE_EMPTY MT_PSE(0x0b4)
+#define MT_HIF_0_EMPTY_MASK BIT(16)
+#define MT_HIF_1_EMPTY_MASK BIT(17)
+#define MT_HIF_ALL_EMPTY_MASK GENMASK(17, 16)
+#define MT_PSE_PG_INFO MT_PSE(0x194)
+#define MT_PSE_SRC_CNT GENMASK(27, 16)
+
+#define MT_PP(ofs) ((dev)->reg_map[MT_PP_BASE] + (ofs))
+#define MT_PP_TXDWCNT MT_PP(0x0)
+#define MT_PP_TXDWCNT_TX0_ADD_DW_CNT GENMASK(7, 0)
+#define MT_PP_TXDWCNT_TX1_ADD_DW_CNT GENMASK(15, 8)
+
+#define MT_WF_PHY_BASE 0x82070000
+#define MT_WF_PHY(ofs) (MT_WF_PHY_BASE + (ofs))
+
+#define MT_WF_PHY_WF2_RFCTRL0(n) MT_WF_PHY(0x1900 + (n) * 0x400)
+#define MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN BIT(9)
+
+#define MT_WF_PHY_R0_PHYMUX_5(_phy) MT_WF_PHY(0x0614 + ((_phy) << 9))
+#define MT7663_WF_PHY_R0_PHYMUX_5 MT_WF_PHY(0x0414)
+
+#define MT_WF_PHY_R0_PHYCTRL_STS0(_phy) MT_WF_PHY(0x020c + ((_phy) << 9))
+#define MT_WF_PHYCTRL_STAT_PD_OFDM GENMASK(31, 16)
+#define MT_WF_PHYCTRL_STAT_PD_CCK GENMASK(15, 0)
+
+#define MT7663_WF_PHY_R0_PHYCTRL_STS0(_phy) MT_WF_PHY(0x0210 + ((_phy) << 12))
+
+#define MT_WF_PHY_R0_PHYCTRL_STS5(_phy) MT_WF_PHY(0x0220 + ((_phy) << 9))
+#define MT_WF_PHYCTRL_STAT_MDRDY_OFDM GENMASK(31, 16)
+#define MT_WF_PHYCTRL_STAT_MDRDY_CCK GENMASK(15, 0)
+
+#define MT7663_WF_PHY_R0_PHYCTRL_STS5(_phy) MT_WF_PHY(0x0224 + ((_phy) << 12))
+
+#define MT_WF_PHY_MIN_PRI_PWR(_phy) MT_WF_PHY((_phy) ? 0x084 : 0x229c)
+#define MT_WF_PHY_PD_OFDM_MASK(_phy) ((_phy) ? GENMASK(24, 16) : \
+ GENMASK(28, 20))
+#define MT_WF_PHY_PD_OFDM(_phy, v) ((v) << ((_phy) ? 16 : 20))
+#define MT_WF_PHY_PD_BLK(_phy) ((_phy) ? BIT(25) : BIT(19))
+
+#define MT7663_WF_PHY_MIN_PRI_PWR(_phy) MT_WF_PHY((_phy) ? 0x2aec : 0x22f0)
+
+#define MT_WF_PHY_RXTD_BASE MT_WF_PHY(0x2200)
+#define MT_WF_PHY_RXTD(_n) (MT_WF_PHY_RXTD_BASE + ((_n) << 2))
+
+#define MT7663_WF_PHY_RXTD(_n) (MT_WF_PHY(0x25b0) + ((_n) << 2))
+
+#define MT_WF_PHY_RXTD_CCK_PD(_phy) MT_WF_PHY((_phy) ? 0x2314 : 0x2310)
+#define MT_WF_PHY_PD_CCK_MASK(_phy) ((_phy) ? GENMASK(31, 24) : \
+ GENMASK(8, 1))
+#define MT_WF_PHY_PD_CCK(_phy, v) ((v) << ((_phy) ? 24 : 1))
+
+#define MT7663_WF_PHY_RXTD_CCK_PD(_phy) MT_WF_PHY((_phy) ? 0x2350 : 0x234c)
+
+#define MT_WF_PHY_RXTD2_BASE MT_WF_PHY(0x2a00)
+#define MT_WF_PHY_RXTD2(_n) (MT_WF_PHY_RXTD2_BASE + ((_n) << 2))
+
+#define MT_WF_PHY_RFINTF3_0(_n) MT_WF_PHY(0x1100 + (_n) * 0x400)
+#define MT_WF_PHY_RFINTF3_0_ANT GENMASK(7, 4)
+
+#define MT_WF_CFG_BASE ((dev)->reg_map[MT_CFG_BASE])
+#define MT_WF_CFG(ofs) (MT_WF_CFG_BASE + (ofs))
+
+#define MT_CFG_CCR MT_WF_CFG(0x000)
+#define MT_CFG_CCR_MAC_D1_1X_GC_EN BIT(24)
+#define MT_CFG_CCR_MAC_D0_1X_GC_EN BIT(25)
+#define MT_CFG_CCR_MAC_D1_2X_GC_EN BIT(30)
+#define MT_CFG_CCR_MAC_D0_2X_GC_EN BIT(31)
+
+#define MT_WF_AGG_BASE ((dev)->reg_map[MT_AGG_BASE])
+#define MT_WF_AGG(ofs) (MT_WF_AGG_BASE + (ofs))
+
+#define MT_AGG_ARCR MT_WF_AGG(0x010)
+#define MT_AGG_ARCR_INIT_RATE1 BIT(0)
+#define MT_AGG_ARCR_RTS_RATE_THR GENMASK(12, 8)
+#define MT_AGG_ARCR_RATE_DOWN_RATIO GENMASK(17, 16)
+#define MT_AGG_ARCR_RATE_DOWN_RATIO_EN BIT(19)
+#define MT_AGG_ARCR_RATE_UP_EXTRA_TH GENMASK(22, 20)
+
+#define MT_AGG_ARUCR(_band) MT_WF_AGG(0x018 + (_band) * 0x100)
+#define MT_AGG_ARDCR(_band) MT_WF_AGG(0x01c + (_band) * 0x100)
+#define MT_AGG_ARxCR_LIMIT_SHIFT(_n) (4 * (_n))
+#define MT_AGG_ARxCR_LIMIT(_n) GENMASK(2 + \
+ MT_AGG_ARxCR_LIMIT_SHIFT(_n), \
+ MT_AGG_ARxCR_LIMIT_SHIFT(_n))
+
+#define MT_AGG_ASRCR0 MT_WF_AGG(0x060)
+#define MT_AGG_ASRCR1 MT_WF_AGG(0x064)
+#define MT_AGG_ASRCR_RANGE(val, n) (((val) >> ((n) << 3)) & GENMASK(5, 0))
+
+#define MT_AGG_ACR(_band) MT_WF_AGG(0x070 + (_band) * 0x100)
+#define MT_AGG_ACR_NO_BA_RULE BIT(0)
+#define MT_AGG_ACR_NO_BA_AR_RULE BIT(1)
+#define MT_AGG_ACR_PKT_TIME_EN BIT(2)
+#define MT_AGG_ACR_CFEND_RATE GENMASK(15, 4)
+#define MT_AGG_ACR_BAR_RATE GENMASK(31, 20)
+
+#define MT_AGG_SCR MT_WF_AGG(0x0fc)
+#define MT_AGG_SCR_NLNAV_MID_PTEC_DIS BIT(3)
+
+#define MT_WF_ARB_BASE ((dev)->reg_map[MT_ARB_BASE])
+#define MT_WF_ARB(ofs) (MT_WF_ARB_BASE + (ofs))
+
+#define MT_ARB_RQCR MT_WF_ARB(0x070)
+#define MT_ARB_RQCR_RX_START BIT(0)
+#define MT_ARB_RQCR_RXV_START BIT(4)
+#define MT_ARB_RQCR_RXV_R_EN BIT(7)
+#define MT_ARB_RQCR_RXV_T_EN BIT(8)
+#define MT_ARB_RQCR_BAND_SHIFT 16
+
+#define MT_ARB_SCR MT_WF_ARB(0x080)
+#define MT_ARB_SCR_TX0_DISABLE BIT(8)
+#define MT_ARB_SCR_RX0_DISABLE BIT(9)
+#define MT_ARB_SCR_TX1_DISABLE BIT(10)
+#define MT_ARB_SCR_RX1_DISABLE BIT(11)
+
+#define MT_WF_TMAC_BASE ((dev)->reg_map[MT_TMAC_BASE])
+#define MT_WF_TMAC(ofs) (MT_WF_TMAC_BASE + (ofs))
+
+#define MT_TMAC_CDTR MT_WF_TMAC(0x090)
+#define MT_TMAC_ODTR MT_WF_TMAC(0x094)
+#define MT_TIMEOUT_VAL_PLCP GENMASK(15, 0)
+#define MT_TIMEOUT_VAL_CCA GENMASK(31, 16)
+
+#define MT_TMAC_TRCR(_band) MT_WF_TMAC((_band) ? 0x070 : 0x09c)
+#define MT_TMAC_TRCR_CCA_SEL GENMASK(31, 30)
+#define MT_TMAC_TRCR_SEC_CCA_SEL GENMASK(29, 28)
+
+#define MT_TMAC_ICR(_band) MT_WF_TMAC((_band) ? 0x074 : 0x0a4)
+#define MT_IFS_EIFS GENMASK(8, 0)
+#define MT_IFS_RIFS GENMASK(14, 10)
+#define MT_IFS_SIFS GENMASK(22, 16)
+#define MT_IFS_SLOT GENMASK(30, 24)
+
+#define MT_TMAC_CTCR0 MT_WF_TMAC(0x0f4)
+#define MT_TMAC_CTCR0_INS_DDLMT_REFTIME GENMASK(5, 0)
+#define MT_TMAC_CTCR0_INS_DDLMT_DENSITY GENMASK(15, 12)
+#define MT_TMAC_CTCR0_INS_DDLMT_EN BIT(17)
+#define MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN BIT(18)
+
+#define MT_WF_RMAC_BASE ((dev)->reg_map[MT_RMAC_BASE])
+#define MT_WF_RMAC(ofs) (MT_WF_RMAC_BASE + (ofs))
+
+#define MT_WF_RFCR(_band) MT_WF_RMAC((_band) ? 0x100 : 0x000)
+#define MT_WF_RFCR_DROP_STBC_MULTI BIT(0)
+#define MT_WF_RFCR_DROP_FCSFAIL BIT(1)
+#define MT_WF_RFCR_DROP_VERSION BIT(3)
+#define MT_WF_RFCR_DROP_PROBEREQ BIT(4)
+#define MT_WF_RFCR_DROP_MCAST BIT(5)
+#define MT_WF_RFCR_DROP_BCAST BIT(6)
+#define MT_WF_RFCR_DROP_MCAST_FILTERED BIT(7)
+#define MT_WF_RFCR_DROP_A3_MAC BIT(8)
+#define MT_WF_RFCR_DROP_A3_BSSID BIT(9)
+#define MT_WF_RFCR_DROP_A2_BSSID BIT(10)
+#define MT_WF_RFCR_DROP_OTHER_BEACON BIT(11)
+#define MT_WF_RFCR_DROP_FRAME_REPORT BIT(12)
+#define MT_WF_RFCR_DROP_CTL_RSV BIT(13)
+#define MT_WF_RFCR_DROP_CTS BIT(14)
+#define MT_WF_RFCR_DROP_RTS BIT(15)
+#define MT_WF_RFCR_DROP_DUPLICATE BIT(16)
+#define MT_WF_RFCR_DROP_OTHER_BSS BIT(17)
+#define MT_WF_RFCR_DROP_OTHER_UC BIT(18)
+#define MT_WF_RFCR_DROP_OTHER_TIM BIT(19)
+#define MT_WF_RFCR_DROP_NDPA BIT(20)
+#define MT_WF_RFCR_DROP_UNWANTED_CTL BIT(21)
+
+#define MT_WF_RFCR1(_band) MT_WF_RMAC((_band) ? 0x104 : 0x004)
+#define MT_WF_RFCR1_DROP_ACK BIT(4)
+#define MT_WF_RFCR1_DROP_BF_POLL BIT(5)
+#define MT_WF_RFCR1_DROP_BA BIT(6)
+#define MT_WF_RFCR1_DROP_CFEND BIT(7)
+#define MT_WF_RFCR1_DROP_CFACK BIT(8)
+
+#define MT_CHFREQ(_band) MT_WF_RMAC((_band) ? 0x130 : 0x030)
+
+#define MT_WF_RMAC_MIB_TIME0 MT_WF_RMAC(0x03c4)
+#define MT_WF_RMAC_MIB_RXTIME_CLR BIT(31)
+#define MT_WF_RMAC_MIB_RXTIME_EN BIT(30)
+
+#define MT_WF_RMAC_MIB_AIRTIME0 MT_WF_RMAC(0x0380)
+
+#define MT_WF_RMAC_MIB_TIME5 MT_WF_RMAC(0x03d8)
+#define MT_WF_RMAC_MIB_TIME6 MT_WF_RMAC(0x03dc)
+#define MT_MIB_OBSSTIME_MASK GENMASK(23, 0)
+
+#define MT_WF_DMA_BASE ((dev)->reg_map[MT_DMA_BASE])
+#define MT_WF_DMA(ofs) (MT_WF_DMA_BASE + (ofs))
+
+#define MT_DMA_DCR0 MT_WF_DMA(0x000)
+#define MT_DMA_DCR0_MAX_RX_LEN GENMASK(15, 2)
+#define MT_DMA_DCR0_RX_VEC_DROP BIT(17)
+
+#define MT_DMA_RCFR0(_band) MT_WF_DMA(0x070 + (_band) * 0x40)
+#define MT_DMA_RCFR0_MCU_RX_MGMT BIT(2)
+#define MT_DMA_RCFR0_MCU_RX_CTL_NON_BAR BIT(3)
+#define MT_DMA_RCFR0_MCU_RX_CTL_BAR BIT(4)
+#define MT_DMA_RCFR0_MCU_RX_TDLS BIT(19)
+#define MT_DMA_RCFR0_MCU_RX_BYPASS BIT(21)
+#define MT_DMA_RCFR0_RX_DROPPED_UCAST GENMASK(25, 24)
+#define MT_DMA_RCFR0_RX_DROPPED_MCAST GENMASK(27, 26)
+
+#define MT_WF_PF_BASE ((dev)->reg_map[MT_PF_BASE])
+#define MT_WF_PF(ofs) (MT_WF_PF_BASE + (ofs))
+
+#define MT_WF_PFCR MT_WF_PF(0x000)
+#define MT_WF_PFCR_TDLS_EN BIT(9)
+
+#define MT_WTBL_BASE(dev) ((dev)->reg_map[MT_WTBL_BASE_ADDR])
+#define MT_WTBL_ENTRY_SIZE 256
+
+#define MT_WTBL_OFF_BASE ((dev)->reg_map[MT_WTBL_BASE_OFF])
+#define MT_WTBL_OFF(n) (MT_WTBL_OFF_BASE + (n))
+
+#define MT_WTBL_W0_KEY_IDX GENMASK(24, 23)
+#define MT_WTBL_W0_RX_KEY_VALID BIT(26)
+#define MT_WTBL_W0_RX_IK_VALID BIT(27)
+
+#define MT_WTBL_W2_KEY_TYPE GENMASK(7, 4)
+
+#define MT_WTBL_UPDATE MT_WTBL_OFF(0x030)
+#define MT_WTBL_UPDATE_WLAN_IDX GENMASK(7, 0)
+#define MT_WTBL_UPDATE_RXINFO_UPDATE BIT(11)
+#define MT_WTBL_UPDATE_ADM_COUNT_CLEAR BIT(12)
+#define MT_WTBL_UPDATE_RATE_UPDATE BIT(13)
+#define MT_WTBL_UPDATE_TX_COUNT_CLEAR BIT(14)
+#define MT_WTBL_UPDATE_BUSY BIT(31)
+
+#define MT_TOP_MISC(ofs) ((dev)->reg_map[MT_TOP_MISC_BASE] + (ofs))
+#define MT_CONN_ON_MISC MT_TOP_MISC(0x1140)
+#define MT_TOP_MISC2_FW_N9_RDY BIT(2)
+
+#define MT_WTBL_ON_BASE ((dev)->reg_map[MT_WTBL_BASE_ON])
+#define MT_WTBL_ON(_n) (MT_WTBL_ON_BASE + (_n))
+
+#define MT_WTBL_RICR0 MT_WTBL_ON(0x010)
+#define MT_WTBL_RICR1 MT_WTBL_ON(0x014)
+
+#define MT_WTBL_RIUCR0 MT_WTBL_ON(0x020)
+
+#define MT_WTBL_RIUCR1 MT_WTBL_ON(0x024)
+#define MT_WTBL_RIUCR1_RATE0 GENMASK(11, 0)
+#define MT_WTBL_RIUCR1_RATE1 GENMASK(23, 12)
+#define MT_WTBL_RIUCR1_RATE2_LO GENMASK(31, 24)
+
+#define MT_WTBL_RIUCR2 MT_WTBL_ON(0x028)
+#define MT_WTBL_RIUCR2_RATE2_HI GENMASK(3, 0)
+#define MT_WTBL_RIUCR2_RATE3 GENMASK(15, 4)
+#define MT_WTBL_RIUCR2_RATE4 GENMASK(27, 16)
+#define MT_WTBL_RIUCR2_RATE5_LO GENMASK(31, 28)
+
+#define MT_WTBL_RIUCR3 MT_WTBL_ON(0x02c)
+#define MT_WTBL_RIUCR3_RATE5_HI GENMASK(7, 0)
+#define MT_WTBL_RIUCR3_RATE6 GENMASK(19, 8)
+#define MT_WTBL_RIUCR3_RATE7 GENMASK(31, 20)
+
+#define MT_WTBL_W5_CHANGE_BW_RATE GENMASK(7, 5)
+#define MT_WTBL_W5_SHORT_GI_20 BIT(8)
+#define MT_WTBL_W5_SHORT_GI_40 BIT(9)
+#define MT_WTBL_W5_SHORT_GI_80 BIT(10)
+#define MT_WTBL_W5_SHORT_GI_160 BIT(11)
+#define MT_WTBL_W5_BW_CAP GENMASK(13, 12)
+#define MT_WTBL_W5_MPDU_FAIL_COUNT GENMASK(25, 23)
+#define MT_WTBL_W5_MPDU_OK_COUNT GENMASK(28, 26)
+#define MT_WTBL_W5_RATE_IDX GENMASK(31, 29)
+
+#define MT_WTBL_W27_CC_BW_SEL GENMASK(6, 5)
+
+#define MT_LPON(_n) ((dev)->reg_map[MT_LPON_BASE] + (_n))
+
+#define MT_LPON_T0CR MT_LPON(0x010)
+#define MT_LPON_T0CR_MODE GENMASK(1, 0)
+#define MT_LPON_T0CR_WRITE BIT(0)
+
+#define MT_LPON_UTTR0 MT_LPON(0x018)
+#define MT_LPON_UTTR1 MT_LPON(0x01c)
+
+#define MT_WF_MIB_BASE (dev->reg_map[MT_MIB_BASE])
+#define MT_WF_MIB(_band, ofs) (MT_WF_MIB_BASE + (ofs) + (_band) * 0x200)
+
+#define MT_WF_MIB_SCR0 MT_WF_MIB(0, 0)
+#define MT_MIB_SCR0_AGG_CNT_RANGE_EN BIT(21)
+
+#define MT_MIB_M0_MISC_CR(_band) MT_WF_MIB(_band, 0x00c)
+
+#define MT_MIB_SDR3(_band) MT_WF_MIB(_band, 0x014)
+#define MT_MIB_SDR3_FCS_ERR_MASK GENMASK(15, 0)
+
+#define MT_MIB_SDR9(_band) MT_WF_MIB(_band, 0x02c)
+#define MT_MIB_SDR9_BUSY_MASK GENMASK(23, 0)
+
+#define MT_MIB_SDR14(_band) MT_WF_MIB(_band, 0x040)
+#define MT_MIB_AMPDU_MPDU_COUNT GENMASK(23, 0)
+
+#define MT_MIB_SDR15(_band) MT_WF_MIB(_band, 0x044)
+#define MT_MIB_AMPDU_ACK_COUNT GENMASK(23, 0)
+
+#define MT_MIB_SDR16(_band) MT_WF_MIB(_band, 0x048)
+#define MT_MIB_SDR16_BUSY_MASK GENMASK(23, 0)
+
+#define MT_MIB_SDR36(_band) MT_WF_MIB(_band, 0x098)
+#define MT_MIB_SDR36_TXTIME_MASK GENMASK(23, 0)
+#define MT_MIB_SDR37(_band) MT_WF_MIB(_band, 0x09c)
+#define MT_MIB_SDR37_RXTIME_MASK GENMASK(23, 0)
+
+#define MT_MIB_MB_SDR0(_band, n) MT_WF_MIB(_band, 0x100 + ((n) << 4))
+#define MT_MIB_RTS_RETRIES_COUNT_MASK GENMASK(31, 16)
+#define MT_MIB_RTS_COUNT_MASK GENMASK(15, 0)
+
+#define MT_MIB_MB_SDR1(_band, n) MT_WF_MIB(_band, 0x104 + ((n) << 4))
+#define MT_MIB_BA_MISS_COUNT_MASK GENMASK(15, 0)
+#define MT_MIB_ACK_FAIL_COUNT_MASK GENMASK(31, 16)
+
+#define MT_MIB_ARNG(n) MT_WF_MIB(0, 0x4b8 + ((n) << 2))
+
+#define MT_TX_AGG_CNT(_band, n) MT_WF_MIB(_band, 0xa8 + ((n) << 2))
+
+#define MT_DMA_SHDL(ofs) (dev->reg_map[MT_DMA_SHDL_BASE] + (ofs))
+
+#define MT_DMASHDL_BASE 0x5000a000
+#define MT_DMASHDL_OPTIONAL 0x008
+#define MT_DMASHDL_PAGE 0x00c
+
+#define MT_DMASHDL_REFILL 0x010
+
+#define MT_DMASHDL_PKT_MAX_SIZE 0x01c
+#define MT_DMASHDL_PKT_MAX_SIZE_PLE GENMASK(11, 0)
+#define MT_DMASHDL_PKT_MAX_SIZE_PSE GENMASK(27, 16)
+
+#define MT_DMASHDL_GROUP_QUOTA(_n) (0x020 + ((_n) << 2))
+#define MT_DMASHDL_GROUP_QUOTA_MIN GENMASK(11, 0)
+#define MT_DMASHDL_GROUP_QUOTA_MAX GENMASK(27, 16)
+
+#define MT_DMASHDL_SCHED_SET0 0x0b0
+#define MT_DMASHDL_SCHED_SET1 0x0b4
+
+#define MT_DMASHDL_Q_MAP(_n) (0x0d0 + ((_n) << 2))
+#define MT_DMASHDL_Q_MAP_MASK GENMASK(3, 0)
+#define MT_DMASHDL_Q_MAP_SHIFT(_n) (4 * ((_n) % 8))
+
+#define MT_LED_BASE_PHYS 0x80024000
+#define MT_LED_PHYS(_n) (MT_LED_BASE_PHYS + (_n))
+
+#define MT_LED_CTRL MT_LED_PHYS(0x00)
+
+#define MT_LED_CTRL_REPLAY(_n) BIT(0 + (8 * (_n)))
+#define MT_LED_CTRL_POLARITY(_n) BIT(1 + (8 * (_n)))
+#define MT_LED_CTRL_TX_BLINK_MODE(_n) BIT(2 + (8 * (_n)))
+#define MT_LED_CTRL_TX_MANUAL_BLINK(_n) BIT(3 + (8 * (_n)))
+#define MT_LED_CTRL_TX_OVER_BLINK(_n) BIT(5 + (8 * (_n)))
+#define MT_LED_CTRL_KICK(_n) BIT(7 + (8 * (_n)))
+
+#define MT_LED_STATUS_0(_n) MT_LED_PHYS(0x10 + ((_n) * 8))
+#define MT_LED_STATUS_1(_n) MT_LED_PHYS(0x14 + ((_n) * 8))
+#define MT_LED_STATUS_OFF GENMASK(31, 24)
+#define MT_LED_STATUS_ON GENMASK(23, 16)
+#define MT_LED_STATUS_DURATION GENMASK(15, 0)
+
+#define MT_PDMA_BUSY 0x82000504
+#define MT_PDMA_TX_BUSY BIT(0)
+#define MT_PDMA_RX_BUSY BIT(1)
+
+#define MT_EFUSE_BASE ((dev)->reg_map[MT_EFUSE_ADDR_BASE])
+#define MT_EFUSE_BASE_CTRL 0x000
+#define MT_EFUSE_BASE_CTRL_EMPTY BIT(30)
+
+#define MT_EFUSE_CTRL 0x008
+#define MT_EFUSE_CTRL_AOUT GENMASK(5, 0)
+#define MT_EFUSE_CTRL_MODE GENMASK(7, 6)
+#define MT_EFUSE_CTRL_LDO_OFF_TIME GENMASK(13, 8)
+#define MT_EFUSE_CTRL_LDO_ON_TIME GENMASK(15, 14)
+#define MT_EFUSE_CTRL_AIN GENMASK(25, 16)
+#define MT_EFUSE_CTRL_VALID BIT(29)
+#define MT_EFUSE_CTRL_KICK BIT(30)
+#define MT_EFUSE_CTRL_SEL BIT(31)
+
+#define MT_EFUSE_WDATA(_i) (0x010 + ((_i) * 4))
+#define MT_EFUSE_RDATA(_i) (0x030 + ((_i) * 4))
+
+/* INFRACFG host register range on MT7622 */
+#define MT_INFRACFG_MISC 0x700
+#define MT_INFRACFG_MISC_AP2CONN_WAKE BIT(1)
+
+#define MT_UMAC_BASE 0x7c000000
+#define MT_UMAC(ofs) (MT_UMAC_BASE + (ofs))
+#define MT_UDMA_TX_QSEL MT_UMAC(0x008)
+#define MT_FW_DL_EN BIT(3)
+
+#define MT_UDMA_WLCFG_1 MT_UMAC(0x00c)
+#define MT_WL_RX_AGG_PKT_LMT GENMASK(7, 0)
+#define MT_WL_TX_TMOUT_LMT GENMASK(27, 8)
+
+#define MT_UDMA_WLCFG_0 MT_UMAC(0x18)
+#define MT_WL_RX_AGG_TO GENMASK(7, 0)
+#define MT_WL_RX_AGG_LMT GENMASK(15, 8)
+#define MT_WL_TX_TMOUT_FUNC_EN BIT(16)
+#define MT_WL_TX_DPH_CHK_EN BIT(17)
+#define MT_WL_RX_MPSZ_PAD0 BIT(18)
+#define MT_WL_RX_FLUSH BIT(19)
+#define MT_TICK_1US_EN BIT(20)
+#define MT_WL_RX_AGG_EN BIT(21)
+#define MT_WL_RX_EN BIT(22)
+#define MT_WL_TX_EN BIT(23)
+#define MT_WL_RX_BUSY BIT(30)
+#define MT_WL_TX_BUSY BIT(31)
+
+#define MT_MCU_PTA_BASE 0x81060000
+#define MT_MCU_PTA(_n) (MT_MCU_PTA_BASE + (_n))
+
+#define MT_ANT_SWITCH_CON(_n) MT_MCU_PTA(0x0c8 + ((_n) - 1) * 4)
+#define MT_ANT_SWITCH_CON_MODE(_n) (GENMASK(4, 0) << (_n * 8))
+#define MT_ANT_SWITCH_CON_MODE1(_n) (GENMASK(3, 0) << (_n * 8))
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
new file mode 100644
index 000000000..874c929d8
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
@@ -0,0 +1,496 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc.
+ *
+ * Author: Felix Fietkau <nbd@nbd.name>
+ * Lorenzo Bianconi <lorenzo@kernel.org>
+ * Sean Wang <sean.wang@mediatek.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/sdio_func.h>
+
+#include "mt7615.h"
+#include "sdio.h"
+#include "mac.h"
+
+static const struct sdio_device_id mt7663s_table[] = {
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, 0x7603) },
+ { } /* Terminating entry */
+};
+
+static u32 mt7663s_read_whisr(struct mt76_dev *dev)
+{
+ return sdio_readl(dev->sdio.func, MCR_WHISR, NULL);
+}
+
+u32 mt7663s_read_pcr(struct mt7615_dev *dev)
+{
+ struct mt76_sdio *sdio = &dev->mt76.sdio;
+
+ return sdio_readl(sdio->func, MCR_WHLPCR, NULL);
+}
+
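+/* Indirect register read via the H2D/D2H SDIO mailbox handshake */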
+static u32 mt7663s_read_mailbox(struct mt76_dev *dev, u32 offset)
+{
+ struct sdio_func *func = dev->sdio.func;
+ u32 val = ~0, status;
+ int err;
+
+ sdio_claim_host(func);
+
+ sdio_writel(func, offset, MCR_H2DSM0R, &err);
+ if (err < 0) {
+ dev_err(dev->dev, "failed setting address [err=%d]\n", err);
+ goto out;
+ }
+
+ sdio_writel(func, H2D_SW_INT_READ, MCR_WSICR, &err);
+ if (err < 0) {
+ dev_err(dev->dev, "failed setting read mode [err=%d]\n", err);
+ goto out;
+ }
+
+ err = readx_poll_timeout(mt7663s_read_whisr, dev, status,
+ status & H2D_SW_INT_READ, 0, 1000000);
+ if (err < 0) {
+ dev_err(dev->dev, "query whisr timeout\n");
+ goto out;
+ }
+
+ sdio_writel(func, H2D_SW_INT_READ, MCR_WHISR, &err);
+ if (err < 0) {
+ dev_err(dev->dev, "failed setting read mode [err=%d]\n", err);
+ goto out;
+ }
+
+ val = sdio_readl(func, MCR_H2DSM0R, &err);
+ if (err < 0) {
+ dev_err(dev->dev, "failed reading h2dsm0r [err=%d]\n", err);
+ goto out;
+ }
+
+ if (val != offset) {
+ dev_err(dev->dev, "register mismatch\n");
+ val = ~0;
+ goto out;
+ }
+
+ val = sdio_readl(func, MCR_D2HRM1R, &err);
+ if (err < 0)
+ dev_err(dev->dev, "failed reading d2hrm1r [err=%d]\n", err);
+
+out:
+ sdio_release_host(func);
+
+ return val;
+}
+
+static void mt7663s_write_mailbox(struct mt76_dev *dev, u32 offset, u32 val)
+{
+ struct sdio_func *func = dev->sdio.func;
+ u32 status;
+ int err;
+
+ sdio_claim_host(func);
+
+ sdio_writel(func, offset, MCR_H2DSM0R, &err);
+ if (err < 0) {
+ dev_err(dev->dev, "failed setting address [err=%d]\n", err);
+ goto out;
+ }
+
+ sdio_writel(func, val, MCR_H2DSM1R, &err);
+ if (err < 0) {
+ dev_err(dev->dev,
+ "failed setting write value [err=%d]\n", err);
+ goto out;
+ }
+
+ sdio_writel(func, H2D_SW_INT_WRITE, MCR_WSICR, &err);
+ if (err < 0) {
+ dev_err(dev->dev, "failed setting write mode [err=%d]\n", err);
+ goto out;
+ }
+
+ err = readx_poll_timeout(mt7663s_read_whisr, dev, status,
+ status & H2D_SW_INT_WRITE, 0, 1000000);
+ if (err < 0) {
+ dev_err(dev->dev, "query whisr timeout\n");
+ goto out;
+ }
+
+ sdio_writel(func, H2D_SW_INT_WRITE, MCR_WHISR, &err);
+ if (err < 0) {
+ dev_err(dev->dev, "failed setting write mode [err=%d]\n", err);
+ goto out;
+ }
+
+ val = sdio_readl(func, MCR_H2DSM0R, &err);
+ if (err < 0) {
+ dev_err(dev->dev, "failed reading h2dsm0r [err=%d]\n", err);
+ goto out;
+ }
+
+ if (val != offset)
+ dev_err(dev->dev, "register mismatch\n");
+
+out:
+ sdio_release_host(func);
+}
+
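+/* Use MCU register commands once the firmware runs, the mailbox before that */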
+static u32 mt7663s_rr(struct mt76_dev *dev, u32 offset)
+{
+ if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
+ return dev->mcu_ops->mcu_rr(dev, offset);
+ else
+ return mt7663s_read_mailbox(dev, offset);
+}
+
+static void mt7663s_wr(struct mt76_dev *dev, u32 offset, u32 val)
+{
+ if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
+ dev->mcu_ops->mcu_wr(dev, offset, val);
+ else
+ mt7663s_write_mailbox(dev, offset, val);
+}
+
+static u32 mt7663s_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
+{
+ val |= mt7663s_rr(dev, offset) & ~mask;
+ mt7663s_wr(dev, offset, val);
+
+ return val;
+}
+
+static void mt7663s_write_copy(struct mt76_dev *dev, u32 offset,
+ const void *data, int len)
+{
+ const u32 *val = data;
+ int i;
+
+ for (i = 0; i < len / sizeof(u32); i++) {
+ mt7663s_wr(dev, offset, val[i]);
+ offset += sizeof(u32);
+ }
+}
+
+static void mt7663s_read_copy(struct mt76_dev *dev, u32 offset,
+ void *data, int len)
+{
+ u32 *val = data;
+ int i;
+
+ for (i = 0; i < len / sizeof(u32); i++) {
+ val[i] = mt7663s_rr(dev, offset);
+ offset += sizeof(u32);
+ }
+}
+
+static int mt7663s_wr_rp(struct mt76_dev *dev, u32 base,
+ const struct mt76_reg_pair *data,
+ int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ mt7663s_wr(dev, data->reg, data->value);
+ data++;
+ }
+
+ return 0;
+}
+
+static int mt7663s_rd_rp(struct mt76_dev *dev, u32 base,
+ struct mt76_reg_pair *data,
+ int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ data->value = mt7663s_rr(dev, data->reg);
+ data++;
+ }
+
+ return 0;
+}
+
+static void mt7663s_init_work(struct work_struct *work)
+{
+ struct mt7615_dev *dev;
+
+ dev = container_of(work, struct mt7615_dev, mcu_work);
+ if (mt7663s_mcu_init(dev))
+ return;
+
+ mt7615_mcu_set_eeprom(dev);
+ mt7615_mac_init(dev);
+ mt7615_phy_init(dev);
+ mt7615_mcu_del_wtbl_all(dev);
+ mt7615_check_offload_capability(dev);
+}
+
+static int mt7663s_hw_init(struct mt7615_dev *dev, struct sdio_func *func)
+{
+ u32 status, ctrl;
+ int ret;
+
+ sdio_claim_host(func);
+
+ ret = sdio_enable_func(func);
+ if (ret < 0)
+ goto release;
+
+ /* Get ownership from the device */
+ sdio_writel(func, WHLPCR_INT_EN_CLR | WHLPCR_FW_OWN_REQ_CLR,
+ MCR_WHLPCR, &ret);
+ if (ret < 0)
+ goto disable_func;
+
+ ret = readx_poll_timeout(mt7663s_read_pcr, dev, status,
+ status & WHLPCR_IS_DRIVER_OWN, 2000, 1000000);
+ if (ret < 0) {
+ dev_err(dev->mt76.dev, "Cannot get ownership from device\n");
+ goto disable_func;
+ }
+
+ ret = sdio_set_block_size(func, 512);
+ if (ret < 0)
+ goto disable_func;
+
+ /* Enable interrupt */
+ sdio_writel(func, WHLPCR_INT_EN_SET, MCR_WHLPCR, &ret);
+ if (ret < 0)
+ goto disable_func;
+
+ ctrl = WHIER_RX0_DONE_INT_EN | WHIER_TX_DONE_INT_EN;
+ sdio_writel(func, ctrl, MCR_WHIER, &ret);
+ if (ret < 0)
+ goto disable_func;
+
+ /* set WHISR as read clear and Rx aggregation number as 16 */
+ ctrl = FIELD_PREP(MAX_HIF_RX_LEN_NUM, 16);
+ sdio_writel(func, ctrl, MCR_WHCR, &ret);
+ if (ret < 0)
+ goto disable_func;
+
+ ret = sdio_claim_irq(func, mt7663s_sdio_irq);
+ if (ret < 0)
+ goto disable_func;
+
+ sdio_release_host(func);
+
+ return 0;
+
+disable_func:
+ sdio_disable_func(func);
+release:
+ sdio_release_host(func);
+
+ return ret;
+}
+
+static int mt7663s_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ struct mt76_sdio *sdio = &mdev->sdio;
+ u32 pse, ple;
+ int err;
+
+ err = mt7615_mac_sta_add(mdev, vif, sta);
+ if (err < 0)
+ return err;
+
+ /* init sched data quota */
+ pse = mt76_get_field(dev, MT_PSE_PG_HIF0_GROUP, MT_HIF0_MIN_QUOTA);
+ ple = mt76_get_field(dev, MT_PLE_PG_HIF0_GROUP, MT_HIF0_MIN_QUOTA);
+
+ mutex_lock(&sdio->sched.lock);
+ sdio->sched.pse_data_quota = pse;
+ sdio->sched.ple_data_quota = ple;
+ mutex_unlock(&sdio->sched.lock);
+
+ return 0;
+}
+
+static int mt7663s_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ static const struct mt76_driver_ops drv_ops = {
+ .txwi_size = MT_USB_TXD_SIZE,
+ .drv_flags = MT_DRV_RX_DMA_HDR,
+ .tx_prepare_skb = mt7663_usb_sdio_tx_prepare_skb,
+ .tx_complete_skb = mt7663_usb_sdio_tx_complete_skb,
+ .tx_status_data = mt7663_usb_sdio_tx_status_data,
+ .rx_skb = mt7615_queue_rx_skb,
+ .sta_ps = mt7615_sta_ps,
+ .sta_add = mt7663s_sta_add,
+ .sta_remove = mt7615_mac_sta_remove,
+ .update_survey = mt7615_update_channel,
+ };
+ static const struct mt76_bus_ops mt7663s_ops = {
+ .rr = mt7663s_rr,
+ .rmw = mt7663s_rmw,
+ .wr = mt7663s_wr,
+ .write_copy = mt7663s_write_copy,
+ .read_copy = mt7663s_read_copy,
+ .wr_rp = mt7663s_wr_rp,
+ .rd_rp = mt7663s_rd_rp,
+ .type = MT76_BUS_SDIO,
+ };
+ struct ieee80211_ops *ops;
+ struct mt7615_dev *dev;
+ struct mt76_dev *mdev;
+ int i, ret;
+
+ ops = devm_kmemdup(&func->dev, &mt7615_ops, sizeof(mt7615_ops),
+ GFP_KERNEL);
+ if (!ops)
+ return -ENOMEM;
+
+ mdev = mt76_alloc_device(&func->dev, sizeof(*dev), ops, &drv_ops);
+ if (!mdev)
+ return -ENOMEM;
+
+ dev = container_of(mdev, struct mt7615_dev, mt76);
+
+ INIT_WORK(&dev->mcu_work, mt7663s_init_work);
+ dev->reg_map = mt7663_usb_sdio_reg_map;
+ dev->ops = ops;
+ sdio_set_drvdata(func, dev);
+
+ ret = mt76s_init(mdev, func, &mt7663s_ops);
+ if (ret < 0)
+ goto err_free;
+
+ INIT_WORK(&mdev->sdio.tx.xmit_work, mt7663s_tx_work);
+ INIT_WORK(&mdev->sdio.rx.recv_work, mt7663s_rx_work);
+
+ ret = mt7663s_hw_init(dev, func);
+ if (ret)
+ goto err_deinit;
+
+ mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
+ (mt76_rr(dev, MT_HW_REV) & 0xff);
+ dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+
+ mdev->sdio.intr_data = devm_kmalloc(mdev->dev,
+ sizeof(struct mt76s_intr),
+ GFP_KERNEL);
+ if (!mdev->sdio.intr_data) {
+ ret = -ENOMEM;
+ goto err_deinit;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mdev->sdio.xmit_buf); i++) {
+ mdev->sdio.xmit_buf[i] = devm_kmalloc(mdev->dev,
+ MT76S_XMIT_BUF_SZ,
+ GFP_KERNEL);
+ if (!mdev->sdio.xmit_buf[i]) {
+ ret = -ENOMEM;
+ goto err_deinit;
+ }
+ }
+
+ ret = mt76s_alloc_queues(&dev->mt76);
+ if (ret)
+ goto err_deinit;
+
+ ret = mt7663_usb_sdio_register_device(dev);
+ if (ret)
+ goto err_deinit;
+
+ return 0;
+
+err_deinit:
+ mt76s_deinit(&dev->mt76);
+err_free:
+ mt76_free_device(&dev->mt76);
+
+ return ret;
+}
+
+static void mt7663s_remove(struct sdio_func *func)
+{
+ struct mt7615_dev *dev = sdio_get_drvdata(func);
+
+ if (!test_and_clear_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
+ return;
+
+ ieee80211_unregister_hw(dev->mt76.hw);
+ mt76s_deinit(&dev->mt76);
+ mt76_free_device(&dev->mt76);
+}
+
+#ifdef CONFIG_PM
+static int mt7663s_suspend(struct device *dev)
+{
+ struct sdio_func *func = dev_to_sdio_func(dev);
+ struct mt7615_dev *mdev = sdio_get_drvdata(func);
+
+ if (!test_bit(MT76_STATE_SUSPEND, &mdev->mphy.state) &&
+ mt7615_firmware_offload(mdev)) {
+ int err;
+
+ err = mt7615_mcu_set_hif_suspend(mdev, true);
+ if (err < 0)
+ return err;
+ }
+
+ sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+
+ mt76s_stop_txrx(&mdev->mt76);
+
+ return mt7615_mcu_set_fw_ctrl(mdev);
+}
+
+static int mt7663s_resume(struct device *dev)
+{
+ struct sdio_func *func = dev_to_sdio_func(dev);
+ struct mt7615_dev *mdev = sdio_get_drvdata(func);
+ int err;
+
+ err = mt7615_mcu_set_drv_ctrl(mdev);
+ if (err)
+ return err;
+
+ if (!test_bit(MT76_STATE_SUSPEND, &mdev->mphy.state) &&
+ mt7615_firmware_offload(mdev))
+ err = mt7615_mcu_set_hif_suspend(mdev, false);
+
+ return err;
+}
+
+static const struct dev_pm_ops mt7663s_pm_ops = {
+ .suspend = mt7663s_suspend,
+ .resume = mt7663s_resume,
+};
+#endif
+
+MODULE_DEVICE_TABLE(sdio, mt7663s_table);
+MODULE_FIRMWARE(MT7663_OFFLOAD_FIRMWARE_N9);
+MODULE_FIRMWARE(MT7663_OFFLOAD_ROM_PATCH);
+MODULE_FIRMWARE(MT7663_FIRMWARE_N9);
+MODULE_FIRMWARE(MT7663_ROM_PATCH);
+
+static struct sdio_driver mt7663s_driver = {
+ .name = KBUILD_MODNAME,
+ .probe = mt7663s_probe,
+ .remove = mt7663s_remove,
+ .id_table = mt7663s_table,
+#ifdef CONFIG_PM
+ .drv = {
+ .pm = &mt7663s_pm_ops,
+ }
+#endif
+};
+module_sdio_driver(mt7663s_driver);
+
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.h b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.h
new file mode 100644
index 000000000..05180971d
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.h
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc.
+ *
+ * Author: Sean Wang <sean.wang@mediatek.com>
+ */
+
+#ifndef __MT76S_H
+#define __MT76S_H
+
+#define MT_PSE_PAGE_SZ 128
+
+#define MCR_WCIR 0x0000
+#define MCR_WHLPCR 0x0004
+#define WHLPCR_FW_OWN_REQ_CLR BIT(9)
+#define WHLPCR_FW_OWN_REQ_SET BIT(8)
+#define WHLPCR_IS_DRIVER_OWN BIT(8)
+#define WHLPCR_INT_EN_CLR BIT(1)
+#define WHLPCR_INT_EN_SET BIT(0)
+
+#define MCR_WSDIOCSR 0x0008
+#define MCR_WHCR 0x000C
+#define W_INT_CLR_CTRL BIT(1)
+#define RECV_MAILBOX_RD_CLR_EN BIT(2)
+#define MAX_HIF_RX_LEN_NUM GENMASK(13, 8)
+#define RX_ENHANCE_MODE BIT(16)
+
+#define MCR_WHISR 0x0010
+#define MCR_WHIER 0x0014
+#define WHIER_D2H_SW_INT GENMASK(31, 8)
+#define WHIER_FW_OWN_BACK_INT_EN BIT(7)
+#define WHIER_ABNORMAL_INT_EN BIT(6)
+#define WHIER_RX1_DONE_INT_EN BIT(2)
+#define WHIER_RX0_DONE_INT_EN BIT(1)
+#define WHIER_TX_DONE_INT_EN BIT(0)
+#define WHIER_DEFAULT (WHIER_RX0_DONE_INT_EN | \
+ WHIER_RX1_DONE_INT_EN | \
+ WHIER_TX_DONE_INT_EN | \
+ WHIER_ABNORMAL_INT_EN | \
+ WHIER_D2H_SW_INT)
+
+#define MCR_WASR 0x0020
+#define MCR_WSICR 0x0024
+#define MCR_WTSR0 0x0028
+#define TQ0_CNT GENMASK(7, 0)
+#define TQ1_CNT GENMASK(15, 8)
+#define TQ2_CNT GENMASK(23, 16)
+#define TQ3_CNT GENMASK(31, 24)
+
+#define MCR_WTSR1 0x002c
+#define TQ4_CNT GENMASK(7, 0)
+#define TQ5_CNT GENMASK(15, 8)
+#define TQ6_CNT GENMASK(23, 16)
+#define TQ7_CNT GENMASK(31, 24)
+
+#define MCR_WTDR1 0x0034
+#define MCR_WRDR0 0x0050
+#define MCR_WRDR1 0x0054
+#define MCR_WRDR(p) (0x0050 + 4 * (p))
+#define MCR_H2DSM0R 0x0070
+#define H2D_SW_INT_READ BIT(16)
+#define H2D_SW_INT_WRITE BIT(17)
+
+#define MCR_H2DSM1R 0x0074
+#define MCR_D2HRM0R 0x0078
+#define MCR_D2HRM1R 0x007c
+#define MCR_D2HRM2R 0x0080
+#define MCR_WRPLR 0x0090
+#define RX0_PACKET_LENGTH GENMASK(15, 0)
+#define RX1_PACKET_LENGTH GENMASK(31, 16)
+
+#define MCR_WTMDR 0x00b0
+#define MCR_WTMCR 0x00b4
+#define MCR_WTMDPCR0 0x00b8
+#define MCR_WTMDPCR1 0x00bc
+#define MCR_WPLRCR 0x00d4
+#define MCR_WSR 0x00D8
+#define MCR_CLKIOCR 0x0100
+#define MCR_CMDIOCR 0x0104
+#define MCR_DAT0IOCR 0x0108
+#define MCR_DAT1IOCR 0x010C
+#define MCR_DAT2IOCR 0x0110
+#define MCR_DAT3IOCR 0x0114
+#define MCR_CLKDLYCR 0x0118
+#define MCR_CMDDLYCR 0x011C
+#define MCR_ODATDLYCR 0x0120
+#define MCR_IDATDLYCR1 0x0124
+#define MCR_IDATDLYCR2 0x0128
+#define MCR_ILCHCR 0x012C
+#define MCR_WTQCR0 0x0130
+#define MCR_WTQCR1 0x0134
+#define MCR_WTQCR2 0x0138
+#define MCR_WTQCR3 0x013C
+#define MCR_WTQCR4 0x0140
+#define MCR_WTQCR5 0x0144
+#define MCR_WTQCR6 0x0148
+#define MCR_WTQCR7 0x014C
+#define MCR_WTQCR(x) (0x130 + 4 * (x))
+#define TXQ_CNT_L GENMASK(15, 0)
+#define TXQ_CNT_H GENMASK(31, 16)
+
+#define MCR_SWPCDBGR 0x0154
+
+struct mt76s_intr {
+ u32 isr;
+ struct {
+ u32 wtqcr[8];
+ } tx;
+ struct {
+ u16 num[2];
+ u16 len[2][16];
+ } rx;
+ u32 rec_mb[2];
+} __packed;
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
new file mode 100644
index 000000000..38670c003
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2020 MediaTek Inc.
+ *
+ * Author: Felix Fietkau <nbd@nbd.name>
+ * Lorenzo Bianconi <lorenzo@kernel.org>
+ * Sean Wang <sean.wang@mediatek.com>
+ */
+#include <linux/kernel.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/module.h>
+#include <linux/iopoll.h>
+
+#include "mt7615.h"
+#include "mac.h"
+#include "mcu.h"
+#include "regs.h"
+#include "sdio.h"
+
+static int mt7663s_mcu_init_sched(struct mt7615_dev *dev)
+{
+ struct mt76_sdio *sdio = &dev->mt76.sdio;
+ u32 pse0, ple, pse1, txdwcnt;
+
+ pse0 = mt76_get_field(dev, MT_PSE_PG_HIF0_GROUP, MT_HIF0_MIN_QUOTA);
+ pse1 = mt76_get_field(dev, MT_PSE_PG_HIF1_GROUP, MT_HIF1_MIN_QUOTA);
+ ple = mt76_get_field(dev, MT_PLE_PG_HIF0_GROUP, MT_HIF0_MIN_QUOTA);
+ txdwcnt = mt76_get_field(dev, MT_PP_TXDWCNT,
+ MT_PP_TXDWCNT_TX1_ADD_DW_CNT);
+
+ mutex_lock(&sdio->sched.lock);
+
+ sdio->sched.pse_data_quota = pse0;
+ sdio->sched.ple_data_quota = ple;
+ sdio->sched.pse_mcu_quota = pse1;
+ sdio->sched.deficit = txdwcnt << 2;
+
+ mutex_unlock(&sdio->sched.lock);
+
+ return 0;
+}
+
+static int
+mt7663s_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
+ int cmd, bool wait_resp)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ int ret, seq;
+
+ mutex_lock(&mdev->mcu.mutex);
+
+ mt7615_mcu_fill_msg(dev, skb, cmd, &seq);
+ ret = mt76_tx_queue_skb_raw(dev, MT_TXQ_MCU, skb, 0);
+ if (ret)
+ goto out;
+
+ mt76_queue_kick(dev, mdev->q_tx[MT_TXQ_MCU]);
+ if (wait_resp)
+ ret = mt7615_mcu_wait_response(dev, cmd, seq);
+
+out:
+ mutex_unlock(&mdev->mcu.mutex);
+
+ return ret;
+}
+
+static int mt7663s_mcu_drv_pmctrl(struct mt7615_dev *dev)
+{
+ struct sdio_func *func = dev->mt76.sdio.func;
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ u32 status;
+ int ret;
+
+ if (!test_and_clear_bit(MT76_STATE_PM, &mphy->state))
+ goto out;
+
+ sdio_claim_host(func);
+
+ sdio_writel(func, WHLPCR_FW_OWN_REQ_CLR, MCR_WHLPCR, NULL);
+
+ ret = readx_poll_timeout(mt7663s_read_pcr, dev, status,
+ status & WHLPCR_IS_DRIVER_OWN, 2000, 1000000);
+ if (ret < 0) {
+ dev_err(dev->mt76.dev, "Cannot get ownership from device");
+ set_bit(MT76_STATE_PM, &mphy->state);
+ sdio_release_host(func);
+
+ return ret;
+ }
+
+ sdio_release_host(func);
+
+out:
+ dev->pm.last_activity = jiffies;
+
+ return 0;
+}
+
+static int mt7663s_mcu_fw_pmctrl(struct mt7615_dev *dev)
+{
+ struct sdio_func *func = dev->mt76.sdio.func;
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ u32 status;
+ int ret;
+
+ if (test_and_set_bit(MT76_STATE_PM, &mphy->state))
+ return 0;
+
+ sdio_claim_host(func);
+
+ sdio_writel(func, WHLPCR_FW_OWN_REQ_SET, MCR_WHLPCR, NULL);
+
+ ret = readx_poll_timeout(mt7663s_read_pcr, dev, status,
+ !(status & WHLPCR_IS_DRIVER_OWN), 2000, 1000000);
+ if (ret < 0) {
+ dev_err(dev->mt76.dev, "Cannot set ownership to device");
+ clear_bit(MT76_STATE_PM, &mphy->state);
+ }
+
+ sdio_release_host(func);
+
+ return ret;
+}
+
+int mt7663s_mcu_init(struct mt7615_dev *dev)
+{
+ static const struct mt76_mcu_ops mt7663s_mcu_ops = {
+ .headroom = sizeof(struct mt7615_mcu_txd),
+ .tailroom = MT_USB_TAIL_SIZE,
+ .mcu_skb_send_msg = mt7663s_mcu_send_message,
+ .mcu_send_msg = mt7615_mcu_msg_send,
+ .mcu_restart = mt7615_mcu_restart,
+ .mcu_rr = mt7615_mcu_reg_rr,
+ .mcu_wr = mt7615_mcu_reg_wr,
+ };
+ struct mt7615_mcu_ops *mcu_ops;
+ int ret;
+
+ ret = mt7663s_mcu_drv_pmctrl(dev);
+ if (ret)
+ return ret;
+
+ dev->mt76.mcu_ops = &mt7663s_mcu_ops;
+
+ ret = mt76_get_field(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY);
+ if (ret) {
+ mt7615_mcu_restart(&dev->mt76);
+ if (!mt76_poll_msec(dev, MT_CONN_ON_MISC,
+ MT_TOP_MISC2_FW_N9_RDY, 0, 500))
+ return -EIO;
+ }
+
+ ret = __mt7663_load_firmware(dev);
+ if (ret)
+ return ret;
+
+ mcu_ops = devm_kmemdup(dev->mt76.dev, dev->mcu_ops, sizeof(*mcu_ops),
+ GFP_KERNEL);
+ if (!mcu_ops)
+ return -ENOMEM;
+
+ mcu_ops->set_drv_ctrl = mt7663s_mcu_drv_pmctrl;
+ mcu_ops->set_fw_ctrl = mt7663s_mcu_fw_pmctrl;
+ dev->mcu_ops = mcu_ops;
+
+ ret = mt7663s_mcu_init_sched(dev);
+ if (ret)
+ return ret;
+
+ set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
new file mode 100644
index 000000000..d7d61a5b6
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
@@ -0,0 +1,336 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc.
+ *
+ * Author: Felix Fietkau <nbd@nbd.name>
+ * Lorenzo Bianconi <lorenzo@kernel.org>
+ * Sean Wang <sean.wang@mediatek.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/sdio_func.h>
+
+#include "../trace.h"
+#include "mt7615.h"
+#include "sdio.h"
+#include "mac.h"
+
+static int mt7663s_refill_sched_quota(struct mt76_dev *dev, u32 *data)
+{
+ u32 ple_ac_data_quota[] = {
+ FIELD_GET(TXQ_CNT_L, data[4]), /* VO */
+ FIELD_GET(TXQ_CNT_H, data[3]), /* VI */
+ FIELD_GET(TXQ_CNT_L, data[3]), /* BE */
+ FIELD_GET(TXQ_CNT_H, data[2]), /* BK */
+ };
+ u32 pse_ac_data_quota[] = {
+ FIELD_GET(TXQ_CNT_H, data[1]), /* VO */
+ FIELD_GET(TXQ_CNT_L, data[1]), /* VI */
+ FIELD_GET(TXQ_CNT_H, data[0]), /* BE */
+ FIELD_GET(TXQ_CNT_L, data[0]), /* BK */
+ };
+ u32 pse_mcu_quota = FIELD_GET(TXQ_CNT_L, data[2]);
+ u32 pse_data_quota = 0, ple_data_quota = 0;
+ struct mt76_sdio *sdio = &dev->sdio;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pse_ac_data_quota); i++) {
+ pse_data_quota += pse_ac_data_quota[i];
+ ple_data_quota += ple_ac_data_quota[i];
+ }
+
+ if (!pse_data_quota && !ple_data_quota && !pse_mcu_quota)
+ return 0;
+
+ mutex_lock(&sdio->sched.lock);
+ sdio->sched.pse_mcu_quota += pse_mcu_quota;
+ sdio->sched.pse_data_quota += pse_data_quota;
+ sdio->sched.ple_data_quota += ple_data_quota;
+ mutex_unlock(&sdio->sched.lock);
+
+ return pse_data_quota + ple_data_quota + pse_mcu_quota;
+}
+
+static struct sk_buff *mt7663s_build_rx_skb(void *data, int data_len,
+ int buf_len)
+{
+ int len = min_t(int, data_len, MT_SKB_HEAD_LEN);
+ struct sk_buff *skb;
+
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ skb_put_data(skb, data, len);
+ if (data_len > len) {
+ struct page *page;
+
+ data += len;
+ page = virt_to_head_page(data);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ page, data - page_address(page),
+ data_len - len, buf_len);
+ get_page(page);
+ }
+
+ return skb;
+}
+
+static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
+ struct mt76s_intr *intr)
+{
+ struct mt76_queue *q = &dev->q_rx[qid];
+ struct mt76_sdio *sdio = &dev->sdio;
+ int len = 0, err, i;
+ struct page *page;
+ u8 *buf;
+
+ for (i = 0; i < intr->rx.num[qid]; i++)
+ len += round_up(intr->rx.len[qid][i] + 4, 4);
+
+ if (!len)
+ return 0;
+
+ if (len > sdio->func->cur_blksize)
+ len = roundup(len, sdio->func->cur_blksize);
+
+ page = __dev_alloc_pages(GFP_KERNEL, get_order(len));
+ if (!page)
+ return -ENOMEM;
+
+ buf = page_address(page);
+
+ sdio_claim_host(sdio->func);
+ err = sdio_readsb(sdio->func, buf, MCR_WRDR(qid), len);
+ sdio_release_host(sdio->func);
+
+ if (err < 0) {
+ dev_err(dev->dev, "sdio read data failed:%d\n", err);
+ put_page(page);
+ return err;
+ }
+
+ for (i = 0; i < intr->rx.num[qid]; i++) {
+ int index = (q->head + i) % q->ndesc;
+ struct mt76_queue_entry *e = &q->entry[index];
+
+ len = intr->rx.len[qid][i];
+ e->skb = mt7663s_build_rx_skb(buf, len, round_up(len + 4, 4));
+ if (!e->skb)
+ break;
+
+ buf += round_up(len + 4, 4);
+ if (q->queued + i + 1 == q->ndesc)
+ break;
+ }
+ put_page(page);
+
+ spin_lock_bh(&q->lock);
+ q->head = (q->head + i) % q->ndesc;
+ q->queued += i;
+ spin_unlock_bh(&q->lock);
+
+ return i;
+}
+
+static int mt7663s_tx_pick_quota(struct mt76_sdio *sdio, enum mt76_txq_id qid,
+ int buf_sz, int *pse_size, int *ple_size)
+{
+ int pse_sz;
+
+ pse_sz = DIV_ROUND_UP(buf_sz + sdio->sched.deficit, MT_PSE_PAGE_SZ);
+
+ if (qid == MT_TXQ_MCU) {
+ if (sdio->sched.pse_mcu_quota < *pse_size + pse_sz)
+ return -EBUSY;
+ } else {
+ if (sdio->sched.pse_data_quota < *pse_size + pse_sz ||
+ sdio->sched.ple_data_quota < *ple_size + 1)
+ return -EBUSY;
+
+ *ple_size = *ple_size + 1;
+ }
+ *pse_size = *pse_size + pse_sz;
+
+ return 0;
+}
+
+static void mt7663s_tx_update_quota(struct mt76_sdio *sdio, enum mt76_txq_id qid,
+ int pse_size, int ple_size)
+{
+ mutex_lock(&sdio->sched.lock);
+ if (qid == MT_TXQ_MCU) {
+ sdio->sched.pse_mcu_quota -= pse_size;
+ } else {
+ sdio->sched.pse_data_quota -= pse_size;
+ sdio->sched.ple_data_quota -= ple_size;
+ }
+ mutex_unlock(&sdio->sched.lock);
+}
+
+static int __mt7663s_xmit_queue(struct mt76_dev *dev, u8 *data, int len)
+{
+ struct mt76_sdio *sdio = &dev->sdio;
+ int err;
+
+ if (len > sdio->func->cur_blksize)
+ len = roundup(len, sdio->func->cur_blksize);
+
+ sdio_claim_host(sdio->func);
+ err = sdio_writesb(sdio->func, MCR_WTDR1, data, len);
+ sdio_release_host(sdio->func);
+
+ if (err)
+ dev_err(dev->dev, "sdio write failed: %d\n", err);
+
+ return err;
+}
+
+static int mt7663s_tx_run_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
+{
+ int err, nframes = 0, len = 0, pse_sz = 0, ple_sz = 0;
+ struct mt76_queue *q = dev->q_tx[qid];
+ struct mt76_sdio *sdio = &dev->sdio;
+ u8 pad;
+
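+ /* aggregate pending frames into xmit_buf while PSE/PLE quota allows, then send them in one burst */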
+ while (q->first != q->head) {
+ struct mt76_queue_entry *e = &q->entry[q->first];
+ struct sk_buff *iter;
+
+ smp_rmb();
+
+ if (!test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state)) {
+ __skb_put_zero(e->skb, 4);
+ err = __mt7663s_xmit_queue(dev, e->skb->data,
+ e->skb->len);
+ if (err)
+ return err;
+
+ goto next;
+ }
+
+ pad = roundup(e->skb->len, 4) - e->skb->len;
+ if (len + e->skb->len + pad + 4 > MT76S_XMIT_BUF_SZ)
+ break;
+
+ if (mt7663s_tx_pick_quota(sdio, qid, e->buf_sz, &pse_sz,
+ &ple_sz))
+ break;
+
+ memcpy(sdio->xmit_buf[qid] + len, e->skb->data,
+ skb_headlen(e->skb));
+ len += skb_headlen(e->skb);
+ nframes++;
+
+ skb_walk_frags(e->skb, iter) {
+ memcpy(sdio->xmit_buf[qid] + len, iter->data,
+ iter->len);
+ len += iter->len;
+ nframes++;
+ }
+
+ if (unlikely(pad)) {
+ memset(sdio->xmit_buf[qid] + len, 0, pad);
+ len += pad;
+ }
+next:
+ q->first = (q->first + 1) % q->ndesc;
+ e->done = true;
+ }
+
+ if (nframes) {
+ memset(sdio->xmit_buf[qid] + len, 0, 4);
+ err = __mt7663s_xmit_queue(dev, sdio->xmit_buf[qid], len + 4);
+ if (err)
+ return err;
+ }
+ mt7663s_tx_update_quota(sdio, qid, pse_sz, ple_sz);
+
+ return nframes;
+}
+
+void mt7663s_tx_work(struct work_struct *work)
+{
+ struct mt76_sdio *sdio = container_of(work, struct mt76_sdio,
+ tx.xmit_work);
+ struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
+ int i, nframes = 0;
+
+ for (i = 0; i < MT_TXQ_MCU_WA; i++) {
+ int ret;
+
+ ret = mt7663s_tx_run_queue(dev, i);
+ if (ret < 0)
+ break;
+
+ nframes += ret;
+ }
+ if (nframes)
+ queue_work(sdio->txrx_wq, &sdio->tx.xmit_work);
+
+ queue_work(sdio->txrx_wq, &sdio->tx.status_work);
+}
+
+void mt7663s_rx_work(struct work_struct *work)
+{
+ struct mt76_sdio *sdio = container_of(work, struct mt76_sdio,
+ rx.recv_work);
+ struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
+ struct mt76s_intr *intr = sdio->intr_data;
+ int nframes = 0, ret;
+
+ /* disable interrupt */
+ sdio_claim_host(sdio->func);
+ sdio_writel(sdio->func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, NULL);
+ ret = sdio_readsb(sdio->func, intr, MCR_WHISR, sizeof(*intr));
+ sdio_release_host(sdio->func);
+
+ if (ret < 0)
+ goto out;
+
+ trace_dev_irq(dev, intr->isr, 0);
+
+ if (intr->isr & WHIER_RX0_DONE_INT_EN) {
+ ret = mt7663s_rx_run_queue(dev, 0, intr);
+ if (ret > 0) {
+ queue_work(sdio->txrx_wq, &sdio->rx.net_work);
+ nframes += ret;
+ }
+ }
+
+ if (intr->isr & WHIER_RX1_DONE_INT_EN) {
+ ret = mt7663s_rx_run_queue(dev, 1, intr);
+ if (ret > 0) {
+ queue_work(sdio->txrx_wq, &sdio->rx.net_work);
+ nframes += ret;
+ }
+ }
+
+ if (mt7663s_refill_sched_quota(dev, intr->tx.wtqcr))
+ queue_work(sdio->txrx_wq, &sdio->tx.xmit_work);
+
+ if (nframes) {
+ queue_work(sdio->txrx_wq, &sdio->rx.recv_work);
+ return;
+ }
+out:
+ /* enable interrupt */
+ sdio_claim_host(sdio->func);
+ sdio_writel(sdio->func, WHLPCR_INT_EN_SET, MCR_WHLPCR, NULL);
+ sdio_release_host(sdio->func);
+}
+
+void mt7663s_sdio_irq(struct sdio_func *func)
+{
+ struct mt7615_dev *dev = sdio_get_drvdata(func);
+ struct mt76_sdio *sdio = &dev->mt76.sdio;
+
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.phy.state))
+ return;
+
+ queue_work(sdio->txrx_wq, &sdio->rx.recv_work);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/soc.c b/drivers/net/wireless/mediatek/mt76/mt7615/soc.c
new file mode 100644
index 000000000..9aa5183c7
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/soc.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Ryder Lee <ryder.lee@mediatek.com>
+ * Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include "mt7615.h"
+
+int mt7622_wmac_init(struct mt7615_dev *dev)
+{
+ struct device_node *np = dev->mt76.dev->of_node;
+
+ if (!is_mt7622(&dev->mt76))
+ return 0;
+
+ dev->infracfg = syscon_regmap_lookup_by_phandle(np, "mediatek,infracfg");
+ if (IS_ERR(dev->infracfg)) {
+ dev_err(dev->mt76.dev, "Cannot find infracfg controller\n");
+ return PTR_ERR(dev->infracfg);
+ }
+
+ return 0;
+}
+
+static int mt7622_wmac_probe(struct platform_device *pdev)
+{
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ void __iomem *mem_base;
+ int irq;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ mem_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mem_base)) {
+ dev_err(&pdev->dev, "Failed to get memory resource\n");
+ return PTR_ERR(mem_base);
+ }
+
+ return mt7615_mmio_probe(&pdev->dev, mem_base, irq, mt7615e_reg_map);
+}
+
+static int mt7622_wmac_remove(struct platform_device *pdev)
+{
+ struct mt7615_dev *dev = platform_get_drvdata(pdev);
+
+ mt7615_unregister_device(dev);
+
+ return 0;
+}
+
+static const struct of_device_id mt7622_wmac_of_match[] = {
+ { .compatible = "mediatek,mt7622-wmac" },
+ {},
+};
+
+struct platform_driver mt7622_wmac_driver = {
+ .driver = {
+ .name = "mt7622-wmac",
+ .of_match_table = mt7622_wmac_of_match,
+ },
+ .probe = mt7622_wmac_probe,
+ .remove = mt7622_wmac_remove,
+};
+
+MODULE_FIRMWARE(MT7622_FIRMWARE_N9);
+MODULE_FIRMWARE(MT7622_ROM_PATCH);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c
new file mode 100644
index 000000000..e4dc62314
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
+
+#include "mt7615.h"
+#include "eeprom.h"
+#include "mcu.h"
+
+enum {
+ TM_CHANGED_TXPOWER_CTRL,
+ TM_CHANGED_TXPOWER,
+ TM_CHANGED_FREQ_OFFSET,
+
+ /* must be last */
+ NUM_TM_CHANGED
+};
+
+static const u8 tm_change_map[] = {
+ [TM_CHANGED_TXPOWER_CTRL] = MT76_TM_ATTR_TX_POWER_CONTROL,
+ [TM_CHANGED_TXPOWER] = MT76_TM_ATTR_TX_POWER,
+ [TM_CHANGED_FREQ_OFFSET] = MT76_TM_ATTR_FREQ_OFFSET,
+};
+
+static const u32 reg_backup_list[] = {
+ MT_WF_PHY_RFINTF3_0(0),
+ MT_WF_PHY_RFINTF3_0(1),
+ MT_WF_PHY_RFINTF3_0(2),
+ MT_WF_PHY_RFINTF3_0(3),
+ MT_ANT_SWITCH_CON(2),
+ MT_ANT_SWITCH_CON(3),
+ MT_ANT_SWITCH_CON(4),
+ MT_ANT_SWITCH_CON(6),
+ MT_ANT_SWITCH_CON(7),
+ MT_ANT_SWITCH_CON(8),
+};
+
+static const struct {
+ u16 wf;
+ u16 reg;
+} rf_backup_list[] = {
+ { 0, 0x48 },
+ { 1, 0x48 },
+ { 2, 0x48 },
+ { 3, 0x48 },
+};
+
+static int
+mt7615_tm_set_tx_power(struct mt7615_phy *phy)
+{
+ struct mt7615_dev *dev = phy->dev;
+ struct mt76_phy *mphy = phy->mt76;
+ int i, ret, n_chains = hweight8(mphy->antenna_mask);
+ struct cfg80211_chan_def *chandef = &mphy->chandef;
+ int freq = chandef->center_freq1, len, target_chains;
+ u8 *data, *eep = (u8 *)dev->mt76.eeprom.data;
+ enum nl80211_band band = chandef->chan->band;
+ struct sk_buff *skb;
+ struct {
+ u8 center_chan;
+ u8 dbdc_idx;
+ u8 band;
+ u8 rsv;
+ } __packed req_hdr = {
+ .center_chan = ieee80211_frequency_to_channel(freq),
+ .band = band,
+ .dbdc_idx = phy != &dev->phy,
+ };
+ u8 *tx_power = NULL;
+
+ if (dev->mt76.test.state != MT76_TM_STATE_OFF)
+ tx_power = dev->mt76.test.tx_power;
+
+ len = MT7615_EE_MAX - MT_EE_NIC_CONF_0;
+ skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, sizeof(req_hdr) + len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put_data(skb, &req_hdr, sizeof(req_hdr));
+ data = skb_put_data(skb, eep + MT_EE_NIC_CONF_0, len);
+
+ target_chains = mt7615_ext_pa_enabled(dev, band) ? 1 : n_chains;
+ for (i = 0; i < target_chains; i++) {
+ ret = mt7615_eeprom_get_target_power_index(dev, chandef->chan, i);
+ if (ret < 0) {
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ if (tx_power && tx_power[i])
+ data[ret - MT_EE_NIC_CONF_0] = tx_power[i];
+ }
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_SET_TX_POWER_CTRL, false);
+}
+
+static void
+mt7615_tm_reg_backup_restore(struct mt7615_dev *dev)
+{
+ u32 *b = dev->test.reg_backup;
+ int n_regs = ARRAY_SIZE(reg_backup_list);
+ int n_rf_regs = ARRAY_SIZE(rf_backup_list);
+ int i;
+
+ if (dev->mt76.test.state == MT76_TM_STATE_OFF) {
+ for (i = 0; i < n_regs; i++)
+ mt76_wr(dev, reg_backup_list[i], b[i]);
+
+ for (i = 0; i < n_rf_regs; i++)
+ mt7615_rf_wr(dev, rf_backup_list[i].wf,
+ rf_backup_list[i].reg, b[n_regs + i]);
+ return;
+ }
+
+ if (b)
+ return;
+
+ b = devm_kzalloc(dev->mt76.dev, 4 * (n_regs + n_rf_regs),
+ GFP_KERNEL);
+ if (!b)
+ return;
+
+ dev->test.reg_backup = b;
+ for (i = 0; i < n_regs; i++)
+ b[i] = mt76_rr(dev, reg_backup_list[i]);
+ for (i = 0; i < n_rf_regs; i++)
+ b[n_regs + i] = mt7615_rf_rr(dev, rf_backup_list[i].wf,
+ rf_backup_list[i].reg);
+}
+
+static void
+mt7615_tm_init_phy(struct mt7615_dev *dev, struct mt7615_phy *phy)
+{
+ unsigned int total_flags = ~0;
+
+ if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
+ return;
+
+ mutex_unlock(&dev->mt76.mutex);
+ mt7615_set_channel(phy);
+ mt7615_ops.configure_filter(phy->mt76->hw, 0, &total_flags, 0);
+ mutex_lock(&dev->mt76.mutex);
+
+ mt7615_tm_reg_backup_restore(dev);
+}
+
+static void
+mt7615_tm_init(struct mt7615_dev *dev)
+{
+ mt7615_tm_init_phy(dev, &dev->phy);
+
+ if (dev->mt76.phy2)
+ mt7615_tm_init_phy(dev, dev->mt76.phy2->priv);
+}
+
+static void
+mt7615_tm_set_rx_enable(struct mt7615_dev *dev, bool en)
+{
+ u32 rqcr_mask = (MT_ARB_RQCR_RX_START |
+ MT_ARB_RQCR_RXV_START |
+ MT_ARB_RQCR_RXV_R_EN |
+ MT_ARB_RQCR_RXV_T_EN) *
+ (BIT(0) | BIT(MT_ARB_RQCR_BAND_SHIFT));
+
+ if (en) {
+ mt76_clear(dev, MT_ARB_SCR,
+ MT_ARB_SCR_RX0_DISABLE | MT_ARB_SCR_RX1_DISABLE);
+ mt76_set(dev, MT_ARB_RQCR, rqcr_mask);
+ } else {
+ mt76_set(dev, MT_ARB_SCR,
+ MT_ARB_SCR_RX0_DISABLE | MT_ARB_SCR_RX1_DISABLE);
+ mt76_clear(dev, MT_ARB_RQCR, rqcr_mask);
+ }
+}
+
+static void
+mt7615_tm_set_tx_antenna(struct mt7615_dev *dev, bool en)
+{
+ struct mt76_testmode_data *td = &dev->mt76.test;
+ u8 mask = td->tx_antenna_mask;
+ int i;
+
+ if (!mask)
+ return;
+
+ if (!en)
+ mask = dev->phy.chainmask;
+
+ for (i = 0; i < 4; i++) {
+ mt76_rmw_field(dev, MT_WF_PHY_RFINTF3_0(i),
+ MT_WF_PHY_RFINTF3_0_ANT,
+ (td->tx_antenna_mask & BIT(i)) ? 0 : 0xa);
+ }
+
+ /* 2.4 GHz band */
+ mt76_rmw_field(dev, MT_ANT_SWITCH_CON(3), MT_ANT_SWITCH_CON_MODE(0),
+ (td->tx_antenna_mask & BIT(0)) ? 0x8 : 0x1b);
+ mt76_rmw_field(dev, MT_ANT_SWITCH_CON(4), MT_ANT_SWITCH_CON_MODE(2),
+ (td->tx_antenna_mask & BIT(1)) ? 0xe : 0x1b);
+ mt76_rmw_field(dev, MT_ANT_SWITCH_CON(6), MT_ANT_SWITCH_CON_MODE1(0),
+ (td->tx_antenna_mask & BIT(2)) ? 0x0 : 0xf);
+ mt76_rmw_field(dev, MT_ANT_SWITCH_CON(7), MT_ANT_SWITCH_CON_MODE1(2),
+ (td->tx_antenna_mask & BIT(3)) ? 0x6 : 0xf);
+
+ /* 5 GHz band */
+ mt76_rmw_field(dev, MT_ANT_SWITCH_CON(4), MT_ANT_SWITCH_CON_MODE(1),
+ (td->tx_antenna_mask & BIT(0)) ? 0xd : 0x1b);
+ mt76_rmw_field(dev, MT_ANT_SWITCH_CON(2), MT_ANT_SWITCH_CON_MODE(3),
+ (td->tx_antenna_mask & BIT(1)) ? 0x13 : 0x1b);
+ mt76_rmw_field(dev, MT_ANT_SWITCH_CON(7), MT_ANT_SWITCH_CON_MODE1(1),
+ (td->tx_antenna_mask & BIT(2)) ? 0x5 : 0xf);
+ mt76_rmw_field(dev, MT_ANT_SWITCH_CON(8), MT_ANT_SWITCH_CON_MODE1(3),
+ (td->tx_antenna_mask & BIT(3)) ? 0xb : 0xf);
+
+ for (i = 0; i < 4; i++) {
+ u32 val;
+
+ val = mt7615_rf_rr(dev, i, 0x48);
+ val &= ~(0x3ff << 20);
+ if (td->tx_antenna_mask & BIT(i))
+ val |= 3 << 20;
+ else
+ val |= (2 << 28) | (2 << 26) | (8 << 20);
+ mt7615_rf_wr(dev, i, 0x48, val);
+ }
+}
+
+static void
+mt7615_tm_set_tx_frames(struct mt7615_dev *dev, bool en)
+{
+ struct ieee80211_tx_info *info;
+ struct sk_buff *skb = dev->mt76.test.tx_skb;
+
+ mt7615_mcu_set_chan_info(&dev->phy, MCU_EXT_CMD_SET_RX_PATH);
+ mt7615_tm_set_tx_antenna(dev, en);
+ mt7615_tm_set_rx_enable(dev, !en);
+ if (!en || !skb)
+ return;
+
+ info = IEEE80211_SKB_CB(skb);
+ info->control.vif = dev->phy.monitor_vif;
+}
+
+static void
+mt7615_tm_update_params(struct mt7615_dev *dev, u32 changed)
+{
+ struct mt76_testmode_data *td = &dev->mt76.test;
+ bool en = dev->mt76.test.state != MT76_TM_STATE_OFF;
+
+ if (changed & BIT(TM_CHANGED_TXPOWER_CTRL))
+ mt7615_mcu_set_test_param(dev, MCU_ATE_SET_TX_POWER_CONTROL,
+ en, en && td->tx_power_control);
+ if (changed & BIT(TM_CHANGED_FREQ_OFFSET))
+ mt7615_mcu_set_test_param(dev, MCU_ATE_SET_FREQ_OFFSET,
+ en, en ? td->freq_offset : 0);
+ if (changed & BIT(TM_CHANGED_TXPOWER))
+ mt7615_tm_set_tx_power(&dev->phy);
+}
+
+static int
+mt7615_tm_set_state(struct mt76_dev *mdev, enum mt76_testmode_state state)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ struct mt76_testmode_data *td = &mdev->test;
+ enum mt76_testmode_state prev_state = td->state;
+
+ mdev->test.state = state;
+
+ if (prev_state == MT76_TM_STATE_TX_FRAMES)
+ mt7615_tm_set_tx_frames(dev, false);
+ else if (state == MT76_TM_STATE_TX_FRAMES)
+ mt7615_tm_set_tx_frames(dev, true);
+
+ if (state <= MT76_TM_STATE_IDLE)
+ mt7615_tm_init(dev);
+
+ if ((state == MT76_TM_STATE_IDLE &&
+ prev_state == MT76_TM_STATE_OFF) ||
+ (state == MT76_TM_STATE_OFF &&
+ prev_state == MT76_TM_STATE_IDLE)) {
+ u32 changed = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tm_change_map); i++) {
+ u16 cur = tm_change_map[i];
+
+ if (td->param_set[cur / 32] & BIT(cur % 32))
+ changed |= BIT(i);
+ }
+
+ mt7615_tm_update_params(dev, changed);
+ }
+
+ return 0;
+}
+
+static int
+mt7615_tm_set_params(struct mt76_dev *mdev, struct nlattr **tb,
+ enum mt76_testmode_state new_state)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ struct mt76_testmode_data *td = &dev->mt76.test;
+ u32 changed = 0;
+ int i;
+
+ BUILD_BUG_ON(NUM_TM_CHANGED >= 32);
+
+ if (new_state == MT76_TM_STATE_OFF ||
+ td->state == MT76_TM_STATE_OFF)
+ return 0;
+
+ if (td->tx_antenna_mask & ~dev->phy.chainmask)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(tm_change_map); i++) {
+ if (tb[tm_change_map[i]])
+ changed |= BIT(i);
+ }
+
+ mt7615_tm_update_params(dev, changed);
+
+ return 0;
+}
+
+static int
+mt7615_tm_dump_stats(struct mt76_dev *mdev, struct sk_buff *msg)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ void *rx, *rssi;
+ int i;
+
+ rx = nla_nest_start(msg, MT76_TM_STATS_ATTR_LAST_RX);
+ if (!rx)
+ return -ENOMEM;
+
+ if (nla_put_s32(msg, MT76_TM_RX_ATTR_FREQ_OFFSET, dev->test.last_freq_offset) ||
+ nla_put_s32(msg, MT76_TM_RX_ATTR_IB_RSSI, dev->test.last_ib_rssi) ||
+ nla_put_s32(msg, MT76_TM_RX_ATTR_WB_RSSI, dev->test.last_wb_rssi))
+ return -ENOMEM;
+
+ rssi = nla_nest_start(msg, MT76_TM_RX_ATTR_RCPI);
+ if (!rssi)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(dev->test.last_rcpi); i++)
+ if (nla_put_u8(msg, i, dev->test.last_rcpi[i]))
+ return -ENOMEM;
+
+ nla_nest_end(msg, rssi);
+
+ nla_nest_end(msg, rx);
+
+ return 0;
+}
+
+const struct mt76_testmode_ops mt7615_testmode_ops = {
+ .set_state = mt7615_tm_set_state,
+ .set_params = mt7615_tm_set_params,
+ .dump_stats = mt7615_tm_dump_stats,
+};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/trace.c b/drivers/net/wireless/mediatek/mt76/mt7615/trace.c
new file mode 100644
index 000000000..6c02d5aff
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/trace.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2019 Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "mt7615_trace.h"
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
new file mode 100644
index 000000000..f0ad83af9
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Felix Fietkau <nbd@nbd.name>
+ * Lorenzo Bianconi <lorenzo@kernel.org>
+ * Sean Wang <sean.wang@mediatek.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include "mt7615.h"
+#include "mac.h"
+#include "mcu.h"
+#include "regs.h"
+
+static const struct usb_device_id mt7615_device_table[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0e8d, 0x7663, 0xff, 0xff, 0xff) },
+ { },
+};
+
+static void mt7663u_stop(struct ieee80211_hw *hw)
+{
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ struct mt7615_dev *dev = hw->priv;
+
+ clear_bit(MT76_STATE_RUNNING, &dev->mphy.state);
+ del_timer_sync(&phy->roc_timer);
+ cancel_work_sync(&phy->roc_work);
+ cancel_delayed_work_sync(&phy->scan_work);
+ cancel_delayed_work_sync(&phy->mac_work);
+ mt76u_stop_tx(&dev->mt76);
+}
+
+static void mt7663u_cleanup(struct mt7615_dev *dev)
+{
+ clear_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
+ mt76u_queues_deinit(&dev->mt76);
+}
+
+static void mt7663u_init_work(struct work_struct *work)
+{
+ struct mt7615_dev *dev;
+
+ dev = container_of(work, struct mt7615_dev, mcu_work);
+ if (mt7663u_mcu_init(dev))
+ return;
+
+ mt7615_mcu_set_eeprom(dev);
+ mt7615_mac_init(dev);
+ mt7615_phy_init(dev);
+ mt7615_mcu_del_wtbl_all(dev);
+ mt7615_check_offload_capability(dev);
+}
+
+static int mt7663u_probe(struct usb_interface *usb_intf,
+ const struct usb_device_id *id)
+{
+ static const struct mt76_driver_ops drv_ops = {
+ .txwi_size = MT_USB_TXD_SIZE,
+ .drv_flags = MT_DRV_RX_DMA_HDR | MT_DRV_HW_MGMT_TXQ,
+ .tx_prepare_skb = mt7663_usb_sdio_tx_prepare_skb,
+ .tx_complete_skb = mt7663_usb_sdio_tx_complete_skb,
+ .tx_status_data = mt7663_usb_sdio_tx_status_data,
+ .rx_skb = mt7615_queue_rx_skb,
+ .sta_ps = mt7615_sta_ps,
+ .sta_add = mt7615_mac_sta_add,
+ .sta_remove = mt7615_mac_sta_remove,
+ .update_survey = mt7615_update_channel,
+ };
+ struct usb_device *udev = interface_to_usbdev(usb_intf);
+ struct ieee80211_ops *ops;
+ struct mt7615_dev *dev;
+ struct mt76_dev *mdev;
+ int ret;
+
+ ops = devm_kmemdup(&usb_intf->dev, &mt7615_ops, sizeof(mt7615_ops),
+ GFP_KERNEL);
+ if (!ops)
+ return -ENOMEM;
+
+ ops->stop = mt7663u_stop;
+
+ mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), ops, &drv_ops);
+ if (!mdev)
+ return -ENOMEM;
+
+ dev = container_of(mdev, struct mt7615_dev, mt76);
+ udev = usb_get_dev(udev);
+ usb_reset_device(udev);
+
+ usb_set_intfdata(usb_intf, dev);
+
+ INIT_WORK(&dev->mcu_work, mt7663u_init_work);
+ dev->reg_map = mt7663_usb_sdio_reg_map;
+ dev->ops = ops;
+ ret = mt76u_init(mdev, usb_intf, true);
+ if (ret < 0)
+ goto error;
+
+ mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
+ (mt76_rr(dev, MT_HW_REV) & 0xff);
+ dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+
+ if (mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_PWR_ON,
+ FW_STATE_PWR_ON << 1, 500)) {
+ dev_dbg(dev->mt76.dev, "Usb device already powered on\n");
+ set_bit(MT76_STATE_POWER_OFF, &dev->mphy.state);
+ goto alloc_queues;
+ }
+
+ ret = mt76u_vendor_request(&dev->mt76, MT_VEND_POWER_ON,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ 0x0, 0x1, NULL, 0);
+ if (ret)
+ goto error;
+
+ if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_PWR_ON,
+ FW_STATE_PWR_ON << 1, 500)) {
+ dev_err(dev->mt76.dev, "Timeout for power on\n");
+ ret = -EIO;
+ goto error;
+ }
+
+alloc_queues:
+ ret = mt76u_alloc_mcu_queue(&dev->mt76);
+ if (ret)
+ goto error_free_q;
+
+ ret = mt76u_alloc_queues(&dev->mt76);
+ if (ret)
+ goto error_free_q;
+
+ ret = mt7663_usb_sdio_register_device(dev);
+ if (ret)
+ goto error_free_q;
+
+ return 0;
+
+error_free_q:
+ mt76u_queues_deinit(&dev->mt76);
+error:
+ usb_set_intfdata(usb_intf, NULL);
+ usb_put_dev(interface_to_usbdev(usb_intf));
+
+ mt76_free_device(&dev->mt76);
+
+ return ret;
+}
+
+static void mt7663u_disconnect(struct usb_interface *usb_intf)
+{
+ struct mt7615_dev *dev = usb_get_intfdata(usb_intf);
+
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
+ return;
+
+ ieee80211_unregister_hw(dev->mt76.hw);
+ mt7663u_cleanup(dev);
+
+ usb_set_intfdata(usb_intf, NULL);
+ usb_put_dev(interface_to_usbdev(usb_intf));
+
+ mt76_free_device(&dev->mt76);
+}
+
+#ifdef CONFIG_PM
+static int mt7663u_suspend(struct usb_interface *intf, pm_message_t state)
+{
+ struct mt7615_dev *dev = usb_get_intfdata(intf);
+
+ if (!test_bit(MT76_STATE_SUSPEND, &dev->mphy.state) &&
+ mt7615_firmware_offload(dev)) {
+ int err;
+
+ err = mt7615_mcu_set_hif_suspend(dev, true);
+ if (err < 0)
+ return err;
+ }
+
+ mt76u_stop_rx(&dev->mt76);
+ mt76u_stop_tx(&dev->mt76);
+
+ return 0;
+}
+
+static int mt7663u_resume(struct usb_interface *intf)
+{
+ struct mt7615_dev *dev = usb_get_intfdata(intf);
+ int err;
+
+ err = mt76u_vendor_request(&dev->mt76, MT_VEND_FEATURE_SET,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ 0x5, 0x0, NULL, 0);
+ if (err)
+ return err;
+
+ err = mt76u_resume_rx(&dev->mt76);
+ if (err < 0)
+ return err;
+
+ if (!test_bit(MT76_STATE_SUSPEND, &dev->mphy.state) &&
+ mt7615_firmware_offload(dev))
+ err = mt7615_mcu_set_hif_suspend(dev, false);
+
+ return err;
+}
+#endif /* CONFIG_PM */
+
+MODULE_DEVICE_TABLE(usb, mt7615_device_table);
+MODULE_FIRMWARE(MT7663_OFFLOAD_FIRMWARE_N9);
+MODULE_FIRMWARE(MT7663_OFFLOAD_ROM_PATCH);
+MODULE_FIRMWARE(MT7663_FIRMWARE_N9);
+MODULE_FIRMWARE(MT7663_ROM_PATCH);
+
+static struct usb_driver mt7663u_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mt7615_device_table,
+ .probe = mt7663u_probe,
+ .disconnect = mt7663u_disconnect,
+#ifdef CONFIG_PM
+ .suspend = mt7663u_suspend,
+ .resume = mt7663u_resume,
+ .reset_resume = mt7663u_resume,
+#endif /* CONFIG_PM */
+ .soft_unbind = 1,
+ .disable_hub_initiated_lpm = 1,
+};
+module_usb_driver(mt7663u_driver);
+
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
new file mode 100644
index 000000000..4d8be366a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Felix Fietkau <nbd@nbd.name>
+ * Lorenzo Bianconi <lorenzo@kernel.org>
+ * Sean Wang <sean.wang@mediatek.com>
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "mt7615.h"
+#include "mac.h"
+#include "mcu.h"
+#include "regs.h"
+
+static int
+mt7663u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
+ int cmd, bool wait_resp)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ int ret, seq, ep, len, pad;
+
+ mutex_lock(&mdev->mcu.mutex);
+
+ mt7615_mcu_fill_msg(dev, skb, cmd, &seq);
+ if (cmd != MCU_CMD_FW_SCATTER)
+ ep = MT_EP_OUT_INBAND_CMD;
+ else
+ ep = MT_EP_OUT_AC_BE;
+
+ len = skb->len;
+ put_unaligned_le32(len, skb_push(skb, sizeof(len)));
+ pad = round_up(skb->len, 4) + 4 - skb->len;
+ ret = mt76_skb_adjust_pad(skb, pad);
+ if (ret < 0)
+ goto out;
+
+ ret = mt76u_bulk_msg(&dev->mt76, skb->data, skb->len, NULL,
+ 1000, ep);
+ if (ret < 0)
+ goto out;
+
+ if (wait_resp)
+ ret = mt7615_mcu_wait_response(dev, cmd, seq);
+
+out:
+ mutex_unlock(&mdev->mcu.mutex);
+ dev_kfree_skb(skb);
+
+ return ret;
+}
+
+int mt7663u_mcu_init(struct mt7615_dev *dev)
+{
+ static const struct mt76_mcu_ops mt7663u_mcu_ops = {
+ .headroom = MT_USB_HDR_SIZE + sizeof(struct mt7615_mcu_txd),
+ .tailroom = MT_USB_TAIL_SIZE,
+ .mcu_skb_send_msg = mt7663u_mcu_send_message,
+ .mcu_send_msg = mt7615_mcu_msg_send,
+ .mcu_restart = mt7615_mcu_restart,
+ };
+ int ret;
+
+ dev->mt76.mcu_ops = &mt7663u_mcu_ops;
+
+ /* usb does not support runtime-pm */
+ clear_bit(MT76_STATE_PM, &dev->mphy.state);
+ mt76_set(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN);
+
+ if (test_and_clear_bit(MT76_STATE_POWER_OFF, &dev->mphy.state)) {
+ mt7615_mcu_restart(&dev->mt76);
+ if (!mt76_poll_msec(dev, MT_CONN_ON_MISC,
+ MT_TOP_MISC2_FW_PWR_ON, 0, 500))
+ return -EIO;
+
+ ret = mt76u_vendor_request(&dev->mt76, MT_VEND_POWER_ON,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ 0x0, 0x1, NULL, 0);
+ if (ret)
+ return ret;
+
+ if (!mt76_poll_msec(dev, MT_CONN_ON_MISC,
+ MT_TOP_MISC2_FW_PWR_ON,
+ FW_STATE_PWR_ON << 1, 500)) {
+ dev_err(dev->mt76.dev, "Timeout for power on\n");
+ return -EIO;
+ }
+ }
+
+ ret = __mt7663_load_firmware(dev);
+ if (ret)
+ return ret;
+
+ mt76_clear(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN);
+ set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
new file mode 100644
index 000000000..18082b4ce
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
@@ -0,0 +1,402 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc.
+ *
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ * Sean Wang <sean.wang@mediatek.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include "mt7615.h"
+#include "mac.h"
+#include "mcu.h"
+#include "regs.h"
+
+const u32 mt7663_usb_sdio_reg_map[] = {
+ [MT_TOP_CFG_BASE] = 0x80020000,
+ [MT_HW_BASE] = 0x80000000,
+ [MT_DMA_SHDL_BASE] = 0x5000a000,
+ [MT_HIF_BASE] = 0x50000000,
+ [MT_CSR_BASE] = 0x40000000,
+ [MT_EFUSE_ADDR_BASE] = 0x78011000,
+ [MT_TOP_MISC_BASE] = 0x81020000,
+ [MT_PLE_BASE] = 0x82060000,
+ [MT_PSE_BASE] = 0x82068000,
+ [MT_PP_BASE] = 0x8206c000,
+ [MT_WTBL_BASE_ADDR] = 0x820e0000,
+ [MT_CFG_BASE] = 0x820f0000,
+ [MT_AGG_BASE] = 0x820f2000,
+ [MT_ARB_BASE] = 0x820f3000,
+ [MT_TMAC_BASE] = 0x820f4000,
+ [MT_RMAC_BASE] = 0x820f5000,
+ [MT_DMA_BASE] = 0x820f7000,
+ [MT_PF_BASE] = 0x820f8000,
+ [MT_WTBL_BASE_ON] = 0x820f9000,
+ [MT_WTBL_BASE_OFF] = 0x820f9800,
+ [MT_LPON_BASE] = 0x820fb000,
+ [MT_MIB_BASE] = 0x820fd000,
+};
+EXPORT_SYMBOL_GPL(mt7663_usb_sdio_reg_map);
+
+static void
+mt7663_usb_sdio_write_txwi(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+ enum mt76_txq_id qid, struct ieee80211_sta *sta,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_key_conf *key = info->control.hw_key;
+ __le32 *txwi;
+ int pid;
+
+ if (!wcid)
+ wcid = &dev->mt76.global_wcid;
+
+ pid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
+
+ txwi = (__le32 *)(skb->data - MT_USB_TXD_SIZE);
+ memset(txwi, 0, MT_USB_TXD_SIZE);
+ mt7615_mac_write_txwi(dev, txwi, skb, wcid, sta, pid, key, false);
+ skb_push(skb, MT_USB_TXD_SIZE);
+}
+
+static int
+mt7663_usb_sdio_set_rates(struct mt7615_dev *dev,
+ struct mt7615_wtbl_desc *wd)
+{
+ struct mt7615_rate_desc *rate = &wd->rate;
+ struct mt7615_sta *sta = wd->sta;
+ u32 w5, w27, addr, val;
+
+ lockdep_assert_held(&dev->mt76.mutex);
+
+ if (!sta)
+ return -EINVAL;
+
+ if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
+ return -ETIMEDOUT;
+
+ addr = mt7615_mac_wtbl_addr(dev, sta->wcid.idx);
+
+ w27 = mt76_rr(dev, addr + 27 * 4);
+ w27 &= ~MT_WTBL_W27_CC_BW_SEL;
+ w27 |= FIELD_PREP(MT_WTBL_W27_CC_BW_SEL, rate->bw);
+
+ w5 = mt76_rr(dev, addr + 5 * 4);
+ w5 &= ~(MT_WTBL_W5_BW_CAP | MT_WTBL_W5_CHANGE_BW_RATE |
+ MT_WTBL_W5_MPDU_OK_COUNT |
+ MT_WTBL_W5_MPDU_FAIL_COUNT |
+ MT_WTBL_W5_RATE_IDX);
+ w5 |= FIELD_PREP(MT_WTBL_W5_BW_CAP, rate->bw) |
+ FIELD_PREP(MT_WTBL_W5_CHANGE_BW_RATE,
+ rate->bw_idx ? rate->bw_idx - 1 : 7);
+
+ mt76_wr(dev, MT_WTBL_RIUCR0, w5);
+
+ mt76_wr(dev, MT_WTBL_RIUCR1,
+ FIELD_PREP(MT_WTBL_RIUCR1_RATE0, rate->probe_val) |
+ FIELD_PREP(MT_WTBL_RIUCR1_RATE1, rate->val[0]) |
+ FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, rate->val[1]));
+
+ mt76_wr(dev, MT_WTBL_RIUCR2,
+ FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, rate->val[1] >> 8) |
+ FIELD_PREP(MT_WTBL_RIUCR2_RATE3, rate->val[1]) |
+ FIELD_PREP(MT_WTBL_RIUCR2_RATE4, rate->val[2]) |
+ FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, rate->val[2]));
+
+ mt76_wr(dev, MT_WTBL_RIUCR3,
+ FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, rate->val[2] >> 4) |
+ FIELD_PREP(MT_WTBL_RIUCR3_RATE6, rate->val[3]) |
+ FIELD_PREP(MT_WTBL_RIUCR3_RATE7, rate->val[3]));
+
+ mt76_wr(dev, MT_WTBL_UPDATE,
+ FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, sta->wcid.idx) |
+ MT_WTBL_UPDATE_RATE_UPDATE |
+ MT_WTBL_UPDATE_TX_COUNT_CLEAR);
+
+ mt76_wr(dev, addr + 27 * 4, w27);
+
+ sta->rate_probe = sta->rateset[rate->rateset].probe_rate.idx != -1;
+
+ mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
+ val = mt76_rr(dev, MT_LPON_UTTR0);
+ sta->rate_set_tsf = (val & ~BIT(0)) | rate->rateset;
+
+ if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET))
+ mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
+
+ sta->rate_count = 2 * MT7615_RATE_RETRY * sta->n_rates;
+ sta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
+
+ return 0;
+}
+
+static int
+mt7663_usb_sdio_set_key(struct mt7615_dev *dev,
+ struct mt7615_wtbl_desc *wd)
+{
+ struct mt7615_key_desc *key = &wd->key;
+ struct mt7615_sta *sta = wd->sta;
+ enum mt7615_cipher_type cipher;
+ struct mt76_wcid *wcid;
+ int err;
+
+ lockdep_assert_held(&dev->mt76.mutex);
+
+ if (!sta) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ cipher = mt7615_mac_get_cipher(key->cipher);
+ if (cipher == MT_CIPHER_NONE) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ wcid = &wd->sta->wcid;
+
+ mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, key->cmd);
+ err = mt7615_mac_wtbl_update_key(dev, wcid, key->key, key->keylen,
+ cipher, key->cmd);
+ if (err < 0)
+ goto out;
+
+ err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, key->keyidx,
+ key->cmd);
+ if (err < 0)
+ goto out;
+
+ if (key->cmd == SET_KEY)
+ wcid->cipher |= BIT(cipher);
+ else
+ wcid->cipher &= ~BIT(cipher);
+out:
+ kfree(key->key);
+
+ return err;
+}
+
+void mt7663_usb_sdio_wtbl_work(struct work_struct *work)
+{
+ struct mt7615_wtbl_desc *wd, *wd_next;
+ struct list_head wd_list;
+ struct mt7615_dev *dev;
+
+ dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev,
+ wtbl_work);
+
+ INIT_LIST_HEAD(&wd_list);
+ spin_lock_bh(&dev->mt76.lock);
+ list_splice_init(&dev->wd_head, &wd_list);
+ spin_unlock_bh(&dev->mt76.lock);
+
+ list_for_each_entry_safe(wd, wd_next, &wd_list, node) {
+ list_del(&wd->node);
+
+ mt7615_mutex_acquire(dev);
+
+ switch (wd->type) {
+ case MT7615_WTBL_RATE_DESC:
+ mt7663_usb_sdio_set_rates(dev, wd);
+ break;
+ case MT7615_WTBL_KEY_DESC:
+ mt7663_usb_sdio_set_key(dev, wd);
+ break;
+ }
+
+ mt7615_mutex_release(dev);
+
+ kfree(wd);
+ }
+}
+EXPORT_SYMBOL_GPL(mt7663_usb_sdio_wtbl_work);
+
+bool mt7663_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+
+ mt7615_mutex_acquire(dev);
+ mt7615_mac_sta_poll(dev);
+ mt7615_mutex_release(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt7663_usb_sdio_tx_status_data);
+
+void mt7663_usb_sdio_tx_complete_skb(struct mt76_dev *mdev,
+ struct mt76_queue_entry *e)
+{
+ unsigned int headroom = MT_USB_TXD_SIZE;
+
+ if (mt76_is_usb(mdev))
+ headroom += MT_USB_HDR_SIZE;
+ skb_pull(e->skb, headroom);
+
+ mt76_tx_complete_skb(mdev, e->wcid, e->skb);
+}
+EXPORT_SYMBOL_GPL(mt7663_usb_sdio_tx_complete_skb);
+
+int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ struct sk_buff *skb = tx_info->skb;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct mt7615_sta *msta;
+ int pad;
+
+ msta = wcid ? container_of(wcid, struct mt7615_sta, wcid) : NULL;
+ if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) &&
+ msta && !msta->rate_probe) {
+ /* request to configure sampling rate */
+ spin_lock_bh(&dev->mt76.lock);
+ mt7615_mac_set_rates(&dev->phy, msta, &info->control.rates[0],
+ msta->rates);
+ spin_unlock_bh(&dev->mt76.lock);
+ }
+
+ mt7663_usb_sdio_write_txwi(dev, wcid, qid, sta, skb);
+ if (mt76_is_usb(mdev)) {
+ u32 len = skb->len;
+
+ put_unaligned_le32(len, skb_push(skb, sizeof(len)));
+ pad = round_up(skb->len, 4) + 4 - skb->len;
+ } else {
+ pad = round_up(skb->len, 4) - skb->len;
+ }
+
+ return mt76_skb_adjust_pad(skb, pad);
+}
+EXPORT_SYMBOL_GPL(mt7663_usb_sdio_tx_prepare_skb);
+
+static int mt7663u_dma_sched_init(struct mt7615_dev *dev)
+{
+ int i;
+
+ mt76_rmw(dev, MT_DMA_SHDL(MT_DMASHDL_PKT_MAX_SIZE),
+ MT_DMASHDL_PKT_MAX_SIZE_PLE | MT_DMASHDL_PKT_MAX_SIZE_PSE,
+ FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PLE, 1) |
+ FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PSE, 8));
+
+ /* disable refill group 5 - group 15 and raise group 2
+ * and 3 as high priority.
+ */
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_REFILL), 0xffe00006);
+ mt76_clear(dev, MT_DMA_SHDL(MT_DMASHDL_PAGE), BIT(16));
+
+ for (i = 0; i < 5; i++)
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_GROUP_QUOTA(i)),
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x3) |
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0x1ff));
+
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(0)), 0x42104210);
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(1)), 0x42104210);
+
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(2)), 0x4444);
+
+ /* group priority from high to low:
+ * 15 (cmd groups) > 4 > 3 > 2 > 1 > 0.
+ */
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_SCHED_SET0), 0x6501234f);
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_SCHED_SET1), 0xedcba987);
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_OPTIONAL), 0x7004801c);
+
+ mt76_wr(dev, MT_UDMA_WLCFG_1,
+ FIELD_PREP(MT_WL_TX_TMOUT_LMT, 80000) |
+ FIELD_PREP(MT_WL_RX_AGG_PKT_LMT, 1));
+
+ /* setup UDMA Rx Flush */
+ mt76_clear(dev, MT_UDMA_WLCFG_0, MT_WL_RX_FLUSH);
+ /* hif reset */
+ mt76_set(dev, MT_HIF_RST, MT_HIF_LOGIC_RST_N);
+
+ mt76_set(dev, MT_UDMA_WLCFG_0,
+ MT_WL_RX_AGG_EN | MT_WL_RX_EN | MT_WL_TX_EN |
+ MT_WL_RX_MPSZ_PAD0 | MT_TICK_1US_EN |
+ MT_WL_TX_TMOUT_FUNC_EN);
+ mt76_rmw(dev, MT_UDMA_WLCFG_0, MT_WL_RX_AGG_LMT | MT_WL_RX_AGG_TO,
+ FIELD_PREP(MT_WL_RX_AGG_LMT, 32) |
+ FIELD_PREP(MT_WL_RX_AGG_TO, 100));
+
+ return 0;
+}
+
+static int mt7663_usb_sdio_init_hardware(struct mt7615_dev *dev)
+{
+ int ret, idx;
+
+ ret = mt7615_eeprom_init(dev, MT_EFUSE_BASE);
+ if (ret < 0)
+ return ret;
+
+ if (mt76_is_usb(&dev->mt76)) {
+ ret = mt7663u_dma_sched_init(dev);
+ if (ret)
+ return ret;
+ }
+
+ set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
+
+ /* Beacon and mgmt frames should occupy wcid 0 */
+ idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7615_WTBL_STA - 1);
+ if (idx)
+ return -ENOSPC;
+
+ dev->mt76.global_wcid.idx = idx;
+ dev->mt76.global_wcid.hw_key_idx = -1;
+ rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid);
+
+ return 0;
+}
+
+int mt7663_usb_sdio_register_device(struct mt7615_dev *dev)
+{
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ int err;
+
+ INIT_WORK(&dev->wtbl_work, mt7663_usb_sdio_wtbl_work);
+ INIT_LIST_HEAD(&dev->wd_head);
+ mt7615_init_device(dev);
+
+ err = mt7663_usb_sdio_init_hardware(dev);
+ if (err)
+ return err;
+
+ hw->extra_tx_headroom += MT_USB_TXD_SIZE;
+ if (mt76_is_usb(&dev->mt76)) {
+ hw->extra_tx_headroom += MT_USB_HDR_SIZE;
+ /* check hw sg support in order to enable AMSDU */
+ if (dev->mt76.usb.sg_en)
+ hw->max_tx_fragments = MT_HW_TXP_MAX_BUF_NUM;
+ else
+ hw->max_tx_fragments = 1;
+ }
+
+ err = mt76_register_device(&dev->mt76, true, mt7615_rates,
+ ARRAY_SIZE(mt7615_rates));
+ if (err < 0)
+ return err;
+
+ if (!dev->mt76.usb.sg_en) {
+ struct ieee80211_sta_vht_cap *vht_cap;
+
+ /* decrease max A-MSDU size if SG is not supported */
+ vht_cap = &dev->mphy.sband_5g.sband.vht_cap;
+ vht_cap->cap &= ~IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
+ }
+
+ ieee80211_queue_work(hw, &dev->mcu_work);
+ mt7615_init_txpower(dev, &dev->mphy.sband_2g.sband);
+ mt7615_init_txpower(dev, &dev->mphy.sband_5g.sband);
+
+ return mt7615_init_debugfs(dev);
+}
+EXPORT_SYMBOL_GPL(mt7663_usb_sdio_register_device);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig b/drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig
new file mode 100644
index 000000000..7c88ed8b8
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config MT76x0_COMMON
+ tristate
+ select MT76x02_LIB
+
+config MT76x0U
+ tristate "MediaTek MT76x0U (USB) support"
+ select MT76x0_COMMON
+ select MT76x02_USB
+ depends on MAC80211
+ depends on USB
+ help
+ This adds support for MT7610U-based wireless USB 2.0 dongles,
+ which comply with IEEE 802.11ac standards and support 1x1
+ 433Mbps PHY rate.
+
+ To compile this driver as a module, choose M here.
+
+config MT76x0E
+ tristate "MediaTek MT76x0E (PCIe) support"
+ select MT76x0_COMMON
+ depends on MAC80211
+ depends on PCI
+ help
+ This adds support for MT7610/MT7630-based wireless PCIe devices,
+ which comply with IEEE 802.11ac standards and support 1x1
+ 433Mbps PHY rate.
+
+ To compile this driver as a module, choose M here.
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile b/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile
new file mode 100644
index 000000000..8dcfb4cb4
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_MT76x0U) += mt76x0u.o
+obj-$(CONFIG_MT76x0E) += mt76x0e.o
+obj-$(CONFIG_MT76x0_COMMON) += mt76x0-common.o
+
+mt76x0-common-y := init.o main.o eeprom.o phy.o
+
+mt76x0u-y := usb.o usb_mcu.o
+mt76x0e-y := pci.o pci_mcu.o
+
+# ccflags-y := -DDEBUG
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
new file mode 100644
index 000000000..ebf4c9653
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
@@ -0,0 +1,357 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+#include "mt76x0.h"
+#include "eeprom.h"
+#include "../mt76x02_phy.h"
+
+#define MT_MAP_READS DIV_ROUND_UP(MT_EFUSE_USAGE_MAP_SIZE, 16)
+static int
+mt76x0_efuse_physical_size_check(struct mt76x02_dev *dev)
+{
+ u8 data[MT_MAP_READS * 16];
+ int ret, i;
+ u32 start = 0, end = 0, cnt_free;
+
+ ret = mt76x02_get_efuse_data(dev, MT_EE_USAGE_MAP_START, data,
+ sizeof(data), MT_EE_PHYSICAL_READ);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < MT_EFUSE_USAGE_MAP_SIZE; i++)
+ if (!data[i]) {
+ if (!start)
+ start = MT_EE_USAGE_MAP_START + i;
+ end = MT_EE_USAGE_MAP_START + i;
+ }
+ cnt_free = end - start + 1;
+
+ if (MT_EFUSE_USAGE_MAP_SIZE - cnt_free < 5) {
+ dev_err(dev->mt76.dev,
+ "driver does not support default EEPROM\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void mt76x0_set_chip_cap(struct mt76x02_dev *dev)
+{
+ u16 nic_conf0 = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0);
+ u16 nic_conf1 = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
+
+ mt76x02_eeprom_parse_hw_cap(dev);
+ dev_dbg(dev->mt76.dev, "2GHz %d 5GHz %d\n",
+ dev->mphy.cap.has_2ghz, dev->mphy.cap.has_5ghz);
+
+ if (dev->no_2ghz) {
+ dev->mphy.cap.has_2ghz = false;
+ dev_dbg(dev->mt76.dev, "mask out 2GHz support\n");
+ }
+
+ if (is_mt7630(dev)) {
+ dev->mphy.cap.has_5ghz = false;
+ dev_dbg(dev->mt76.dev, "mask out 5GHz support\n");
+ }
+
+ if (!mt76x02_field_valid(nic_conf1 & 0xff))
+ nic_conf1 &= 0xff00;
+
+ if (nic_conf1 & MT_EE_NIC_CONF_1_HW_RF_CTRL)
+ dev_err(dev->mt76.dev,
+ "driver does not support HW RF ctrl\n");
+
+ if (!mt76x02_field_valid(nic_conf0 >> 8))
+ return;
+
+ if (FIELD_GET(MT_EE_NIC_CONF_0_RX_PATH, nic_conf0) > 1 ||
+ FIELD_GET(MT_EE_NIC_CONF_0_TX_PATH, nic_conf0) > 1)
+ dev_err(dev->mt76.dev, "invalid tx-rx stream\n");
+}
+
+static void mt76x0_set_temp_offset(struct mt76x02_dev *dev)
+{
+ u8 val;
+
+ val = mt76x02_eeprom_get(dev, MT_EE_2G_TARGET_POWER) >> 8;
+ if (mt76x02_field_valid(val))
+ dev->cal.rx.temp_offset = mt76x02_sign_extend(val, 8);
+ else
+ dev->cal.rx.temp_offset = -10;
+}
+
+static void mt76x0_set_freq_offset(struct mt76x02_dev *dev)
+{
+ struct mt76x02_rx_freq_cal *caldata = &dev->cal.rx;
+ u8 val;
+
+ val = mt76x02_eeprom_get(dev, MT_EE_FREQ_OFFSET);
+ if (!mt76x02_field_valid(val))
+ val = 0;
+ caldata->freq_offset = val;
+
+ val = mt76x02_eeprom_get(dev, MT_EE_TSSI_BOUND4) >> 8;
+ if (!mt76x02_field_valid(val))
+ val = 0;
+
+ caldata->freq_offset -= mt76x02_sign_extend(val, 8);
+}
+
+void mt76x0_read_rx_gain(struct mt76x02_dev *dev)
+{
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
+ struct mt76x02_rx_freq_cal *caldata = &dev->cal.rx;
+ s8 val, lna_5g[3], lna_2g;
+ u16 rssi_offset;
+ int i;
+
+ mt76x02_get_rx_gain(dev, chan->band, &rssi_offset, &lna_2g, lna_5g);
+ caldata->lna_gain = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan);
+
+ for (i = 0; i < ARRAY_SIZE(caldata->rssi_offset); i++) {
+ val = rssi_offset >> (8 * i);
+ if (val < -10 || val > 10)
+ val = 0;
+
+ caldata->rssi_offset[i] = val;
+ }
+}
+
+static s8 mt76x0_get_delta(struct mt76x02_dev *dev)
+{
+ struct cfg80211_chan_def *chandef = &dev->mphy.chandef;
+ u8 val;
+
+ if (chandef->width == NL80211_CHAN_WIDTH_80) {
+ val = mt76x02_eeprom_get(dev, MT_EE_5G_TARGET_POWER) >> 8;
+ } else if (chandef->width == NL80211_CHAN_WIDTH_40) {
+ u16 data;
+
+ data = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW40);
+ if (chandef->chan->band == NL80211_BAND_5GHZ)
+ val = data >> 8;
+ else
+ val = data;
+ } else {
+ return 0;
+ }
+
+ return mt76x02_rate_power_val(val);
+}
+
+void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev,
+ struct ieee80211_channel *chan,
+ struct mt76_rate_power *t)
+{
+ bool is_2ghz = chan->band == NL80211_BAND_2GHZ;
+ u16 val, addr;
+ s8 delta;
+
+ memset(t, 0, sizeof(*t));
+
+ /* cck 1M, 2M, 5.5M, 11M */
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_BYRATE_BASE);
+ t->cck[0] = t->cck[1] = s6_to_s8(val);
+ t->cck[2] = t->cck[3] = s6_to_s8(val >> 8);
+
+ /* ofdm 6M, 9M, 12M, 18M */
+ addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 2 : 0x120;
+ val = mt76x02_eeprom_get(dev, addr);
+ t->ofdm[0] = t->ofdm[1] = s6_to_s8(val);
+ t->ofdm[2] = t->ofdm[3] = s6_to_s8(val >> 8);
+
+ /* ofdm 24M, 36M, 48M, 54M */
+ addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 4 : 0x122;
+ val = mt76x02_eeprom_get(dev, addr);
+ t->ofdm[4] = t->ofdm[5] = s6_to_s8(val);
+ t->ofdm[6] = t->ofdm[7] = s6_to_s8(val >> 8);
+
+ /* ht-vht mcs 1ss 0, 1, 2, 3 */
+ addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 6 : 0x124;
+ val = mt76x02_eeprom_get(dev, addr);
+ t->ht[0] = t->ht[1] = t->vht[0] = t->vht[1] = s6_to_s8(val);
+ t->ht[2] = t->ht[3] = t->vht[2] = t->vht[3] = s6_to_s8(val >> 8);
+
+ /* ht-vht mcs 1ss 4, 5, 6 */
+ addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 8 : 0x126;
+ val = mt76x02_eeprom_get(dev, addr);
+ t->ht[4] = t->ht[5] = t->vht[4] = t->vht[5] = s6_to_s8(val);
+ t->ht[6] = t->ht[7] = t->vht[6] = t->vht[7] = s6_to_s8(val >> 8);
+
+ /* ht-vht mcs 1ss 0, 1, 2, 3 stbc */
+ addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 14 : 0xec;
+ val = mt76x02_eeprom_get(dev, addr);
+ t->stbc[0] = t->stbc[1] = s6_to_s8(val);
+ t->stbc[2] = t->stbc[3] = s6_to_s8(val >> 8);
+
+ /* ht-vht mcs 1ss 4, 5, 6 stbc */
+ addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 16 : 0xee;
+ val = mt76x02_eeprom_get(dev, addr);
+ t->stbc[4] = t->stbc[5] = s6_to_s8(val);
+ t->stbc[6] = t->stbc[7] = s6_to_s8(val >> 8);
+
+ /* vht mcs 8, 9 5GHz */
+ val = mt76x02_eeprom_get(dev, 0x132);
+ t->vht[8] = s6_to_s8(val);
+ t->vht[9] = s6_to_s8(val >> 8);
+
+ delta = mt76x0_tssi_enabled(dev) ? 0 : mt76x0_get_delta(dev);
+ mt76x02_add_rate_power_offset(t, delta);
+}
+
+void mt76x0_get_power_info(struct mt76x02_dev *dev,
+ struct ieee80211_channel *chan, s8 *tp)
+{
+ static const struct mt76x0_chan_map {
+ u8 chan;
+ u8 offset;
+ } chan_map[] = {
+ { 2, 0 }, { 4, 2 }, { 6, 4 }, { 8, 6 },
+ { 10, 8 }, { 12, 10 }, { 14, 12 }, { 38, 0 },
+ { 44, 2 }, { 48, 4 }, { 54, 6 }, { 60, 8 },
+ { 64, 10 }, { 102, 12 }, { 108, 14 }, { 112, 16 },
+ { 118, 18 }, { 124, 20 }, { 128, 22 }, { 134, 24 },
+ { 140, 26 }, { 151, 28 }, { 157, 30 }, { 161, 32 },
+ { 167, 34 }, { 171, 36 }, { 175, 38 },
+ };
+ u8 offset, addr;
+ int i, idx = 0;
+ u16 data;
+
+ if (mt76x0_tssi_enabled(dev)) {
+ s8 target_power;
+
+ if (chan->band == NL80211_BAND_5GHZ)
+ data = mt76x02_eeprom_get(dev, MT_EE_5G_TARGET_POWER);
+ else
+ data = mt76x02_eeprom_get(dev, MT_EE_2G_TARGET_POWER);
+ target_power = (data & 0xff) - dev->mt76.rate_power.ofdm[7];
+ *tp = target_power + mt76x0_get_delta(dev);
+
+ return;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(chan_map); i++) {
+ if (chan->hw_value <= chan_map[i].chan) {
+ idx = (chan->hw_value == chan_map[i].chan);
+ offset = chan_map[i].offset;
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(chan_map))
+ offset = chan_map[0].offset;
+
+ if (chan->band == NL80211_BAND_2GHZ) {
+ addr = MT_EE_TX_POWER_DELTA_BW80 + offset;
+ } else {
+ switch (chan->hw_value) {
+ case 42:
+ offset = 2;
+ break;
+ case 58:
+ offset = 8;
+ break;
+ case 106:
+ offset = 14;
+ break;
+ case 122:
+ offset = 20;
+ break;
+ case 155:
+ offset = 30;
+ break;
+ default:
+ break;
+ }
+ addr = MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE + 2 + offset;
+ }
+
+ data = mt76x02_eeprom_get(dev, addr);
+ *tp = data >> (8 * idx);
+ if (*tp < 0 || *tp > 0x3f)
+ *tp = 5;
+}
+
+static int mt76x0_check_eeprom(struct mt76x02_dev *dev)
+{
+ u16 val;
+
+ val = get_unaligned_le16(dev->mt76.eeprom.data);
+ if (!val)
+ val = get_unaligned_le16(dev->mt76.eeprom.data +
+ MT_EE_PCI_ID);
+
+ switch (val) {
+ case 0x7650:
+ case 0x7610:
+ return 0;
+ default:
+ dev_err(dev->mt76.dev, "EEPROM data check failed: %04x\n",
+ val);
+ return -EINVAL;
+ }
+}
+
+static int mt76x0_load_eeprom(struct mt76x02_dev *dev)
+{
+ int found;
+
+ found = mt76_eeprom_init(&dev->mt76, MT76X0_EEPROM_SIZE);
+ if (found < 0)
+ return found;
+
+ if (found && !mt76x0_check_eeprom(dev))
+ return 0;
+
+ found = mt76x0_efuse_physical_size_check(dev);
+ if (found < 0)
+ return found;
+
+ return mt76x02_get_efuse_data(dev, 0, dev->mt76.eeprom.data,
+ MT76X0_EEPROM_SIZE, MT_EE_READ);
+}
+
+int mt76x0_eeprom_init(struct mt76x02_dev *dev)
+{
+ u8 version, fae;
+ u16 data;
+ int err;
+
+ err = mt76x0_load_eeprom(dev);
+ if (err < 0)
+ return err;
+
+ data = mt76x02_eeprom_get(dev, MT_EE_VERSION);
+ version = data >> 8;
+ fae = data;
+
+ if (version > MT76X0U_EE_MAX_VER)
+ dev_warn(dev->mt76.dev,
+ "Warning: unsupported EEPROM version %02hhx\n",
+ version);
+ dev_info(dev->mt76.dev, "EEPROM ver:%02hhx fae:%02hhx\n",
+ version, fae);
+
+ memcpy(dev->mt76.macaddr, (u8 *)dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
+ ETH_ALEN);
+ mt76_eeprom_override(&dev->mt76);
+ mt76x02_mac_setaddr(dev, dev->mt76.macaddr);
+
+ mt76x0_set_chip_cap(dev);
+ mt76x0_set_freq_offset(dev);
+ mt76x0_set_temp_offset(dev);
+
+ return 0;
+}
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
new file mode 100644
index 000000000..15540ce8d
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ */
+
+#ifndef __MT76X0U_EEPROM_H
+#define __MT76X0U_EEPROM_H
+
+#include "../mt76x02_eeprom.h"
+
+struct mt76x02_dev;
+
+#define MT76X0U_EE_MAX_VER 0x0c
+#define MT76X0_EEPROM_SIZE 512
+
+int mt76x0_eeprom_init(struct mt76x02_dev *dev);
+void mt76x0_read_rx_gain(struct mt76x02_dev *dev);
+void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev,
+ struct ieee80211_channel *chan,
+ struct mt76_rate_power *t);
+void mt76x0_get_power_info(struct mt76x02_dev *dev,
+ struct ieee80211_channel *chan, s8 *tp);
+
+static inline s8 s6_to_s8(u32 val)
+{
+ s8 ret = val & GENMASK(5, 0);
+
+ if (ret & BIT(5))
+ ret -= BIT(6);
+ return ret;
+}
+
+static inline bool mt76x0_tssi_enabled(struct mt76x02_dev *dev)
+{
+ return (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) &
+ MT_EE_NIC_CONF_1_TX_ALC_EN);
+}
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
new file mode 100644
index 000000000..0bac39bf3
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ */
+
+#include "mt76x0.h"
+#include "eeprom.h"
+#include "mcu.h"
+#include "initvals.h"
+#include "initvals_init.h"
+#include "../mt76x02_phy.h"
+
+static void
+mt76x0_set_wlan_state(struct mt76x02_dev *dev, u32 val, bool enable)
+{
+ u32 mask = MT_CMB_CTRL_XTAL_RDY | MT_CMB_CTRL_PLL_LD;
+
+	/* Note: we don't turn off WLAN_CLK because that makes the device
+	 * stop responding properly on the probe path.
+	 * If anyone (PSM?) ever needs that, the clock handling can be
+	 * brought back and the probe path fixed up.
+	 */
+
+ if (enable)
+ val |= (MT_WLAN_FUN_CTRL_WLAN_EN |
+ MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
+ else
+ val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN);
+
+ mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+
+	/* Note: the vendor driver tries to disable/enable WLAN here and
+	 * retry, but the code that does so is so buggy it must never have
+	 * triggered, so don't bother.
+	 */
+ if (enable && !mt76_poll(dev, MT_CMB_CTRL, mask, mask, 2000))
+ dev_err(dev->mt76.dev, "PLL and XTAL check failed\n");
+}
+
+void mt76x0_chip_onoff(struct mt76x02_dev *dev, bool enable, bool reset)
+{
+ u32 val;
+
+ val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
+
+ if (reset) {
+ val |= MT_WLAN_FUN_CTRL_GPIO_OUT_EN;
+ val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL;
+
+ if (val & MT_WLAN_FUN_CTRL_WLAN_EN) {
+ val |= (MT_WLAN_FUN_CTRL_WLAN_RESET |
+ MT_WLAN_FUN_CTRL_WLAN_RESET_RF);
+ mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+
+ val &= ~(MT_WLAN_FUN_CTRL_WLAN_RESET |
+ MT_WLAN_FUN_CTRL_WLAN_RESET_RF);
+ }
+ }
+
+ mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+
+ mt76x0_set_wlan_state(dev, val, enable);
+}
+EXPORT_SYMBOL_GPL(mt76x0_chip_onoff);
+
+static void mt76x0_reset_csr_bbp(struct mt76x02_dev *dev)
+{
+ mt76_wr(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_RESET_CSR |
+ MT_MAC_SYS_CTRL_RESET_BBP);
+ msleep(200);
+ mt76_clear(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_RESET_CSR |
+ MT_MAC_SYS_CTRL_RESET_BBP);
+}
+
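+/* Write a whole table of register/value pairs through the WLAN MCU
+ * memory map.
+ */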
+#define RANDOM_WRITE(dev, tab) \
+ mt76_wr_rp(dev, MT_MCU_MEMMAP_WLAN, \
+ tab, ARRAY_SIZE(tab))
+
+static int mt76x0_init_bbp(struct mt76x02_dev *dev)
+{
+ int ret, i;
+
+ ret = mt76x0_phy_wait_bbp_ready(dev);
+ if (ret)
+ return ret;
+
+ RANDOM_WRITE(dev, mt76x0_bbp_init_tab);
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_bbp_switch_tab); i++) {
+ const struct mt76x0_bbp_switch_item *item = &mt76x0_bbp_switch_tab[i];
+ const struct mt76_reg_pair *pair = &item->reg_pair;
+
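+		/* Program only the entries valid for the 2.4 GHz band at
+		 * 20 MHz bandwidth as the initial BBP configuration.
+		 */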
+ if (((RF_G_BAND | RF_BW_20) & item->bw_band) == (RF_G_BAND | RF_BW_20))
+ mt76_wr(dev, pair->reg, pair->value);
+ }
+
+ RANDOM_WRITE(dev, mt76x0_dcoc_tab);
+
+ return 0;
+}
+
+static void mt76x0_init_mac_registers(struct mt76x02_dev *dev)
+{
+ RANDOM_WRITE(dev, common_mac_reg_table);
+
+ /* Enable PBF and MAC clock SYS_CTRL[11:10] = 0x3 */
+ RANDOM_WRITE(dev, mt76x0_mac_reg_table);
+
+ /* Release BBP and MAC reset MAC_SYS_CTRL[1:0] = 0x0 */
+ mt76_clear(dev, MT_MAC_SYS_CTRL, 0x3);
+
+ /* Set 0x141C[15:12]=0xF */
+ mt76_set(dev, MT_EXT_CCA_CFG, 0xf000);
+
+ mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);
+
+	/*
+	 * tx_ring 9 is for management frames,
+	 * tx_ring 8 is for in-band command frames.
+	 * WMM_RG0_TXQMA: FCE rule for tx_ring 9
+	 * WMM_RG1_TXQMA: FCE rule for tx_ring 8
+	 */
+ mt76_rmw(dev, MT_WMM_CTRL, 0x3ff, 0x201);
+}
+
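+/* Stop the MAC: wait for the TX queues to drain, disable TX/RX in
+ * MT_MAC_SYS_CTRL, then wait for the RX path to go idle.
+ */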
+void mt76x0_mac_stop(struct mt76x02_dev *dev)
+{
+ int i = 200, ok = 0;
+
+ mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
+
+ /* Page count on TxQ */
+ while (i-- && ((mt76_rr(dev, 0x0438) & 0xffffffff) ||
+ (mt76_rr(dev, 0x0a30) & 0x000000ff) ||
+ (mt76_rr(dev, 0x0a34) & 0x00ff00ff)))
+ msleep(10);
+
+ if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_TX, 0, 1000))
+ dev_warn(dev->mt76.dev, "Warning: MAC TX did not stop!\n");
+
+ mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_RX |
+ MT_MAC_SYS_CTRL_ENABLE_TX);
+
+ /* Page count on RxQ */
+ for (i = 0; i < 200; i++) {
+ if (!(mt76_rr(dev, MT_RXQ_STA) & 0x00ff0000) &&
+ !mt76_rr(dev, 0x0a30) &&
+ !mt76_rr(dev, 0x0a34)) {
+ if (ok++ > 5)
+ break;
+ continue;
+ }
+ msleep(1);
+ }
+
+ if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_RX, 0, 1000))
+ dev_warn(dev->mt76.dev, "Warning: MAC RX did not stop!\n");
+}
+EXPORT_SYMBOL_GPL(mt76x0_mac_stop);
+
+int mt76x0_init_hardware(struct mt76x02_dev *dev)
+{
+ int ret, i, k;
+
+ if (!mt76x02_wait_for_wpdma(&dev->mt76, 1000))
+ return -EIO;
+
+ /* Wait for ASIC ready after FW load. */
+ if (!mt76x02_wait_for_mac(&dev->mt76))
+ return -ETIMEDOUT;
+
+ mt76x0_reset_csr_bbp(dev);
+ ret = mt76x02_mcu_function_select(dev, Q_SELECT, 1);
+ if (ret)
+ return ret;
+
+ mt76x0_init_mac_registers(dev);
+
+ if (!mt76x02_wait_for_txrx_idle(&dev->mt76))
+ return -EIO;
+
+ ret = mt76x0_init_bbp(dev);
+ if (ret)
+ return ret;
+
+ dev->mt76.rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);
+
+ for (i = 0; i < 16; i++)
+ for (k = 0; k < 4; k++)
+ mt76x02_mac_shared_key_setup(dev, i, k, NULL);
+
+ for (i = 0; i < 256; i++)
+ mt76x02_mac_wcid_setup(dev, i, 0, NULL);
+
+ ret = mt76x0_eeprom_init(dev);
+ if (ret)
+ return ret;
+
+ mt76x0_phy_init(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x0_init_hardware);
+
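+/* Per-channel TX power setup: orig_mpwr is derived from the per-rate power
+ * table and the per-channel target power read from the EEPROM, and the
+ * channel's max_power is capped by the regulatory limit.
+ */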
+static void
+mt76x0_init_txpower(struct mt76x02_dev *dev,
+ struct ieee80211_supported_band *sband)
+{
+ struct ieee80211_channel *chan;
+ struct mt76_rate_power t;
+ s8 tp;
+ int i;
+
+ for (i = 0; i < sband->n_channels; i++) {
+ chan = &sband->channels[i];
+
+ mt76x0_get_tx_power_per_rate(dev, chan, &t);
+ mt76x0_get_power_info(dev, chan, &tp);
+
+ chan->orig_mpwr = (mt76x02_get_max_rate_power(&t) + tp) / 2;
+ chan->max_power = min_t(int, chan->max_reg_power,
+ chan->orig_mpwr);
+ }
+}
+
+int mt76x0_register_device(struct mt76x02_dev *dev)
+{
+ int ret;
+
+ mt76x02_init_device(dev);
+ mt76x02_config_mac_addr_list(dev);
+
+ ret = mt76_register_device(&dev->mt76, true, mt76x02_rates,
+ ARRAY_SIZE(mt76x02_rates));
+ if (ret)
+ return ret;
+
+ if (dev->mphy.cap.has_5ghz) {
+ struct ieee80211_supported_band *sband;
+
+ sband = &dev->mphy.sband_5g.sband;
+ sband->vht_cap.cap &= ~IEEE80211_VHT_CAP_RXLDPC;
+ mt76x0_init_txpower(dev, sband);
+ }
+
+ if (dev->mphy.cap.has_2ghz)
+ mt76x0_init_txpower(dev, &dev->mphy.sband_2g.sband);
+
+ mt76x02_init_debugfs(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x0_register_device);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
new file mode 100644
index 000000000..99808ed0c
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#ifndef __MT76X0U_INITVALS_H
+#define __MT76X0U_INITVALS_H
+
+#include "phy.h"
+
+static const struct mt76x0_bbp_switch_item mt76x0_bbp_switch_tab[] = {
+ { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 4), 0x1FEDA049 } },
+ { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 4), 0x1FECA054 } },
+
+ { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 6), 0x00000045 } },
+ { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 6), 0x0000000A } },
+
+ { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 8), 0x16344EF0 } },
+ { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 8), 0x122C54F2 } },
+
+ { RF_G_BAND | RF_BW_20, { MT_BBP(AGC, 12), 0x05052879 } },
+ { RF_G_BAND | RF_BW_40, { MT_BBP(AGC, 12), 0x050528F9 } },
+ { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 12), 0x050528F9 } },
+
+ { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 13), 0x35050004 } },
+ { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 13), 0x2C3A0406 } },
+
+ { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 14), 0x310F2E3C } },
+ { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 14), 0x310F2A3F } },
+
+ { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 26), 0x007C2005 } },
+ { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 26), 0x007C2005 } },
+
+ { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 27), 0x000000E1 } },
+ { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 27), 0x000000EC } },
+
+ { RF_G_BAND | RF_BW_20, { MT_BBP(AGC, 28), 0x00060806 } },
+ { RF_G_BAND | RF_BW_40, { MT_BBP(AGC, 28), 0x00050806 } },
+ { RF_A_BAND | RF_BW_40, { MT_BBP(AGC, 28), 0x00060801 } },
+ { RF_A_BAND | RF_BW_20 | RF_BW_80, { MT_BBP(AGC, 28), 0x00060806 } },
+
+ { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(RXO, 28), 0x0000008A } },
+
+ { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 31), 0x00000E23 } },
+ { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 31), 0x00000E13 } },
+
+ { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 32), 0x00003218 } },
+ { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 32), 0x0000181C } },
+
+ { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 33), 0x00003240 } },
+ { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 33), 0x00003218 } },
+
+ { RF_G_BAND | RF_BW_20, { MT_BBP(AGC, 35), 0x11111616 } },
+ { RF_G_BAND | RF_BW_40, { MT_BBP(AGC, 35), 0x11111516 } },
+ { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 35), 0x11111111 } },
+
+ { RF_G_BAND | RF_BW_20, { MT_BBP(AGC, 39), 0x2A2A3036 } },
+ { RF_G_BAND | RF_BW_40, { MT_BBP(AGC, 39), 0x2A2A2C36 } },
+ { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 39), 0x2A2A2A2A } },
+
+ { RF_G_BAND | RF_BW_20, { MT_BBP(AGC, 43), 0x27273438 } },
+ { RF_G_BAND | RF_BW_40, { MT_BBP(AGC, 43), 0x27272D38 } },
+ { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 43), 0x27271A1A } },
+
+ { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 51), 0x17171C1C } },
+ { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 51), 0xFFFFFFFF } },
+
+ { RF_G_BAND | RF_BW_20, { MT_BBP(AGC, 53), 0x26262A2F } },
+ { RF_G_BAND | RF_BW_40, { MT_BBP(AGC, 53), 0x2626322F } },
+ { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 53), 0xFFFFFFFF } },
+
+ { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 55), 0x40404040 } },
+ { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 55), 0xFFFFFFFF } },
+
+ { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 58), 0x00001010 } },
+ { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 58), 0x00000000 } },
+
+ { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(RXFE, 0), 0x3D5000E0 } },
+ { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(RXFE, 0), 0x895000E0 } },
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_init.h b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_init.h
new file mode 100644
index 000000000..9e99ba75f
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_init.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#ifndef __MT76X0U_INITVALS_INIT_H
+#define __MT76X0U_INITVALS_INIT_H
+
+#include "phy.h"
+
+static const struct mt76_reg_pair common_mac_reg_table[] = {
+ { MT_BCN_OFFSET(0), 0xf8f0e8e0 },
+ { MT_BCN_OFFSET(1), 0x6f77d0c8 },
+ { MT_LEGACY_BASIC_RATE, 0x0000013f },
+ { MT_HT_BASIC_RATE, 0x00008003 },
+ { MT_MAC_SYS_CTRL, 0x00000000 },
+ { MT_RX_FILTR_CFG, 0x00017f97 },
+ { MT_BKOFF_SLOT_CFG, 0x00000209 },
+ { MT_TX_SW_CFG0, 0x00000000 },
+ { MT_TX_SW_CFG1, 0x00080606 },
+ { MT_TX_LINK_CFG, 0x00001020 },
+ { MT_TX_TIMEOUT_CFG, 0x000a2090 },
+ { MT_MAX_LEN_CFG, 0xa0fff | 0x00001000 },
+ { MT_LED_CFG, 0x7f031e46 },
+ { MT_PBF_TX_MAX_PCNT, 0x1fbf1f1f },
+ { MT_PBF_RX_MAX_PCNT, 0x0000fe9f },
+ { MT_TX_RETRY_CFG, 0x47d01f0f },
+ { MT_AUTO_RSP_CFG, 0x00000013 },
+ { MT_CCK_PROT_CFG, 0x07f40003 },
+ { MT_OFDM_PROT_CFG, 0x07f42004 },
+ { MT_PBF_CFG, 0x00f40006 },
+ { MT_WPDMA_GLO_CFG, 0x00000030 },
+ { MT_GF20_PROT_CFG, 0x01742004 },
+ { MT_GF40_PROT_CFG, 0x03f42084 },
+ { MT_MM20_PROT_CFG, 0x01742004 },
+ { MT_MM40_PROT_CFG, 0x03f42084 },
+ { MT_TXOP_CTRL_CFG, 0x0000583f },
+ { MT_TX_RTS_CFG, 0x00ffff20 },
+ { MT_EXP_ACK_TIME, 0x002400ca },
+ { MT_TXOP_HLDR_ET, 0x00000002 },
+ { MT_XIFS_TIME_CFG, 0x33a41010 },
+ { MT_PWR_PIN_CFG, 0x00000000 },
+};
+
+static const struct mt76_reg_pair mt76x0_mac_reg_table[] = {
+ { MT_IOCFG_6, 0xa0040080 },
+ { MT_PBF_SYS_CTRL, 0x00080c00 },
+ { MT_PBF_CFG, 0x77723c1f },
+ { MT_FCE_PSE_CTRL, 0x00000001 },
+ { MT_AMPDU_MAX_LEN_20M1S, 0xAAA99887 },
+ { MT_TX_SW_CFG0, 0x00000601 },
+ { MT_TX_SW_CFG1, 0x00040000 },
+ { MT_TX_SW_CFG2, 0x00000000 },
+ { 0xa44, 0x00000000 },
+ { MT_HEADER_TRANS_CTRL_REG, 0x00000000 },
+ { MT_TSO_CTRL, 0x00000000 },
+ { MT_BB_PA_MODE_CFG1, 0x00500055 },
+ { MT_RF_PA_MODE_CFG1, 0x00500055 },
+ { MT_TX_ALC_CFG_0, 0x2F2F000C },
+ { MT_TX0_BB_GAIN_ATTEN, 0x00000000 },
+ { MT_TX_PWR_CFG_0, 0x3A3A3A3A },
+ { MT_TX_PWR_CFG_1, 0x3A3A3A3A },
+ { MT_TX_PWR_CFG_2, 0x3A3A3A3A },
+ { MT_TX_PWR_CFG_3, 0x3A3A3A3A },
+ { MT_TX_PWR_CFG_4, 0x3A3A3A3A },
+ { MT_TX_PWR_CFG_7, 0x3A3A3A3A },
+ { MT_TX_PWR_CFG_8, 0x0000003A },
+ { MT_TX_PWR_CFG_9, 0x0000003A },
+ { 0x150C, 0x00000002 },
+ { 0x1238, 0x001700C8 },
+ { MT_LDO_CTRL_0, 0x00A647B6 },
+ { MT_LDO_CTRL_1, 0x6B006464 },
+ { MT_HT_BASIC_RATE, 0x00004003 },
+ { MT_HT_CTRL_CFG, 0x000001FF },
+ { MT_TXOP_HLDR_ET, 0x00000000 },
+ { MT_PN_PAD_MODE, 0x00000003 },
+ { MT_TX_PROT_CFG6, 0xe3f42004 },
+ { MT_TX_PROT_CFG7, 0xe3f42084 },
+ { MT_TX_PROT_CFG8, 0xe3f42104 },
+ { MT_VHT_HT_FBK_CFG1, 0xedcba980 },
+};
+
+static const struct mt76_reg_pair mt76x0_bbp_init_tab[] = {
+ { MT_BBP(CORE, 1), 0x00000002 },
+ { MT_BBP(CORE, 4), 0x00000000 },
+ { MT_BBP(CORE, 24), 0x00000000 },
+ { MT_BBP(CORE, 32), 0x4003000a },
+ { MT_BBP(CORE, 42), 0x00000000 },
+ { MT_BBP(CORE, 44), 0x00000000 },
+ { MT_BBP(IBI, 11), 0x0FDE8081 },
+ { MT_BBP(AGC, 0), 0x00021400 },
+ { MT_BBP(AGC, 1), 0x00000003 },
+ { MT_BBP(AGC, 2), 0x003A6464 },
+ { MT_BBP(AGC, 15), 0x88A28CB8 },
+ { MT_BBP(AGC, 22), 0x00001E21 },
+ { MT_BBP(AGC, 23), 0x0000272C },
+ { MT_BBP(AGC, 24), 0x00002F3A },
+ { MT_BBP(AGC, 25), 0x8000005A },
+ { MT_BBP(AGC, 26), 0x007C2005 },
+ { MT_BBP(AGC, 33), 0x00003238 },
+ { MT_BBP(AGC, 34), 0x000A0C0C },
+ { MT_BBP(AGC, 37), 0x2121262C },
+ { MT_BBP(AGC, 41), 0x38383E45 },
+ { MT_BBP(AGC, 57), 0x00001010 },
+ { MT_BBP(AGC, 59), 0xBAA20E96 },
+ { MT_BBP(AGC, 63), 0x00000001 },
+ { MT_BBP(TXC, 0), 0x00280403 },
+ { MT_BBP(TXC, 1), 0x00000000 },
+ { MT_BBP(RXC, 1), 0x00000012 },
+ { MT_BBP(RXC, 2), 0x00000011 },
+ { MT_BBP(RXC, 3), 0x00000005 },
+ { MT_BBP(RXC, 4), 0x00000000 },
+ { MT_BBP(RXC, 5), 0xF977C4EC },
+ { MT_BBP(RXC, 7), 0x00000090 },
+ { MT_BBP(TXO, 8), 0x00000000 },
+ { MT_BBP(TXBE, 0), 0x00000000 },
+ { MT_BBP(TXBE, 4), 0x00000004 },
+ { MT_BBP(TXBE, 6), 0x00000000 },
+ { MT_BBP(TXBE, 8), 0x00000014 },
+ { MT_BBP(TXBE, 9), 0x20000000 },
+ { MT_BBP(TXBE, 10), 0x00000000 },
+ { MT_BBP(TXBE, 12), 0x00000000 },
+ { MT_BBP(TXBE, 13), 0x00000000 },
+ { MT_BBP(TXBE, 14), 0x00000000 },
+ { MT_BBP(TXBE, 15), 0x00000000 },
+ { MT_BBP(TXBE, 16), 0x00000000 },
+ { MT_BBP(TXBE, 17), 0x00000000 },
+ { MT_BBP(RXFE, 1), 0x00008800 },
+ { MT_BBP(RXFE, 3), 0x00000000 },
+ { MT_BBP(RXFE, 4), 0x00000000 },
+ { MT_BBP(RXO, 13), 0x00000192 },
+ { MT_BBP(RXO, 14), 0x00060612 },
+ { MT_BBP(RXO, 15), 0xC8321B18 },
+ { MT_BBP(RXO, 16), 0x0000001E },
+ { MT_BBP(RXO, 17), 0x00000000 },
+ { MT_BBP(RXO, 18), 0xCC00A993 },
+ { MT_BBP(RXO, 19), 0xB9CB9CB9 },
+ { MT_BBP(RXO, 20), 0x26c00057 },
+ { MT_BBP(RXO, 21), 0x00000001 },
+ { MT_BBP(RXO, 24), 0x00000006 },
+ { MT_BBP(RXO, 28), 0x0000003F },
+};
+
+static const struct mt76_reg_pair mt76x0_dcoc_tab[] = {
+ { MT_BBP(CAL, 47), 0x000010F0 },
+ { MT_BBP(CAL, 48), 0x00008080 },
+ { MT_BBP(CAL, 49), 0x00000F07 },
+ { MT_BBP(CAL, 50), 0x00000040 },
+ { MT_BBP(CAL, 51), 0x00000404 },
+ { MT_BBP(CAL, 52), 0x00080803 },
+ { MT_BBP(CAL, 53), 0x00000704 },
+ { MT_BBP(CAL, 54), 0x00002828 },
+ { MT_BBP(CAL, 55), 0x00005050 },
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h
new file mode 100644
index 000000000..42a79887b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h
@@ -0,0 +1,633 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ */
+
+#ifndef __MT76X0U_PHY_INITVALS_H
+#define __MT76X0U_PHY_INITVALS_H
+
+static const struct mt76_reg_pair mt76x0_rf_central_tab[] = {
+ { MT_RF(0, 1), 0x01 },
+ { MT_RF(0, 2), 0x11 },
+ /* R3 ~ R7: VCO Cal */
+ { MT_RF(0, 3), 0x73 }, /* VCO Freq Cal */
+ { MT_RF(0, 4), 0x30 }, /* R4 b<7>=1, VCO cal */
+ { MT_RF(0, 5), 0x00 },
+ { MT_RF(0, 6), 0x41 },
+ { MT_RF(0, 7), 0x00 },
+ { MT_RF(0, 8), 0x00 },
+ { MT_RF(0, 9), 0x00 },
+ { MT_RF(0, 10), 0x0C },
+ { MT_RF(0, 11), 0x00 },
+ { MT_RF(0, 12), 0x00 },
+ /* BG */
+ { MT_RF(0, 13), 0x00 },
+ { MT_RF(0, 14), 0x00 },
+ { MT_RF(0, 15), 0x00 },
+ /* LDO */
+ { MT_RF(0, 19), 0x20 },
+ { MT_RF(0, 20), 0x22 },
+ { MT_RF(0, 21), 0x12 },
+ { MT_RF(0, 23), 0x00 },
+ { MT_RF(0, 24), 0x33 },
+ { MT_RF(0, 25), 0x00 },
+ /* PLL */
+ { MT_RF(0, 26), 0x00 },
+ { MT_RF(0, 27), 0x00 },
+ { MT_RF(0, 28), 0x00 },
+ { MT_RF(0, 29), 0x00 },
+ { MT_RF(0, 30), 0x00 },
+ { MT_RF(0, 31), 0x00 },
+ { MT_RF(0, 32), 0x00 },
+ { MT_RF(0, 33), 0x00 },
+ { MT_RF(0, 34), 0x00 },
+ { MT_RF(0, 35), 0x00 },
+ { MT_RF(0, 36), 0x00 },
+ { MT_RF(0, 37), 0x00 },
+ /* LO Buffer */
+ { MT_RF(0, 38), 0x2F },
+ /* Test Ports */
+ { MT_RF(0, 64), 0x00 },
+ { MT_RF(0, 65), 0x80 },
+ { MT_RF(0, 66), 0x01 },
+ { MT_RF(0, 67), 0x04 },
+ /* ADC-DAC */
+ { MT_RF(0, 68), 0x00 },
+ { MT_RF(0, 69), 0x08 },
+ { MT_RF(0, 70), 0x08 },
+ { MT_RF(0, 71), 0x40 },
+ { MT_RF(0, 72), 0xD0 },
+ { MT_RF(0, 73), 0x93 },
+};
+
+static const struct mt76_reg_pair mt76x0_rf_2g_channel_0_tab[] = {
+ /* RX logic operation */
+ { MT_RF(5, 2), 0x0C }, /* 5G+2G */
+ { MT_RF(5, 3), 0x00 },
+ /* TX logic operation */
+ { MT_RF(5, 4), 0x00 },
+ { MT_RF(5, 5), 0x84 },
+ { MT_RF(5, 6), 0x02 },
+ /* LDO */
+ { MT_RF(5, 7), 0x00 },
+ { MT_RF(5, 8), 0x00 },
+ { MT_RF(5, 9), 0x00 },
+ /* RX */
+ { MT_RF(5, 10), 0x51 },
+ { MT_RF(5, 11), 0x22 },
+ { MT_RF(5, 12), 0x22 },
+ { MT_RF(5, 13), 0x0F },
+ { MT_RF(5, 14), 0x47 },
+ { MT_RF(5, 15), 0x25 },
+ { MT_RF(5, 16), 0xC7 },
+ { MT_RF(5, 17), 0x00 },
+ { MT_RF(5, 18), 0x00 },
+ { MT_RF(5, 19), 0x30 },
+ { MT_RF(5, 20), 0x33 },
+ { MT_RF(5, 21), 0x02 },
+ { MT_RF(5, 22), 0x32 },
+ { MT_RF(5, 23), 0x00 },
+ { MT_RF(5, 24), 0x25 },
+ { MT_RF(5, 26), 0x00 },
+ { MT_RF(5, 27), 0x12 },
+ { MT_RF(5, 28), 0x0F },
+ { MT_RF(5, 29), 0x00 },
+ /* LOGEN */
+ { MT_RF(5, 30), 0x51 },
+ { MT_RF(5, 31), 0x35 },
+ { MT_RF(5, 32), 0x31 },
+ { MT_RF(5, 33), 0x31 },
+ { MT_RF(5, 34), 0x34 },
+ { MT_RF(5, 35), 0x03 },
+ { MT_RF(5, 36), 0x00 },
+ /* TX */
+ { MT_RF(5, 37), 0xDD },
+ { MT_RF(5, 38), 0xB3 },
+ { MT_RF(5, 39), 0x33 },
+ { MT_RF(5, 40), 0xB1 },
+ { MT_RF(5, 41), 0x71 },
+ { MT_RF(5, 42), 0xF2 },
+ { MT_RF(5, 43), 0x47 },
+ { MT_RF(5, 44), 0x77 },
+ { MT_RF(5, 45), 0x0E },
+ { MT_RF(5, 46), 0x10 },
+ { MT_RF(5, 47), 0x00 },
+ { MT_RF(5, 48), 0x53 },
+ { MT_RF(5, 49), 0x03 },
+ { MT_RF(5, 50), 0xEF },
+ { MT_RF(5, 51), 0xC7 },
+ { MT_RF(5, 52), 0x62 },
+ { MT_RF(5, 53), 0x62 },
+ { MT_RF(5, 54), 0x00 },
+ { MT_RF(5, 55), 0x00 },
+ { MT_RF(5, 56), 0x0F },
+ { MT_RF(5, 57), 0x0F },
+ { MT_RF(5, 58), 0x16 },
+ { MT_RF(5, 59), 0x16 },
+ { MT_RF(5, 60), 0x10 },
+ { MT_RF(5, 61), 0x10 },
+ { MT_RF(5, 62), 0xD0 },
+ { MT_RF(5, 63), 0x6C },
+ { MT_RF(5, 64), 0x58 },
+ { MT_RF(5, 65), 0x58 },
+ { MT_RF(5, 66), 0xF2 },
+ { MT_RF(5, 67), 0xE8 },
+ { MT_RF(5, 68), 0xF0 },
+ { MT_RF(5, 69), 0xF0 },
+ { MT_RF(5, 127), 0x04 },
+};
+
+static const struct mt76_reg_pair mt76x0_rf_5g_channel_0_tab[] = {
+ /* RX logic operation */
+ { MT_RF(6, 2), 0x0C },
+ { MT_RF(6, 3), 0x00 },
+ /* TX logic operation */
+ { MT_RF(6, 4), 0x00 },
+ { MT_RF(6, 5), 0x84 },
+ { MT_RF(6, 6), 0x02 },
+ /* LDO */
+ { MT_RF(6, 7), 0x00 },
+ { MT_RF(6, 8), 0x00 },
+ { MT_RF(6, 9), 0x00 },
+ /* RX */
+ { MT_RF(6, 10), 0x00 },
+ { MT_RF(6, 11), 0x01 },
+ { MT_RF(6, 13), 0x23 },
+ { MT_RF(6, 14), 0x00 },
+ { MT_RF(6, 15), 0x04 },
+ { MT_RF(6, 16), 0x22 },
+ { MT_RF(6, 18), 0x08 },
+ { MT_RF(6, 19), 0x00 },
+ { MT_RF(6, 20), 0x00 },
+ { MT_RF(6, 21), 0x00 },
+ { MT_RF(6, 22), 0xFB },
+ /* LOGEN5G */
+ { MT_RF(6, 25), 0x76 },
+ { MT_RF(6, 26), 0x24 },
+ { MT_RF(6, 27), 0x04 },
+ { MT_RF(6, 28), 0x00 },
+ { MT_RF(6, 29), 0x00 },
+ /* TX */
+ { MT_RF(6, 37), 0xBB },
+ { MT_RF(6, 38), 0xB3 },
+ { MT_RF(6, 40), 0x33 },
+ { MT_RF(6, 41), 0x33 },
+ { MT_RF(6, 43), 0x03 },
+ { MT_RF(6, 44), 0xB3 },
+ { MT_RF(6, 46), 0x17 },
+ { MT_RF(6, 47), 0x0E },
+ { MT_RF(6, 48), 0x10 },
+ { MT_RF(6, 49), 0x07 },
+ { MT_RF(6, 62), 0x00 },
+ { MT_RF(6, 63), 0x00 },
+ { MT_RF(6, 64), 0xF1 },
+ { MT_RF(6, 65), 0x0F },
+};
+
+static const struct mt76_reg_pair mt76x0_rf_vga_channel_0_tab[] = {
+ /* E3 CR */
+ { MT_RF(7, 0), 0x47 },
+ { MT_RF(7, 1), 0x00 },
+ { MT_RF(7, 2), 0x00 },
+ { MT_RF(7, 3), 0x00 },
+ { MT_RF(7, 4), 0x00 },
+ { MT_RF(7, 10), 0x13 },
+ { MT_RF(7, 11), 0x0F },
+ { MT_RF(7, 12), 0x13 },
+ { MT_RF(7, 13), 0x13 },
+ { MT_RF(7, 14), 0x13 },
+ { MT_RF(7, 15), 0x20 },
+ { MT_RF(7, 16), 0x22 },
+ { MT_RF(7, 17), 0x7C },
+ { MT_RF(7, 18), 0x00 },
+ { MT_RF(7, 19), 0x00 },
+ { MT_RF(7, 20), 0x00 },
+ { MT_RF(7, 21), 0xF1 },
+ { MT_RF(7, 22), 0x11 },
+ { MT_RF(7, 23), 0xC2 },
+ { MT_RF(7, 24), 0x41 },
+ { MT_RF(7, 25), 0x20 },
+ { MT_RF(7, 26), 0x40 },
+ { MT_RF(7, 27), 0xD7 },
+ { MT_RF(7, 28), 0xA2 },
+ { MT_RF(7, 29), 0x60 },
+ { MT_RF(7, 30), 0x49 },
+ { MT_RF(7, 31), 0x20 },
+ { MT_RF(7, 32), 0x44 },
+ { MT_RF(7, 33), 0xC1 },
+ { MT_RF(7, 34), 0x60 },
+ { MT_RF(7, 35), 0xC0 },
+ { MT_RF(7, 61), 0x01 },
+ { MT_RF(7, 72), 0x3C },
+ { MT_RF(7, 73), 0x34 },
+ { MT_RF(7, 74), 0x00 },
+};
+
+static const struct mt76x0_rf_switch_item mt76x0_rf_bw_switch_tab[] = {
+ /* bank, reg bw/band value */
+ { MT_RF(0, 17), RF_G_BAND | RF_BW_20, 0x00 },
+ { MT_RF(0, 17), RF_G_BAND | RF_BW_40, 0x00 },
+ { MT_RF(0, 17), RF_A_BAND | RF_BW_20, 0x00 },
+ { MT_RF(0, 17), RF_A_BAND | RF_BW_40, 0x00 },
+ { MT_RF(0, 17), RF_A_BAND | RF_BW_80, 0x00 },
+ { MT_RF(7, 6), RF_G_BAND | RF_BW_20, 0x40 },
+ { MT_RF(7, 6), RF_G_BAND | RF_BW_40, 0x1C },
+ { MT_RF(7, 6), RF_A_BAND | RF_BW_20, 0x40 },
+ { MT_RF(7, 6), RF_A_BAND | RF_BW_40, 0x20 },
+ { MT_RF(7, 6), RF_A_BAND | RF_BW_80, 0x10 },
+ { MT_RF(7, 7), RF_G_BAND | RF_BW_20, 0x40 },
+ { MT_RF(7, 7), RF_G_BAND | RF_BW_40, 0x20 },
+ { MT_RF(7, 7), RF_A_BAND | RF_BW_20, 0x40 },
+ { MT_RF(7, 7), RF_A_BAND | RF_BW_40, 0x20 },
+ { MT_RF(7, 7), RF_A_BAND | RF_BW_80, 0x10 },
+ { MT_RF(7, 8), RF_G_BAND | RF_BW_20, 0x03 },
+ { MT_RF(7, 8), RF_G_BAND | RF_BW_40, 0x01 },
+ { MT_RF(7, 8), RF_A_BAND | RF_BW_20, 0x03 },
+ { MT_RF(7, 8), RF_A_BAND | RF_BW_40, 0x01 },
+ { MT_RF(7, 8), RF_A_BAND | RF_BW_80, 0x00 },
+ { MT_RF(7, 58), RF_G_BAND | RF_BW_20, 0x40 },
+ { MT_RF(7, 58), RF_G_BAND | RF_BW_40, 0x40 },
+ { MT_RF(7, 58), RF_A_BAND | RF_BW_20, 0x40 },
+ { MT_RF(7, 58), RF_A_BAND | RF_BW_40, 0x40 },
+ { MT_RF(7, 58), RF_A_BAND | RF_BW_80, 0x10 },
+ { MT_RF(7, 59), RF_G_BAND | RF_BW_20, 0x40 },
+ { MT_RF(7, 59), RF_G_BAND | RF_BW_40, 0x40 },
+ { MT_RF(7, 59), RF_A_BAND | RF_BW_20, 0x40 },
+ { MT_RF(7, 59), RF_A_BAND | RF_BW_40, 0x40 },
+ { MT_RF(7, 59), RF_A_BAND | RF_BW_80, 0x10 },
+ { MT_RF(7, 60), RF_G_BAND | RF_BW_20, 0xAA },
+ { MT_RF(7, 60), RF_G_BAND | RF_BW_40, 0xAA },
+ { MT_RF(7, 60), RF_A_BAND | RF_BW_20, 0xAA },
+ { MT_RF(7, 60), RF_A_BAND | RF_BW_40, 0xAA },
+ { MT_RF(7, 60), RF_A_BAND | RF_BW_80, 0xAA },
+ { MT_RF(7, 76), RF_BW_20, 0x40 },
+ { MT_RF(7, 76), RF_BW_40, 0x40 },
+ { MT_RF(7, 76), RF_BW_80, 0x10 },
+ { MT_RF(7, 77), RF_BW_20, 0x40 },
+ { MT_RF(7, 77), RF_BW_40, 0x40 },
+ { MT_RF(7, 77), RF_BW_80, 0x10 },
+};
+
+static const struct mt76x0_rf_switch_item mt76x0_rf_band_switch_tab[] = {
+ /* bank, reg bw/band value */
+ { MT_RF(0, 16), RF_G_BAND, 0x20 },
+ { MT_RF(0, 16), RF_A_BAND, 0x20 },
+ { MT_RF(0, 18), RF_G_BAND, 0x00 },
+ { MT_RF(0, 18), RF_A_BAND, 0x00 },
+ { MT_RF(0, 39), RF_G_BAND, 0x36 },
+ { MT_RF(0, 39), RF_A_BAND_LB, 0x34 },
+ { MT_RF(0, 39), RF_A_BAND_MB, 0x33 },
+ { MT_RF(0, 39), RF_A_BAND_HB, 0x31 },
+ { MT_RF(0, 39), RF_A_BAND_11J, 0x36 },
+ { MT_RF(6, 12), RF_A_BAND_LB, 0x44 },
+ { MT_RF(6, 12), RF_A_BAND_MB, 0x44 },
+ { MT_RF(6, 12), RF_A_BAND_HB, 0x55 },
+ { MT_RF(6, 12), RF_A_BAND_11J, 0x44 },
+ { MT_RF(6, 17), RF_A_BAND_LB, 0x02 },
+ { MT_RF(6, 17), RF_A_BAND_MB, 0x00 },
+ { MT_RF(6, 17), RF_A_BAND_HB, 0x00 },
+ { MT_RF(6, 17), RF_A_BAND_11J, 0x05 },
+ { MT_RF(6, 24), RF_A_BAND_LB, 0xA1 },
+ { MT_RF(6, 24), RF_A_BAND_MB, 0x41 },
+ { MT_RF(6, 24), RF_A_BAND_HB, 0x21 },
+ { MT_RF(6, 24), RF_A_BAND_11J, 0xE1 },
+ { MT_RF(6, 39), RF_A_BAND_LB, 0x36 },
+ { MT_RF(6, 39), RF_A_BAND_MB, 0x34 },
+ { MT_RF(6, 39), RF_A_BAND_HB, 0x32 },
+ { MT_RF(6, 39), RF_A_BAND_11J, 0x37 },
+ { MT_RF(6, 42), RF_A_BAND_LB, 0xFB },
+ { MT_RF(6, 42), RF_A_BAND_MB, 0xF3 },
+ { MT_RF(6, 42), RF_A_BAND_HB, 0xEB },
+ { MT_RF(6, 42), RF_A_BAND_11J, 0xEB },
+ { MT_RF(6, 127), RF_G_BAND, 0x84 },
+ { MT_RF(6, 127), RF_A_BAND, 0x04 },
+ { MT_RF(7, 5), RF_G_BAND, 0x40 },
+ { MT_RF(7, 5), RF_A_BAND, 0x00 },
+ { MT_RF(7, 9), RF_G_BAND, 0x00 },
+ { MT_RF(7, 9), RF_A_BAND, 0x00 },
+ { MT_RF(7, 70), RF_G_BAND, 0x00 },
+ { MT_RF(7, 70), RF_A_BAND, 0x6D },
+ { MT_RF(7, 71), RF_G_BAND, 0x00 },
+ { MT_RF(7, 71), RF_A_BAND, 0xB0 },
+ { MT_RF(7, 78), RF_G_BAND, 0x00 },
+ { MT_RF(7, 78), RF_A_BAND, 0x55 },
+ { MT_RF(7, 79), RF_G_BAND, 0x00 },
+ { MT_RF(7, 79), RF_A_BAND, 0x55 },
+};
+
+static const struct mt76x0_freq_item mt76x0_frequency_plan[] = {
+ { 1, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xE2, 0x40, 0x02, 0x40, 0x02, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3 }, /* Freq 2412 */
+ { 2, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xE4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA1, 0, 0x30, 0, 0, 0x1 }, /* Freq 2417 */
+ { 3, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xE2, 0x40, 0x07, 0x40, 0x0B, 0, 0, 1, 0x50, 0, 0x30, 0, 0, 0x0 }, /* Freq 2422 */
+ { 4, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xD4, 0x40, 0x02, 0x40, 0x09, 0, 0, 1, 0x50, 0, 0x30, 0, 0, 0x0 }, /* Freq 2427 */
+ { 5, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA2, 0, 0x30, 0, 0, 0x1 }, /* Freq 2432 */
+ { 6, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x07, 0, 0, 1, 0xA2, 0, 0x30, 0, 0, 0x1 }, /* Freq 2437 */
+ { 7, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xE2, 0x40, 0x02, 0x40, 0x07, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3 }, /* Freq 2442 */
+ { 8, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA3, 0, 0x30, 0, 0, 0x1 }, /* Freq 2447 */
+ { 9, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xF2, 0x40, 0x07, 0x40, 0x0D, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3 }, /* Freq 2452 */
+ { 10, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xD4, 0x40, 0x02, 0x40, 0x09, 0, 0, 1, 0x51, 0, 0x30, 0, 0, 0x0 }, /* Freq 2457 */
+ { 11, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA4, 0, 0x30, 0, 0, 0x1 }, /* Freq 2462 */
+ { 12, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x07, 0, 0, 1, 0xA4, 0, 0x30, 0, 0, 0x1 }, /* Freq 2467 */
+ { 13, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xF2, 0x40, 0x02, 0x40, 0x02, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3 }, /* Freq 2472 */
+ { 14, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xF2, 0x40, 0x02, 0x40, 0x04, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3 }, /* Freq 2484 */
+ { 183, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3 }, /* Freq 4915 */
+ { 184, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x00, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3 }, /* Freq 4920 */
+ { 185, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3 }, /* Freq 4925 */
+ { 187, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3 }, /* Freq 4935 */
+ { 188, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3 }, /* Freq 4940 */
+ { 189, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3 }, /* Freq 4945 */
+ { 192, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3 }, /* Freq 4960 */
+ { 196, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3 }, /* Freq 4980 */
+ { 36, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5180 */
+ { 37, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5185 */
+ { 38, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5190 */
+ { 39, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5195 */
+ { 40, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5200 */
+ { 41, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5205 */
+ { 42, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5210 */
+ { 43, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5215 */
+ { 44, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5220 */
+ { 45, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5225 */
+ { 46, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5230 */
+ { 47, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5235 */
+ { 48, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5240 */
+ { 49, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5245 */
+ { 50, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5250 */
+ { 51, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5255 */
+ { 52, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5260 */
+ { 53, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5265 */
+ { 54, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5270 */
+ { 55, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5275 */
+ { 56, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3 }, /* Freq 5280 */
+ { 57, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3 }, /* Freq 5285 */
+ { 58, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3 }, /* Freq 5290 */
+ { 59, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3 }, /* Freq 5295 */
+ { 60, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3 }, /* Freq 5300 */
+ { 61, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3 }, /* Freq 5305 */
+ { 62, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3 }, /* Freq 5310 */
+ { 63, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3 }, /* Freq 5315 */
+ { 64, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3 }, /* Freq 5320 */
+ { 100, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3 }, /* Freq 5500 */
+ { 101, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3 }, /* Freq 5505 */
+ { 102, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3 }, /* Freq 5510 */
+ { 103, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3 }, /* Freq 5515 */
+ { 104, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5520 */
+ { 105, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5525 */
+ { 106, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5530 */
+ { 107, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5535 */
+ { 108, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5540 */
+ { 109, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5545 */
+ { 110, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5550 */
+ { 111, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5555 */
+ { 112, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5560 */
+ { 113, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5565 */
+ { 114, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5570 */
+ { 115, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5575 */
+ { 116, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5580 */
+ { 117, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5585 */
+ { 118, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5590 */
+ { 119, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5595 */
+ { 120, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5600 */
+ { 121, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5605 */
+ { 122, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5610 */
+ { 123, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5615 */
+ { 124, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5620 */
+ { 125, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5625 */
+ { 126, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5630 */
+ { 127, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5635 */
+ { 128, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5640 */
+ { 129, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5645 */
+ { 130, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5650 */
+ { 131, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5655 */
+ { 132, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5660 */
+ { 133, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5665 */
+ { 134, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5670 */
+ { 135, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5675 */
+ { 136, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5680 */
+ { 137, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5685 */
+ { 138, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5690 */
+ { 139, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5695 */
+ { 140, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5700 */
+ { 141, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5705 */
+ { 142, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5710 */
+ { 143, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5715 */
+ { 144, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5720 */
+ { 145, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5725 */
+ { 146, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5730 */
+ { 147, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5735 */
+ { 148, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5740 */
+ { 149, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5745 */
+ { 150, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5750 */
+ { 151, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5755 */
+ { 152, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5760 */
+ { 153, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5765 */
+ { 154, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5770 */
+ { 155, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5775 */
+ { 156, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5780 */
+ { 157, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5785 */
+ { 158, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5790 */
+ { 159, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5795 */
+ { 160, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5800 */
+ { 161, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5805 */
+ { 162, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5810 */
+ { 163, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5815 */
+ { 164, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5820 */
+ { 165, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5825 */
+ { 166, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5830 */
+ { 167, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5835 */
+ { 168, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5840 */
+ { 169, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5845 */
+ { 170, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5850 */
+ { 171, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5855 */
+ { 172, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5860 */
+ { 173, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5865 */
+};
+
+static const struct mt76x0_freq_item mt76x0_sdm_frequency_plan[] = {
+ { 1, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x0CCCC, 0x3 }, /* Freq 2412 */
+ { 2, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x12222, 0x3 }, /* Freq 2417 */
+ { 3, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x17777, 0x3 }, /* Freq 2422 */
+ { 4, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x1CCCC, 0x3 }, /* Freq 2427 */
+ { 5, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x22222, 0x3 }, /* Freq 2432 */
+ { 6, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x27777, 0x3 }, /* Freq 2437 */
+ { 7, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x2CCCC, 0x3 }, /* Freq 2442 */
+ { 8, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x32222, 0x3 }, /* Freq 2447 */
+ { 9, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x37777, 0x3 }, /* Freq 2452 */
+ { 10, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x3CCCC, 0x3 }, /* Freq 2457 */
+ { 11, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x02222, 0x3 }, /* Freq 2462 */
+ { 12, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x07777, 0x3 }, /* Freq 2467 */
+ { 13, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x0CCCC, 0x3 }, /* Freq 2472 */
+ { 14, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x19999, 0x3 }, /* Freq 2484 */
+ { 183, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x3D555, 0x3 }, /* Freq 4915 */
+ { 184, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x00000, 0x3 }, /* Freq 4920 */
+ { 185, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x02AAA, 0x3 }, /* Freq 4925 */
+ { 187, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x08000, 0x3 }, /* Freq 4935 */
+ { 188, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x0AAAA, 0x3 }, /* Freq 4940 */
+ { 189, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x0D555, 0x3 }, /* Freq 4945 */
+ { 192, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x15555, 0x3 }, /* Freq 4960 */
+ { 196, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x20000, 0x3 }, /* Freq 4980 */
+ { 36, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x0AAAA, 0x3 }, /* Freq 5180 */
+ { 37, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x0D555, 0x3 }, /* Freq 5185 */
+ { 38, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x10000, 0x3 }, /* Freq 5190 */
+ { 39, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x12AAA, 0x3 }, /* Freq 5195 */
+ { 40, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x15555, 0x3 }, /* Freq 5200 */
+ { 41, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x18000, 0x3 }, /* Freq 5205 */
+ { 42, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x1AAAA, 0x3 }, /* Freq 5210 */
+ { 43, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x1D555, 0x3 }, /* Freq 5215 */
+ { 44, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x20000, 0x3 }, /* Freq 5220 */
+ { 45, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x22AAA, 0x3 }, /* Freq 5225 */
+ { 46, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x25555, 0x3 }, /* Freq 5230 */
+ { 47, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x28000, 0x3 }, /* Freq 5235 */
+ { 48, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x2AAAA, 0x3 }, /* Freq 5240 */
+ { 49, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x2D555, 0x3 }, /* Freq 5245 */
+ { 50, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x30000, 0x3 }, /* Freq 5250 */
+ { 51, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x32AAA, 0x3 }, /* Freq 5255 */
+ { 52, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x35555, 0x3 }, /* Freq 5260 */
+ { 53, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x38000, 0x3 }, /* Freq 5265 */
+ { 54, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x3AAAA, 0x3 }, /* Freq 5270 */
+ { 55, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x3D555, 0x3 }, /* Freq 5275 */
+ { 56, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2C, 0, 0x0, 0x8, 0x00000, 0x3 }, /* Freq 5280 */
+ { 57, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2C, 0, 0x0, 0x8, 0x02AAA, 0x3 }, /* Freq 5285 */
+ { 58, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2C, 0, 0x0, 0x8, 0x05555, 0x3 }, /* Freq 5290 */
+ { 59, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2C, 0, 0x0, 0x8, 0x08000, 0x3 }, /* Freq 5295 */
+ { 60, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2C, 0, 0x0, 0x8, 0x0AAAA, 0x3 }, /* Freq 5300 */
+ { 61, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2C, 0, 0x0, 0x8, 0x0D555, 0x3 }, /* Freq 5305 */
+ { 62, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2C, 0, 0x0, 0x8, 0x10000, 0x3 }, /* Freq 5310 */
+ { 63, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2C, 0, 0x0, 0x8, 0x12AAA, 0x3 }, /* Freq 5315 */
+ { 64, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2C, 0, 0x0, 0x8, 0x15555, 0x3 }, /* Freq 5320 */
+ { 100, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2D, 0, 0x0, 0x8, 0x35555, 0x3 }, /* Freq 5500 */
+ { 101, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2D, 0, 0x0, 0x8, 0x38000, 0x3 }, /* Freq 5505 */
+ { 102, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2D, 0, 0x0, 0x8, 0x3AAAA, 0x3 }, /* Freq 5510 */
+ { 103, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2D, 0, 0x0, 0x8, 0x3D555, 0x3 }, /* Freq 5515 */
+ { 104, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x00000, 0x3 }, /* Freq 5520 */
+ { 105, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x02AAA, 0x3 }, /* Freq 5525 */
+ { 106, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x05555, 0x3 }, /* Freq 5530 */
+ { 107, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x08000, 0x3 }, /* Freq 5535 */
+ { 108, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x0AAAA, 0x3 }, /* Freq 5540 */
+ { 109, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x0D555, 0x3 }, /* Freq 5545 */
+ { 110, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x10000, 0x3 }, /* Freq 5550 */
+ { 111, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x12AAA, 0x3 }, /* Freq 5555 */
+ { 112, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x15555, 0x3 }, /* Freq 5560 */
+ { 113, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x18000, 0x3 }, /* Freq 5565 */
+ { 114, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x1AAAA, 0x3 }, /* Freq 5570 */
+ { 115, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x1D555, 0x3 }, /* Freq 5575 */
+ { 116, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x20000, 0x3 }, /* Freq 5580 */
+ { 117, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x22AAA, 0x3 }, /* Freq 5585 */
+ { 118, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x25555, 0x3 }, /* Freq 5590 */
+ { 119, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x28000, 0x3 }, /* Freq 5595 */
+ { 120, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x2AAAA, 0x3 }, /* Freq 5600 */
+ { 121, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x2D555, 0x3 }, /* Freq 5605 */
+ { 122, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x30000, 0x3 }, /* Freq 5610 */
+ { 123, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x32AAA, 0x3 }, /* Freq 5615 */
+ { 124, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x35555, 0x3 }, /* Freq 5620 */
+ { 125, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x38000, 0x3 }, /* Freq 5625 */
+ { 126, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x3AAAA, 0x3 }, /* Freq 5630 */
+ { 127, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x3D555, 0x3 }, /* Freq 5635 */
+ { 128, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x00000, 0x3 }, /* Freq 5640 */
+ { 129, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x02AAA, 0x3 }, /* Freq 5645 */
+ { 130, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x05555, 0x3 }, /* Freq 5650 */
+ { 131, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x08000, 0x3 }, /* Freq 5655 */
+ { 132, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x0AAAA, 0x3 }, /* Freq 5660 */
+ { 133, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x0D555, 0x3 }, /* Freq 5665 */
+ { 134, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x10000, 0x3 }, /* Freq 5670 */
+ { 135, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x12AAA, 0x3 }, /* Freq 5675 */
+ { 136, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x15555, 0x3 }, /* Freq 5680 */
+ { 137, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x18000, 0x3 }, /* Freq 5685 */
+ { 138, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x1AAAA, 0x3 }, /* Freq 5690 */
+ { 139, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x1D555, 0x3 }, /* Freq 5695 */
+ { 140, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x20000, 0x3 }, /* Freq 5700 */
+ { 141, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x22AAA, 0x3 }, /* Freq 5705 */
+ { 142, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x25555, 0x3 }, /* Freq 5710 */
+ { 143, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x28000, 0x3 }, /* Freq 5715 */
+ { 144, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x2AAAA, 0x3 }, /* Freq 5720 */
+ { 145, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x2D555, 0x3 }, /* Freq 5725 */
+ { 146, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x30000, 0x3 }, /* Freq 5730 */
+ { 147, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x32AAA, 0x3 }, /* Freq 5735 */
+ { 148, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x35555, 0x3 }, /* Freq 5740 */
+ { 149, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x38000, 0x3 }, /* Freq 5745 */
+ { 150, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x3AAAA, 0x3 }, /* Freq 5750 */
+ { 151, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x3D555, 0x3 }, /* Freq 5755 */
+ { 152, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x00000, 0x3 }, /* Freq 5760 */
+ { 153, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x02AAA, 0x3 }, /* Freq 5765 */
+ { 154, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x05555, 0x3 }, /* Freq 5770 */
+ { 155, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x08000, 0x3 }, /* Freq 5775 */
+ { 156, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x0AAAA, 0x3 }, /* Freq 5780 */
+ { 157, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x0D555, 0x3 }, /* Freq 5785 */
+ { 158, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x10000, 0x3 }, /* Freq 5790 */
+ { 159, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x12AAA, 0x3 }, /* Freq 5795 */
+ { 160, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x15555, 0x3 }, /* Freq 5800 */
+ { 161, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x18000, 0x3 }, /* Freq 5805 */
+ { 162, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x1AAAA, 0x3 }, /* Freq 5810 */
+ { 163, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x1D555, 0x3 }, /* Freq 5815 */
+ { 164, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x20000, 0x3 }, /* Freq 5820 */
+ { 165, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x22AAA, 0x3 }, /* Freq 5825 */
+ { 166, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x25555, 0x3 }, /* Freq 5830 */
+ { 167, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x28000, 0x3 }, /* Freq 5835 */
+ { 168, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x2AAAA, 0x3 }, /* Freq 5840 */
+ { 169, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x2D555, 0x3 }, /* Freq 5845 */
+ { 170, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x30000, 0x3 }, /* Freq 5850 */
+ { 171, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x32AAA, 0x3 }, /* Freq 5855 */
+ { 172, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x35555, 0x3 }, /* Freq 5860 */
+ { 173, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x38000, 0x3 }, /* Freq 5865 */
+};
+
+static const u8 mt76x0_sdm_channel[] = {
+ 183, 185, 43, 45,
+ 54, 55, 57, 58,
+ 102, 103, 105, 106,
+ 115, 117, 126, 127,
+ 129, 130, 139, 141,
+ 150, 151, 153, 154,
+ 163, 165
+};
+
+static const struct mt76x0_rf_switch_item mt76x0_rf_ext_pa_tab[] = {
+ { MT_RF(6, 45), RF_A_BAND_LB, 0x63 },
+ { MT_RF(6, 45), RF_A_BAND_MB, 0x43 },
+ { MT_RF(6, 45), RF_A_BAND_HB, 0x33 },
+ { MT_RF(6, 45), RF_A_BAND_11J, 0x73 },
+ { MT_RF(6, 50), RF_A_BAND_LB, 0x02 },
+ { MT_RF(6, 50), RF_A_BAND_MB, 0x02 },
+ { MT_RF(6, 50), RF_A_BAND_HB, 0x02 },
+ { MT_RF(6, 50), RF_A_BAND_11J, 0x02 },
+ { MT_RF(6, 51), RF_A_BAND_LB, 0x02 },
+ { MT_RF(6, 51), RF_A_BAND_MB, 0x02 },
+ { MT_RF(6, 51), RF_A_BAND_HB, 0x02 },
+ { MT_RF(6, 51), RF_A_BAND_11J, 0x02 },
+ { MT_RF(6, 52), RF_A_BAND_LB, 0x08 },
+ { MT_RF(6, 52), RF_A_BAND_MB, 0x08 },
+ { MT_RF(6, 52), RF_A_BAND_HB, 0x08 },
+ { MT_RF(6, 52), RF_A_BAND_11J, 0x08 },
+ { MT_RF(6, 53), RF_A_BAND_LB, 0x08 },
+ { MT_RF(6, 53), RF_A_BAND_MB, 0x08 },
+ { MT_RF(6, 53), RF_A_BAND_HB, 0x08 },
+ { MT_RF(6, 53), RF_A_BAND_11J, 0x08 },
+ { MT_RF(6, 54), RF_A_BAND_LB, 0x0A },
+ { MT_RF(6, 54), RF_A_BAND_MB, 0x0A },
+ { MT_RF(6, 54), RF_A_BAND_HB, 0x0A },
+ { MT_RF(6, 54), RF_A_BAND_11J, 0x0A },
+ { MT_RF(6, 55), RF_A_BAND_LB, 0x0A },
+ { MT_RF(6, 55), RF_A_BAND_MB, 0x0A },
+ { MT_RF(6, 55), RF_A_BAND_HB, 0x0A },
+ { MT_RF(6, 55), RF_A_BAND_11J, 0x0A },
+ { MT_RF(6, 56), RF_A_BAND_LB, 0x05 },
+ { MT_RF(6, 56), RF_A_BAND_MB, 0x05 },
+ { MT_RF(6, 56), RF_A_BAND_HB, 0x05 },
+ { MT_RF(6, 56), RF_A_BAND_11J, 0x05 },
+ { MT_RF(6, 57), RF_A_BAND_LB, 0x05 },
+ { MT_RF(6, 57), RF_A_BAND_MB, 0x05 },
+ { MT_RF(6, 57), RF_A_BAND_HB, 0x05 },
+ { MT_RF(6, 57), RF_A_BAND_11J, 0x05 },
+ { MT_RF(6, 58), RF_A_BAND_LB, 0x05 },
+ { MT_RF(6, 58), RF_A_BAND_MB, 0x03 },
+ { MT_RF(6, 58), RF_A_BAND_HB, 0x02 },
+ { MT_RF(6, 58), RF_A_BAND_11J, 0x07 },
+ { MT_RF(6, 59), RF_A_BAND_LB, 0x05 },
+ { MT_RF(6, 59), RF_A_BAND_MB, 0x03 },
+ { MT_RF(6, 59), RF_A_BAND_HB, 0x02 },
+ { MT_RF(6, 59), RF_A_BAND_11J, 0x07 },
+};
+
+#endif
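
The 5 GHz rows above advance pll_sdm_k by 0x02AAA per 5 MHz channel step (roughly 0x40000 / 24) and bump pll_n whenever the 18-bit fraction wraps, e.g. at 5640 MHz (pll_n 0x2F, fraction 0) and again at 5760 MHz (pll_n 0x30). A minimal sketch of that progression, derived only from the table values shown here and valid for the 5640-5865 MHz span (the function is illustrative, not part of the driver):

static void sketch_pll_words(unsigned int freq_mhz, u8 *pll_n, u32 *sdm_k)
{
	/* 5 MHz channel steps from the 5640 MHz / pll_n 0x2F anchor row */
	unsigned int steps = (freq_mhz - 5640) / 5;
	/* 0x02AAA per step; multiply first so the rounding matches the table */
	u32 frac = steps * 0x40000 / 24;

	*pll_n = 0x2f + frac / 0x40000;	/* integer part of the divider */
	*sdm_k = frac % 0x40000;	/* 18-bit sigma-delta fraction */
}

For example, 5755 MHz is 23 steps from the anchor, giving frac = 23 * 0x40000 / 24 = 0x3D555 with pll_n still 0x2F, which matches the channel 151 row above.
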
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
new file mode 100644
index 000000000..700ae9c12
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ */
+
+#include <linux/etherdevice.h>
+#include "mt76x0.h"
+
+static void
+mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
+{
+ cancel_delayed_work_sync(&dev->cal_work);
+ mt76x02_pre_tbtt_enable(dev, false);
+ if (mt76_is_mmio(&dev->mt76))
+ tasklet_disable(&dev->dfs_pd.dfs_tasklet);
+
+ mt76_set_channel(&dev->mphy);
+ mt76x0_phy_set_channel(dev, chandef);
+
+ mt76x02_mac_cc_reset(dev);
+ mt76x02_edcca_init(dev);
+
+ if (mt76_is_mmio(&dev->mt76)) {
+ mt76x02_dfs_init_params(dev);
+ tasklet_enable(&dev->dfs_pd.dfs_tasklet);
+ }
+ mt76x02_pre_tbtt_enable(dev, true);
+
+ mt76_txq_schedule_all(&dev->mphy);
+}
+
+int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct mt76x02_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ ieee80211_stop_queues(hw);
+ mt76x0_set_channel(dev, &hw->conf.chandef);
+ ieee80211_wake_queues(hw);
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
+ dev->txpower_conf = hw->conf.power_level * 2;
+
+ if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
+ mt76x0_phy_set_txpower(dev);
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
+ dev->mt76.rxfilter |= MT_RX_FILTR_CFG_PROMISC;
+ else
+ dev->mt76.rxfilter &= ~MT_RX_FILTR_CFG_PROMISC;
+
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
+ }
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x0_config);
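
Note the factor of two in the power handling above: mac80211 passes hw->conf.power_level in dBm, and doubling it into txpower_conf puts it in half-dB steps, the granularity the rate-power limiting in mt76x0_phy_set_txpower() (later in this patch) operates on; e.g. a 17 dBm request is stored as txpower_conf = 34.
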
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h
new file mode 100644
index 000000000..0ef29f15f
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#ifndef __MT76X0U_MCU_H
+#define __MT76X0U_MCU_H
+
+#include "../mt76x02_mcu.h"
+
+struct mt76x02_dev;
+
+#define MT_MCU_IVB_SIZE 0x40
+#define MT_MCU_DLM_OFFSET 0x80000
+
+/* We use the same space for BBP as for MAC regs
+ * #define MT_MCU_MEMMAP_BBP 0x40000000
+ */
+#define MT_MCU_MEMMAP_RF 0x80000000
+
+enum mcu_calibrate {
+ MCU_CAL_R = 1,
+ MCU_CAL_RXDCOC,
+ MCU_CAL_LC,
+ MCU_CAL_LOFT,
+ MCU_CAL_TXIQ,
+ MCU_CAL_BW,
+ MCU_CAL_DPD,
+ MCU_CAL_RXIQ,
+ MCU_CAL_TXDCOC,
+ MCU_CAL_RX_GROUP_DELAY,
+ MCU_CAL_TX_GROUP_DELAY,
+ MCU_CAL_VCO,
+ MCU_CAL_NO_SIGNAL = 0xfe,
+ MCU_CAL_FULL = 0xff,
+};
+
+int mt76x0e_mcu_init(struct mt76x02_dev *dev);
+int mt76x0u_mcu_init(struct mt76x02_dev *dev);
+static inline int mt76x0_firmware_running(struct mt76x02_dev *dev)
+{
+ return mt76_rr(dev, MT_MCU_COM_REG0) == 1;
+}
+
+#endif
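
The mcu_calibrate IDs above are the opcodes mt76x0 feeds to the shared mt76x02 MCU calibration helper (phy.c in this patch calls mt76x02_mcu_calibrate() with MCU_CAL_R, MCU_CAL_VCO, MCU_CAL_FULL, MCU_CAL_LC and MCU_CAL_RXDCOC). A minimal sketch of a guarded call, assuming mt76x02_mcu_calibrate() returns an errno-style int as elsewhere in mt76x02 (the wrapper itself is illustrative, not part of the driver):

static int sketch_calibrate(struct mt76x02_dev *dev,
			    enum mcu_calibrate type, u32 param)
{
	/* MT_MCU_COM_REG0 == 1 is the firmware-ready handshake checked here */
	if (!mt76x0_firmware_running(dev))
		return -EAGAIN;

	return mt76x02_mcu_calibrate(dev, type, param);
}
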
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
new file mode 100644
index 000000000..6953f253a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ */
+
+#ifndef MT76X0U_H
+#define MT76X0U_H
+
+#include <linux/bitfield.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/usb.h>
+#include <linux/completion.h>
+#include <net/mac80211.h>
+#include <linux/debugfs.h>
+
+#include "../mt76x02.h"
+#include "eeprom.h"
+
+#define MT7610E_FIRMWARE "mediatek/mt7610e.bin"
+#define MT7650E_FIRMWARE "mediatek/mt7650e.bin"
+
+#define MT7610U_FIRMWARE "mediatek/mt7610u.bin"
+
+#define MT_USB_AGGR_SIZE_LIMIT 21 /* * 1024B */
+#define MT_USB_AGGR_TIMEOUT 0x80 /* * 33ns */
+
+static inline bool is_mt7610e(struct mt76x02_dev *dev)
+{
+ if (!mt76_is_mmio(&dev->mt76))
+ return false;
+
+ return mt76_chip(&dev->mt76) == 0x7610;
+}
+
+static inline bool is_mt7630(struct mt76x02_dev *dev)
+{
+ return mt76_chip(&dev->mt76) == 0x7630;
+}
+
+/* Init */
+int mt76x0_init_hardware(struct mt76x02_dev *dev);
+int mt76x0_register_device(struct mt76x02_dev *dev);
+void mt76x0_chip_onoff(struct mt76x02_dev *dev, bool enable, bool reset);
+
+void mt76x0_mac_stop(struct mt76x02_dev *dev);
+
+int mt76x0_config(struct ieee80211_hw *hw, u32 changed);
+
+/* PHY */
+void mt76x0_phy_init(struct mt76x02_dev *dev);
+int mt76x0_phy_wait_bbp_ready(struct mt76x02_dev *dev);
+void mt76x0_phy_set_channel(struct mt76x02_dev *dev,
+ struct cfg80211_chan_def *chandef);
+void mt76x0_phy_set_txpower(struct mt76x02_dev *dev);
+void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on);
+#endif
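
Spelling out the unit comments above: the USB bulk-aggregation size limit works out to 21 * 1024 B = 21504 bytes, and the aggregation timeout to 0x80 * 33 ns = 128 * 33 ns ≈ 4.2 us; both are raw hardware-register units, so the nanosecond figure is approximate.
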
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
new file mode 100644
index 000000000..b87d8e136
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "mt76x0.h"
+#include "mcu.h"
+
+static int mt76x0e_start(struct ieee80211_hw *hw)
+{
+ struct mt76x02_dev *dev = hw->priv;
+
+ mt76x02_mac_start(dev);
+ mt76x0_phy_calibrate(dev, true);
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mt76.mac_work,
+ MT_MAC_WORK_INTERVAL);
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+ set_bit(MT76_STATE_RUNNING, &dev->mphy.state);
+
+ return 0;
+}
+
+static void mt76x0e_stop_hw(struct mt76x02_dev *dev)
+{
+ cancel_delayed_work_sync(&dev->cal_work);
+ cancel_delayed_work_sync(&dev->mt76.mac_work);
+ clear_bit(MT76_RESTART, &dev->mphy.state);
+
+ if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY,
+ 0, 1000))
+ dev_warn(dev->mt76.dev, "TX DMA did not stop\n");
+ mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_EN);
+
+ mt76x0_mac_stop(dev);
+
+ if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
+ 0, 1000))
+ dev_warn(dev->mt76.dev, "RX DMA did not stop\n");
+ mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_RX_DMA_EN);
+}
+
+static void mt76x0e_stop(struct ieee80211_hw *hw)
+{
+ struct mt76x02_dev *dev = hw->priv;
+
+ clear_bit(MT76_STATE_RUNNING, &dev->mphy.state);
+ mt76x0e_stop_hw(dev);
+}
+
+static void
+mt76x0e_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+}
+
+static const struct ieee80211_ops mt76x0e_ops = {
+ .tx = mt76x02_tx,
+ .start = mt76x0e_start,
+ .stop = mt76x0e_stop,
+ .add_interface = mt76x02_add_interface,
+ .remove_interface = mt76x02_remove_interface,
+ .config = mt76x0_config,
+ .configure_filter = mt76x02_configure_filter,
+ .bss_info_changed = mt76x02_bss_info_changed,
+ .sta_state = mt76_sta_state,
+ .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
+ .set_key = mt76x02_set_key,
+ .conf_tx = mt76x02_conf_tx,
+ .sw_scan_start = mt76_sw_scan,
+ .sw_scan_complete = mt76x02_sw_scan_complete,
+ .ampdu_action = mt76x02_ampdu_action,
+ .sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
+ .wake_tx_queue = mt76_wake_tx_queue,
+ .get_survey = mt76_get_survey,
+ .get_txpower = mt76_get_txpower,
+ .flush = mt76x0e_flush,
+ .set_tim = mt76_set_tim,
+ .release_buffered_frames = mt76_release_buffered_frames,
+ .set_coverage_class = mt76x02_set_coverage_class,
+ .set_rts_threshold = mt76x02_set_rts_threshold,
+ .get_antenna = mt76_get_antenna,
+ .reconfig_complete = mt76x02_reconfig_complete,
+};
+
+static int mt76x0e_register_device(struct mt76x02_dev *dev)
+{
+ int err;
+
+ mt76x0_chip_onoff(dev, true, false);
+ if (!mt76x02_wait_for_mac(&dev->mt76))
+ return -ETIMEDOUT;
+
+ mt76x02_dma_disable(dev);
+ err = mt76x0e_mcu_init(dev);
+ if (err < 0)
+ return err;
+
+ err = mt76x02_dma_init(dev);
+ if (err < 0)
+ return err;
+
+ err = mt76x0_init_hardware(dev);
+ if (err < 0)
+ return err;
+
+ mt76x02e_init_beacon_config(dev);
+
+ if (mt76_chip(&dev->mt76) == 0x7610) {
+ u16 val;
+
+ mt76_clear(dev, MT_COEXCFG0, BIT(0));
+
+ val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0);
+ if (!(val & MT_EE_NIC_CONF_0_PA_IO_CURRENT))
+ mt76_set(dev, MT_XO_CTRL7, 0xc03);
+ }
+
+ mt76_clear(dev, 0x110, BIT(9));
+ mt76_set(dev, MT_MAX_LEN_CFG, BIT(13));
+
+ err = mt76x0_register_device(dev);
+ if (err < 0)
+ return err;
+
+ set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
+
+ return 0;
+}
+
+static int
+mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ static const struct mt76_driver_ops drv_ops = {
+ .txwi_size = sizeof(struct mt76x02_txwi),
+ .drv_flags = MT_DRV_TX_ALIGNED4_SKBS |
+ MT_DRV_SW_RX_AIRTIME,
+ .survey_flags = SURVEY_INFO_TIME_TX,
+ .update_survey = mt76x02_update_channel,
+ .tx_prepare_skb = mt76x02_tx_prepare_skb,
+ .tx_complete_skb = mt76x02_tx_complete_skb,
+ .rx_skb = mt76x02_queue_rx_skb,
+ .rx_poll_complete = mt76x02_rx_poll_complete,
+ .sta_ps = mt76x02_sta_ps,
+ .sta_add = mt76x02_sta_add,
+ .sta_remove = mt76x02_sta_remove,
+ };
+ struct mt76x02_dev *dev;
+ struct mt76_dev *mdev;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt76x0e_ops,
+ &drv_ops);
+ if (!mdev)
+ return -ENOMEM;
+
+ dev = container_of(mdev, struct mt76x02_dev, mt76);
+ mutex_init(&dev->phy_mutex);
+
+ mt76_mmio_init(mdev, pcim_iomap_table(pdev)[0]);
+
+ mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
+ dev_info(mdev->dev, "ASIC revision: %08x\n", mdev->rev);
+
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+
+ ret = devm_request_irq(mdev->dev, pdev->irq, mt76x02_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+ if (ret)
+ goto error;
+
+ ret = mt76x0e_register_device(dev);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ mt76_free_device(&dev->mt76);
+
+ return ret;
+}
+
+static void mt76x0e_cleanup(struct mt76x02_dev *dev)
+{
+ clear_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
+ tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
+ mt76x0_chip_onoff(dev, false, false);
+ mt76x0e_stop_hw(dev);
+ mt76_dma_cleanup(&dev->mt76);
+ mt76x02_mcu_cleanup(dev);
+}
+
+static void
+mt76x0e_remove(struct pci_dev *pdev)
+{
+ struct mt76_dev *mdev = pci_get_drvdata(pdev);
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+
+ mt76_unregister_device(mdev);
+ mt76x0e_cleanup(dev);
+ mt76_free_device(mdev);
+}
+
+static const struct pci_device_id mt76x0e_device_table[] = {
+ { PCI_DEVICE(0x14c3, 0x7610) },
+ { PCI_DEVICE(0x14c3, 0x7630) },
+ { PCI_DEVICE(0x14c3, 0x7650) },
+ { },
+};
+
+MODULE_DEVICE_TABLE(pci, mt76x0e_device_table);
+MODULE_FIRMWARE(MT7610E_FIRMWARE);
+MODULE_FIRMWARE(MT7650E_FIRMWARE);
+MODULE_LICENSE("Dual BSD/GPL");
+
+static struct pci_driver mt76x0e_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mt76x0e_device_table,
+ .probe = mt76x0e_probe,
+ .remove = mt76x0e_remove,
+};
+
+module_pci_driver(mt76x0e_driver);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c
new file mode 100644
index 000000000..007c762c6
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+
+#include "mt76x0.h"
+#include "mcu.h"
+
+#define MT_MCU_IVB_ADDR (MT_MCU_ILM_ADDR + 0x54000 - MT_MCU_IVB_SIZE)
+
+static int mt76x0e_load_firmware(struct mt76x02_dev *dev)
+{
+ bool is_combo_chip = mt76_chip(&dev->mt76) != 0x7610;
+ u32 val, ilm_len, dlm_len, offset = 0;
+ const struct mt76x02_fw_header *hdr;
+ const struct firmware *fw;
+ const char *firmware;
+ const u8 *fw_payload;
+ int len, err;
+
+ if (is_combo_chip)
+ firmware = MT7650E_FIRMWARE;
+ else
+ firmware = MT7610E_FIRMWARE;
+
+ err = request_firmware(&fw, firmware, dev->mt76.dev);
+ if (err)
+ return err;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+ err = -EIO;
+ goto out;
+ }
+
+ hdr = (const struct mt76x02_fw_header *)fw->data;
+
+ len = sizeof(*hdr);
+ len += le32_to_cpu(hdr->ilm_len);
+ len += le32_to_cpu(hdr->dlm_len);
+
+ if (fw->size != len) {
+ err = -EIO;
+ goto out;
+ }
+
+ fw_payload = fw->data + sizeof(*hdr);
+
+ val = le16_to_cpu(hdr->fw_ver);
+ dev_info(dev->mt76.dev, "Firmware Version: %d.%d.%02d\n",
+ (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf);
+
+ val = le16_to_cpu(hdr->fw_ver);
+ dev_dbg(dev->mt76.dev,
+ "Firmware Version: %d.%d.%02d Build: %x Build time: %.16s\n",
+ (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf,
+ le16_to_cpu(hdr->build_ver), hdr->build_time);
+
+ if (is_combo_chip && !mt76_poll(dev, MT_MCU_SEMAPHORE_00, 1, 1, 600)) {
+ dev_err(dev->mt76.dev,
+ "Could not get hardware semaphore for loading fw\n");
+ err = -ETIMEDOUT;
+ goto out;
+ }
+
+ /* upload ILM. */
+ mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);
+ ilm_len = le32_to_cpu(hdr->ilm_len);
+ if (is_combo_chip) {
+ ilm_len -= MT_MCU_IVB_SIZE;
+ offset = MT_MCU_IVB_SIZE;
+ }
+ dev_dbg(dev->mt76.dev, "loading FW - ILM %u\n", ilm_len);
+ mt76_wr_copy(dev, MT_MCU_ILM_ADDR + offset, fw_payload + offset,
+ ilm_len);
+
+ /* upload IVB. */
+ if (is_combo_chip) {
+ dev_dbg(dev->mt76.dev, "loading FW - IVB %u\n",
+ MT_MCU_IVB_SIZE);
+ mt76_wr_copy(dev, MT_MCU_IVB_ADDR, fw_payload, MT_MCU_IVB_SIZE);
+ }
+
+ /* upload DLM. */
+ mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_DLM_OFFSET);
+ dlm_len = le32_to_cpu(hdr->dlm_len);
+ dev_dbg(dev->mt76.dev, "loading FW - DLM %u\n", dlm_len);
+ mt76_wr_copy(dev, MT_MCU_ILM_ADDR,
+ fw_payload + le32_to_cpu(hdr->ilm_len), dlm_len);
+
+ /* trigger firmware */
+ mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);
+ if (is_combo_chip)
+ mt76_wr(dev, MT_MCU_INT_LEVEL, 0x3);
+ else
+ mt76_wr(dev, MT_MCU_RESET_CTL, 0x300);
+
+ if (!mt76_poll_msec(dev, MT_MCU_COM_REG0, 1, 1, 1000)) {
+ dev_err(dev->mt76.dev, "Firmware failed to start\n");
+ err = -ETIMEDOUT;
+ goto out;
+ }
+
+ mt76x02_set_ethtool_fwver(dev, hdr);
+ dev_dbg(dev->mt76.dev, "Firmware running!\n");
+
+out:
+ if (is_combo_chip)
+ mt76_wr(dev, MT_MCU_SEMAPHORE_00, 0x1);
+ release_firmware(fw);
+
+ return err;
+}
+
+int mt76x0e_mcu_init(struct mt76x02_dev *dev)
+{
+ static const struct mt76_mcu_ops mt76x0e_mcu_ops = {
+ .mcu_send_msg = mt76x02_mcu_msg_send,
+ };
+ int err;
+
+ dev->mt76.mcu_ops = &mt76x0e_mcu_ops;
+
+ err = mt76x0e_load_firmware(dev);
+ if (err < 0)
+ return err;
+
+ set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
+
+ return 0;
+}
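
For reference, the firmware blob parsed above is a header followed by the ILM and DLM payloads, with the combo-chip interrupt vector block (IVB) occupying the first MT_MCU_IVB_SIZE bytes of the ILM region; a sketch of the offsets exactly as mt76x0e_load_firmware() walks them:

/*
 * fw->data
 *  |-- struct mt76x02_fw_header            (sizeof(*hdr) bytes)
 *  |-- ILM payload                         (hdr->ilm_len bytes)
 *  |     '-- first MT_MCU_IVB_SIZE (0x40) bytes: IVB, written separately
 *  |         to MT_MCU_IVB_ADDR on combo (non-0x7610) chips
 *  '-- DLM payload                         (hdr->dlm_len bytes, written at
 *        MT_MCU_ILM_ADDR after remapping BASE4 to MT_MCU_DLM_OFFSET)
 */
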
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
new file mode 100644
index 000000000..e91c314cd
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
@@ -0,0 +1,1213 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ */
+
+#include <linux/kernel.h>
+#include <linux/etherdevice.h>
+
+#include "mt76x0.h"
+#include "mcu.h"
+#include "eeprom.h"
+#include "phy.h"
+#include "initvals.h"
+#include "initvals_phy.h"
+#include "../mt76x02_phy.h"
+
+static int
+mt76x0_rf_csr_wr(struct mt76x02_dev *dev, u32 offset, u8 value)
+{
+ int ret = 0;
+ u8 bank, reg;
+
+ if (test_bit(MT76_REMOVED, &dev->mphy.state))
+ return -ENODEV;
+
+ bank = MT_RF_BANK(offset);
+ reg = MT_RF_REG(offset);
+
+ if (WARN_ON_ONCE(reg > 127) || WARN_ON_ONCE(bank > 8))
+ return -EINVAL;
+
+ mutex_lock(&dev->phy_mutex);
+
+ if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ mt76_wr(dev, MT_RF_CSR_CFG,
+ FIELD_PREP(MT_RF_CSR_CFG_DATA, value) |
+ FIELD_PREP(MT_RF_CSR_CFG_REG_BANK, bank) |
+ FIELD_PREP(MT_RF_CSR_CFG_REG_ID, reg) |
+ MT_RF_CSR_CFG_WR |
+ MT_RF_CSR_CFG_KICK);
+
+out:
+ mutex_unlock(&dev->phy_mutex);
+
+ if (ret < 0)
+ dev_err(dev->mt76.dev, "Error: RF write %d:%d failed:%d!!\n",
+ bank, reg, ret);
+
+ return ret;
+}
+
+static int mt76x0_rf_csr_rr(struct mt76x02_dev *dev, u32 offset)
+{
+ int ret = -ETIMEDOUT;
+ u32 val;
+ u8 bank, reg;
+
+ if (test_bit(MT76_REMOVED, &dev->mphy.state))
+ return -ENODEV;
+
+ bank = MT_RF_BANK(offset);
+ reg = MT_RF_REG(offset);
+
+ if (WARN_ON_ONCE(reg > 127) || WARN_ON_ONCE(bank > 8))
+ return -EINVAL;
+
+ mutex_lock(&dev->phy_mutex);
+
+ if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100))
+ goto out;
+
+ mt76_wr(dev, MT_RF_CSR_CFG,
+ FIELD_PREP(MT_RF_CSR_CFG_REG_BANK, bank) |
+ FIELD_PREP(MT_RF_CSR_CFG_REG_ID, reg) |
+ MT_RF_CSR_CFG_KICK);
+
+ if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100))
+ goto out;
+
+ val = mt76_rr(dev, MT_RF_CSR_CFG);
+ if (FIELD_GET(MT_RF_CSR_CFG_REG_ID, val) == reg &&
+ FIELD_GET(MT_RF_CSR_CFG_REG_BANK, val) == bank)
+ ret = FIELD_GET(MT_RF_CSR_CFG_DATA, val);
+
+out:
+ mutex_unlock(&dev->phy_mutex);
+
+ if (ret < 0)
+ dev_err(dev->mt76.dev, "Error: RF read %d:%d failed:%d!!\n",
+ bank, reg, ret);
+
+ return ret;
+}
+
+static int
+mt76x0_rf_wr(struct mt76x02_dev *dev, u32 offset, u8 val)
+{
+ if (mt76_is_usb(&dev->mt76)) {
+ struct mt76_reg_pair pair = {
+ .reg = offset,
+ .value = val,
+ };
+
+ WARN_ON_ONCE(!test_bit(MT76_STATE_MCU_RUNNING,
+ &dev->mphy.state));
+ return mt76_wr_rp(dev, MT_MCU_MEMMAP_RF, &pair, 1);
+ } else {
+ return mt76x0_rf_csr_wr(dev, offset, val);
+ }
+}
+
+static int mt76x0_rf_rr(struct mt76x02_dev *dev, u32 offset)
+{
+ int ret;
+ u32 val;
+
+ if (mt76_is_usb(&dev->mt76)) {
+ struct mt76_reg_pair pair = {
+ .reg = offset,
+ };
+
+ WARN_ON_ONCE(!test_bit(MT76_STATE_MCU_RUNNING,
+ &dev->mphy.state));
+ ret = mt76_rd_rp(dev, MT_MCU_MEMMAP_RF, &pair, 1);
+ val = pair.value;
+ } else {
+ ret = val = mt76x0_rf_csr_rr(dev, offset);
+ }
+
+ return (ret < 0) ? ret : val;
+}
+
+static int
+mt76x0_rf_rmw(struct mt76x02_dev *dev, u32 offset, u8 mask, u8 val)
+{
+ int ret;
+
+ ret = mt76x0_rf_rr(dev, offset);
+ if (ret < 0)
+ return ret;
+
+ val |= ret & ~mask;
+
+ ret = mt76x0_rf_wr(dev, offset, val);
+ return ret ? ret : val;
+}
+
+static int
+mt76x0_rf_set(struct mt76x02_dev *dev, u32 offset, u8 val)
+{
+ return mt76x0_rf_rmw(dev, offset, 0, val);
+}
+
+static int
+mt76x0_rf_clear(struct mt76x02_dev *dev, u32 offset, u8 mask)
+{
+ return mt76x0_rf_rmw(dev, offset, mask, 0);
+}
+
+static void
+mt76x0_phy_rf_csr_wr_rp(struct mt76x02_dev *dev,
+ const struct mt76_reg_pair *data,
+ int n)
+{
+ while (n-- > 0) {
+ mt76x0_rf_csr_wr(dev, data->reg, data->value);
+ data++;
+ }
+}
+
+#define RF_RANDOM_WRITE(dev, tab) do { \
+ if (mt76_is_mmio(&dev->mt76)) \
+ mt76x0_phy_rf_csr_wr_rp(dev, tab, ARRAY_SIZE(tab)); \
+ else \
+ mt76_wr_rp(dev, MT_MCU_MEMMAP_RF, tab, ARRAY_SIZE(tab));\
+} while (0)
+
+int mt76x0_phy_wait_bbp_ready(struct mt76x02_dev *dev)
+{
+ int i = 20;
+ u32 val;
+
+ do {
+ val = mt76_rr(dev, MT_BBP(CORE, 0));
+ if (val && ~val)
+ break;
+ } while (--i);
+
+ if (!i) {
+ dev_err(dev->mt76.dev, "Error: BBP is not ready\n");
+ return -EIO;
+ }
+
+ dev_dbg(dev->mt76.dev, "BBP version %08x\n", val);
+ return 0;
+}
+
+static void
+mt76x0_phy_set_band(struct mt76x02_dev *dev, enum nl80211_band band)
+{
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ RF_RANDOM_WRITE(dev, mt76x0_rf_2g_channel_0_tab);
+
+ mt76x0_rf_wr(dev, MT_RF(5, 0), 0x45);
+ mt76x0_rf_wr(dev, MT_RF(6, 0), 0x44);
+
+ mt76_wr(dev, MT_TX_ALC_VGA3, 0x00050007);
+ mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x003E0002);
+ break;
+ case NL80211_BAND_5GHZ:
+ RF_RANDOM_WRITE(dev, mt76x0_rf_5g_channel_0_tab);
+
+ mt76x0_rf_wr(dev, MT_RF(5, 0), 0x44);
+ mt76x0_rf_wr(dev, MT_RF(6, 0), 0x45);
+
+ mt76_wr(dev, MT_TX_ALC_VGA3, 0x00000005);
+ mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x01010102);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+mt76x0_phy_set_chan_rf_params(struct mt76x02_dev *dev, u8 channel,
+ u16 rf_bw_band)
+{
+ const struct mt76x0_freq_item *freq_item;
+ u16 rf_band = rf_bw_band & 0xff00;
+ u16 rf_bw = rf_bw_band & 0x00ff;
+ enum nl80211_band band;
+ bool b_sdm = false;
+ u32 mac_reg;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_sdm_channel); i++) {
+ if (channel == mt76x0_sdm_channel[i]) {
+ b_sdm = true;
+ break;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_frequency_plan); i++) {
+ if (channel == mt76x0_frequency_plan[i].channel) {
+ rf_band = mt76x0_frequency_plan[i].band;
+
+ if (b_sdm)
+ freq_item = &mt76x0_sdm_frequency_plan[i];
+ else
+ freq_item = &mt76x0_frequency_plan[i];
+
+ mt76x0_rf_wr(dev, MT_RF(0, 37), freq_item->pllR37);
+ mt76x0_rf_wr(dev, MT_RF(0, 36), freq_item->pllR36);
+ mt76x0_rf_wr(dev, MT_RF(0, 35), freq_item->pllR35);
+ mt76x0_rf_wr(dev, MT_RF(0, 34), freq_item->pllR34);
+ mt76x0_rf_wr(dev, MT_RF(0, 33), freq_item->pllR33);
+
+ mt76x0_rf_rmw(dev, MT_RF(0, 32), 0xe0,
+ freq_item->pllR32_b7b5);
+
+ /* R32<4:0> pll_den: (Denominator - 8) */
+ mt76x0_rf_rmw(dev, MT_RF(0, 32), MT_RF_PLL_DEN_MASK,
+ freq_item->pllR32_b4b0);
+
+ /* R31<7:5> */
+ mt76x0_rf_rmw(dev, MT_RF(0, 31), 0xe0,
+ freq_item->pllR31_b7b5);
+
+ /* R31<4:0> pll_k (Numerator) */
+ mt76x0_rf_rmw(dev, MT_RF(0, 31), MT_RF_PLL_K_MASK,
+ freq_item->pllR31_b4b0);
+
+ /* R30<7> sdm_reset_n */
+ if (b_sdm) {
+ mt76x0_rf_clear(dev, MT_RF(0, 30),
+ MT_RF_SDM_RESET_MASK);
+ mt76x0_rf_set(dev, MT_RF(0, 30),
+ MT_RF_SDM_RESET_MASK);
+ } else {
+ mt76x0_rf_rmw(dev, MT_RF(0, 30),
+ MT_RF_SDM_RESET_MASK,
+ freq_item->pllR30_b7);
+ }
+
+ /* R30<6:2> sdmmash_prbs,sin */
+ mt76x0_rf_rmw(dev, MT_RF(0, 30),
+ MT_RF_SDM_MASH_PRBS_MASK,
+ freq_item->pllR30_b6b2);
+
+ /* R30<1> sdm_bp */
+ mt76x0_rf_rmw(dev, MT_RF(0, 30), MT_RF_SDM_BP_MASK,
+ freq_item->pllR30_b1 << 1);
+
+ /* R30<0> R29<7:0> (hex) pll_n */
+ mt76x0_rf_wr(dev, MT_RF(0, 29),
+ freq_item->pll_n & 0xff);
+
+ mt76x0_rf_rmw(dev, MT_RF(0, 30), 0x1,
+ (freq_item->pll_n >> 8) & 0x1);
+
+ /* R28<7:6> isi_iso */
+ mt76x0_rf_rmw(dev, MT_RF(0, 28), MT_RF_ISI_ISO_MASK,
+ freq_item->pllR28_b7b6);
+
+ /* R28<5:4> pfd_dly */
+ mt76x0_rf_rmw(dev, MT_RF(0, 28), MT_RF_PFD_DLY_MASK,
+ freq_item->pllR28_b5b4);
+
+ /* R28<3:2> clksel option */
+ mt76x0_rf_rmw(dev, MT_RF(0, 28), MT_RF_CLK_SEL_MASK,
+ freq_item->pllR28_b3b2);
+
+ /* R28<1:0> R27<7:0> R26<7:0> (hex) sdm_k */
+ mt76x0_rf_wr(dev, MT_RF(0, 26),
+ freq_item->pll_sdm_k & 0xff);
+ mt76x0_rf_wr(dev, MT_RF(0, 27),
+ (freq_item->pll_sdm_k >> 8) & 0xff);
+
+ mt76x0_rf_rmw(dev, MT_RF(0, 28), 0x3,
+ (freq_item->pll_sdm_k >> 16) & 0x3);
+
+ /* R24<1:0> xo_div */
+ mt76x0_rf_rmw(dev, MT_RF(0, 24), MT_RF_XO_DIV_MASK,
+ freq_item->pllR24_b1b0);
+
+ break;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_rf_bw_switch_tab); i++) {
+ if (rf_bw == mt76x0_rf_bw_switch_tab[i].bw_band) {
+ mt76x0_rf_wr(dev,
+ mt76x0_rf_bw_switch_tab[i].rf_bank_reg,
+ mt76x0_rf_bw_switch_tab[i].value);
+ } else if ((rf_bw == (mt76x0_rf_bw_switch_tab[i].bw_band & 0xFF)) &&
+ (rf_band & mt76x0_rf_bw_switch_tab[i].bw_band)) {
+ mt76x0_rf_wr(dev,
+ mt76x0_rf_bw_switch_tab[i].rf_bank_reg,
+ mt76x0_rf_bw_switch_tab[i].value);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_rf_band_switch_tab); i++) {
+ if (mt76x0_rf_band_switch_tab[i].bw_band & rf_band) {
+ mt76x0_rf_wr(dev,
+ mt76x0_rf_band_switch_tab[i].rf_bank_reg,
+ mt76x0_rf_band_switch_tab[i].value);
+ }
+ }
+
+ mt76_clear(dev, MT_RF_MISC, 0xc);
+
+ band = (rf_band & RF_G_BAND) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+ if (mt76x02_ext_pa_enabled(dev, band)) {
+ /* MT_RF_MISC (offset: 0x0518)
+ * [2]1'b1: enable external A band PA
+ * 1'b0: disable external A band PA
+ * [3]1'b1: enable external G band PA
+ * 1'b0: disable external G band PA
+ */
+ if (rf_band & RF_A_BAND)
+ mt76_set(dev, MT_RF_MISC, BIT(2));
+ else
+ mt76_set(dev, MT_RF_MISC, BIT(3));
+
+ /* External PA */
+ for (i = 0; i < ARRAY_SIZE(mt76x0_rf_ext_pa_tab); i++)
+ if (mt76x0_rf_ext_pa_tab[i].bw_band & rf_band)
+ mt76x0_rf_wr(dev,
+ mt76x0_rf_ext_pa_tab[i].rf_bank_reg,
+ mt76x0_rf_ext_pa_tab[i].value);
+ }
+
+ if (rf_band & RF_G_BAND) {
+ mt76_wr(dev, MT_TX0_RF_GAIN_ATTEN, 0x63707400);
+ /* Set Atten mode = 2 for G band, disable Tx Inc dcoc. */
+ mac_reg = mt76_rr(dev, MT_TX_ALC_CFG_1);
+ mac_reg &= 0x896400FF;
+ mt76_wr(dev, MT_TX_ALC_CFG_1, mac_reg);
+ } else {
+ mt76_wr(dev, MT_TX0_RF_GAIN_ATTEN, 0x686A7800);
+ /* Set Atten mode = 0
+ * For Ext A band, Disable Tx Inc dcoc Cal.
+ */
+ mac_reg = mt76_rr(dev, MT_TX_ALC_CFG_1);
+ mac_reg &= 0x890400FF;
+ mt76_wr(dev, MT_TX_ALC_CFG_1, mac_reg);
+ }
+}
+
+static void
+mt76x0_phy_set_chan_bbp_params(struct mt76x02_dev *dev, u16 rf_bw_band)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_bbp_switch_tab); i++) {
+ const struct mt76x0_bbp_switch_item *item = &mt76x0_bbp_switch_tab[i];
+ const struct mt76_reg_pair *pair = &item->reg_pair;
+
+ if ((rf_bw_band & item->bw_band) != rf_bw_band)
+ continue;
+
+ if (pair->reg == MT_BBP(AGC, 8)) {
+ u32 val = pair->value;
+ u8 gain;
+
+ gain = FIELD_GET(MT_BBP_AGC_GAIN, val);
+ gain -= dev->cal.rx.lna_gain * 2;
+ val &= ~MT_BBP_AGC_GAIN;
+ val |= FIELD_PREP(MT_BBP_AGC_GAIN, gain);
+ mt76_wr(dev, pair->reg, val);
+ } else {
+ mt76_wr(dev, pair->reg, pair->value);
+ }
+ }
+}
+
+static void mt76x0_phy_ant_select(struct mt76x02_dev *dev)
+{
+ u16 ee_ant = mt76x02_eeprom_get(dev, MT_EE_ANTENNA);
+ u16 ee_cfg1 = mt76x02_eeprom_get(dev, MT_EE_CFG1_INIT);
+ u16 nic_conf2 = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_2);
+ u32 wlan, coex3;
+ bool ant_div;
+
+ wlan = mt76_rr(dev, MT_WLAN_FUN_CTRL);
+ coex3 = mt76_rr(dev, MT_COEXCFG3);
+
+ ee_ant &= ~(BIT(14) | BIT(12));
+ wlan &= ~(BIT(6) | BIT(5));
+ coex3 &= ~GENMASK(5, 2);
+
+ if (ee_ant & MT_EE_ANTENNA_DUAL) {
+ /* dual antenna mode */
+ ant_div = !(nic_conf2 & MT_EE_NIC_CONF_2_ANT_OPT) &&
+ (nic_conf2 & MT_EE_NIC_CONF_2_ANT_DIV);
+ if (ant_div)
+ ee_ant |= BIT(12);
+ else
+ coex3 |= BIT(4);
+ coex3 |= BIT(3);
+ if (dev->mphy.cap.has_2ghz)
+ wlan |= BIT(6);
+ } else {
+ /* single antenna mode */
+ if (dev->mphy.cap.has_5ghz) {
+ coex3 |= BIT(3) | BIT(4);
+ } else {
+ wlan |= BIT(6);
+ coex3 |= BIT(1);
+ }
+ }
+
+ if (is_mt7630(dev))
+ ee_ant |= BIT(14) | BIT(11);
+
+ mt76_wr(dev, MT_WLAN_FUN_CTRL, wlan);
+ mt76_rmw(dev, MT_CMB_CTRL, GENMASK(15, 0), ee_ant);
+ mt76_rmw(dev, MT_CSR_EE_CFG1, GENMASK(15, 0), ee_cfg1);
+ mt76_clear(dev, MT_COEXCFG0, BIT(2));
+ mt76_wr(dev, MT_COEXCFG3, coex3);
+}
+
+static void
+mt76x0_phy_bbp_set_bw(struct mt76x02_dev *dev, enum nl80211_chan_width width)
+{
+ enum { BW_20 = 0, BW_40 = 1, BW_80 = 2, BW_10 = 4};
+ int bw;
+
+ switch (width) {
+ default:
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ bw = BW_20;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ bw = BW_40;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ bw = BW_80;
+ break;
+ case NL80211_CHAN_WIDTH_10:
+ bw = BW_10;
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ case NL80211_CHAN_WIDTH_5:
+ /* TODO error */
+ return;
+ }
+
+ mt76x02_mcu_function_select(dev, BW_SETTING, bw);
+}
+
+static void mt76x0_phy_tssi_dc_calibrate(struct mt76x02_dev *dev)
+{
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
+ u32 val;
+
+ if (chan->band == NL80211_BAND_5GHZ)
+ mt76x0_rf_clear(dev, MT_RF(0, 67), 0xf);
+
+ /* bypass ADDA control */
+ mt76_wr(dev, MT_RF_SETTING_0, 0x60002237);
+ mt76_wr(dev, MT_RF_BYPASS_0, 0xffffffff);
+
+ /* bbp sw reset */
+ mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
+ usleep_range(500, 1000);
+ mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
+
+ val = (chan->band == NL80211_BAND_5GHZ) ? 0x80055 : 0x80050;
+ mt76_wr(dev, MT_BBP(CORE, 34), val);
+
+ /* enable TX with DAC0 input */
+ mt76_wr(dev, MT_BBP(TXBE, 6), BIT(31));
+
+ mt76_poll_msec(dev, MT_BBP(CORE, 34), BIT(4), 0, 200);
+ dev->cal.tssi_dc = mt76_rr(dev, MT_BBP(CORE, 35)) & 0xff;
+
+ /* stop bypass ADDA */
+ mt76_wr(dev, MT_RF_BYPASS_0, 0);
+ /* stop TX */
+ mt76_wr(dev, MT_BBP(TXBE, 6), 0);
+ /* bbp sw reset */
+ mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
+ usleep_range(500, 1000);
+ mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
+
+ if (chan->band == NL80211_BAND_5GHZ)
+ mt76x0_rf_rmw(dev, MT_RF(0, 67), 0xf, 0x4);
+}
+
+static int
+mt76x0_phy_tssi_adc_calibrate(struct mt76x02_dev *dev, s16 *ltssi,
+ u8 *info)
+{
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
+ u32 val;
+
+ val = (chan->band == NL80211_BAND_5GHZ) ? 0x80055 : 0x80050;
+ mt76_wr(dev, MT_BBP(CORE, 34), val);
+
+ if (!mt76_poll_msec(dev, MT_BBP(CORE, 34), BIT(4), 0, 200)) {
+ mt76_clear(dev, MT_BBP(CORE, 34), BIT(4));
+ return -ETIMEDOUT;
+ }
+
+ *ltssi = mt76_rr(dev, MT_BBP(CORE, 35)) & 0xff;
+ if (chan->band == NL80211_BAND_5GHZ)
+ *ltssi += 128;
+
+ /* set packet info#1 mode */
+ mt76_wr(dev, MT_BBP(CORE, 34), 0x80041);
+ info[0] = mt76_rr(dev, MT_BBP(CORE, 35)) & 0xff;
+
+ /* set packet info#2 mode */
+ mt76_wr(dev, MT_BBP(CORE, 34), 0x80042);
+ info[1] = mt76_rr(dev, MT_BBP(CORE, 35)) & 0xff;
+
+ /* set packet info#3 mode */
+ mt76_wr(dev, MT_BBP(CORE, 34), 0x80043);
+ info[2] = mt76_rr(dev, MT_BBP(CORE, 35)) & 0xff;
+
+ return 0;
+}
+
+static u8 mt76x0_phy_get_rf_pa_mode(struct mt76x02_dev *dev,
+ int index, u8 tx_rate)
+{
+ u32 val, reg;
+
+ reg = (index == 1) ? MT_RF_PA_MODE_CFG1 : MT_RF_PA_MODE_CFG0;
+ val = mt76_rr(dev, reg);
+ return (val & (3 << (tx_rate * 2))) >> (tx_rate * 2);
+}
+
+static int
+mt76x0_phy_get_target_power(struct mt76x02_dev *dev, u8 tx_mode,
+ u8 *info, s8 *target_power,
+ s8 *target_pa_power)
+{
+ u8 tx_rate, cur_power;
+
+ cur_power = mt76_rr(dev, MT_TX_ALC_CFG_0) & MT_TX_ALC_CFG_0_CH_INIT_0;
+ switch (tx_mode) {
+ case 0:
+ /* cck rates */
+ tx_rate = (info[0] & 0x60) >> 5;
+ if (tx_rate > 3)
+ return -EINVAL;
+
+ *target_power = cur_power + dev->mt76.rate_power.cck[tx_rate];
+ *target_pa_power = mt76x0_phy_get_rf_pa_mode(dev, 0, tx_rate);
+ break;
+ case 1: {
+ u8 index;
+
+ /* ofdm rates */
+ tx_rate = (info[0] & 0xf0) >> 4;
+ switch (tx_rate) {
+ case 0xb:
+ index = 0;
+ break;
+ case 0xf:
+ index = 1;
+ break;
+ case 0xa:
+ index = 2;
+ break;
+ case 0xe:
+ index = 3;
+ break;
+ case 0x9:
+ index = 4;
+ break;
+ case 0xd:
+ index = 5;
+ break;
+ case 0x8:
+ index = 6;
+ break;
+ case 0xc:
+ index = 7;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *target_power = cur_power + dev->mt76.rate_power.ofdm[index];
+ *target_pa_power = mt76x0_phy_get_rf_pa_mode(dev, 0, index + 4);
+ break;
+ }
+ case 4:
+ /* vht rates */
+ tx_rate = info[1] & 0xf;
+ if (tx_rate > 9)
+ return -EINVAL;
+
+ *target_power = cur_power + dev->mt76.rate_power.vht[tx_rate];
+ *target_pa_power = mt76x0_phy_get_rf_pa_mode(dev, 1, tx_rate);
+ break;
+ default:
+ /* ht rates */
+ tx_rate = info[1] & 0x7f;
+ if (tx_rate > 9)
+ return -EINVAL;
+
+ *target_power = cur_power + dev->mt76.rate_power.ht[tx_rate];
+ *target_pa_power = mt76x0_phy_get_rf_pa_mode(dev, 1, tx_rate);
+ break;
+ }
+
+ return 0;
+}
+
+static s16 mt76x0_phy_lin2db(u16 val)
+{
+ u32 mantissa = val << 4;
+ int ret, data;
+ s16 exp = -4;
+
+ while (mantissa < BIT(15)) {
+ mantissa <<= 1;
+ if (--exp < -20)
+ return -10000;
+ }
+ while (mantissa > 0xffff) {
+ mantissa >>= 1;
+ if (++exp > 20)
+ return -10000;
+ }
+
+ /* s(15,0) */
+ if (mantissa <= 47104)
+ data = mantissa + (mantissa >> 3) + (mantissa >> 4) - 38400;
+ else
+ data = mantissa - (mantissa >> 3) - (mantissa >> 6) - 23040;
+ data = max_t(int, 0, data);
+
+ ret = ((15 + exp) << 15) + data;
+ ret = (ret << 2) + (ret << 1) + (ret >> 6) + (ret >> 7);
+ return ret >> 10;
+}
+
+static int
+mt76x0_phy_get_delta_power(struct mt76x02_dev *dev, u8 tx_mode,
+ s8 target_power, s8 target_pa_power,
+ s16 ltssi)
+{
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
+ int tssi_target = target_power << 12, tssi_slope;
+ int tssi_offset, tssi_db, ret;
+ u32 data;
+ u16 val;
+
+ if (chan->band == NL80211_BAND_5GHZ) {
+ u8 bound[7];
+ int i, err;
+
+ err = mt76x02_eeprom_copy(dev, MT_EE_TSSI_BOUND1, bound,
+ sizeof(bound));
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(bound); i++) {
+ if (chan->hw_value <= bound[i] || !bound[i])
+ break;
+ }
+ val = mt76x02_eeprom_get(dev, MT_EE_TSSI_SLOPE_5G + i * 2);
+
+ tssi_offset = val >> 8;
+ if ((tssi_offset >= 64 && tssi_offset <= 127) ||
+ (tssi_offset & BIT(7)))
+ tssi_offset -= BIT(8);
+ } else {
+ val = mt76x02_eeprom_get(dev, MT_EE_TSSI_SLOPE_2G);
+
+ tssi_offset = val >> 8;
+ if (tssi_offset & BIT(7))
+ tssi_offset -= BIT(8);
+ }
+ tssi_slope = val & 0xff;
+
+ switch (target_pa_power) {
+ case 1:
+ if (chan->band == NL80211_BAND_2GHZ)
+ tssi_target += 29491; /* 3.6 * 8192 */
+ fallthrough;
+ case 0:
+ break;
+ default:
+ tssi_target += 4424; /* 0.54 * 8192 */
+ break;
+ }
+
+ if (!tx_mode) {
+ data = mt76_rr(dev, MT_BBP(CORE, 1));
+ if (is_mt7630(dev) && mt76_is_mmio(&dev->mt76)) {
+ int offset;
+
+ /* 2.3 * 8192 or 1.5 * 8192 */
+ offset = (data & BIT(5)) ? 18841 : 12288;
+ tssi_target += offset;
+ } else if (data & BIT(5)) {
+ /* 0.8 * 8192 */
+ tssi_target += 6554;
+ }
+ }
+
+ data = mt76_rr(dev, MT_BBP(TXBE, 4));
+ switch (data & 0x3) {
+ case 1:
+ tssi_target -= 49152; /* -6db * 8192 */
+ break;
+ case 2:
+ tssi_target -= 98304; /* -12db * 8192 */
+ break;
+ case 3:
+ tssi_target += 49152; /* 6db * 8192 */
+ break;
+ default:
+ break;
+ }
+
+ tssi_db = mt76x0_phy_lin2db(ltssi - dev->cal.tssi_dc) * tssi_slope;
+ if (chan->band == NL80211_BAND_5GHZ) {
+ tssi_db += ((tssi_offset - 50) << 10); /* offset s4.3 */
+ tssi_target -= tssi_db;
+ if (ltssi > 254 && tssi_target > 0) {
+ /* upper saturate */
+ tssi_target = 0;
+ }
+ } else {
+ tssi_db += (tssi_offset << 9); /* offset s3.4 */
+ tssi_target -= tssi_db;
+ /* upper-lower saturate */
+ if ((ltssi > 126 && tssi_target > 0) ||
+ ((ltssi - dev->cal.tssi_dc) < 1 && tssi_target < 0)) {
+ tssi_target = 0;
+ }
+ }
+
+ if ((dev->cal.tssi_target ^ tssi_target) < 0 &&
+ dev->cal.tssi_target > -4096 && dev->cal.tssi_target < 4096 &&
+ tssi_target > -4096 && tssi_target < 4096) {
+ if ((tssi_target < 0 &&
+ tssi_target + dev->cal.tssi_target > 0) ||
+ (tssi_target > 0 &&
+ tssi_target + dev->cal.tssi_target <= 0))
+ tssi_target = 0;
+ else
+ dev->cal.tssi_target = tssi_target;
+ } else {
+ dev->cal.tssi_target = tssi_target;
+ }
+
+ /* round the compensation value to the nearest compensation code */
+ if (tssi_target > 0)
+ tssi_target += 2048;
+ else
+ tssi_target -= 2048;
+ tssi_target >>= 12;
+
+ ret = mt76_get_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP);
+ if (ret & BIT(5))
+ ret -= BIT(6);
+ ret += tssi_target;
+
+ ret = min_t(int, 31, ret);
+ return max_t(int, -32, ret);
+}
+
+static void mt76x0_phy_tssi_calibrate(struct mt76x02_dev *dev)
+{
+ s8 target_power, target_pa_power;
+ u8 tssi_info[3], tx_mode;
+ s16 ltssi;
+ s8 val;
+
+ if (mt76x0_phy_tssi_adc_calibrate(dev, &ltssi, tssi_info) < 0)
+ return;
+
+ tx_mode = tssi_info[0] & 0x7;
+ if (mt76x0_phy_get_target_power(dev, tx_mode, tssi_info,
+ &target_power, &target_pa_power) < 0)
+ return;
+
+ val = mt76x0_phy_get_delta_power(dev, tx_mode, target_power,
+ target_pa_power, ltssi);
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP, val);
+}
+
+void mt76x0_phy_set_txpower(struct mt76x02_dev *dev)
+{
+ struct mt76_rate_power *t = &dev->mt76.rate_power;
+ s8 info;
+
+ mt76x0_get_tx_power_per_rate(dev, dev->mphy.chandef.chan, t);
+ mt76x0_get_power_info(dev, dev->mphy.chandef.chan, &info);
+
+ mt76x02_add_rate_power_offset(t, info);
+ mt76x02_limit_rate_power(t, dev->txpower_conf);
+ dev->mphy.txpower_cur = mt76x02_get_max_rate_power(t);
+ mt76x02_add_rate_power_offset(t, -info);
+
+ dev->target_power = info;
+ mt76x02_phy_set_txpower(dev, info, info);
+}
+
+void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on)
+{
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
+ int is_5ghz = (chan->band == NL80211_BAND_5GHZ) ? 1 : 0;
+ u32 val, tx_alc, reg_val;
+
+ if (is_mt7630(dev))
+ return;
+
+ if (power_on) {
+ mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_VCO, chan->hw_value);
+ usleep_range(10, 20);
+
+ if (mt76x0_tssi_enabled(dev)) {
+ mt76_wr(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_RX);
+ mt76x0_phy_tssi_dc_calibrate(dev);
+ mt76_wr(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_TX |
+ MT_MAC_SYS_CTRL_ENABLE_RX);
+ }
+ }
+
+ tx_alc = mt76_rr(dev, MT_TX_ALC_CFG_0);
+ mt76_wr(dev, MT_TX_ALC_CFG_0, 0);
+ usleep_range(500, 700);
+
+ reg_val = mt76_rr(dev, MT_BBP(IBI, 9));
+ mt76_wr(dev, MT_BBP(IBI, 9), 0xffffff7e);
+
+ if (is_5ghz) {
+ if (chan->hw_value < 100)
+ val = 0x701;
+ else if (chan->hw_value < 140)
+ val = 0x801;
+ else
+ val = 0x901;
+ } else {
+ val = 0x600;
+ }
+
+ mt76x02_mcu_calibrate(dev, MCU_CAL_FULL, val);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_LC, is_5ghz);
+ usleep_range(15000, 20000);
+
+ mt76_wr(dev, MT_BBP(IBI, 9), reg_val);
+ mt76_wr(dev, MT_TX_ALC_CFG_0, tx_alc);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1);
+}
+EXPORT_SYMBOL_GPL(mt76x0_phy_calibrate);
+
+void mt76x0_phy_set_channel(struct mt76x02_dev *dev,
+ struct cfg80211_chan_def *chandef)
+{
+ u32 ext_cca_chan[4] = {
+ [0] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(0)),
+ [1] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(1)),
+ [2] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(2)),
+ [3] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(3)),
+ };
+ bool scan = test_bit(MT76_SCANNING, &dev->mphy.state);
+ int ch_group_index, freq, freq1;
+ u8 channel;
+ u32 val;
+ u16 rf_bw_band;
+
+ freq = chandef->chan->center_freq;
+ freq1 = chandef->center_freq1;
+ channel = chandef->chan->hw_value;
+ rf_bw_band = (channel <= 14) ? RF_G_BAND : RF_A_BAND;
+
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_40:
+ if (freq1 > freq)
+ ch_group_index = 0;
+ else
+ ch_group_index = 1;
+ channel += 2 - ch_group_index * 4;
+ rf_bw_band |= RF_BW_40;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ ch_group_index = (freq - freq1 + 30) / 20;
+ if (WARN_ON(ch_group_index < 0 || ch_group_index > 3))
+ ch_group_index = 0;
+ channel += 6 - ch_group_index * 4;
+ rf_bw_band |= RF_BW_80;
+ break;
+ default:
+ ch_group_index = 0;
+ rf_bw_band |= RF_BW_20;
+ break;
+ }
+
+ if (mt76_is_usb(&dev->mt76)) {
+ mt76x0_phy_bbp_set_bw(dev, chandef->width);
+ } else {
+ if (chandef->width == NL80211_CHAN_WIDTH_80 ||
+ chandef->width == NL80211_CHAN_WIDTH_40)
+ val = 0x201;
+ else
+ val = 0x601;
+ mt76_wr(dev, MT_TX_SW_CFG0, val);
+ }
+ mt76x02_phy_set_bw(dev, chandef->width, ch_group_index);
+ mt76x02_phy_set_band(dev, chandef->chan->band,
+ ch_group_index & 1);
+
+ mt76_rmw(dev, MT_EXT_CCA_CFG,
+ (MT_EXT_CCA_CFG_CCA0 |
+ MT_EXT_CCA_CFG_CCA1 |
+ MT_EXT_CCA_CFG_CCA2 |
+ MT_EXT_CCA_CFG_CCA3 |
+ MT_EXT_CCA_CFG_CCA_MASK),
+ ext_cca_chan[ch_group_index]);
+
+ mt76x0_phy_set_band(dev, chandef->chan->band);
+ mt76x0_phy_set_chan_rf_params(dev, channel, rf_bw_band);
+
+ /* set Japan Tx filter at channel 14 */
+ if (channel == 14)
+ mt76_set(dev, MT_BBP(CORE, 1), 0x20);
+ else
+ mt76_clear(dev, MT_BBP(CORE, 1), 0x20);
+
+ mt76x0_read_rx_gain(dev);
+ mt76x0_phy_set_chan_bbp_params(dev, rf_bw_band);
+
+ /* enable vco */
+ mt76x0_rf_set(dev, MT_RF(0, 4), BIT(7));
+ if (scan)
+ return;
+
+ mt76x02_init_agc_gain(dev);
+ mt76x0_phy_calibrate(dev, false);
+ mt76x0_phy_set_txpower(dev);
+
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+}
+
+static void mt76x0_phy_temp_sensor(struct mt76x02_dev *dev)
+{
+ u8 rf_b7_73, rf_b0_66, rf_b0_67;
+ s8 val;
+
+ rf_b7_73 = mt76x0_rf_rr(dev, MT_RF(7, 73));
+ rf_b0_66 = mt76x0_rf_rr(dev, MT_RF(0, 66));
+ rf_b0_67 = mt76x0_rf_rr(dev, MT_RF(0, 67));
+
+ mt76x0_rf_wr(dev, MT_RF(7, 73), 0x02);
+ mt76x0_rf_wr(dev, MT_RF(0, 66), 0x23);
+ mt76x0_rf_wr(dev, MT_RF(0, 67), 0x01);
+
+ mt76_wr(dev, MT_BBP(CORE, 34), 0x00080055);
+ if (!mt76_poll_msec(dev, MT_BBP(CORE, 34), BIT(4), 0, 200)) {
+ mt76_clear(dev, MT_BBP(CORE, 34), BIT(4));
+ goto done;
+ }
+
+ val = mt76_rr(dev, MT_BBP(CORE, 35));
+ val = (35 * (val - dev->cal.rx.temp_offset)) / 10 + 25;
+
+ if (abs(val - dev->cal.temp_vco) > 20) {
+ mt76x02_mcu_calibrate(dev, MCU_CAL_VCO,
+ dev->mphy.chandef.chan->hw_value);
+ dev->cal.temp_vco = val;
+ }
+ if (abs(val - dev->cal.temp) > 30) {
+ mt76x0_phy_calibrate(dev, false);
+ dev->cal.temp = val;
+ }
+
+done:
+ mt76x0_rf_wr(dev, MT_RF(7, 73), rf_b7_73);
+ mt76x0_rf_wr(dev, MT_RF(0, 66), rf_b0_66);
+ mt76x0_rf_wr(dev, MT_RF(0, 67), rf_b0_67);
+}
+
+static void mt76x0_phy_set_gain_val(struct mt76x02_dev *dev)
+{
+ u8 gain = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust;
+
+ mt76_rmw_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN, gain);
+
+ if ((dev->mphy.chandef.chan->flags & IEEE80211_CHAN_RADAR) &&
+ !is_mt7630(dev))
+ mt76x02_phy_dfs_adjust_agc(dev);
+}
+
+static void
+mt76x0_phy_update_channel_gain(struct mt76x02_dev *dev)
+{
+ bool gain_change;
+ u8 gain_delta;
+ int low_gain;
+
+ dev->cal.avg_rssi_all = mt76_get_min_avg_rssi(&dev->mt76, false);
+ if (!dev->cal.avg_rssi_all)
+ dev->cal.avg_rssi_all = -75;
+
+ low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) +
+ (dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev));
+
+ gain_change = dev->cal.low_gain < 0 ||
+ (dev->cal.low_gain & 2) ^ (low_gain & 2);
+ dev->cal.low_gain = low_gain;
+
+ if (!gain_change) {
+ if (mt76x02_phy_adjust_vga_gain(dev))
+ mt76x0_phy_set_gain_val(dev);
+ return;
+ }
+
+ dev->cal.agc_gain_adjust = (low_gain == 2) ? 0 : 10;
+ gain_delta = (low_gain == 2) ? 10 : 0;
+
+ dev->cal.agc_gain_cur[0] = dev->cal.agc_gain_init[0] - gain_delta;
+ mt76x0_phy_set_gain_val(dev);
+
+ /* clear false CCA counters */
+ mt76_rr(dev, MT_RX_STAT_1);
+}
+
+static void mt76x0_phy_calibration_work(struct work_struct *work)
+{
+ struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
+ cal_work.work);
+
+ mt76x0_phy_update_channel_gain(dev);
+ if (mt76x0_tssi_enabled(dev))
+ mt76x0_phy_tssi_calibrate(dev);
+ else
+ mt76x0_phy_temp_sensor(dev);
+
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
+ 4 * MT_CALIBRATE_INTERVAL);
+}
+
+static void mt76x0_rf_patch_reg_array(struct mt76x02_dev *dev,
+ const struct mt76_reg_pair *rp, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ u32 reg = rp[i].reg;
+ u8 val = rp[i].value;
+
+ switch (reg) {
+ case MT_RF(0, 3):
+ if (mt76_is_mmio(&dev->mt76)) {
+ if (is_mt7630(dev))
+ val = 0x70;
+ else
+ val = 0x63;
+ } else {
+ val = 0x73;
+ }
+ break;
+ case MT_RF(0, 21):
+ if (is_mt7610e(dev))
+ val = 0x10;
+ else
+ val = 0x12;
+ break;
+ case MT_RF(5, 2):
+ if (is_mt7630(dev))
+ val = 0x1d;
+ else if (is_mt7610e(dev))
+ val = 0x00;
+ else
+ val = 0x0c;
+ break;
+ default:
+ break;
+ }
+ mt76x0_rf_wr(dev, reg, val);
+ }
+}
+
+static void mt76x0_phy_rf_init(struct mt76x02_dev *dev)
+{
+ int i;
+
+ mt76x0_rf_patch_reg_array(dev, mt76x0_rf_central_tab,
+ ARRAY_SIZE(mt76x0_rf_central_tab));
+ mt76x0_rf_patch_reg_array(dev, mt76x0_rf_2g_channel_0_tab,
+ ARRAY_SIZE(mt76x0_rf_2g_channel_0_tab));
+ RF_RANDOM_WRITE(dev, mt76x0_rf_5g_channel_0_tab);
+ RF_RANDOM_WRITE(dev, mt76x0_rf_vga_channel_0_tab);
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_rf_bw_switch_tab); i++) {
+ const struct mt76x0_rf_switch_item *item = &mt76x0_rf_bw_switch_tab[i];
+
+ if (item->bw_band == RF_BW_20)
+ mt76x0_rf_wr(dev, item->rf_bank_reg, item->value);
+ else if (((RF_G_BAND | RF_BW_20) & item->bw_band) ==
+ (RF_G_BAND | RF_BW_20))
+ mt76x0_rf_wr(dev, item->rf_bank_reg, item->value);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_rf_band_switch_tab); i++) {
+ if (mt76x0_rf_band_switch_tab[i].bw_band & RF_G_BAND) {
+ mt76x0_rf_wr(dev,
+ mt76x0_rf_band_switch_tab[i].rf_bank_reg,
+ mt76x0_rf_band_switch_tab[i].value);
+ }
+ }
+
+ /* Frequency calibration
+ * E1: B0.R22<6:0>: xo_cxo<6:0>
+ * E2: B0.R21<0>: xo_cxo<0>, B0.R22<7:0>: xo_cxo<8:1>
+ */
+ mt76x0_rf_wr(dev, MT_RF(0, 22),
+ min_t(u8, dev->cal.rx.freq_offset, 0xbf));
+ mt76x0_rf_rr(dev, MT_RF(0, 22));
+
+ /* Reset procedure DAC during power-up:
+ * - set B0.R73<7>
+ * - clear B0.R73<7>
+ * - set B0.R73<7>
+ */
+ mt76x0_rf_set(dev, MT_RF(0, 73), BIT(7));
+ mt76x0_rf_clear(dev, MT_RF(0, 73), BIT(7));
+ mt76x0_rf_set(dev, MT_RF(0, 73), BIT(7));
+
+ /* vcocal_en: initiate VCO calibration (reset after completion) */
+ mt76x0_rf_set(dev, MT_RF(0, 4), 0x80);
+}
+
+void mt76x0_phy_init(struct mt76x02_dev *dev)
+{
+ INIT_DELAYED_WORK(&dev->cal_work, mt76x0_phy_calibration_work);
+
+ mt76x0_phy_ant_select(dev);
+ mt76x0_phy_rf_init(dev);
+ mt76x02_phy_set_rxpath(dev);
+ mt76x02_phy_set_txdac(dev);
+}
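
Two fixed-point conventions in the code above are worth spelling out. mt76x0_phy_lin2db() normalizes its argument to mantissa * 2^exp with the mantissa in [2^15, 2^16), approximates the fractional log2 of the mantissa with a two-segment linear fit, and then scales the log2 estimate by 4 + 2 + 1/64 + 1/128 ≈ 6.023, which is 20 * log10(2); in other words it converts the result into decibels, with the absolute output scale set by the EEPROM slope it is multiplied with in the caller.

In mt76x0_phy_get_delta_power(), assuming the half-dB power convention from main.c (txpower_conf = power_level * 2), the running tssi_target is kept in units of 1/8192 dB: target_power << 12 equals dB * 8192, which is where the literal offsets come from (3.6 dB * 8192 = 29491, 0.54 dB * 8192 ≈ 4424, 6 dB * 8192 = 49152), and the final add/subtract of 2048 followed by the shift right by 12 rounds the result back to half-dB steps before it is programmed into MT_TX_ALC_CFG_1_TEMP_COMP.
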
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.h b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.h
new file mode 100644
index 000000000..441d6559d
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ */
+#ifndef _MT76X0_PHY_H_
+#define _MT76X0_PHY_H_
+
+#define RF_G_BAND 0x0100
+#define RF_A_BAND 0x0200
+#define RF_A_BAND_LB 0x0400
+#define RF_A_BAND_MB 0x0800
+#define RF_A_BAND_HB 0x1000
+#define RF_A_BAND_11J 0x2000
+
+#define RF_BW_20 1
+#define RF_BW_40 2
+#define RF_BW_10 4
+#define RF_BW_80 8
+
+#define MT_RF(bank, reg) ((bank) << 16 | (reg))
+#define MT_RF_BANK(offset) ((offset) >> 16)
+#define MT_RF_REG(offset) ((offset) & 0xff)
+
+#define MT_RF_VCO_BP_CLOSE_LOOP BIT(3)
+#define MT_RF_VCO_BP_CLOSE_LOOP_MASK GENMASK(3, 0)
+#define MT_RF_VCO_CAL_MASK GENMASK(2, 0)
+#define MT_RF_START_TIME 0x3
+#define MT_RF_START_TIME_MASK GENMASK(2, 0)
+#define MT_RF_SETTLE_TIME_MASK GENMASK(6, 4)
+
+#define MT_RF_PLL_DEN_MASK GENMASK(4, 0)
+#define MT_RF_PLL_K_MASK GENMASK(4, 0)
+#define MT_RF_SDM_RESET_MASK BIT(7)
+#define MT_RF_SDM_MASH_PRBS_MASK GENMASK(6, 2)
+#define MT_RF_SDM_BP_MASK BIT(1)
+#define MT_RF_ISI_ISO_MASK GENMASK(7, 6)
+#define MT_RF_PFD_DLY_MASK GENMASK(5, 4)
+#define MT_RF_CLK_SEL_MASK GENMASK(3, 2)
+#define MT_RF_XO_DIV_MASK GENMASK(1, 0)
+
+struct mt76x0_bbp_switch_item {
+ u16 bw_band;
+ struct mt76_reg_pair reg_pair;
+};
+
+struct mt76x0_rf_switch_item {
+ u32 rf_bank_reg;
+ u16 bw_band;
+ u8 value;
+};
+
+struct mt76x0_freq_item {
+ u8 channel;
+ u32 band;
+ u8 pllR37;
+ u8 pllR36;
+ u8 pllR35;
+ u8 pllR34;
+ u8 pllR33;
+ u8 pllR32_b7b5;
+ u8 pllR32_b4b0; /* PLL_DEN (Denominator - 8) */
+ u8 pllR31_b7b5;
+ u8 pllR31_b4b0; /* PLL_K (Numerator) */
+ u8 pllR30_b7; /* sdm_reset_n */
+ u8 pllR30_b6b2; /* sdmmash_prbs,sin */
+ u8 pllR30_b1; /* sdm_bp */
+ u16 pll_n; /* R30<0>, R29<7:0> (hex) */
+ u8 pllR28_b7b6; /* isi,iso */
+ u8 pllR28_b5b4; /* pfd_dly */
+ u8 pllR28_b3b2; /* clksel option */
+ u32 pll_sdm_k; /* R28<1:0>, R27<7:0>, R26<7:0> (hex) SDM_k */
+ u8 pllR24_b1b0; /* xo_div */
+};
+
+struct mt76x0_rate_pwr_item {
+ s8 mcs_power;
+ u8 rf_pa_mode;
+};
+
+struct mt76x0_rate_pwr_tab {
+ struct mt76x0_rate_pwr_item cck[4];
+ struct mt76x0_rate_pwr_item ofdm[8];
+ struct mt76x0_rate_pwr_item ht[8];
+ struct mt76x0_rate_pwr_item vht[10];
+ struct mt76x0_rate_pwr_item stbc[8];
+ struct mt76x0_rate_pwr_item mcs32;
+};
+
+#endif /* _MT76X0_PHY_H_ */
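
MT_RF() above packs an RF bank and register index into a single u32 so the tables and the phy.c helpers can address registers uniformly over either the CSR path or the MCU register-pair path. A trivial round-trip of the encoding (illustrative only):

static void sketch_mt_rf_roundtrip(void)
{
	u32 off = MT_RF(5, 0);		/* bank 5, register 0 -> 0x00050000 */
	u8 bank = MT_RF_BANK(off);	/* 5 */
	u8 reg = MT_RF_REG(off);	/* 0 */

	(void)bank;
	(void)reg;
}
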
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
new file mode 100644
index 000000000..ce6b286a8
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include "mt76x0.h"
+#include "mcu.h"
+#include "../mt76x02_usb.h"
+
+static struct usb_device_id mt76x0_device_table[] = {
+ { USB_DEVICE(0x148F, 0x7610) }, /* MT7610U */
+ { USB_DEVICE(0x13B1, 0x003E) }, /* Linksys AE6000 */
+ { USB_DEVICE(0x0E8D, 0x7610) }, /* Sabrent NTWLAC */
+ { USB_DEVICE(0x7392, 0xa711) }, /* Edimax 7711mac */
+ { USB_DEVICE(0x7392, 0xb711) }, /* Edimax / Elecom */
+ { USB_DEVICE(0x148f, 0x761a) }, /* TP-Link TL-WDN5200 */
+ { USB_DEVICE(0x148f, 0x760a) }, /* TP-Link unknown */
+ { USB_DEVICE(0x0b05, 0x17d1) }, /* Asus USB-AC51 */
+ { USB_DEVICE(0x0b05, 0x17db) }, /* Asus USB-AC50 */
+ { USB_DEVICE(0x0df6, 0x0075) }, /* Sitecom WLA-3100 */
+ { USB_DEVICE(0x2019, 0xab31) }, /* Planex GW-450D */
+ { USB_DEVICE(0x2001, 0x3d02) }, /* D-LINK DWA-171 rev B1 */
+ { USB_DEVICE(0x0586, 0x3425) }, /* Zyxel NWD6505 */
+ { USB_DEVICE(0x07b8, 0x7610) }, /* AboCom AU7212 */
+ { USB_DEVICE(0x04bb, 0x0951) }, /* I-O DATA WN-AC433UK */
+ { USB_DEVICE(0x057c, 0x8502) }, /* AVM FRITZ!WLAN USB Stick AC 430 */
+ { USB_DEVICE(0x293c, 0x5702) }, /* Comcast Xfinity KXW02AAA */
+ { USB_DEVICE(0x20f4, 0x806b) }, /* TRENDnet TEW-806UBH */
+ { USB_DEVICE(0x7392, 0xc711) }, /* Devolo Wifi ac Stick */
+ { USB_DEVICE(0x0df6, 0x0079) }, /* Sitecom Europe B.V. ac Stick */
+ { USB_DEVICE(0x2357, 0x0123) }, /* TP-LINK T2UHP */
+ /* TP-LINK Archer T1U */
+ { USB_DEVICE(0x2357, 0x0105), .driver_info = 1, },
+ /* MT7630U */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0E8D, 0x7630, 0xff, 0x2, 0xff) },
+ /* MT7650U */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0E8D, 0x7650, 0xff, 0x2, 0xff) },
+ { 0, }
+};
+
+static void mt76x0_init_usb_dma(struct mt76x02_dev *dev)
+{
+ u32 val;
+
+ val = mt76_rr(dev, MT_USB_DMA_CFG);
+
+ val |= MT_USB_DMA_CFG_RX_BULK_EN |
+ MT_USB_DMA_CFG_TX_BULK_EN;
+
+ /* disable AGGR_BULK_RX in order to receive one
+ * frame in each rx urb and avoid copies
+ */
+ val &= ~MT_USB_DMA_CFG_RX_BULK_AGG_EN;
+ mt76_wr(dev, MT_USB_DMA_CFG, val);
+
+ val = mt76_rr(dev, MT_COM_REG0);
+ if (val & 1)
+ dev_dbg(dev->mt76.dev, "MCU not ready\n");
+
+ val = mt76_rr(dev, MT_USB_DMA_CFG);
+
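+ /* pulse RX_DROP_OR_PAD: set the bit and then clear it again */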
+ val |= MT_USB_DMA_CFG_RX_DROP_OR_PAD;
+ mt76_wr(dev, MT_USB_DMA_CFG, val);
+ val &= ~MT_USB_DMA_CFG_RX_DROP_OR_PAD;
+ mt76_wr(dev, MT_USB_DMA_CFG, val);
+}
+
+static void mt76x0u_cleanup(struct mt76x02_dev *dev)
+{
+ clear_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
+ mt76x0_chip_onoff(dev, false, false);
+ mt76u_queues_deinit(&dev->mt76);
+}
+
+static void mt76x0u_stop(struct ieee80211_hw *hw)
+{
+ struct mt76x02_dev *dev = hw->priv;
+
+ clear_bit(MT76_STATE_RUNNING, &dev->mphy.state);
+ cancel_delayed_work_sync(&dev->cal_work);
+ cancel_delayed_work_sync(&dev->mt76.mac_work);
+ mt76u_stop_tx(&dev->mt76);
+ mt76x02u_exit_beacon_config(dev);
+
+ if (test_bit(MT76_REMOVED, &dev->mphy.state))
+ return;
+
+ if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_BUSY, 0, 1000))
+ dev_warn(dev->mt76.dev, "TX DMA did not stop\n");
+
+ mt76x0_mac_stop(dev);
+
+ if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_RX_BUSY, 0, 1000))
+ dev_warn(dev->mt76.dev, "RX DMA did not stop\n");
+}
+
+static int mt76x0u_start(struct ieee80211_hw *hw)
+{
+ struct mt76x02_dev *dev = hw->priv;
+ int ret;
+
+ ret = mt76x02u_mac_start(dev);
+ if (ret)
+ return ret;
+
+ mt76x0_phy_calibrate(dev, true);
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mt76.mac_work,
+ MT_MAC_WORK_INTERVAL);
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+ set_bit(MT76_STATE_RUNNING, &dev->mphy.state);
+ return 0;
+}
+
+static const struct ieee80211_ops mt76x0u_ops = {
+ .tx = mt76x02_tx,
+ .start = mt76x0u_start,
+ .stop = mt76x0u_stop,
+ .add_interface = mt76x02_add_interface,
+ .remove_interface = mt76x02_remove_interface,
+ .config = mt76x0_config,
+ .configure_filter = mt76x02_configure_filter,
+ .bss_info_changed = mt76x02_bss_info_changed,
+ .sta_state = mt76_sta_state,
+ .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
+ .set_key = mt76x02_set_key,
+ .conf_tx = mt76x02_conf_tx,
+ .sw_scan_start = mt76_sw_scan,
+ .sw_scan_complete = mt76x02_sw_scan_complete,
+ .ampdu_action = mt76x02_ampdu_action,
+ .sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
+ .set_rts_threshold = mt76x02_set_rts_threshold,
+ .wake_tx_queue = mt76_wake_tx_queue,
+ .get_txpower = mt76_get_txpower,
+ .get_survey = mt76_get_survey,
+ .set_tim = mt76_set_tim,
+ .release_buffered_frames = mt76_release_buffered_frames,
+ .get_antenna = mt76_get_antenna,
+};
+
+static int mt76x0u_init_hardware(struct mt76x02_dev *dev, bool reset)
+{
+ int err;
+
+ mt76x0_chip_onoff(dev, true, reset);
+
+ if (!mt76x02_wait_for_mac(&dev->mt76))
+ return -ETIMEDOUT;
+
+ err = mt76x0u_mcu_init(dev);
+ if (err < 0)
+ return err;
+
+ mt76x0_init_usb_dma(dev);
+ err = mt76x0_init_hardware(dev);
+ if (err < 0)
+ return err;
+
+ mt76x02u_init_beacon_config(dev);
+
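+ /* microsecond cycle count: 0x1e (30), presumably cycles per 1us tick */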
+ mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
+ mt76_wr(dev, MT_TXOP_CTRL_CFG,
+ FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) |
+ FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58));
+
+ return 0;
+}
+
+static int mt76x0u_register_device(struct mt76x02_dev *dev)
+{
+ struct ieee80211_hw *hw = dev->mt76.hw;
+ struct mt76_usb *usb = &dev->mt76.usb;
+ int err;
+
+ usb->mcu.data = devm_kmalloc(dev->mt76.dev, MCU_RESP_URB_SIZE,
+ GFP_KERNEL);
+ if (!usb->mcu.data)
+ return -ENOMEM;
+
+ err = mt76u_alloc_queues(&dev->mt76);
+ if (err < 0)
+ goto out_err;
+
+ err = mt76x0u_init_hardware(dev, true);
+ if (err < 0)
+ goto out_err;
+
+ /* check hw sg support in order to enable AMSDU */
+ hw->max_tx_fragments = dev->mt76.usb.sg_en ? MT_TX_SG_MAX_SIZE : 1;
+ err = mt76x0_register_device(dev);
+ if (err < 0)
+ goto out_err;
+
+ set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
+
+ return 0;
+
+out_err:
+ mt76x0u_cleanup(dev);
+ return err;
+}
+
+static int mt76x0u_probe(struct usb_interface *usb_intf,
+ const struct usb_device_id *id)
+{
+ static const struct mt76_driver_ops drv_ops = {
+ .drv_flags = MT_DRV_SW_RX_AIRTIME,
+ .survey_flags = SURVEY_INFO_TIME_TX,
+ .update_survey = mt76x02_update_channel,
+ .tx_prepare_skb = mt76x02u_tx_prepare_skb,
+ .tx_complete_skb = mt76x02u_tx_complete_skb,
+ .tx_status_data = mt76x02_tx_status_data,
+ .rx_skb = mt76x02_queue_rx_skb,
+ .sta_ps = mt76x02_sta_ps,
+ .sta_add = mt76x02_sta_add,
+ .sta_remove = mt76x02_sta_remove,
+ };
+ struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
+ struct mt76x02_dev *dev;
+ struct mt76_dev *mdev;
+ u32 mac_rev;
+ int ret;
+
+ mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), &mt76x0u_ops,
+ &drv_ops);
+ if (!mdev)
+ return -ENOMEM;
+
+ dev = container_of(mdev, struct mt76x02_dev, mt76);
+ mutex_init(&dev->phy_mutex);
+
+ /* Quirk for Archer T1U */
+ if (id->driver_info)
+ dev->no_2ghz = true;
+
+ usb_dev = usb_get_dev(usb_dev);
+ usb_reset_device(usb_dev);
+
+ usb_set_intfdata(usb_intf, dev);
+
+ mt76x02u_init_mcu(mdev);
+ ret = mt76u_init(mdev, usb_intf, false);
+ if (ret)
+ goto err;
+
+ /* Disable the HW, otherwise the MCU fails to initialize on hot reboot */
+ mt76x0_chip_onoff(dev, false, false);
+
+ if (!mt76x02_wait_for_mac(mdev)) {
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
+ mac_rev = mt76_rr(dev, MT_MAC_CSR0);
+ dev_info(mdev->dev, "ASIC revision: %08x MAC revision: %08x\n",
+ mdev->rev, mac_rev);
+ if (!is_mt76x0(dev)) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ /* Note: vendor driver skips this check for MT76X0U */
+ if (!(mt76_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL))
+ dev_warn(mdev->dev, "Warning: eFUSE not present\n");
+
+ ret = mt76x0u_register_device(dev);
+ if (ret < 0)
+ goto err;
+
+ return 0;
+
+err:
+ usb_set_intfdata(usb_intf, NULL);
+ usb_put_dev(interface_to_usbdev(usb_intf));
+ mt76_free_device(&dev->mt76);
+
+ return ret;
+}
+
+static void mt76x0_disconnect(struct usb_interface *usb_intf)
+{
+ struct mt76x02_dev *dev = usb_get_intfdata(usb_intf);
+ bool initialized = test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
+
+ if (!initialized)
+ return;
+
+ ieee80211_unregister_hw(dev->mt76.hw);
+ mt76x0u_cleanup(dev);
+
+ usb_set_intfdata(usb_intf, NULL);
+ usb_put_dev(interface_to_usbdev(usb_intf));
+
+ mt76_free_device(&dev->mt76);
+}
+
+static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf,
+ pm_message_t state)
+{
+ struct mt76x02_dev *dev = usb_get_intfdata(usb_intf);
+
+ mt76u_stop_rx(&dev->mt76);
+ clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
+ mt76x0_chip_onoff(dev, false, false);
+
+ return 0;
+}
+
+static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf)
+{
+ struct mt76x02_dev *dev = usb_get_intfdata(usb_intf);
+ int ret;
+
+ ret = mt76u_resume_rx(&dev->mt76);
+ if (ret < 0)
+ goto err;
+
+ ret = mt76x0u_init_hardware(dev, false);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ mt76x0u_cleanup(dev);
+ return ret;
+}
+
+MODULE_DEVICE_TABLE(usb, mt76x0_device_table);
+MODULE_FIRMWARE(MT7610E_FIRMWARE);
+MODULE_FIRMWARE(MT7610U_FIRMWARE);
+MODULE_LICENSE("GPL");
+
+static struct usb_driver mt76x0_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mt76x0_device_table,
+ .probe = mt76x0u_probe,
+ .disconnect = mt76x0_disconnect,
+ .suspend = mt76x0_suspend,
+ .resume = mt76x0_resume,
+ .reset_resume = mt76x0_resume,
+ .soft_unbind = 1,
+ .disable_hub_initiated_lpm = 1,
+};
+module_usb_driver(mt76x0_driver);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c
new file mode 100644
index 000000000..45502fd46
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+
+#include "mt76x0.h"
+#include "mcu.h"
+#include "../mt76x02_usb.h"
+
+#define MCU_FW_URB_MAX_PAYLOAD 0x38f8
+#define MCU_FW_URB_SIZE (MCU_FW_URB_MAX_PAYLOAD + 12)
+
+static int
+mt76x0u_upload_firmware(struct mt76x02_dev *dev,
+ const struct mt76x02_fw_header *hdr)
+{
+ u8 *fw_payload = (u8 *)(hdr + 1);
+ u32 ilm_len, dlm_len;
+ void *ivb;
+ int err;
+
+ ivb = kmemdup(fw_payload, MT_MCU_IVB_SIZE, GFP_KERNEL);
+ if (!ivb)
+ return -ENOMEM;
+
+ ilm_len = le32_to_cpu(hdr->ilm_len) - MT_MCU_IVB_SIZE;
+ dev_dbg(dev->mt76.dev, "loading FW - ILM %u + IVB %u\n",
+ ilm_len, MT_MCU_IVB_SIZE);
+ err = mt76x02u_mcu_fw_send_data(dev, fw_payload + MT_MCU_IVB_SIZE,
+ ilm_len, MCU_FW_URB_MAX_PAYLOAD,
+ MT_MCU_IVB_SIZE);
+ if (err)
+ goto out;
+
+ dlm_len = le32_to_cpu(hdr->dlm_len);
+ dev_dbg(dev->mt76.dev, "loading FW - DLM %u\n", dlm_len);
+ err = mt76x02u_mcu_fw_send_data(dev,
+ fw_payload + le32_to_cpu(hdr->ilm_len),
+ dlm_len, MCU_FW_URB_MAX_PAYLOAD,
+ MT_MCU_DLM_OFFSET);
+ if (err)
+ goto out;
+
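+ /* upload the interrupt vector block (IVB) last via a vendor request;
+ * the register poll below then checks that the firmware has started
+ */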
+ err = mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ 0x12, 0, ivb, MT_MCU_IVB_SIZE);
+ if (err < 0)
+ goto out;
+
+ if (!mt76_poll_msec(dev, MT_MCU_COM_REG0, 1, 1, 1000)) {
+ dev_err(dev->mt76.dev, "Firmware failed to start\n");
+ err = -ETIMEDOUT;
+ goto out;
+ }
+
+ dev_dbg(dev->mt76.dev, "Firmware running!\n");
+
+out:
+ kfree(ivb);
+
+ return err;
+}
+
+static int mt76x0_get_firmware(struct mt76x02_dev *dev,
+ const struct firmware **fw)
+{
+ int err;
+
+ /* try to load the mt7610e firmware if available,
+ * otherwise fall back to the mt7610u one
+ */
+ err = firmware_request_nowarn(fw, MT7610E_FIRMWARE, dev->mt76.dev);
+ if (err) {
+ dev_info(dev->mt76.dev, "%s not found, switching to %s\n",
+ MT7610E_FIRMWARE, MT7610U_FIRMWARE);
+ return request_firmware(fw, MT7610U_FIRMWARE,
+ dev->mt76.dev);
+ }
+ return 0;
+}
+
+static int mt76x0u_load_firmware(struct mt76x02_dev *dev)
+{
+ const struct firmware *fw;
+ const struct mt76x02_fw_header *hdr;
+ int len, ret;
+ u32 val;
+
+ mt76_wr(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN |
+ MT_USB_DMA_CFG_TX_BULK_EN));
+
+ if (mt76x0_firmware_running(dev))
+ return 0;
+
+ ret = mt76x0_get_firmware(dev, &fw);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr))
+ goto err_inv_fw;
+
+ hdr = (const struct mt76x02_fw_header *)fw->data;
+
+ if (le32_to_cpu(hdr->ilm_len) <= MT_MCU_IVB_SIZE)
+ goto err_inv_fw;
+
+ len = sizeof(*hdr);
+ len += le32_to_cpu(hdr->ilm_len);
+ len += le32_to_cpu(hdr->dlm_len);
+
+ if (fw->size != len)
+ goto err_inv_fw;
+
+ val = le16_to_cpu(hdr->fw_ver);
+ dev_dbg(dev->mt76.dev,
+ "Firmware Version: %d.%d.%02d Build: %x Build time: %.16s\n",
+ (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf,
+ le16_to_cpu(hdr->build_ver), hdr->build_time);
+
+ len = le32_to_cpu(hdr->ilm_len);
+
+ mt76_wr(dev, 0x1004, 0x2c);
+
+ mt76_set(dev, MT_USB_DMA_CFG,
+ (MT_USB_DMA_CFG_RX_BULK_EN | MT_USB_DMA_CFG_TX_BULK_EN) |
+ FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20));
+ mt76x02u_mcu_fw_reset(dev);
+ usleep_range(5000, 6000);
+
+ mt76_wr(dev, MT_FCE_PSE_CTRL, 1);
+
+ /* FCE tx_fs_base_ptr */
+ mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
+ /* FCE tx_fs_max_cnt */
+ mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 1);
+ /* FCE pdma enable */
+ mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
+ /* FCE skip_fs_en */
+ mt76_wr(dev, MT_FCE_SKIP_FS, 3);
+
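+ /* toggle UDMA_TX_WL_DROP: set the bit and clear it again before the upload */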
+ val = mt76_rr(dev, MT_USB_DMA_CFG);
+ val |= MT_USB_DMA_CFG_UDMA_TX_WL_DROP;
+ mt76_wr(dev, MT_USB_DMA_CFG, val);
+ val &= ~MT_USB_DMA_CFG_UDMA_TX_WL_DROP;
+ mt76_wr(dev, MT_USB_DMA_CFG, val);
+
+ ret = mt76x0u_upload_firmware(dev, hdr);
+ release_firmware(fw);
+
+ mt76_wr(dev, MT_FCE_PSE_CTRL, 1);
+
+ return ret;
+
+err_inv_fw:
+ dev_err(dev->mt76.dev, "Invalid firmware image\n");
+ release_firmware(fw);
+ return -ENOENT;
+}
+
+int mt76x0u_mcu_init(struct mt76x02_dev *dev)
+{
+ int ret;
+
+ ret = mt76x0u_load_firmware(dev);
+ if (ret < 0)
+ return ret;
+
+ set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h
new file mode 100644
index 000000000..d626817a2
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ */
+
+#ifndef __MT76x02_H
+#define __MT76x02_H
+
+#include <linux/kfifo.h>
+
+#include "mt76.h"
+#include "mt76x02_regs.h"
+#include "mt76x02_mac.h"
+#include "mt76x02_dfs.h"
+#include "mt76x02_dma.h"
+
+#define MT76x02_TX_RING_SIZE 512
+#define MT76x02_PSD_RING_SIZE 128
+#define MT76x02_N_WCIDS 128
+#define MT_CALIBRATE_INTERVAL HZ
+#define MT_MAC_WORK_INTERVAL (HZ / 10)
+
+#define MT_WATCHDOG_TIME (HZ / 10)
+#define MT_TX_HANG_TH 10
+
+#define MT_MAX_CHAINS 2
+struct mt76x02_rx_freq_cal {
+ s8 high_gain[MT_MAX_CHAINS];
+ s8 rssi_offset[MT_MAX_CHAINS];
+ s8 lna_gain;
+ u32 mcu_gain;
+ s16 temp_offset;
+ u8 freq_offset;
+};
+
+struct mt76x02_calibration {
+ struct mt76x02_rx_freq_cal rx;
+
+ u8 agc_gain_init[MT_MAX_CHAINS];
+ u8 agc_gain_cur[MT_MAX_CHAINS];
+
+ u16 false_cca;
+ s8 avg_rssi_all;
+ s8 agc_gain_adjust;
+ s8 agc_lowest_gain;
+ s8 low_gain;
+
+ s8 temp_vco;
+ s8 temp;
+
+ bool init_cal_done;
+ bool tssi_cal_done;
+ bool tssi_comp_pending;
+ bool dpd_cal_done;
+ bool channel_cal_done;
+ bool gain_init_done;
+
+ int tssi_target;
+ s8 tssi_dc;
+};
+
+struct mt76x02_beacon_ops {
+ unsigned int nslots;
+ unsigned int slot_size;
+ void (*pre_tbtt_enable)(struct mt76x02_dev *dev, bool en);
+ void (*beacon_enable)(struct mt76x02_dev *dev, bool en);
+};
+
+#define mt76x02_beacon_enable(dev, enable) \
+ (dev)->beacon_ops->beacon_enable(dev, enable)
+#define mt76x02_pre_tbtt_enable(dev, enable) \
+ (dev)->beacon_ops->pre_tbtt_enable(dev, enable)
+
+struct mt76x02_dev {
+ union { /* must be first */
+ struct mt76_dev mt76;
+ struct mt76_phy mphy;
+ };
+
+ struct mac_address macaddr_list[8];
+
+ struct mutex phy_mutex;
+
+ u16 chainmask;
+
+ u8 txdone_seq;
+ DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x02_tx_status);
+ spinlock_t txstatus_fifo_lock;
+ u32 tx_airtime;
+ u32 ampdu_ref;
+
+ struct sk_buff *rx_head;
+
+ struct delayed_work cal_work;
+ struct delayed_work wdt_work;
+
+ struct hrtimer pre_tbtt_timer;
+ struct work_struct pre_tbtt_work;
+
+ const struct mt76x02_beacon_ops *beacon_ops;
+
+ u8 beacon_data_count;
+
+ u8 tbtt_count;
+
+ u32 tx_hang_reset;
+ u8 tx_hang_check;
+ u8 mcu_timeout;
+
+ struct mt76x02_calibration cal;
+
+ int txpower_conf;
+ s8 target_power;
+ s8 target_power_delta[2];
+ bool enable_tpc;
+
+ bool no_2ghz;
+
+ s16 coverage_class;
+ u8 slottime;
+
+ struct mt76x02_dfs_pattern_detector dfs_pd;
+
+ /* edcca monitor */
+ unsigned long ed_trigger_timeout;
+ bool ed_tx_blocked;
+ bool ed_monitor;
+ u8 ed_monitor_enabled;
+ u8 ed_monitor_learning;
+ u8 ed_trigger;
+ u8 ed_silent;
+ ktime_t ed_time;
+};
+
+extern struct ieee80211_rate mt76x02_rates[12];
+
+void mt76x02_init_device(struct mt76x02_dev *dev);
+void mt76x02_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast);
+int mt76x02_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void mt76x02_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+
+void mt76x02_config_mac_addr_list(struct mt76x02_dev *dev);
+
+int mt76x02_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+void mt76x02_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+
+int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params);
+int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key);
+int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params);
+void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev,
+ const struct ieee80211_tx_rate *rate);
+s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr,
+ s8 max_txpwr_adj);
+void mt76x02_wdt_work(struct work_struct *work);
+void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr);
+void mt76x02_set_tx_ackto(struct mt76x02_dev *dev);
+void mt76x02_set_coverage_class(struct ieee80211_hw *hw,
+ s16 coverage_class);
+int mt76x02_set_rts_threshold(struct ieee80211_hw *hw, u32 val);
+void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len);
+bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update);
+void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ struct sk_buff *skb);
+void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
+irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance);
+void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+ struct sk_buff *skb);
+int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info);
+void mt76x02_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+void mt76x02_sta_ps(struct mt76_dev *dev, struct ieee80211_sta *sta, bool ps);
+void mt76x02_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u32 changed);
+void mt76x02_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type);
+
+struct beacon_bc_data {
+ struct mt76x02_dev *dev;
+ struct sk_buff_head q;
+ struct sk_buff *tail[8];
+};
+
+void mt76x02_init_beacon_config(struct mt76x02_dev *dev);
+void mt76x02e_init_beacon_config(struct mt76x02_dev *dev);
+void mt76x02_resync_beacon_timer(struct mt76x02_dev *dev);
+void mt76x02_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif);
+void mt76x02_enqueue_buffered_bc(struct mt76x02_dev *dev,
+ struct beacon_bc_data *data,
+ int max_nframes);
+
+void mt76x02_mac_start(struct mt76x02_dev *dev);
+
+void mt76x02_init_debugfs(struct mt76x02_dev *dev);
+
+static inline bool is_mt76x0(struct mt76x02_dev *dev)
+{
+ return mt76_chip(&dev->mt76) == 0x7610 ||
+ mt76_chip(&dev->mt76) == 0x7630 ||
+ mt76_chip(&dev->mt76) == 0x7650;
+}
+
+static inline bool is_mt76x2(struct mt76x02_dev *dev)
+{
+ return mt76_chip(&dev->mt76) == 0x7612 ||
+ mt76_chip(&dev->mt76) == 0x7632 ||
+ mt76_chip(&dev->mt76) == 0x7662 ||
+ mt76_chip(&dev->mt76) == 0x7602;
+}
+
+static inline void mt76x02_irq_enable(struct mt76x02_dev *dev, u32 mask)
+{
+ mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, 0, mask);
+}
+
+static inline void mt76x02_irq_disable(struct mt76x02_dev *dev, u32 mask)
+{
+ mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
+}
+
+static inline bool
+mt76x02_wait_for_txrx_idle(struct mt76_dev *dev)
+{
+ return __mt76_poll_msec(dev, MT_MAC_STATUS,
+ MT_MAC_STATUS_TX | MT_MAC_STATUS_RX,
+ 0, 100);
+}
+
+static inline struct mt76x02_sta *
+mt76x02_rx_get_sta(struct mt76_dev *dev, u8 idx)
+{
+ struct mt76_wcid *wcid;
+
+ if (idx >= MT76x02_N_WCIDS)
+ return NULL;
+
+ wcid = rcu_dereference(dev->wcid[idx]);
+ if (!wcid)
+ return NULL;
+
+ return container_of(wcid, struct mt76x02_sta, wcid);
+}
+
+static inline struct mt76_wcid *
+mt76x02_rx_get_sta_wcid(struct mt76x02_sta *sta, bool unicast)
+{
+ if (!sta)
+ return NULL;
+
+ if (unicast)
+ return &sta->wcid;
+ else
+ return &sta->vif->group_wcid;
+}
+
+#endif /* __MT76x02_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c b/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
new file mode 100644
index 000000000..5d034cec1
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ */
+
+#include "mt76x02.h"
+
+static void mt76x02_set_beacon_offsets(struct mt76x02_dev *dev)
+{
+ u32 regs[4] = {};
+ u16 val;
+ int i;
+
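+ /* pack four per-slot offsets (in units of 64 bytes) into each
+ * MT_BCN_OFFSET register, one byte per slot
+ */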
+ for (i = 0; i < dev->beacon_ops->nslots; i++) {
+ val = i * dev->beacon_ops->slot_size;
+ regs[i / 4] |= (val / 64) << (8 * (i % 4));
+ }
+
+ for (i = 0; i < 4; i++)
+ mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]);
+}
+
+static int
+mt76x02_write_beacon(struct mt76x02_dev *dev, int offset, struct sk_buff *skb)
+{
+ int beacon_len = dev->beacon_ops->slot_size;
+
+ if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x02_txwi)))
+ return -ENOSPC;
+
+ /* USB devices already reserve enough skb headroom for txwi's. This
+ * helps to save slow copies over USB.
+ */
+ if (mt76_is_usb(&dev->mt76)) {
+ struct mt76x02_txwi *txwi;
+
+ txwi = (struct mt76x02_txwi *)(skb->data - sizeof(*txwi));
+ mt76x02_mac_write_txwi(dev, txwi, skb, NULL, NULL, skb->len);
+ skb_push(skb, sizeof(*txwi));
+ } else {
+ struct mt76x02_txwi txwi;
+
+ mt76x02_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len);
+ mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
+ offset += sizeof(txwi);
+ }
+
+ mt76_wr_copy(dev, offset, skb->data, skb->len);
+ return 0;
+}
+
+void mt76x02_mac_set_beacon(struct mt76x02_dev *dev,
+ struct sk_buff *skb)
+{
+ int bcn_len = dev->beacon_ops->slot_size;
+ int bcn_addr = MT_BEACON_BASE + (bcn_len * dev->beacon_data_count);
+
+ if (!mt76x02_write_beacon(dev, bcn_addr, skb))
+ dev->beacon_data_count++;
+ dev_kfree_skb(skb);
+}
+EXPORT_SYMBOL_GPL(mt76x02_mac_set_beacon);
+
+void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev,
+ struct ieee80211_vif *vif, bool enable)
+{
+ struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
+ u8 old_mask = dev->mt76.beacon_mask;
+
+ mt76x02_pre_tbtt_enable(dev, false);
+
+ if (!dev->mt76.beacon_mask)
+ dev->tbtt_count = 0;
+
+ if (enable) {
+ dev->mt76.beacon_mask |= BIT(mvif->idx);
+ } else {
+ dev->mt76.beacon_mask &= ~BIT(mvif->idx);
+ }
+
+ if (!!old_mask == !!dev->mt76.beacon_mask)
+ goto out;
+
+ if (dev->mt76.beacon_mask)
+ mt76_set(dev, MT_BEACON_TIME_CFG,
+ MT_BEACON_TIME_CFG_BEACON_TX |
+ MT_BEACON_TIME_CFG_TBTT_EN |
+ MT_BEACON_TIME_CFG_TIMER_EN);
+ else
+ mt76_clear(dev, MT_BEACON_TIME_CFG,
+ MT_BEACON_TIME_CFG_BEACON_TX |
+ MT_BEACON_TIME_CFG_TBTT_EN |
+ MT_BEACON_TIME_CFG_TIMER_EN);
+ mt76x02_beacon_enable(dev, !!dev->mt76.beacon_mask);
+
+out:
+ mt76x02_pre_tbtt_enable(dev, true);
+}
+
+void
+mt76x02_resync_beacon_timer(struct mt76x02_dev *dev)
+{
+ u32 timer_val = dev->mt76.beacon_int << 4;
+
+ dev->tbtt_count++;
+
+ /*
+ * The beacon timer drifts by 1us every tick; the timer is configured
+ * in 1/16 TU (64us) units.
+ */
+ if (dev->tbtt_count < 63)
+ return;
+
+ /*
+ * The updated beacon interval takes effect after two TBTT, because
+ * at this point the original interval has already been loaded into
+ * the next TBTT_TIMER value
+ */
+ if (dev->tbtt_count == 63)
+ timer_val -= 1;
+
+ mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
+ MT_BEACON_TIME_CFG_INTVAL, timer_val);
+
+ if (dev->tbtt_count >= 64)
+ dev->tbtt_count = 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_resync_beacon_timer);
+
+void
+mt76x02_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct mt76x02_dev *dev = (struct mt76x02_dev *)priv;
+ struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
+ struct sk_buff *skb = NULL;
+
+ if (!(dev->mt76.beacon_mask & BIT(mvif->idx)))
+ return;
+
+ skb = ieee80211_beacon_get(mt76_hw(dev), vif);
+ if (!skb)
+ return;
+
+ mt76x02_mac_set_beacon(dev, skb);
+}
+EXPORT_SYMBOL_GPL(mt76x02_update_beacon_iter);
+
+static void
+mt76x02_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct beacon_bc_data *data = priv;
+ struct mt76x02_dev *dev = data->dev;
+ struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
+ struct ieee80211_tx_info *info;
+ struct sk_buff *skb;
+
+ if (!(dev->mt76.beacon_mask & BIT(mvif->idx)))
+ return;
+
+ skb = ieee80211_get_buffered_bc(mt76_hw(dev), vif);
+ if (!skb)
+ return;
+
+ info = IEEE80211_SKB_CB(skb);
+ info->control.vif = vif;
+ info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
+ mt76_skb_set_moredata(skb, true);
+ __skb_queue_tail(&data->q, skb);
+ data->tail[mvif->idx] = skb;
+}
+
+void
+mt76x02_enqueue_buffered_bc(struct mt76x02_dev *dev,
+ struct beacon_bc_data *data,
+ int max_nframes)
+{
+ int i, nframes;
+
+ data->dev = dev;
+ __skb_queue_head_init(&data->q);
+
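+ /* keep iterating over the active interfaces until no new buffered
+ * frames are queued or max_nframes has been reached
+ */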
+ do {
+ nframes = skb_queue_len(&data->q);
+ ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt76x02_add_buffered_bc, data);
+ } while (nframes != skb_queue_len(&data->q) &&
+ skb_queue_len(&data->q) < max_nframes);
+
+ if (!skb_queue_len(&data->q))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(data->tail); i++) {
+ if (!data->tail[i])
+ continue;
+ mt76_skb_set_moredata(data->tail[i], false);
+ }
+}
+EXPORT_SYMBOL_GPL(mt76x02_enqueue_buffered_bc);
+
+void mt76x02_init_beacon_config(struct mt76x02_dev *dev)
+{
+ mt76_clear(dev, MT_BEACON_TIME_CFG, (MT_BEACON_TIME_CFG_TIMER_EN |
+ MT_BEACON_TIME_CFG_TBTT_EN |
+ MT_BEACON_TIME_CFG_BEACON_TX));
+ mt76_set(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_SYNC_MODE);
+ mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xffff);
+ mt76x02_set_beacon_offsets(dev);
+}
+EXPORT_SYMBOL_GPL(mt76x02_init_beacon_config);
+
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
new file mode 100644
index 000000000..c4fe1c436
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/debugfs.h>
+#include "mt76x02.h"
+
+static int
+mt76x02_ampdu_stat_show(struct seq_file *file, void *data)
+{
+ struct mt76x02_dev *dev = file->private;
+ int i, j;
+
+ for (i = 0; i < 4; i++) {
+ seq_puts(file, "Length: ");
+ for (j = 0; j < 8; j++)
+ seq_printf(file, "%8d | ", i * 8 + j + 1);
+ seq_puts(file, "\n");
+ seq_puts(file, "Count: ");
+ for (j = 0; j < 8; j++)
+ seq_printf(file, "%8d | ",
+ dev->mt76.aggr_stats[i * 8 + j]);
+ seq_puts(file, "\n");
+ seq_puts(file, "--------");
+ for (j = 0; j < 8; j++)
+ seq_puts(file, "-----------");
+ seq_puts(file, "\n");
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mt76x02_ampdu_stat);
+
+static int read_txpower(struct seq_file *file, void *data)
+{
+ struct mt76x02_dev *dev = dev_get_drvdata(file->private);
+
+ seq_printf(file, "Target power: %d\n", dev->target_power);
+
+ mt76_seq_puts_array(file, "Delta", dev->target_power_delta,
+ ARRAY_SIZE(dev->target_power_delta));
+ return 0;
+}
+
+static int
+mt76x02_dfs_stat_show(struct seq_file *file, void *data)
+{
+ struct mt76x02_dev *dev = file->private;
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ int i;
+
+ seq_printf(file, "allocated sequences:\t%d\n",
+ dfs_pd->seq_stats.seq_pool_len);
+ seq_printf(file, "used sequences:\t\t%d\n",
+ dfs_pd->seq_stats.seq_len);
+ seq_puts(file, "\n");
+
+ for (i = 0; i < MT_DFS_NUM_ENGINES; i++) {
+ seq_printf(file, "engine: %d\n", i);
+ seq_printf(file, " hw pattern detected:\t%d\n",
+ dfs_pd->stats[i].hw_pattern);
+ seq_printf(file, " hw pulse discarded:\t%d\n",
+ dfs_pd->stats[i].hw_pulse_discarded);
+ seq_printf(file, " sw pattern detected:\t%d\n",
+ dfs_pd->stats[i].sw_pattern);
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mt76x02_dfs_stat);
+
+static int read_agc(struct seq_file *file, void *data)
+{
+ struct mt76x02_dev *dev = dev_get_drvdata(file->private);
+
+ seq_printf(file, "avg_rssi: %d\n", dev->cal.avg_rssi_all);
+ seq_printf(file, "low_gain: %d\n", dev->cal.low_gain);
+ seq_printf(file, "false_cca: %d\n", dev->cal.false_cca);
+ seq_printf(file, "agc_gain_adjust: %d\n", dev->cal.agc_gain_adjust);
+
+ return 0;
+}
+
+static int
+mt76_edcca_set(void *data, u64 val)
+{
+ struct mt76x02_dev *dev = data;
+ enum nl80211_dfs_regions region = dev->mt76.region;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ dev->ed_monitor_enabled = !!val;
+ dev->ed_monitor = dev->ed_monitor_enabled &&
+ region == NL80211_DFS_ETSI;
+ mt76x02_edcca_init(dev);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ return 0;
+}
+
+static int
+mt76_edcca_get(void *data, u64 *val)
+{
+ struct mt76x02_dev *dev = data;
+
+ *val = dev->ed_monitor_enabled;
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_edcca, mt76_edcca_get, mt76_edcca_set,
+ "%lld\n");
+
+void mt76x02_init_debugfs(struct mt76x02_dev *dev)
+{
+ struct dentry *dir;
+
+ dir = mt76_register_debugfs(&dev->mt76);
+ if (!dir)
+ return;
+
+ debugfs_create_devm_seqfile(dev->mt76.dev, "xmit-queues", dir,
+ mt76_queues_read);
+ debugfs_create_u8("temperature", 0400, dir, &dev->cal.temp);
+ debugfs_create_bool("tpc", 0600, dir, &dev->enable_tpc);
+
+ debugfs_create_file("edcca", 0600, dir, dev, &fops_edcca);
+ debugfs_create_file("ampdu_stat", 0400, dir, dev, &mt76x02_ampdu_stat_fops);
+ debugfs_create_file("dfs_stats", 0400, dir, dev, &mt76x02_dfs_stat_fops);
+ debugfs_create_devm_seqfile(dev->mt76.dev, "txpower", dir,
+ read_txpower);
+
+ debugfs_create_devm_seqfile(dev->mt76.dev, "agc", dir, read_agc);
+
+ debugfs_create_u32("tx_hang_reset", 0400, dir, &dev->tx_hang_reset);
+}
+EXPORT_SYMBOL_GPL(mt76x02_init_debugfs);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
new file mode 100644
index 000000000..b29cd39dc
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
@@ -0,0 +1,895 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include "mt76x02.h"
+
+#define RADAR_SPEC(m, len, el, eh, wl, wh, \
+ w_tolerance, tl, th, t_tolerance, \
+ bl, bh, event_exp, power_jmp) \
+{ \
+ .mode = m, \
+ .avg_len = len, \
+ .e_low = el, \
+ .e_high = eh, \
+ .w_low = wl, \
+ .w_high = wh, \
+ .w_margin = w_tolerance, \
+ .t_low = tl, \
+ .t_high = th, \
+ .t_margin = t_tolerance, \
+ .b_low = bl, \
+ .b_high = bh, \
+ .event_expiration = event_exp, \
+ .pwr_jmp = power_jmp \
+}
+
+static const struct mt76x02_radar_specs etsi_radar_specs[] = {
+ /* 20MHz */
+ RADAR_SPEC(0, 8, 2, 15, 106, 150, 10, 4900, 100096, 10, 0,
+ 0x7fffffff, 0x155cc0, 0x19cc),
+ RADAR_SPEC(0, 40, 4, 59, 96, 380, 150, 4900, 100096, 40, 0,
+ 0x7fffffff, 0x155cc0, 0x19cc),
+ RADAR_SPEC(3, 60, 20, 46, 300, 640, 80, 4900, 10100, 80, 0,
+ 0x7fffffff, 0x155cc0, 0x19dd),
+ RADAR_SPEC(8, 8, 2, 9, 106, 150, 32, 4900, 296704, 32, 0,
+ 0x7fffffff, 0x2191c0, 0x15cc),
+ /* 40MHz */
+ RADAR_SPEC(0, 8, 2, 15, 106, 150, 10, 4900, 100096, 10, 0,
+ 0x7fffffff, 0x155cc0, 0x19cc),
+ RADAR_SPEC(0, 40, 4, 59, 96, 380, 150, 4900, 100096, 40, 0,
+ 0x7fffffff, 0x155cc0, 0x19cc),
+ RADAR_SPEC(3, 60, 20, 46, 300, 640, 80, 4900, 10100, 80, 0,
+ 0x7fffffff, 0x155cc0, 0x19dd),
+ RADAR_SPEC(8, 8, 2, 9, 106, 150, 32, 4900, 296704, 32, 0,
+ 0x7fffffff, 0x2191c0, 0x15cc),
+ /* 80MHz */
+ RADAR_SPEC(0, 8, 2, 15, 106, 150, 10, 4900, 100096, 10, 0,
+ 0x7fffffff, 0x155cc0, 0x19cc),
+ RADAR_SPEC(0, 40, 4, 59, 96, 380, 150, 4900, 100096, 40, 0,
+ 0x7fffffff, 0x155cc0, 0x19cc),
+ RADAR_SPEC(3, 60, 20, 46, 300, 640, 80, 4900, 10100, 80, 0,
+ 0x7fffffff, 0x155cc0, 0x19dd),
+ RADAR_SPEC(8, 8, 2, 9, 106, 150, 32, 4900, 296704, 32, 0,
+ 0x7fffffff, 0x2191c0, 0x15cc)
+};
+
+static const struct mt76x02_radar_specs fcc_radar_specs[] = {
+ /* 20MHz */
+ RADAR_SPEC(0, 8, 2, 12, 106, 150, 5, 2900, 80100, 5, 0,
+ 0x7fffffff, 0xfe808, 0x13dc),
+ RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+ 0x7fffffff, 0xfe808, 0x19dd),
+ RADAR_SPEC(0, 40, 4, 54, 96, 480, 150, 2900, 80100, 40, 0,
+ 0x7fffffff, 0xfe808, 0x12cc),
+ RADAR_SPEC(2, 60, 15, 63, 640, 2080, 32, 19600, 40200, 32, 0,
+ 0x3938700, 0x57bcf00, 0x1289),
+ /* 40MHz */
+ RADAR_SPEC(0, 8, 2, 12, 106, 150, 5, 2900, 80100, 5, 0,
+ 0x7fffffff, 0xfe808, 0x13dc),
+ RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+ 0x7fffffff, 0xfe808, 0x19dd),
+ RADAR_SPEC(0, 40, 4, 54, 96, 480, 150, 2900, 80100, 40, 0,
+ 0x7fffffff, 0xfe808, 0x12cc),
+ RADAR_SPEC(2, 60, 15, 63, 640, 2080, 32, 19600, 40200, 32, 0,
+ 0x3938700, 0x57bcf00, 0x1289),
+ /* 80MHz */
+ RADAR_SPEC(0, 8, 2, 14, 106, 150, 15, 2900, 80100, 15, 0,
+ 0x7fffffff, 0xfe808, 0x16cc),
+ RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+ 0x7fffffff, 0xfe808, 0x19dd),
+ RADAR_SPEC(0, 40, 4, 54, 96, 480, 150, 2900, 80100, 40, 0,
+ 0x7fffffff, 0xfe808, 0x12cc),
+ RADAR_SPEC(2, 60, 15, 63, 640, 2080, 32, 19600, 40200, 32, 0,
+ 0x3938700, 0x57bcf00, 0x1289)
+};
+
+static const struct mt76x02_radar_specs jp_w56_radar_specs[] = {
+ /* 20MHz */
+ RADAR_SPEC(0, 8, 2, 7, 106, 150, 5, 2900, 80100, 5, 0,
+ 0x7fffffff, 0x14c080, 0x13dc),
+ RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+ 0x7fffffff, 0x14c080, 0x19dd),
+ RADAR_SPEC(0, 40, 4, 44, 96, 480, 150, 2900, 80100, 40, 0,
+ 0x7fffffff, 0x14c080, 0x12cc),
+ RADAR_SPEC(2, 60, 15, 48, 940, 2080, 32, 19600, 40200, 32, 0,
+ 0x3938700, 0x57bcf00, 0x1289),
+ /* 40MHz */
+ RADAR_SPEC(0, 8, 2, 7, 106, 150, 5, 2900, 80100, 5, 0,
+ 0x7fffffff, 0x14c080, 0x13dc),
+ RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+ 0x7fffffff, 0x14c080, 0x19dd),
+ RADAR_SPEC(0, 40, 4, 44, 96, 480, 150, 2900, 80100, 40, 0,
+ 0x7fffffff, 0x14c080, 0x12cc),
+ RADAR_SPEC(2, 60, 15, 48, 940, 2080, 32, 19600, 40200, 32, 0,
+ 0x3938700, 0x57bcf00, 0x1289),
+ /* 80MHz */
+ RADAR_SPEC(0, 8, 2, 9, 106, 150, 15, 2900, 80100, 15, 0,
+ 0x7fffffff, 0x14c080, 0x16cc),
+ RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+ 0x7fffffff, 0x14c080, 0x19dd),
+ RADAR_SPEC(0, 40, 4, 44, 96, 480, 150, 2900, 80100, 40, 0,
+ 0x7fffffff, 0x14c080, 0x12cc),
+ RADAR_SPEC(2, 60, 15, 48, 940, 2080, 32, 19600, 40200, 32, 0,
+ 0x3938700, 0x57bcf00, 0x1289)
+};
+
+static const struct mt76x02_radar_specs jp_w53_radar_specs[] = {
+ /* 20MHz */
+ RADAR_SPEC(0, 8, 2, 9, 106, 150, 20, 28400, 77000, 20, 0,
+ 0x7fffffff, 0x14c080, 0x16cc),
+ { 0 },
+ RADAR_SPEC(0, 40, 4, 44, 96, 200, 150, 28400, 77000, 60, 0,
+ 0x7fffffff, 0x14c080, 0x16cc),
+ { 0 },
+ /* 40MHz */
+ RADAR_SPEC(0, 8, 2, 9, 106, 150, 20, 28400, 77000, 20, 0,
+ 0x7fffffff, 0x14c080, 0x16cc),
+ { 0 },
+ RADAR_SPEC(0, 40, 4, 44, 96, 200, 150, 28400, 77000, 60, 0,
+ 0x7fffffff, 0x14c080, 0x16cc),
+ { 0 },
+ /* 80MHz */
+ RADAR_SPEC(0, 8, 2, 9, 106, 150, 20, 28400, 77000, 20, 0,
+ 0x7fffffff, 0x14c080, 0x16cc),
+ { 0 },
+ RADAR_SPEC(0, 40, 4, 44, 96, 200, 150, 28400, 77000, 60, 0,
+ 0x7fffffff, 0x14c080, 0x16cc),
+ { 0 }
+};
+
+static void
+mt76x02_dfs_set_capture_mode_ctrl(struct mt76x02_dev *dev, u8 enable)
+{
+ u32 data;
+
+ data = (1 << 1) | enable;
+ mt76_wr(dev, MT_BBP(DFS, 36), data);
+}
+
+static void mt76x02_dfs_seq_pool_put(struct mt76x02_dev *dev,
+ struct mt76x02_dfs_sequence *seq)
+{
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+
+ list_add(&seq->head, &dfs_pd->seq_pool);
+
+ dfs_pd->seq_stats.seq_pool_len++;
+ dfs_pd->seq_stats.seq_len--;
+}
+
+static struct mt76x02_dfs_sequence *
+mt76x02_dfs_seq_pool_get(struct mt76x02_dev *dev)
+{
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x02_dfs_sequence *seq;
+
+ if (list_empty(&dfs_pd->seq_pool)) {
+ seq = devm_kzalloc(dev->mt76.dev, sizeof(*seq), GFP_ATOMIC);
+ } else {
+ seq = list_first_entry(&dfs_pd->seq_pool,
+ struct mt76x02_dfs_sequence,
+ head);
+ list_del(&seq->head);
+ dfs_pd->seq_stats.seq_pool_len--;
+ }
+ if (seq)
+ dfs_pd->seq_stats.seq_len++;
+
+ return seq;
+}
+
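+/*
+ * Return the integer multiple if 'val' is approximately a whole-number
+ * multiple of 'frac' (within 'margin'), or 0 otherwise.
+ */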
+static int mt76x02_dfs_get_multiple(int val, int frac, int margin)
+{
+ int remainder, factor;
+
+ if (!frac)
+ return 0;
+
+ if (abs(val - frac) <= margin)
+ return 1;
+
+ factor = val / frac;
+ remainder = val % frac;
+
+ if (remainder > margin) {
+ if ((frac - remainder) <= margin)
+ factor++;
+ else
+ factor = 0;
+ }
+ return factor;
+}
+
+static void mt76x02_dfs_detector_reset(struct mt76x02_dev *dev)
+{
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x02_dfs_sequence *seq, *tmp_seq;
+ int i;
+
+ /* reset hw detector */
+ mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
+
+ /* reset sw detector */
+ for (i = 0; i < ARRAY_SIZE(dfs_pd->event_rb); i++) {
+ dfs_pd->event_rb[i].h_rb = 0;
+ dfs_pd->event_rb[i].t_rb = 0;
+ }
+
+ list_for_each_entry_safe(seq, tmp_seq, &dfs_pd->sequences, head) {
+ list_del_init(&seq->head);
+ mt76x02_dfs_seq_pool_put(dev, seq);
+ }
+}
+
+static bool mt76x02_dfs_check_chirp(struct mt76x02_dev *dev)
+{
+ bool ret = false;
+ u32 current_ts, delta_ts;
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+
+ current_ts = mt76_rr(dev, MT_PBF_LIFE_TIMER);
+ delta_ts = current_ts - dfs_pd->chirp_pulse_ts;
+ dfs_pd->chirp_pulse_ts = current_ts;
+
+ /* 12 sec (timestamp ticks of ~1us, so 1 << 20 ticks is roughly 1 sec) */
+ if (delta_ts <= (12 * (1 << 20))) {
+ if (++dfs_pd->chirp_pulse_cnt > 8)
+ ret = true;
+ } else {
+ dfs_pd->chirp_pulse_cnt = 1;
+ }
+
+ return ret;
+}
+
+static void mt76x02_dfs_get_hw_pulse(struct mt76x02_dev *dev,
+ struct mt76x02_dfs_hw_pulse *pulse)
+{
+ u32 data;
+
+ /* select channel */
+ data = (MT_DFS_CH_EN << 16) | pulse->engine;
+ mt76_wr(dev, MT_BBP(DFS, 0), data);
+
+ /* reported period */
+ pulse->period = mt76_rr(dev, MT_BBP(DFS, 19));
+
+ /* reported width */
+ pulse->w1 = mt76_rr(dev, MT_BBP(DFS, 20));
+ pulse->w2 = mt76_rr(dev, MT_BBP(DFS, 23));
+
+ /* reported burst number */
+ pulse->burst = mt76_rr(dev, MT_BBP(DFS, 22));
+}
+
+static bool mt76x02_dfs_check_hw_pulse(struct mt76x02_dev *dev,
+ struct mt76x02_dfs_hw_pulse *pulse)
+{
+ bool ret = false;
+
+ if (!pulse->period || !pulse->w1)
+ return false;
+
+ switch (dev->mt76.region) {
+ case NL80211_DFS_FCC:
+ if (pulse->engine > 3)
+ break;
+
+ if (pulse->engine == 3) {
+ ret = mt76x02_dfs_check_chirp(dev);
+ break;
+ }
+
+ /* check short pulse */
+ if (pulse->w1 < 120)
+ ret = (pulse->period >= 2900 &&
+ (pulse->period <= 4700 ||
+ pulse->period >= 6400) &&
+ (pulse->period <= 6800 ||
+ pulse->period >= 10200) &&
+ pulse->period <= 61600);
+ else if (pulse->w1 < 130) /* 120 - 130 */
+ ret = (pulse->period >= 2900 &&
+ pulse->period <= 61600);
+ else
+ ret = (pulse->period >= 3500 &&
+ pulse->period <= 10100);
+ break;
+ case NL80211_DFS_ETSI:
+ if (pulse->engine >= 3)
+ break;
+
+ ret = (pulse->period >= 4900 &&
+ (pulse->period <= 10200 ||
+ pulse->period >= 12400) &&
+ pulse->period <= 100100);
+ break;
+ case NL80211_DFS_JP:
+ if (dev->mphy.chandef.chan->center_freq >= 5250 &&
+ dev->mphy.chandef.chan->center_freq <= 5350) {
+ /* JPW53 */
+ if (pulse->w1 <= 130)
+ ret = (pulse->period >= 28360 &&
+ (pulse->period <= 28700 ||
+ pulse->period >= 76900) &&
+ pulse->period <= 76940);
+ break;
+ }
+
+ if (pulse->engine > 3)
+ break;
+
+ if (pulse->engine == 3) {
+ ret = mt76x02_dfs_check_chirp(dev);
+ break;
+ }
+
+ /* check short pulse */
+ if (pulse->w1 < 120)
+ ret = (pulse->period >= 2900 &&
+ (pulse->period <= 4700 ||
+ pulse->period >= 6400) &&
+ (pulse->period <= 6800 ||
+ pulse->period >= 27560) &&
+ (pulse->period <= 27960 ||
+ pulse->period >= 28360) &&
+ (pulse->period <= 28700 ||
+ pulse->period >= 79900) &&
+ pulse->period <= 80100);
+ else if (pulse->w1 < 130) /* 120 - 130 */
+ ret = (pulse->period >= 2900 &&
+ (pulse->period <= 10100 ||
+ pulse->period >= 27560) &&
+ (pulse->period <= 27960 ||
+ pulse->period >= 28360) &&
+ (pulse->period <= 28700 ||
+ pulse->period >= 79900) &&
+ pulse->period <= 80100);
+ else
+ ret = (pulse->period >= 3900 &&
+ pulse->period <= 10100);
+ break;
+ case NL80211_DFS_UNSET:
+ default:
+ return false;
+ }
+
+ return ret;
+}
+
+static bool mt76x02_dfs_fetch_event(struct mt76x02_dev *dev,
+ struct mt76x02_dfs_event *event)
+{
+ u32 data;
+
+ /* 1st: DFS_R37[31]: 0 (engine 0) - 1 (engine 2)
+ * 2nd: DFS_R37[21:0]: pulse time
+ * 3rd: DFS_R37[11:0]: pulse width
+ * 3rd: DFS_R37[25:16]: phase
+ * 4th: DFS_R37[12:0]: current pwr
+ * 4th: DFS_R37[21:16]: pwr stable counter
+ *
+ * 1st: DFS_R37[31:0] set to 0xffffffff means no event detected
+ */
+ data = mt76_rr(dev, MT_BBP(DFS, 37));
+ if (!MT_DFS_CHECK_EVENT(data))
+ return false;
+
+ event->engine = MT_DFS_EVENT_ENGINE(data);
+ data = mt76_rr(dev, MT_BBP(DFS, 37));
+ event->ts = MT_DFS_EVENT_TIMESTAMP(data);
+ data = mt76_rr(dev, MT_BBP(DFS, 37));
+ event->width = MT_DFS_EVENT_WIDTH(data);
+
+ return true;
+}
+
+static bool mt76x02_dfs_check_event(struct mt76x02_dev *dev,
+ struct mt76x02_dfs_event *event)
+{
+ if (event->engine == 2) {
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x02_dfs_event_rb *event_buff = &dfs_pd->event_rb[1];
+ u16 last_event_idx;
+ u32 delta_ts;
+
+ last_event_idx = mt76_decr(event_buff->t_rb,
+ MT_DFS_EVENT_BUFLEN);
+ delta_ts = event->ts - event_buff->data[last_event_idx].ts;
+ if (delta_ts < MT_DFS_EVENT_TIME_MARGIN &&
+ event_buff->data[last_event_idx].width >= 200)
+ return false;
+ }
+ return true;
+}
+
+static void mt76x02_dfs_queue_event(struct mt76x02_dev *dev,
+ struct mt76x02_dfs_event *event)
+{
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x02_dfs_event_rb *event_buff;
+
+ /* add radar event to ring buffer */
+ event_buff = event->engine == 2 ? &dfs_pd->event_rb[1]
+ : &dfs_pd->event_rb[0];
+ event_buff->data[event_buff->t_rb] = *event;
+ event_buff->data[event_buff->t_rb].fetch_ts = jiffies;
+
+ event_buff->t_rb = mt76_incr(event_buff->t_rb, MT_DFS_EVENT_BUFLEN);
+ if (event_buff->t_rb == event_buff->h_rb)
+ event_buff->h_rb = mt76_incr(event_buff->h_rb,
+ MT_DFS_EVENT_BUFLEN);
+}
+
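+/*
+ * Walk the event ring buffer backwards and try to build a new pulse
+ * sequence that includes the new event and is longer than the longest
+ * sequence it was already added to (cur_len).
+ */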
+static int mt76x02_dfs_create_sequence(struct mt76x02_dev *dev,
+ struct mt76x02_dfs_event *event,
+ u16 cur_len)
+{
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x02_dfs_sw_detector_params *sw_params;
+ u32 width_delta, with_sum;
+ struct mt76x02_dfs_sequence seq, *seq_p;
+ struct mt76x02_dfs_event_rb *event_rb;
+ struct mt76x02_dfs_event *cur_event;
+ int i, j, end, pri, factor, cur_pri;
+
+ event_rb = event->engine == 2 ? &dfs_pd->event_rb[1]
+ : &dfs_pd->event_rb[0];
+
+ i = mt76_decr(event_rb->t_rb, MT_DFS_EVENT_BUFLEN);
+ end = mt76_decr(event_rb->h_rb, MT_DFS_EVENT_BUFLEN);
+
+ while (i != end) {
+ cur_event = &event_rb->data[i];
+ with_sum = event->width + cur_event->width;
+
+ sw_params = &dfs_pd->sw_dpd_params;
+ switch (dev->mt76.region) {
+ case NL80211_DFS_FCC:
+ case NL80211_DFS_JP:
+ if (with_sum < 600)
+ width_delta = 8;
+ else
+ width_delta = with_sum >> 3;
+ break;
+ case NL80211_DFS_ETSI:
+ if (event->engine == 2)
+ width_delta = with_sum >> 6;
+ else if (with_sum < 620)
+ width_delta = 24;
+ else
+ width_delta = 8;
+ break;
+ case NL80211_DFS_UNSET:
+ default:
+ return -EINVAL;
+ }
+
+ pri = event->ts - cur_event->ts;
+ if (abs(event->width - cur_event->width) > width_delta ||
+ pri < sw_params->min_pri)
+ goto next;
+
+ if (pri > sw_params->max_pri)
+ break;
+
+ seq.pri = event->ts - cur_event->ts;
+ seq.first_ts = cur_event->ts;
+ seq.last_ts = event->ts;
+ seq.engine = event->engine;
+ seq.count = 2;
+
+ j = mt76_decr(i, MT_DFS_EVENT_BUFLEN);
+ while (j != end) {
+ cur_event = &event_rb->data[j];
+ cur_pri = event->ts - cur_event->ts;
+ factor = mt76x02_dfs_get_multiple(cur_pri, seq.pri,
+ sw_params->pri_margin);
+ if (factor > 0) {
+ seq.first_ts = cur_event->ts;
+ seq.count++;
+ }
+
+ j = mt76_decr(j, MT_DFS_EVENT_BUFLEN);
+ }
+ if (seq.count <= cur_len)
+ goto next;
+
+ seq_p = mt76x02_dfs_seq_pool_get(dev);
+ if (!seq_p)
+ return -ENOMEM;
+
+ *seq_p = seq;
+ INIT_LIST_HEAD(&seq_p->head);
+ list_add(&seq_p->head, &dfs_pd->sequences);
+next:
+ i = mt76_decr(i, MT_DFS_EVENT_BUFLEN);
+ }
+ return 0;
+}
+
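+/*
+ * Add the new event to every existing sequence whose PRI it matches,
+ * drop sequences that have expired, and return the length of the longest
+ * updated sequence.
+ */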
+static u16 mt76x02_dfs_add_event_to_sequence(struct mt76x02_dev *dev,
+ struct mt76x02_dfs_event *event)
+{
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x02_dfs_sw_detector_params *sw_params;
+ struct mt76x02_dfs_sequence *seq, *tmp_seq;
+ u16 max_seq_len = 0;
+ int factor, pri;
+
+ sw_params = &dfs_pd->sw_dpd_params;
+ list_for_each_entry_safe(seq, tmp_seq, &dfs_pd->sequences, head) {
+ if (event->ts > seq->first_ts + MT_DFS_SEQUENCE_WINDOW) {
+ list_del_init(&seq->head);
+ mt76x02_dfs_seq_pool_put(dev, seq);
+ continue;
+ }
+
+ if (event->engine != seq->engine)
+ continue;
+
+ pri = event->ts - seq->last_ts;
+ factor = mt76x02_dfs_get_multiple(pri, seq->pri,
+ sw_params->pri_margin);
+ if (factor > 0) {
+ seq->last_ts = event->ts;
+ seq->count++;
+ max_seq_len = max_t(u16, max_seq_len, seq->count);
+ }
+ }
+ return max_seq_len;
+}
+
+static bool mt76x02_dfs_check_detection(struct mt76x02_dev *dev)
+{
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x02_dfs_sequence *seq;
+
+ if (list_empty(&dfs_pd->sequences))
+ return false;
+
+ list_for_each_entry(seq, &dfs_pd->sequences, head) {
+ if (seq->count > MT_DFS_SEQUENCE_TH) {
+ dfs_pd->stats[seq->engine].sw_pattern++;
+ return true;
+ }
+ }
+ return false;
+}
+
+static void mt76x02_dfs_add_events(struct mt76x02_dev *dev)
+{
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x02_dfs_event event;
+ int i, seq_len;
+
+ /* disable debug mode */
+ mt76x02_dfs_set_capture_mode_ctrl(dev, false);
+ for (i = 0; i < MT_DFS_EVENT_LOOP; i++) {
+ if (!mt76x02_dfs_fetch_event(dev, &event))
+ break;
+
+ if (dfs_pd->last_event_ts > event.ts)
+ mt76x02_dfs_detector_reset(dev);
+ dfs_pd->last_event_ts = event.ts;
+
+ if (!mt76x02_dfs_check_event(dev, &event))
+ continue;
+
+ seq_len = mt76x02_dfs_add_event_to_sequence(dev, &event);
+ mt76x02_dfs_create_sequence(dev, &event, seq_len);
+
+ mt76x02_dfs_queue_event(dev, &event);
+ }
+ mt76x02_dfs_set_capture_mode_ctrl(dev, true);
+}
+
+static void mt76x02_dfs_check_event_window(struct mt76x02_dev *dev)
+{
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x02_dfs_event_rb *event_buff;
+ struct mt76x02_dfs_event *event;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dfs_pd->event_rb); i++) {
+ event_buff = &dfs_pd->event_rb[i];
+
+ while (event_buff->h_rb != event_buff->t_rb) {
+ event = &event_buff->data[event_buff->h_rb];
+
+ /* sorted list */
+ if (time_is_after_jiffies(event->fetch_ts +
+ MT_DFS_EVENT_WINDOW))
+ break;
+ event_buff->h_rb = mt76_incr(event_buff->h_rb,
+ MT_DFS_EVENT_BUFLEN);
+ }
+ }
+}
+
+static void mt76x02_dfs_tasklet(unsigned long arg)
+{
+ struct mt76x02_dev *dev = (struct mt76x02_dev *)arg;
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ u32 engine_mask;
+ int i;
+
+ if (test_bit(MT76_SCANNING, &dev->mphy.state))
+ goto out;
+
+ if (time_is_before_jiffies(dfs_pd->last_sw_check +
+ MT_DFS_SW_TIMEOUT)) {
+ bool radar_detected;
+
+ dfs_pd->last_sw_check = jiffies;
+
+ mt76x02_dfs_add_events(dev);
+ radar_detected = mt76x02_dfs_check_detection(dev);
+ if (radar_detected) {
+ /* sw detector rx radar pattern */
+ ieee80211_radar_detected(dev->mt76.hw);
+ mt76x02_dfs_detector_reset(dev);
+
+ return;
+ }
+ mt76x02_dfs_check_event_window(dev);
+ }
+
+ engine_mask = mt76_rr(dev, MT_BBP(DFS, 1));
+ if (!(engine_mask & 0xf))
+ goto out;
+
+ for (i = 0; i < MT_DFS_NUM_ENGINES; i++) {
+ struct mt76x02_dfs_hw_pulse pulse;
+
+ if (!(engine_mask & (1 << i)))
+ continue;
+
+ pulse.engine = i;
+ mt76x02_dfs_get_hw_pulse(dev, &pulse);
+
+ if (!mt76x02_dfs_check_hw_pulse(dev, &pulse)) {
+ dfs_pd->stats[i].hw_pulse_discarded++;
+ continue;
+ }
+
+ /* hw detector rx radar pattern */
+ dfs_pd->stats[i].hw_pattern++;
+ ieee80211_radar_detected(dev->mt76.hw);
+ mt76x02_dfs_detector_reset(dev);
+
+ return;
+ }
+
+ /* reset hw detector */
+ mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
+
+out:
+ mt76x02_irq_enable(dev, MT_INT_GPTIMER);
+}
+
+static void mt76x02_dfs_init_sw_detector(struct mt76x02_dev *dev)
+{
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+
+ switch (dev->mt76.region) {
+ case NL80211_DFS_FCC:
+ dfs_pd->sw_dpd_params.max_pri = MT_DFS_FCC_MAX_PRI;
+ dfs_pd->sw_dpd_params.min_pri = MT_DFS_FCC_MIN_PRI;
+ dfs_pd->sw_dpd_params.pri_margin = MT_DFS_PRI_MARGIN;
+ break;
+ case NL80211_DFS_ETSI:
+ dfs_pd->sw_dpd_params.max_pri = MT_DFS_ETSI_MAX_PRI;
+ dfs_pd->sw_dpd_params.min_pri = MT_DFS_ETSI_MIN_PRI;
+ dfs_pd->sw_dpd_params.pri_margin = MT_DFS_PRI_MARGIN << 2;
+ break;
+ case NL80211_DFS_JP:
+ dfs_pd->sw_dpd_params.max_pri = MT_DFS_JP_MAX_PRI;
+ dfs_pd->sw_dpd_params.min_pri = MT_DFS_JP_MIN_PRI;
+ dfs_pd->sw_dpd_params.pri_margin = MT_DFS_PRI_MARGIN;
+ break;
+ case NL80211_DFS_UNSET:
+ default:
+ break;
+ }
+}
+
+static void mt76x02_dfs_set_bbp_params(struct mt76x02_dev *dev)
+{
+ const struct mt76x02_radar_specs *radar_specs;
+ u8 i, shift;
+ u32 data;
+
+ switch (dev->mphy.chandef.width) {
+ case NL80211_CHAN_WIDTH_40:
+ shift = MT_DFS_NUM_ENGINES;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ shift = 2 * MT_DFS_NUM_ENGINES;
+ break;
+ default:
+ shift = 0;
+ break;
+ }
+
+ switch (dev->mt76.region) {
+ case NL80211_DFS_FCC:
+ radar_specs = &fcc_radar_specs[shift];
+ break;
+ case NL80211_DFS_ETSI:
+ radar_specs = &etsi_radar_specs[shift];
+ break;
+ case NL80211_DFS_JP:
+ if (dev->mphy.chandef.chan->center_freq >= 5250 &&
+ dev->mphy.chandef.chan->center_freq <= 5350)
+ radar_specs = &jp_w53_radar_specs[shift];
+ else
+ radar_specs = &jp_w56_radar_specs[shift];
+ break;
+ case NL80211_DFS_UNSET:
+ default:
+ return;
+ }
+
+ data = (MT_DFS_VGA_MASK << 16) |
+ (MT_DFS_PWR_GAIN_OFFSET << 12) |
+ (MT_DFS_PWR_DOWN_TIME << 8) |
+ (MT_DFS_SYM_ROUND << 4) |
+ (MT_DFS_DELTA_DELAY & 0xf);
+ mt76_wr(dev, MT_BBP(DFS, 2), data);
+
+ data = (MT_DFS_RX_PE_MASK << 16) | MT_DFS_PKT_END_MASK;
+ mt76_wr(dev, MT_BBP(DFS, 3), data);
+
+ for (i = 0; i < MT_DFS_NUM_ENGINES; i++) {
+ /* configure engine */
+ mt76_wr(dev, MT_BBP(DFS, 0), i);
+
+ /* detection mode + avg_len */
+ data = ((radar_specs[i].avg_len & 0x1ff) << 16) |
+ (radar_specs[i].mode & 0xf);
+ mt76_wr(dev, MT_BBP(DFS, 4), data);
+
+ /* dfs energy */
+ data = ((radar_specs[i].e_high & 0x0fff) << 16) |
+ (radar_specs[i].e_low & 0x0fff);
+ mt76_wr(dev, MT_BBP(DFS, 5), data);
+
+ /* dfs period */
+ mt76_wr(dev, MT_BBP(DFS, 7), radar_specs[i].t_low);
+ mt76_wr(dev, MT_BBP(DFS, 9), radar_specs[i].t_high);
+
+ /* dfs burst */
+ mt76_wr(dev, MT_BBP(DFS, 11), radar_specs[i].b_low);
+ mt76_wr(dev, MT_BBP(DFS, 13), radar_specs[i].b_high);
+
+ /* dfs width */
+ data = ((radar_specs[i].w_high & 0x0fff) << 16) |
+ (radar_specs[i].w_low & 0x0fff);
+ mt76_wr(dev, MT_BBP(DFS, 14), data);
+
+ /* dfs margins */
+ data = (radar_specs[i].w_margin << 16) |
+ radar_specs[i].t_margin;
+ mt76_wr(dev, MT_BBP(DFS, 15), data);
+
+ /* dfs event expiration */
+ mt76_wr(dev, MT_BBP(DFS, 17), radar_specs[i].event_expiration);
+
+ /* dfs pwr adj */
+ mt76_wr(dev, MT_BBP(DFS, 30), radar_specs[i].pwr_jmp);
+ }
+
+ /* reset status */
+ mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
+ mt76_wr(dev, MT_BBP(DFS, 36), 0x3);
+
+ /* enable detection */
+ mt76_wr(dev, MT_BBP(DFS, 0), MT_DFS_CH_EN << 16);
+ mt76_wr(dev, MT_BBP(IBI, 11), 0x0c350001);
+}
+
+void mt76x02_phy_dfs_adjust_agc(struct mt76x02_dev *dev)
+{
+ u32 agc_r8, agc_r4, val_r8, val_r4, dfs_r31;
+
+ agc_r8 = mt76_rr(dev, MT_BBP(AGC, 8));
+ agc_r4 = mt76_rr(dev, MT_BBP(AGC, 4));
+
+ val_r8 = (agc_r8 & 0x00007e00) >> 9;
+ val_r4 = agc_r4 & ~0x1f000000;
+ val_r4 += (((val_r8 + 1) >> 1) << 24);
+ mt76_wr(dev, MT_BBP(AGC, 4), val_r4);
+
+ dfs_r31 = FIELD_GET(MT_BBP_AGC_LNA_HIGH_GAIN, val_r4);
+ dfs_r31 += val_r8;
+ dfs_r31 -= (agc_r8 & 0x00000038) >> 3;
+ dfs_r31 = (dfs_r31 << 16) | 0x00000307;
+ mt76_wr(dev, MT_BBP(DFS, 31), dfs_r31);
+
+ if (is_mt76x2(dev)) {
+ mt76_wr(dev, MT_BBP(DFS, 32), 0x00040071);
+ } else {
+ /* disable hw detector */
+ mt76_wr(dev, MT_BBP(DFS, 0), 0);
+ /* enable hw detector */
+ mt76_wr(dev, MT_BBP(DFS, 0), MT_DFS_CH_EN << 16);
+ }
+}
+EXPORT_SYMBOL_GPL(mt76x02_phy_dfs_adjust_agc);
+
+void mt76x02_dfs_init_params(struct mt76x02_dev *dev)
+{
+ struct cfg80211_chan_def *chandef = &dev->mphy.chandef;
+
+ if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
+ dev->mt76.region != NL80211_DFS_UNSET) {
+ mt76x02_dfs_init_sw_detector(dev);
+ mt76x02_dfs_set_bbp_params(dev);
+ /* enable debug mode */
+ mt76x02_dfs_set_capture_mode_ctrl(dev, true);
+
+ mt76x02_irq_enable(dev, MT_INT_GPTIMER);
+ mt76_rmw_field(dev, MT_INT_TIMER_EN,
+ MT_INT_TIMER_EN_GP_TIMER_EN, 1);
+ } else {
+ /* disable hw detector */
+ mt76_wr(dev, MT_BBP(DFS, 0), 0);
+ /* clear detector status */
+ mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
+ if (mt76_chip(&dev->mt76) == 0x7610 ||
+ mt76_chip(&dev->mt76) == 0x7630)
+ mt76_wr(dev, MT_BBP(IBI, 11), 0xfde8081);
+ else
+ mt76_wr(dev, MT_BBP(IBI, 11), 0);
+
+ mt76x02_irq_disable(dev, MT_INT_GPTIMER);
+ mt76_rmw_field(dev, MT_INT_TIMER_EN,
+ MT_INT_TIMER_EN_GP_TIMER_EN, 0);
+ }
+}
+EXPORT_SYMBOL_GPL(mt76x02_dfs_init_params);
+
+void mt76x02_dfs_init_detector(struct mt76x02_dev *dev)
+{
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+
+ INIT_LIST_HEAD(&dfs_pd->sequences);
+ INIT_LIST_HEAD(&dfs_pd->seq_pool);
+ dev->mt76.region = NL80211_DFS_UNSET;
+ dfs_pd->last_sw_check = jiffies;
+ tasklet_init(&dfs_pd->dfs_tasklet, mt76x02_dfs_tasklet,
+ (unsigned long)dev);
+}
+
+static void
+mt76x02_dfs_set_domain(struct mt76x02_dev *dev,
+ enum nl80211_dfs_regions region)
+{
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+
+ mutex_lock(&dev->mt76.mutex);
+ if (dev->mt76.region != region) {
+ tasklet_disable(&dfs_pd->dfs_tasklet);
+
+ dev->ed_monitor = dev->ed_monitor_enabled &&
+ region == NL80211_DFS_ETSI;
+ mt76x02_edcca_init(dev);
+
+ dev->mt76.region = region;
+ mt76x02_dfs_init_params(dev);
+ tasklet_enable(&dfs_pd->dfs_tasklet);
+ }
+ mutex_unlock(&dev->mt76.mutex);
+}
+
+void mt76x02_regd_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct mt76x02_dev *dev = hw->priv;
+
+ mt76x02_dfs_set_domain(dev, request->dfs_region);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h
new file mode 100644
index 000000000..491010a32
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2016 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#ifndef __MT76x02_DFS_H
+#define __MT76x02_DFS_H
+
+#include <linux/types.h>
+#include <linux/nl80211.h>
+
+#define MT_DFS_GP_INTERVAL (10 << 4) /* 64 us unit */
+#define MT_DFS_NUM_ENGINES 4
+
+/* bbp params */
+#define MT_DFS_SYM_ROUND 0
+#define MT_DFS_DELTA_DELAY 2
+#define MT_DFS_VGA_MASK 0
+#define MT_DFS_PWR_GAIN_OFFSET 3
+#define MT_DFS_PWR_DOWN_TIME 0xf
+#define MT_DFS_RX_PE_MASK 0xff
+#define MT_DFS_PKT_END_MASK 0
+#define MT_DFS_CH_EN 0xf
+
+/* sw detector params */
+#define MT_DFS_EVENT_LOOP 64
+#define MT_DFS_SW_TIMEOUT (HZ / 20)
+#define MT_DFS_EVENT_WINDOW (HZ / 5)
+#define MT_DFS_SEQUENCE_WINDOW (200 * (1 << 20))
+#define MT_DFS_EVENT_TIME_MARGIN 2000
+#define MT_DFS_PRI_MARGIN 4
+#define MT_DFS_SEQUENCE_TH 6
+
+#define MT_DFS_FCC_MAX_PRI ((28570 << 1) + 1000)
+#define MT_DFS_FCC_MIN_PRI (3000 - 2)
+#define MT_DFS_JP_MAX_PRI ((80000 << 1) + 1000)
+#define MT_DFS_JP_MIN_PRI (28500 - 2)
+#define MT_DFS_ETSI_MAX_PRI (133333 + 125000 + 117647 + 1000)
+#define MT_DFS_ETSI_MIN_PRI (4500 - 20)
+
+struct mt76x02_radar_specs {
+ u8 mode;
+ u16 avg_len;
+ u16 e_low;
+ u16 e_high;
+ u16 w_low;
+ u16 w_high;
+ u16 w_margin;
+ u32 t_low;
+ u32 t_high;
+ u16 t_margin;
+ u32 b_low;
+ u32 b_high;
+ u32 event_expiration;
+ u16 pwr_jmp;
+};
+
+#define MT_DFS_CHECK_EVENT(x) ((x) != GENMASK(31, 0))
+#define MT_DFS_EVENT_ENGINE(x) (((x) & BIT(31)) ? 2 : 0)
+#define MT_DFS_EVENT_TIMESTAMP(x) ((x) & GENMASK(21, 0))
+#define MT_DFS_EVENT_WIDTH(x) ((x) & GENMASK(11, 0))
+struct mt76x02_dfs_event {
+ unsigned long fetch_ts;
+ u32 ts;
+ u16 width;
+ u8 engine;
+};
+
+#define MT_DFS_EVENT_BUFLEN 256
+struct mt76x02_dfs_event_rb {
+ struct mt76x02_dfs_event data[MT_DFS_EVENT_BUFLEN];
+ int h_rb, t_rb;
+};
+
+struct mt76x02_dfs_sequence {
+ struct list_head head;
+ u32 first_ts;
+ u32 last_ts;
+ u32 pri;
+ u16 count;
+ u8 engine;
+};
+
+struct mt76x02_dfs_hw_pulse {
+ u8 engine;
+ u32 period;
+ u32 w1;
+ u32 w2;
+ u32 burst;
+};
+
+struct mt76x02_dfs_sw_detector_params {
+ u32 min_pri;
+ u32 max_pri;
+ u32 pri_margin;
+};
+
+struct mt76x02_dfs_engine_stats {
+ u32 hw_pattern;
+ u32 hw_pulse_discarded;
+ u32 sw_pattern;
+};
+
+struct mt76x02_dfs_seq_stats {
+ u32 seq_pool_len;
+ u32 seq_len;
+};
+
+struct mt76x02_dfs_pattern_detector {
+ u8 chirp_pulse_cnt;
+ u32 chirp_pulse_ts;
+
+ struct mt76x02_dfs_sw_detector_params sw_dpd_params;
+ struct mt76x02_dfs_event_rb event_rb[2];
+
+ struct list_head sequences;
+ struct list_head seq_pool;
+ struct mt76x02_dfs_seq_stats seq_stats;
+
+ unsigned long last_sw_check;
+ u32 last_event_ts;
+
+ struct mt76x02_dfs_engine_stats stats[MT_DFS_NUM_ENGINES];
+ struct tasklet_struct dfs_tasklet;
+};
+
+void mt76x02_dfs_init_params(struct mt76x02_dev *dev);
+void mt76x02_dfs_init_detector(struct mt76x02_dev *dev);
+void mt76x02_regd_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request);
+void mt76x02_phy_dfs_adjust_agc(struct mt76x02_dev *dev);
+#endif /* __MT76x02_DFS_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dma.h b/drivers/net/wireless/mediatek/mt76/mt76x02_dma.h
new file mode 100644
index 000000000..23b0e7d10
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dma.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#ifndef __MT76x02_DMA_H
+#define __MT76x02_DMA_H
+
+#include "mt76x02.h"
+#include "dma.h"
+
+#define MT_TXD_INFO_LEN GENMASK(15, 0)
+#define MT_TXD_INFO_NEXT_VLD BIT(16)
+#define MT_TXD_INFO_TX_BURST BIT(17)
+#define MT_TXD_INFO_80211 BIT(19)
+#define MT_TXD_INFO_TSO BIT(20)
+#define MT_TXD_INFO_CSO BIT(21)
+#define MT_TXD_INFO_WIV BIT(24)
+#define MT_TXD_INFO_QSEL GENMASK(26, 25)
+#define MT_TXD_INFO_DPORT GENMASK(29, 27)
+#define MT_TXD_INFO_TYPE GENMASK(31, 30)
+
+#define MT_RX_FCE_INFO_LEN GENMASK(13, 0)
+#define MT_RX_FCE_INFO_SELF_GEN BIT(15)
+#define MT_RX_FCE_INFO_CMD_SEQ GENMASK(19, 16)
+#define MT_RX_FCE_INFO_EVT_TYPE GENMASK(23, 20)
+#define MT_RX_FCE_INFO_PCIE_INTR BIT(24)
+#define MT_RX_FCE_INFO_QSEL GENMASK(26, 25)
+#define MT_RX_FCE_INFO_D_PORT GENMASK(29, 27)
+#define MT_RX_FCE_INFO_TYPE GENMASK(31, 30)
+
+/* MCU request message header */
+#define MT_MCU_MSG_LEN GENMASK(15, 0)
+#define MT_MCU_MSG_CMD_SEQ GENMASK(19, 16)
+#define MT_MCU_MSG_CMD_TYPE GENMASK(26, 20)
+#define MT_MCU_MSG_PORT GENMASK(29, 27)
+#define MT_MCU_MSG_TYPE GENMASK(31, 30)
+#define MT_MCU_MSG_TYPE_CMD BIT(30)
+
+#define MT_RX_HEADROOM 32
+#define MT76X02_RX_RING_SIZE 256
+
+enum dma_msg_port {
+ WLAN_PORT,
+ CPU_RX_PORT,
+ CPU_TX_PORT,
+ HOST_PORT,
+ VIRTUAL_CPU_RX_PORT,
+ VIRTUAL_CPU_TX_PORT,
+ DISCARD,
+};
+
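+/* Busy-wait until both the WPDMA TX and RX engines report idle. */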
+static inline bool
+mt76x02_wait_for_wpdma(struct mt76_dev *dev, int timeout)
+{
+ return __mt76_poll(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
+ 0, timeout);
+}
+
+int mt76x02_dma_init(struct mt76x02_dev *dev);
+void mt76x02_dma_disable(struct mt76x02_dev *dev);
+
+#endif /* __MT76x02_DMA_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
new file mode 100644
index 000000000..5d402cf29
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include <asm/unaligned.h>
+
+#include "mt76x02_eeprom.h"
+
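+/* Read one 16-byte efuse block: program the block-aligned address and access
+ * mode, kick the controller, poll for completion and copy out the four data
+ * words. An all-ones AOUT field marks an unprogrammed block.
+ */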
+static int
+mt76x02_efuse_read(struct mt76x02_dev *dev, u16 addr, u8 *data,
+ enum mt76x02_eeprom_modes mode)
+{
+ u32 val;
+ int i;
+
+ val = mt76_rr(dev, MT_EFUSE_CTRL);
+ val &= ~(MT_EFUSE_CTRL_AIN |
+ MT_EFUSE_CTRL_MODE);
+ val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf);
+ val |= FIELD_PREP(MT_EFUSE_CTRL_MODE, mode);
+ val |= MT_EFUSE_CTRL_KICK;
+ mt76_wr(dev, MT_EFUSE_CTRL, val);
+
+ if (!mt76_poll_msec(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
+ return -ETIMEDOUT;
+
+ udelay(2);
+
+ val = mt76_rr(dev, MT_EFUSE_CTRL);
+ if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) {
+ memset(data, 0xff, 16);
+ return 0;
+ }
+
+ for (i = 0; i < 4; i++) {
+ val = mt76_rr(dev, MT_EFUSE_DATA(i));
+ put_unaligned_le32(val, data + 4 * i);
+ }
+
+ return 0;
+}
+
+int mt76x02_eeprom_copy(struct mt76x02_dev *dev,
+ enum mt76x02_eeprom_field field,
+ void *dest, int len)
+{
+ if (field + len > dev->mt76.eeprom.size)
+ return -1;
+
+ memcpy(dest, dev->mt76.eeprom.data + field, len);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_eeprom_copy);
+
+int mt76x02_get_efuse_data(struct mt76x02_dev *dev, u16 base, void *buf,
+ int len, enum mt76x02_eeprom_modes mode)
+{
+ int ret, i;
+
+ for (i = 0; i + 16 <= len; i += 16) {
+ ret = mt76x02_efuse_read(dev, base + i, buf + i, mode);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_get_efuse_data);
+
+void mt76x02_eeprom_parse_hw_cap(struct mt76x02_dev *dev)
+{
+ u16 val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0);
+
+ switch (FIELD_GET(MT_EE_NIC_CONF_0_BOARD_TYPE, val)) {
+ case BOARD_TYPE_5GHZ:
+ dev->mphy.cap.has_5ghz = true;
+ break;
+ case BOARD_TYPE_2GHZ:
+ dev->mphy.cap.has_2ghz = true;
+ break;
+ default:
+ dev->mphy.cap.has_2ghz = true;
+ dev->mphy.cap.has_5ghz = true;
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(mt76x02_eeprom_parse_hw_cap);
+
+bool mt76x02_ext_pa_enabled(struct mt76x02_dev *dev, enum nl80211_band band)
+{
+ u16 conf0 = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0);
+
+ if (band == NL80211_BAND_5GHZ)
+ return !(conf0 & MT_EE_NIC_CONF_0_PA_INT_5G);
+ else
+ return !(conf0 & MT_EE_NIC_CONF_0_PA_INT_2G);
+}
+EXPORT_SYMBOL_GPL(mt76x02_ext_pa_enabled);
+
+void mt76x02_get_rx_gain(struct mt76x02_dev *dev, enum nl80211_band band,
+ u16 *rssi_offset, s8 *lna_2g, s8 *lna_5g)
+{
+ u16 val;
+
+ val = mt76x02_eeprom_get(dev, MT_EE_LNA_GAIN);
+ *lna_2g = val & 0xff;
+ lna_5g[0] = val >> 8;
+
+ val = mt76x02_eeprom_get(dev, MT_EE_RSSI_OFFSET_2G_1);
+ lna_5g[1] = val >> 8;
+
+ val = mt76x02_eeprom_get(dev, MT_EE_RSSI_OFFSET_5G_1);
+ lna_5g[2] = val >> 8;
+
+ if (!mt76x02_field_valid(lna_5g[1]))
+ lna_5g[1] = lna_5g[0];
+
+ if (!mt76x02_field_valid(lna_5g[2]))
+ lna_5g[2] = lna_5g[0];
+
+ if (band == NL80211_BAND_2GHZ)
+ *rssi_offset = mt76x02_eeprom_get(dev, MT_EE_RSSI_OFFSET_2G_0);
+ else
+ *rssi_offset = mt76x02_eeprom_get(dev, MT_EE_RSSI_OFFSET_5G_0);
+}
+EXPORT_SYMBOL_GPL(mt76x02_get_rx_gain);
+
+u8 mt76x02_get_lna_gain(struct mt76x02_dev *dev,
+ s8 *lna_2g, s8 *lna_5g,
+ struct ieee80211_channel *chan)
+{
+ u8 lna;
+
+ if (chan->band == NL80211_BAND_2GHZ)
+ lna = *lna_2g;
+ else if (chan->hw_value <= 64)
+ lna = lna_5g[0];
+ else if (chan->hw_value <= 128)
+ lna = lna_5g[1];
+ else
+ lna = lna_5g[2];
+
+ return lna != 0xff ? lna : 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_get_lna_gain);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h
new file mode 100644
index 000000000..99941a470
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h
@@ -0,0 +1,187 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#ifndef __MT76x02_EEPROM_H
+#define __MT76x02_EEPROM_H
+
+#include "mt76x02.h"
+
+enum mt76x02_eeprom_field {
+ MT_EE_CHIP_ID = 0x000,
+ MT_EE_VERSION = 0x002,
+ MT_EE_MAC_ADDR = 0x004,
+ MT_EE_PCI_ID = 0x00A,
+ MT_EE_ANTENNA = 0x022,
+ MT_EE_CFG1_INIT = 0x024,
+ MT_EE_NIC_CONF_0 = 0x034,
+ MT_EE_NIC_CONF_1 = 0x036,
+ MT_EE_COUNTRY_REGION_5GHZ = 0x038,
+ MT_EE_COUNTRY_REGION_2GHZ = 0x039,
+ MT_EE_FREQ_OFFSET = 0x03a,
+ MT_EE_NIC_CONF_2 = 0x042,
+
+ MT_EE_XTAL_TRIM_1 = 0x03a,
+ MT_EE_XTAL_TRIM_2 = 0x09e,
+
+ MT_EE_LNA_GAIN = 0x044,
+ MT_EE_RSSI_OFFSET_2G_0 = 0x046,
+ MT_EE_RSSI_OFFSET_2G_1 = 0x048,
+ MT_EE_LNA_GAIN_5GHZ_1 = 0x049,
+ MT_EE_RSSI_OFFSET_5G_0 = 0x04a,
+ MT_EE_RSSI_OFFSET_5G_1 = 0x04c,
+ MT_EE_LNA_GAIN_5GHZ_2 = 0x04d,
+
+ MT_EE_TX_POWER_DELTA_BW40 = 0x050,
+ MT_EE_TX_POWER_DELTA_BW80 = 0x052,
+
+ MT_EE_TX_POWER_EXT_PA_5G = 0x054,
+
+ MT_EE_TX_POWER_0_START_2G = 0x056,
+ MT_EE_TX_POWER_1_START_2G = 0x05c,
+
+ /* used as byte arrays */
+#define MT_TX_POWER_GROUP_SIZE_5G 5
+#define MT_TX_POWER_GROUPS_5G 6
+ MT_EE_TX_POWER_0_START_5G = 0x062,
+ MT_EE_TSSI_SLOPE_2G = 0x06e,
+
+ MT_EE_TX_POWER_0_GRP3_TX_POWER_DELTA = 0x074,
+ MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE = 0x076,
+
+ MT_EE_TX_POWER_1_START_5G = 0x080,
+
+ MT_EE_TX_POWER_CCK = 0x0a0,
+ MT_EE_TX_POWER_OFDM_2G_6M = 0x0a2,
+ MT_EE_TX_POWER_OFDM_2G_24M = 0x0a4,
+ MT_EE_TX_POWER_OFDM_5G_6M = 0x0b2,
+ MT_EE_TX_POWER_OFDM_5G_24M = 0x0b4,
+ MT_EE_TX_POWER_HT_MCS0 = 0x0a6,
+ MT_EE_TX_POWER_HT_MCS4 = 0x0a8,
+ MT_EE_TX_POWER_HT_MCS8 = 0x0aa,
+ MT_EE_TX_POWER_HT_MCS12 = 0x0ac,
+ MT_EE_TX_POWER_VHT_MCS0 = 0x0ba,
+ MT_EE_TX_POWER_VHT_MCS4 = 0x0bc,
+ MT_EE_TX_POWER_VHT_MCS8 = 0x0be,
+
+ MT_EE_2G_TARGET_POWER = 0x0d0,
+ MT_EE_TEMP_OFFSET = 0x0d1,
+ MT_EE_5G_TARGET_POWER = 0x0d2,
+ MT_EE_TSSI_BOUND1 = 0x0d4,
+ MT_EE_TSSI_BOUND2 = 0x0d6,
+ MT_EE_TSSI_BOUND3 = 0x0d8,
+ MT_EE_TSSI_BOUND4 = 0x0da,
+ MT_EE_FREQ_OFFSET_COMPENSATION = 0x0db,
+ MT_EE_TSSI_BOUND5 = 0x0dc,
+ MT_EE_TX_POWER_BYRATE_BASE = 0x0de,
+
+ MT_EE_TSSI_SLOPE_5G = 0x0f0,
+ MT_EE_RF_TEMP_COMP_SLOPE_5G = 0x0f2,
+ MT_EE_RF_TEMP_COMP_SLOPE_2G = 0x0f4,
+
+ MT_EE_RF_2G_TSSI_OFF_TXPOWER = 0x0f6,
+ MT_EE_RF_2G_RX_HIGH_GAIN = 0x0f8,
+ MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN = 0x0fa,
+ MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN = 0x0fc,
+ MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN = 0x0fe,
+
+ MT_EE_BT_RCAL_RESULT = 0x138,
+ MT_EE_BT_VCDL_CALIBRATION = 0x13c,
+ MT_EE_BT_PMUCFG = 0x13e,
+
+ MT_EE_USAGE_MAP_START = 0x1e0,
+ MT_EE_USAGE_MAP_END = 0x1fc,
+
+ __MT_EE_MAX
+};
+
+#define MT_EE_ANTENNA_DUAL BIT(15)
+
+#define MT_EE_NIC_CONF_0_RX_PATH GENMASK(3, 0)
+#define MT_EE_NIC_CONF_0_TX_PATH GENMASK(7, 4)
+#define MT_EE_NIC_CONF_0_PA_TYPE GENMASK(9, 8)
+#define MT_EE_NIC_CONF_0_PA_INT_2G BIT(8)
+#define MT_EE_NIC_CONF_0_PA_INT_5G BIT(9)
+#define MT_EE_NIC_CONF_0_PA_IO_CURRENT BIT(10)
+#define MT_EE_NIC_CONF_0_BOARD_TYPE GENMASK(13, 12)
+
+#define MT_EE_NIC_CONF_1_HW_RF_CTRL BIT(0)
+#define MT_EE_NIC_CONF_1_TEMP_TX_ALC BIT(1)
+#define MT_EE_NIC_CONF_1_LNA_EXT_2G BIT(2)
+#define MT_EE_NIC_CONF_1_LNA_EXT_5G BIT(3)
+#define MT_EE_NIC_CONF_1_TX_ALC_EN BIT(13)
+
+#define MT_EE_NIC_CONF_2_ANT_OPT BIT(3)
+#define MT_EE_NIC_CONF_2_ANT_DIV BIT(4)
+#define MT_EE_NIC_CONF_2_XTAL_OPTION GENMASK(10, 9)
+
+#define MT_EFUSE_USAGE_MAP_SIZE (MT_EE_USAGE_MAP_END - \
+ MT_EE_USAGE_MAP_START + 1)
+
+enum mt76x02_eeprom_modes {
+ MT_EE_READ,
+ MT_EE_PHYSICAL_READ,
+};
+
+enum mt76x02_board_type {
+ BOARD_TYPE_2GHZ = 1,
+ BOARD_TYPE_5GHZ = 2,
+};
+
+static inline bool mt76x02_field_valid(u8 val)
+{
+ return val != 0 && val != 0xff;
+}
+
+static inline int
+mt76x02_sign_extend(u32 val, unsigned int size)
+{
+ bool sign = val & BIT(size - 1);
+
+ val &= BIT(size - 1) - 1;
+
+ return sign ? val : -val;
+}
+
+static inline int
+mt76x02_sign_extend_optional(u32 val, unsigned int size)
+{
+ bool enable = val & BIT(size);
+
+ return enable ? mt76x02_sign_extend(val, size) : 0;
+}
+
+static inline s8 mt76x02_rate_power_val(u8 val)
+{
+ if (!mt76x02_field_valid(val))
+ return 0;
+
+ return mt76x02_sign_extend_optional(val, 7);
+}
+
+static inline int
+mt76x02_eeprom_get(struct mt76x02_dev *dev,
+ enum mt76x02_eeprom_field field)
+{
+ if ((field & 1) || field >= __MT_EE_MAX)
+ return -1;
+
+ return get_unaligned_le16(dev->mt76.eeprom.data + field);
+}
+
+bool mt76x02_ext_pa_enabled(struct mt76x02_dev *dev, enum nl80211_band band);
+int mt76x02_get_efuse_data(struct mt76x02_dev *dev, u16 base, void *buf,
+ int len, enum mt76x02_eeprom_modes mode);
+void mt76x02_get_rx_gain(struct mt76x02_dev *dev, enum nl80211_band band,
+ u16 *rssi_offset, s8 *lna_2g, s8 *lna_5g);
+u8 mt76x02_get_lna_gain(struct mt76x02_dev *dev,
+ s8 *lna_2g, s8 *lna_5g,
+ struct ieee80211_channel *chan);
+void mt76x02_eeprom_parse_hw_cap(struct mt76x02_dev *dev);
+int mt76x02_eeprom_copy(struct mt76x02_dev *dev,
+ enum mt76x02_eeprom_field field,
+ void *dest, int len);
+
+#endif /* __MT76x02_EEPROM_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
new file mode 100644
index 000000000..677082d86
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
@@ -0,0 +1,1218 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ */
+
+#include "mt76x02.h"
+#include "mt76x02_trace.h"
+#include "trace.h"
+
+void mt76x02_mac_reset_counters(struct mt76x02_dev *dev)
+{
+ int i;
+
+ mt76_rr(dev, MT_RX_STAT_0);
+ mt76_rr(dev, MT_RX_STAT_1);
+ mt76_rr(dev, MT_RX_STAT_2);
+ mt76_rr(dev, MT_TX_STA_0);
+ mt76_rr(dev, MT_TX_STA_1);
+ mt76_rr(dev, MT_TX_STA_2);
+
+ for (i = 0; i < 16; i++)
+ mt76_rr(dev, MT_TX_AGG_CNT(i));
+
+ for (i = 0; i < 16; i++)
+ mt76_rr(dev, MT_TX_STAT_FIFO);
+
+ memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats));
+}
+EXPORT_SYMBOL_GPL(mt76x02_mac_reset_counters);
+
+static enum mt76x02_cipher_type
+mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
+{
+ memset(key_data, 0, 32);
+ if (!key)
+ return MT_CIPHER_NONE;
+
+ if (key->keylen > 32)
+ return MT_CIPHER_NONE;
+
+ memcpy(key_data, key->key, key->keylen);
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return MT_CIPHER_WEP40;
+ case WLAN_CIPHER_SUITE_WEP104:
+ return MT_CIPHER_WEP104;
+ case WLAN_CIPHER_SUITE_TKIP:
+ return MT_CIPHER_TKIP;
+ case WLAN_CIPHER_SUITE_CCMP:
+ return MT_CIPHER_AES_CCMP;
+ default:
+ return MT_CIPHER_NONE;
+ }
+}
+
+int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
+ u8 key_idx, struct ieee80211_key_conf *key)
+{
+ enum mt76x02_cipher_type cipher;
+ u8 key_data[32];
+ u32 val;
+
+ cipher = mt76x02_mac_get_key_info(key, key_data);
+ if (cipher == MT_CIPHER_NONE && key)
+ return -EOPNOTSUPP;
+
+ val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
+ val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
+ val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
+ mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
+
+ mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
+ sizeof(key_data));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_mac_shared_key_setup);
+
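+/* Rebuild the 48-bit packet number from the IV/EIV words the hardware keeps
+ * per WCID and propagate it to key->tx_pn, so the software and hardware PN
+ * counters stay in sync when a key is reprogrammed.
+ */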
+void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx,
+ struct ieee80211_key_conf *key)
+{
+ enum mt76x02_cipher_type cipher;
+ u8 key_data[32];
+ u32 iv, eiv;
+ u64 pn;
+
+ cipher = mt76x02_mac_get_key_info(key, key_data);
+ iv = mt76_rr(dev, MT_WCID_IV(idx));
+ eiv = mt76_rr(dev, MT_WCID_IV(idx) + 4);
+
+ pn = (u64)eiv << 16;
+ if (cipher == MT_CIPHER_TKIP) {
+ pn |= (iv >> 16) & 0xff;
+ pn |= (iv & 0xff) << 8;
+ } else if (cipher >= MT_CIPHER_AES_CCMP) {
+ pn |= iv & 0xffff;
+ } else {
+ return;
+ }
+
+ atomic64_set(&key->tx_pn, pn);
+}
+
+int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
+ struct ieee80211_key_conf *key)
+{
+ enum mt76x02_cipher_type cipher;
+ u8 key_data[32];
+ u8 iv_data[8];
+ u64 pn;
+
+ cipher = mt76x02_mac_get_key_info(key, key_data);
+ if (cipher == MT_CIPHER_NONE && key)
+ return -EOPNOTSUPP;
+
+ mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
+ mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);
+
+ memset(iv_data, 0, sizeof(iv_data));
+ if (key) {
+ mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
+ !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
+
+ pn = atomic64_read(&key->tx_pn);
+
+ iv_data[3] = key->keyidx << 6;
+ if (cipher >= MT_CIPHER_TKIP) {
+ iv_data[3] |= 0x20;
+ put_unaligned_le32(pn >> 16, &iv_data[4]);
+ }
+
+ if (cipher == MT_CIPHER_TKIP) {
+ iv_data[0] = (pn >> 8) & 0xff;
+ iv_data[1] = (iv_data[0] | 0x20) & 0x7f;
+ iv_data[2] = pn & 0xff;
+ } else if (cipher >= MT_CIPHER_AES_CCMP) {
+ put_unaligned_le16((pn & 0xffff), &iv_data[0]);
+ }
+ }
+
+ mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
+
+ return 0;
+}
+
+void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx,
+ u8 vif_idx, u8 *mac)
+{
+ struct mt76_wcid_addr addr = {};
+ u32 attr;
+
+ attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
+ FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
+
+ mt76_wr(dev, MT_WCID_ATTR(idx), attr);
+
+ if (idx >= 128)
+ return;
+
+ if (mac)
+ memcpy(addr.macaddr, mac, ETH_ALEN);
+
+ mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
+}
+EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_setup);
+
+void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop)
+{
+ u32 val = mt76_rr(dev, MT_WCID_DROP(idx));
+ u32 bit = MT_WCID_DROP_MASK(idx);
+
+ /* prevent unnecessary writes */
+ if ((val & bit) != (bit * drop))
+ mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
+}
+
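+/* Encode an ieee80211_tx_rate into the 16-bit hardware rate word
+ * (rate index, PHY mode, bandwidth and short GI) and report the NSS.
+ */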
+static u16
+mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev,
+ const struct ieee80211_tx_rate *rate, u8 *nss_val)
+{
+ u8 phy, rate_idx, nss, bw = 0;
+ u16 rateval;
+
+ if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+ rate_idx = rate->idx;
+ nss = 1 + (rate->idx >> 4);
+ phy = MT_PHY_TYPE_VHT;
+ if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+ bw = 2;
+ else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ bw = 1;
+ } else if (rate->flags & IEEE80211_TX_RC_MCS) {
+ rate_idx = rate->idx;
+ nss = 1 + (rate->idx >> 3);
+ phy = MT_PHY_TYPE_HT;
+ if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
+ phy = MT_PHY_TYPE_HT_GF;
+ if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ bw = 1;
+ } else {
+ const struct ieee80211_rate *r;
+ int band = dev->mphy.chandef.chan->band;
+ u16 val;
+
+ r = &dev->mt76.hw->wiphy->bands[band]->bitrates[rate->idx];
+ if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+ val = r->hw_value_short;
+ else
+ val = r->hw_value;
+
+ phy = val >> 8;
+ rate_idx = val & 0xff;
+ nss = 1;
+ }
+
+ rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
+ rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
+ rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
+ if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+ rateval |= MT_RXWI_RATE_SGI;
+
+ *nss_val = nss;
+ return rateval;
+}
+
+void mt76x02_mac_wcid_set_rate(struct mt76x02_dev *dev, struct mt76_wcid *wcid,
+ const struct ieee80211_tx_rate *rate)
+{
+ s8 max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
+ u16 rateval;
+ u32 tx_info;
+ s8 nss;
+
+ rateval = mt76x02_mac_tx_rate_val(dev, rate, &nss);
+ tx_info = FIELD_PREP(MT_WCID_TX_INFO_RATE, rateval) |
+ FIELD_PREP(MT_WCID_TX_INFO_NSS, nss) |
+ FIELD_PREP(MT_WCID_TX_INFO_TXPWR_ADJ, max_txpwr_adj) |
+ MT_WCID_TX_INFO_SET;
+ wcid->tx_info = tx_info;
+}
+
+void mt76x02_mac_set_short_preamble(struct mt76x02_dev *dev, bool enable)
+{
+ if (enable)
+ mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
+ else
+ mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
+}
+
+bool mt76x02_mac_load_tx_status(struct mt76x02_dev *dev,
+ struct mt76x02_tx_status *stat)
+{
+ u32 stat1, stat2;
+
+ stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
+ stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);
+
+ stat->valid = !!(stat1 & MT_TX_STAT_FIFO_VALID);
+ if (!stat->valid)
+ return false;
+
+ stat->success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
+ stat->aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
+ stat->ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
+ stat->wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
+ stat->rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);
+
+ stat->retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
+ stat->pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);
+
+ trace_mac_txstat_fetch(dev, stat);
+
+ return true;
+}
+
+static int
+mt76x02_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
+ enum nl80211_band band)
+{
+ u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
+
+ txrate->idx = 0;
+ txrate->flags = 0;
+ txrate->count = 1;
+
+ switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
+ case MT_PHY_TYPE_OFDM:
+ if (band == NL80211_BAND_2GHZ)
+ idx += 4;
+
+ txrate->idx = idx;
+ return 0;
+ case MT_PHY_TYPE_CCK:
+ if (idx >= 8)
+ idx -= 8;
+
+ txrate->idx = idx;
+ return 0;
+ case MT_PHY_TYPE_HT_GF:
+ txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+ fallthrough;
+ case MT_PHY_TYPE_HT:
+ txrate->flags |= IEEE80211_TX_RC_MCS;
+ txrate->idx = idx;
+ break;
+ case MT_PHY_TYPE_VHT:
+ txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
+ txrate->idx = idx;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
+ case MT_PHY_BW_20:
+ break;
+ case MT_PHY_BW_40:
+ txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ break;
+ case MT_PHY_BW_80:
+ txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (rate & MT_RXWI_RATE_SGI)
+ txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
+
+ return 0;
+}
+
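+/* Build the TXWI descriptor for a frame: rate and power selection, optional
+ * software IV/EIV generation for CCMP, A-MPDU parameters and the ACK and
+ * timestamp flags.
+ */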
+void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta, int len)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_rate *rate = &info->control.rates[0];
+ struct ieee80211_key_conf *key = info->control.hw_key;
+ u32 wcid_tx_info;
+ u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
+ u16 txwi_flags = 0, rateval;
+ u8 nss;
+ s8 txpwr_adj, max_txpwr_adj;
+ u8 ccmp_pn[8], nstreams = dev->chainmask & 0xf;
+
+ memset(txwi, 0, sizeof(*txwi));
+
+ mt76_tx_check_agg_ssn(sta, skb);
+
+ if (!info->control.hw_key && wcid && wcid->hw_key_idx != 0xff &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ wcid = NULL;
+ ieee80211_get_tx_rates(info->control.vif, sta, skb,
+ info->control.rates, 1);
+ }
+
+ if (wcid)
+ txwi->wcid = wcid->idx;
+ else
+ txwi->wcid = 0xff;
+
+ if (wcid && wcid->sw_iv && key) {
+ u64 pn = atomic64_inc_return(&key->tx_pn);
+
+ ccmp_pn[0] = pn;
+ ccmp_pn[1] = pn >> 8;
+ ccmp_pn[2] = 0;
+ ccmp_pn[3] = 0x20 | (key->keyidx << 6);
+ ccmp_pn[4] = pn >> 16;
+ ccmp_pn[5] = pn >> 24;
+ ccmp_pn[6] = pn >> 32;
+ ccmp_pn[7] = pn >> 40;
+ txwi->iv = *((__le32 *)&ccmp_pn[0]);
+ txwi->eiv = *((__le32 *)&ccmp_pn[4]);
+ }
+
+ if (wcid && (rate->idx < 0 || !rate->count)) {
+ wcid_tx_info = wcid->tx_info;
+ rateval = FIELD_GET(MT_WCID_TX_INFO_RATE, wcid_tx_info);
+ max_txpwr_adj = FIELD_GET(MT_WCID_TX_INFO_TXPWR_ADJ,
+ wcid_tx_info);
+ nss = FIELD_GET(MT_WCID_TX_INFO_NSS, wcid_tx_info);
+ } else {
+ rateval = mt76x02_mac_tx_rate_val(dev, rate, &nss);
+ max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
+ }
+ txwi->rate = cpu_to_le16(rateval);
+
+ txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, dev->txpower_conf,
+ max_txpwr_adj);
+ txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);
+
+ if (nstreams > 1 && mt76_rev(&dev->mt76) >= MT76XX_REV_E4)
+ txwi->txstream = 0x13;
+ else if (nstreams > 1 && mt76_rev(&dev->mt76) >= MT76XX_REV_E3 &&
+ !(txwi->rate & cpu_to_le16(rate_ht_mask)))
+ txwi->txstream = 0x93;
+
+ if (is_mt76x2(dev) && (info->flags & IEEE80211_TX_CTL_LDPC))
+ txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
+ if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
+ txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
+ if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
+ txwi_flags |= MT_TXWI_FLAGS_MMPS;
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+ txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+ txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
+ if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
+ u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
+ u8 ampdu_density = sta->ht_cap.ampdu_density;
+
+ ba_size <<= sta->ht_cap.ampdu_factor;
+ ba_size = min_t(int, 63, ba_size - 1);
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+ ba_size = 0;
+ txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
+
+ if (ampdu_density < IEEE80211_HT_MPDU_DENSITY_4)
+ ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;
+
+ txwi_flags |= MT_TXWI_FLAGS_AMPDU |
+ FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY, ampdu_density);
+ }
+
+ if (ieee80211_is_probe_resp(hdr->frame_control) ||
+ ieee80211_is_beacon(hdr->frame_control))
+ txwi_flags |= MT_TXWI_FLAGS_TS;
+
+ txwi->flags |= cpu_to_le16(txwi_flags);
+ txwi->len_ctl = cpu_to_le16(len);
+}
+EXPORT_SYMBOL_GPL(mt76x02_mac_write_txwi);
+
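+/* Fill the next entry of the status rate table with a one-step-lower
+ * fallback of the previous rate (MCS/NSS aware for HT and VHT).
+ */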
+static void
+mt76x02_tx_rate_fallback(struct ieee80211_tx_rate *rates, int idx, int phy)
+{
+ u8 mcs, nss;
+
+ if (!idx)
+ return;
+
+ rates += idx - 1;
+ rates[1] = rates[0];
+ switch (phy) {
+ case MT_PHY_TYPE_VHT:
+ mcs = ieee80211_rate_get_vht_mcs(rates);
+ nss = ieee80211_rate_get_vht_nss(rates);
+
+ if (mcs == 0)
+ nss = max_t(int, nss - 1, 1);
+ else
+ mcs--;
+
+ ieee80211_rate_set_vht(rates + 1, mcs, nss);
+ break;
+ case MT_PHY_TYPE_HT_GF:
+ case MT_PHY_TYPE_HT:
+ /* MCS 8 falls back to MCS 0 */
+ if (rates[0].idx == 8) {
+ rates[1].idx = 0;
+ break;
+ }
+ fallthrough;
+ default:
+ rates[1].idx = max_t(int, rates[0].idx - 1, 0);
+ break;
+ }
+}
+
+static void
+mt76x02_mac_fill_tx_status(struct mt76x02_dev *dev, struct mt76x02_sta *msta,
+ struct ieee80211_tx_info *info,
+ struct mt76x02_tx_status *st, int n_frames)
+{
+ struct ieee80211_tx_rate *rate = info->status.rates;
+ struct ieee80211_tx_rate last_rate;
+ u16 first_rate;
+ int retry = st->retry;
+ int phy;
+ int i;
+
+ if (!n_frames)
+ return;
+
+ phy = FIELD_GET(MT_RXWI_RATE_PHY, st->rate);
+
+ if (st->pktid & MT_PACKET_ID_HAS_RATE) {
+ first_rate = st->rate & ~MT_PKTID_RATE;
+ first_rate |= st->pktid & MT_PKTID_RATE;
+
+ mt76x02_mac_process_tx_rate(&rate[0], first_rate,
+ dev->mphy.chandef.chan->band);
+ } else if (rate[0].idx < 0) {
+ if (!msta)
+ return;
+
+ mt76x02_mac_process_tx_rate(&rate[0], msta->wcid.tx_info,
+ dev->mphy.chandef.chan->band);
+ }
+
+ mt76x02_mac_process_tx_rate(&last_rate, st->rate,
+ dev->mphy.chandef.chan->band);
+
+ for (i = 0; i < ARRAY_SIZE(info->status.rates); i++) {
+ retry--;
+ if (i + 1 == ARRAY_SIZE(info->status.rates)) {
+ info->status.rates[i] = last_rate;
+ info->status.rates[i].count = max_t(int, retry, 1);
+ break;
+ }
+
+ mt76x02_tx_rate_fallback(info->status.rates, i, phy);
+ if (info->status.rates[i].idx == last_rate.idx)
+ break;
+ }
+
+ if (i + 1 < ARRAY_SIZE(info->status.rates)) {
+ info->status.rates[i + 1].idx = -1;
+ info->status.rates[i + 1].count = 0;
+ }
+
+ info->status.ampdu_len = n_frames;
+ info->status.ampdu_ack_len = st->success ? n_frames : 0;
+
+ if (st->aggr)
+ info->flags |= IEEE80211_TX_CTL_AMPDU |
+ IEEE80211_TX_STAT_AMPDU;
+
+ if (!st->ack_req)
+ info->flags |= IEEE80211_TX_CTL_NO_ACK;
+ else if (st->success)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+}
+
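+/* Translate a TX status FIFO entry into a mac80211 status report,
+ * aggregating consecutive identical A-MPDU entries per station and
+ * accounting the estimated airtime.
+ */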
+void mt76x02_send_tx_status(struct mt76x02_dev *dev,
+ struct mt76x02_tx_status *stat, u8 *update)
+{
+ struct ieee80211_tx_info info = {};
+ struct ieee80211_tx_status status = {
+ .info = &info
+ };
+ static const u8 ac_to_tid[4] = {
+ [IEEE80211_AC_BE] = 0,
+ [IEEE80211_AC_BK] = 1,
+ [IEEE80211_AC_VI] = 4,
+ [IEEE80211_AC_VO] = 6
+ };
+ struct mt76_wcid *wcid = NULL;
+ struct mt76x02_sta *msta = NULL;
+ struct mt76_dev *mdev = &dev->mt76;
+ struct sk_buff_head list;
+ u32 duration = 0;
+ u8 cur_pktid;
+ u32 ac = 0;
+ int len = 0;
+
+ if (stat->pktid == MT_PACKET_ID_NO_ACK)
+ return;
+
+ rcu_read_lock();
+
+ if (stat->wcid < MT76x02_N_WCIDS)
+ wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]);
+
+ if (wcid && wcid->sta) {
+ void *priv;
+
+ priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
+ status.sta = container_of(priv, struct ieee80211_sta,
+ drv_priv);
+ }
+
+ mt76_tx_status_lock(mdev, &list);
+
+ if (wcid) {
+ if (mt76_is_skb_pktid(stat->pktid))
+ status.skb = mt76_tx_status_skb_get(mdev, wcid,
+ stat->pktid, &list);
+ if (status.skb)
+ status.info = IEEE80211_SKB_CB(status.skb);
+ }
+
+ if (!status.skb && !(stat->pktid & MT_PACKET_ID_HAS_RATE)) {
+ mt76_tx_status_unlock(mdev, &list);
+ goto out;
+ }
+
+ if (msta && stat->aggr && !status.skb) {
+ u32 stat_val, stat_cache;
+
+ stat_val = stat->rate;
+ stat_val |= ((u32)stat->retry) << 16;
+ stat_cache = msta->status.rate;
+ stat_cache |= ((u32)msta->status.retry) << 16;
+
+ if (*update == 0 && stat_val == stat_cache &&
+ stat->wcid == msta->status.wcid && msta->n_frames < 32) {
+ msta->n_frames++;
+ mt76_tx_status_unlock(mdev, &list);
+ goto out;
+ }
+
+ cur_pktid = msta->status.pktid;
+ mt76x02_mac_fill_tx_status(dev, msta, status.info,
+ &msta->status, msta->n_frames);
+
+ msta->status = *stat;
+ msta->n_frames = 1;
+ *update = 0;
+ } else {
+ cur_pktid = stat->pktid;
+ mt76x02_mac_fill_tx_status(dev, msta, status.info, stat, 1);
+ *update = 1;
+ }
+
+ if (status.skb) {
+ info = *status.info;
+ len = status.skb->len;
+ ac = skb_get_queue_mapping(status.skb);
+ mt76_tx_status_skb_done(mdev, status.skb, &list);
+ } else if (msta) {
+ len = status.info->status.ampdu_len * ewma_pktlen_read(&msta->pktlen);
+ ac = FIELD_GET(MT_PKTID_AC, cur_pktid);
+ }
+
+ mt76_tx_status_unlock(mdev, &list);
+
+ if (!status.skb)
+ ieee80211_tx_status_ext(mt76_hw(dev), &status);
+
+ if (!len)
+ goto out;
+
+ duration = ieee80211_calc_tx_airtime(mt76_hw(dev), &info, len);
+
+ spin_lock_bh(&dev->mt76.cc_lock);
+ dev->tx_airtime += duration;
+ spin_unlock_bh(&dev->mt76.cc_lock);
+
+ if (msta)
+ ieee80211_sta_register_airtime(status.sta, ac_to_tid[ac], duration, 0);
+
+out:
+ rcu_read_unlock();
+}
+
+static int
+mt76x02_mac_process_rate(struct mt76x02_dev *dev,
+ struct mt76_rx_status *status,
+ u16 rate)
+{
+ u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
+
+ switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
+ case MT_PHY_TYPE_OFDM:
+ if (idx >= 8)
+ idx = 0;
+
+ if (status->band == NL80211_BAND_2GHZ)
+ idx += 4;
+
+ status->rate_idx = idx;
+ return 0;
+ case MT_PHY_TYPE_CCK:
+ if (idx >= 8) {
+ idx -= 8;
+ status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
+ }
+
+ if (idx >= 4)
+ idx = 0;
+
+ status->rate_idx = idx;
+ return 0;
+ case MT_PHY_TYPE_HT_GF:
+ status->enc_flags |= RX_ENC_FLAG_HT_GF;
+ fallthrough;
+ case MT_PHY_TYPE_HT:
+ status->encoding = RX_ENC_HT;
+ status->rate_idx = idx;
+ break;
+ case MT_PHY_TYPE_VHT: {
+ u8 n_rxstream = dev->chainmask & 0xf;
+
+ status->encoding = RX_ENC_VHT;
+ status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
+ status->nss = min_t(u8, n_rxstream,
+ FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ if (rate & MT_RXWI_RATE_LDPC)
+ status->enc_flags |= RX_ENC_FLAG_LDPC;
+
+ if (rate & MT_RXWI_RATE_SGI)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+ if (rate & MT_RXWI_RATE_STBC)
+ status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;
+
+ switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
+ case MT_PHY_BW_20:
+ break;
+ case MT_PHY_BW_40:
+ status->bw = RATE_INFO_BW_40;
+ break;
+ case MT_PHY_BW_80:
+ status->bw = RATE_INFO_BW_80;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+void mt76x02_mac_setaddr(struct mt76x02_dev *dev, const u8 *addr)
+{
+ static const u8 null_addr[ETH_ALEN] = {};
+ int i;
+
+ ether_addr_copy(dev->mt76.macaddr, addr);
+
+ if (!is_valid_ether_addr(dev->mt76.macaddr)) {
+ eth_random_addr(dev->mt76.macaddr);
+ dev_info(dev->mt76.dev,
+ "Invalid MAC address, using random address %pM\n",
+ dev->mt76.macaddr);
+ }
+
+ mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->mt76.macaddr));
+ mt76_wr(dev, MT_MAC_ADDR_DW1,
+ get_unaligned_le16(dev->mt76.macaddr + 4) |
+ FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
+
+ mt76_wr(dev, MT_MAC_BSSID_DW0,
+ get_unaligned_le32(dev->mt76.macaddr));
+ mt76_wr(dev, MT_MAC_BSSID_DW1,
+ get_unaligned_le16(dev->mt76.macaddr + 4) |
+ FIELD_PREP(MT_MAC_BSSID_DW1_MBSS_MODE, 3) | /* 8 APs + 8 STAs */
+ MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT);
+ /* enable 7 additional beacon slots and control them with bypass mask */
+ mt76_rmw_field(dev, MT_MAC_BSSID_DW1, MT_MAC_BSSID_DW1_MBEACON_N, 7);
+
+ for (i = 0; i < 16; i++)
+ mt76x02_mac_set_bssid(dev, i, null_addr);
+}
+EXPORT_SYMBOL_GPL(mt76x02_mac_setaddr);
+
+static int
+mt76x02_mac_get_rssi(struct mt76x02_dev *dev, s8 rssi, int chain)
+{
+ struct mt76x02_rx_freq_cal *cal = &dev->cal.rx;
+
+ rssi += cal->rssi_offset[chain];
+ rssi -= cal->lna_gain;
+
+ return rssi;
+}
+
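+/* Parse the RXWI into mt76_rx_status: decryption flags, recovered IV for
+ * CCMP/TKIP, per-chain RSSI, A-MPDU reference and the RX rate.
+ */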
+int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
+ void *rxi)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct mt76x02_rxwi *rxwi = rxi;
+ struct mt76x02_sta *sta;
+ u32 rxinfo = le32_to_cpu(rxwi->rxinfo);
+ u32 ctl = le32_to_cpu(rxwi->ctl);
+ u16 rate = le16_to_cpu(rxwi->rate);
+ u16 tid_sn = le16_to_cpu(rxwi->tid_sn);
+ bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST);
+ int pad_len = 0, nstreams = dev->chainmask & 0xf;
+ s8 signal;
+ u8 pn_len;
+ u8 wcid;
+ int len;
+
+ if (!test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
+ return -EINVAL;
+
+ if (rxinfo & MT_RXINFO_L2PAD)
+ pad_len += 2;
+
+ if (rxinfo & MT_RXINFO_DECRYPT) {
+ status->flag |= RX_FLAG_DECRYPTED;
+ status->flag |= RX_FLAG_MMIC_STRIPPED;
+ status->flag |= RX_FLAG_MIC_STRIPPED;
+ status->flag |= RX_FLAG_IV_STRIPPED;
+ }
+
+ wcid = FIELD_GET(MT_RXWI_CTL_WCID, ctl);
+ sta = mt76x02_rx_get_sta(&dev->mt76, wcid);
+ status->wcid = mt76x02_rx_get_sta_wcid(sta, unicast);
+
+ len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
+ pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo);
+ if (pn_len) {
+ int offset = ieee80211_get_hdrlen_from_skb(skb) + pad_len;
+ u8 *data = skb->data + offset;
+
+ status->iv[0] = data[7];
+ status->iv[1] = data[6];
+ status->iv[2] = data[5];
+ status->iv[3] = data[4];
+ status->iv[4] = data[1];
+ status->iv[5] = data[0];
+
+ /*
+ * Driver CCMP validation can't deal with fragments.
+ * Let mac80211 take care of it.
+ */
+ if (rxinfo & MT_RXINFO_FRAG) {
+ status->flag &= ~RX_FLAG_IV_STRIPPED;
+ } else {
+ pad_len += pn_len << 2;
+ len -= pn_len << 2;
+ }
+ }
+
+ mt76x02_remove_hdr_pad(skb, pad_len);
+
+ if ((rxinfo & MT_RXINFO_BA) && !(rxinfo & MT_RXINFO_NULL))
+ status->aggr = true;
+
+ if (rxinfo & MT_RXINFO_AMPDU) {
+ status->flag |= RX_FLAG_AMPDU_DETAILS;
+ status->ampdu_ref = dev->ampdu_ref;
+
+ /*
+ * When receiving an A-MPDU subframe and RSSI info is not valid,
+ * we can assume that more subframes belonging to the same A-MPDU
+	 * are coming. The last one will have valid RSSI info.
+ */
+ if (rxinfo & MT_RXINFO_RSSI) {
+ if (!++dev->ampdu_ref)
+ dev->ampdu_ref++;
+ }
+ }
+
+ if (WARN_ON_ONCE(len > skb->len))
+ return -EINVAL;
+
+ pskb_trim(skb, len);
+
+ status->chains = BIT(0);
+ signal = mt76x02_mac_get_rssi(dev, rxwi->rssi[0], 0);
+ status->chain_signal[0] = signal;
+ if (nstreams > 1) {
+ status->chains |= BIT(1);
+ status->chain_signal[1] = mt76x02_mac_get_rssi(dev,
+ rxwi->rssi[1],
+ 1);
+ signal = max_t(s8, signal, status->chain_signal[1]);
+ }
+ status->signal = signal;
+ status->freq = dev->mphy.chandef.chan->center_freq;
+ status->band = dev->mphy.chandef.chan->band;
+
+ status->tid = FIELD_GET(MT_RXWI_TID, tid_sn);
+ status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn);
+
+ return mt76x02_mac_process_rate(dev, status, rate);
+}
+
+void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
+{
+ struct mt76x02_tx_status stat = {};
+ u8 update = 1;
+ bool ret;
+
+ if (!test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
+ return;
+
+ trace_mac_txstat_poll(dev);
+
+ while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
+ if (!spin_trylock(&dev->txstatus_fifo_lock))
+ break;
+
+ ret = mt76x02_mac_load_tx_status(dev, &stat);
+ spin_unlock(&dev->txstatus_fifo_lock);
+
+ if (!ret)
+ break;
+
+ if (!irq) {
+ mt76x02_send_tx_status(dev, &stat, &update);
+ continue;
+ }
+
+ kfifo_put(&dev->txstatus_fifo, stat);
+ }
+}
+
+void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
+{
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+ struct mt76x02_txwi *txwi;
+ u8 *txwi_ptr;
+
+ if (!e->txwi) {
+ dev_kfree_skb_any(e->skb);
+ return;
+ }
+
+ mt76x02_mac_poll_tx_status(dev, false);
+
+ txwi_ptr = mt76_get_txwi_ptr(mdev, e->txwi);
+ txwi = (struct mt76x02_txwi *)txwi_ptr;
+ trace_mac_txdone(mdev, txwi->wcid, txwi->pktid);
+
+ mt76_tx_complete_skb(mdev, e->wcid, e->skb);
+}
+EXPORT_SYMBOL_GPL(mt76x02_tx_complete_skb);
+
+void mt76x02_mac_set_rts_thresh(struct mt76x02_dev *dev, u32 val)
+{
+ u32 data = 0;
+
+ if (val != ~0)
+ data = FIELD_PREP(MT_PROT_CFG_CTRL, 1) |
+ MT_PROT_CFG_RTS_THRESH;
+
+ mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, val);
+
+ mt76_rmw(dev, MT_CCK_PROT_CFG,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+ mt76_rmw(dev, MT_OFDM_PROT_CFG,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+}
+
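+/* Program the CCK/OFDM/VHT protection registers according to the HT
+ * operation protection mode, legacy/non-GF station presence and the
+ * current RTS threshold.
+ */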
+void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, bool legacy_prot,
+ int ht_mode)
+{
+ int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION;
+ bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+ u32 prot[6];
+ u32 vht_prot[3];
+ int i;
+ u16 rts_thr;
+
+ for (i = 0; i < ARRAY_SIZE(prot); i++) {
+ prot[i] = mt76_rr(dev, MT_CCK_PROT_CFG + i * 4);
+ prot[i] &= ~MT_PROT_CFG_CTRL;
+ if (i >= 2)
+ prot[i] &= ~MT_PROT_CFG_RATE;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(vht_prot); i++) {
+ vht_prot[i] = mt76_rr(dev, MT_TX_PROT_CFG6 + i * 4);
+ vht_prot[i] &= ~(MT_PROT_CFG_CTRL | MT_PROT_CFG_RATE);
+ }
+
+ rts_thr = mt76_get_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH);
+
+ if (rts_thr != 0xffff)
+ prot[0] |= MT_PROT_CTRL_RTS_CTS;
+
+ if (legacy_prot) {
+ prot[1] |= MT_PROT_CTRL_CTS2SELF;
+
+ prot[2] |= MT_PROT_RATE_CCK_11;
+ prot[3] |= MT_PROT_RATE_CCK_11;
+ prot[4] |= MT_PROT_RATE_CCK_11;
+ prot[5] |= MT_PROT_RATE_CCK_11;
+
+ vht_prot[0] |= MT_PROT_RATE_CCK_11;
+ vht_prot[1] |= MT_PROT_RATE_CCK_11;
+ vht_prot[2] |= MT_PROT_RATE_CCK_11;
+ } else {
+ if (rts_thr != 0xffff)
+ prot[1] |= MT_PROT_CTRL_RTS_CTS;
+
+ prot[2] |= MT_PROT_RATE_OFDM_24;
+ prot[3] |= MT_PROT_RATE_DUP_OFDM_24;
+ prot[4] |= MT_PROT_RATE_OFDM_24;
+ prot[5] |= MT_PROT_RATE_DUP_OFDM_24;
+
+ vht_prot[0] |= MT_PROT_RATE_OFDM_24;
+ vht_prot[1] |= MT_PROT_RATE_DUP_OFDM_24;
+ vht_prot[2] |= MT_PROT_RATE_SGI_OFDM_24;
+ }
+
+ switch (mode) {
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
+ prot[2] |= MT_PROT_CTRL_RTS_CTS;
+ prot[3] |= MT_PROT_CTRL_RTS_CTS;
+ prot[4] |= MT_PROT_CTRL_RTS_CTS;
+ prot[5] |= MT_PROT_CTRL_RTS_CTS;
+ vht_prot[0] |= MT_PROT_CTRL_RTS_CTS;
+ vht_prot[1] |= MT_PROT_CTRL_RTS_CTS;
+ vht_prot[2] |= MT_PROT_CTRL_RTS_CTS;
+ break;
+ case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
+ prot[3] |= MT_PROT_CTRL_RTS_CTS;
+ prot[5] |= MT_PROT_CTRL_RTS_CTS;
+ vht_prot[1] |= MT_PROT_CTRL_RTS_CTS;
+ vht_prot[2] |= MT_PROT_CTRL_RTS_CTS;
+ break;
+ }
+
+ if (non_gf) {
+ prot[4] |= MT_PROT_CTRL_RTS_CTS;
+ prot[5] |= MT_PROT_CTRL_RTS_CTS;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(prot); i++)
+ mt76_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]);
+
+ for (i = 0; i < ARRAY_SIZE(vht_prot); i++)
+ mt76_wr(dev, MT_TX_PROT_CFG6 + i * 4, vht_prot[i]);
+}
+
+void mt76x02_update_channel(struct mt76_dev *mdev)
+{
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+ struct mt76_channel_state *state;
+
+ state = mdev->phy.chan_state;
+ state->cc_busy += mt76_rr(dev, MT_CH_BUSY);
+
+ spin_lock_bh(&dev->mt76.cc_lock);
+ state->cc_tx += dev->tx_airtime;
+ dev->tx_airtime = 0;
+ spin_unlock_bh(&dev->mt76.cc_lock);
+}
+EXPORT_SYMBOL_GPL(mt76x02_update_channel);
+
+static void mt76x02_check_mac_err(struct mt76x02_dev *dev)
+{
+ u32 val = mt76_rr(dev, 0x10f4);
+
+ if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5))))
+ return;
+
+ dev_err(dev->mt76.dev, "mac specific condition occurred\n");
+
+ mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
+ udelay(10);
+ mt76_wr(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
+}
+
+static void
+mt76x02_edcca_tx_enable(struct mt76x02_dev *dev, bool enable)
+{
+ if (enable) {
+ u32 data;
+
+ mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
+ mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_EN);
+ /* enable pa-lna */
+ data = mt76_rr(dev, MT_TX_PIN_CFG);
+ data |= MT_TX_PIN_CFG_TXANT |
+ MT_TX_PIN_CFG_RXANT |
+ MT_TX_PIN_RFTR_EN |
+ MT_TX_PIN_TRSW_EN;
+ mt76_wr(dev, MT_TX_PIN_CFG, data);
+ } else {
+ mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
+ mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_EN);
+ /* disable pa-lna */
+ mt76_clear(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_TXANT);
+ mt76_clear(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_RXANT);
+ }
+ dev->ed_tx_blocked = !enable;
+}
+
+void mt76x02_edcca_init(struct mt76x02_dev *dev)
+{
+ dev->ed_trigger = 0;
+ dev->ed_silent = 0;
+
+ if (dev->ed_monitor) {
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
+ u8 ed_th = chan->band == NL80211_BAND_5GHZ ? 0x0e : 0x20;
+
+ mt76_clear(dev, MT_TX_LINK_CFG, MT_TX_CFACK_EN);
+ mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
+ mt76_rmw(dev, MT_BBP(AGC, 2), GENMASK(15, 0),
+ ed_th << 8 | ed_th);
+ mt76_set(dev, MT_TXOP_HLDR_ET, MT_TXOP_HLDR_TX40M_BLK_EN);
+ } else {
+ mt76_set(dev, MT_TX_LINK_CFG, MT_TX_CFACK_EN);
+ mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
+ if (is_mt76x2(dev)) {
+ mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070);
+ mt76_set(dev, MT_TXOP_HLDR_ET,
+ MT_TXOP_HLDR_TX40M_BLK_EN);
+ } else {
+ mt76_wr(dev, MT_BBP(AGC, 2), 0x003a6464);
+ mt76_clear(dev, MT_TXOP_HLDR_ET,
+ MT_TXOP_HLDR_TX40M_BLK_EN);
+ }
+ }
+ mt76x02_edcca_tx_enable(dev, true);
+ dev->ed_monitor_learning = true;
+
+ /* clear previous CCA timer value */
+ mt76_rr(dev, MT_ED_CCA_TIMER);
+ dev->ed_time = ktime_get_boottime();
+}
+EXPORT_SYMBOL_GPL(mt76x02_edcca_init);
+
+#define MT_EDCCA_TH 92
+#define MT_EDCCA_BLOCK_TH 2
+#define MT_EDCCA_LEARN_TH 50
+#define MT_EDCCA_LEARN_CCA 180
+#define MT_EDCCA_LEARN_TIMEOUT (20 * HZ)
+
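+/* Compute the energy-detect busy percentage since the last poll and block or
+ * re-enable TX once the trigger/silence counters cross MT_EDCCA_BLOCK_TH.
+ */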
+static void mt76x02_edcca_check(struct mt76x02_dev *dev)
+{
+ ktime_t cur_time;
+ u32 active, val, busy;
+
+ cur_time = ktime_get_boottime();
+ val = mt76_rr(dev, MT_ED_CCA_TIMER);
+
+ active = ktime_to_us(ktime_sub(cur_time, dev->ed_time));
+ dev->ed_time = cur_time;
+
+ busy = (val * 100) / active;
+ busy = min_t(u32, busy, 100);
+
+ if (busy > MT_EDCCA_TH) {
+ dev->ed_trigger++;
+ dev->ed_silent = 0;
+ } else {
+ dev->ed_silent++;
+ dev->ed_trigger = 0;
+ }
+
+ if (dev->cal.agc_lowest_gain &&
+ dev->cal.false_cca > MT_EDCCA_LEARN_CCA &&
+ dev->ed_trigger > MT_EDCCA_LEARN_TH) {
+ dev->ed_monitor_learning = false;
+ dev->ed_trigger_timeout = jiffies + 20 * HZ;
+ } else if (!dev->ed_monitor_learning &&
+ time_is_after_jiffies(dev->ed_trigger_timeout)) {
+ dev->ed_monitor_learning = true;
+ mt76x02_edcca_tx_enable(dev, true);
+ }
+
+ if (dev->ed_monitor_learning)
+ return;
+
+ if (dev->ed_trigger > MT_EDCCA_BLOCK_TH && !dev->ed_tx_blocked)
+ mt76x02_edcca_tx_enable(dev, false);
+ else if (dev->ed_silent > MT_EDCCA_BLOCK_TH && dev->ed_tx_blocked)
+ mt76x02_edcca_tx_enable(dev, true);
+}
+
+void mt76x02_mac_work(struct work_struct *work)
+{
+ struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
+ mt76.mac_work.work);
+ int i, idx;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ mt76_update_survey(&dev->mt76);
+ for (i = 0, idx = 0; i < 16; i++) {
+ u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));
+
+ dev->mt76.aggr_stats[idx++] += val & 0xffff;
+ dev->mt76.aggr_stats[idx++] += val >> 16;
+ }
+
+ if (!dev->mt76.beacon_mask)
+ mt76x02_check_mac_err(dev);
+
+ if (dev->ed_monitor)
+ mt76x02_edcca_check(dev);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ mt76_tx_status_check(&dev->mt76, NULL, false);
+
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
+ MT_MAC_WORK_INTERVAL);
+}
+
+void mt76x02_mac_cc_reset(struct mt76x02_dev *dev)
+{
+ dev->mphy.survey_time = ktime_get_boottime();
+
+ mt76_wr(dev, MT_CH_TIME_CFG,
+ MT_CH_TIME_CFG_TIMER_EN |
+ MT_CH_TIME_CFG_TX_AS_BUSY |
+ MT_CH_TIME_CFG_RX_AS_BUSY |
+ MT_CH_TIME_CFG_NAV_AS_BUSY |
+ MT_CH_TIME_CFG_EIFS_AS_BUSY |
+ MT_CH_CCA_RC_EN |
+ FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1));
+
+ /* channel cycle counters read-and-clear */
+ mt76_rr(dev, MT_CH_BUSY);
+ mt76_rr(dev, MT_CH_IDLE);
+}
+EXPORT_SYMBOL_GPL(mt76x02_mac_cc_reset);
+
+void mt76x02_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr)
+{
+ idx &= 7;
+ mt76_wr(dev, MT_MAC_APC_BSSID_L(idx), get_unaligned_le32(addr));
+ mt76_rmw_field(dev, MT_MAC_APC_BSSID_H(idx), MT_MAC_APC_BSSID_H_ADDR,
+ get_unaligned_le16(addr + 4));
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
new file mode 100644
index 000000000..0cfbaca50
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
@@ -0,0 +1,208 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ */
+
+#ifndef __MT76X02_MAC_H
+#define __MT76X02_MAC_H
+
+struct mt76x02_dev;
+
+struct mt76x02_tx_status {
+ u8 valid:1;
+ u8 success:1;
+ u8 aggr:1;
+ u8 ack_req:1;
+ u8 wcid;
+ u8 pktid;
+ u8 retry;
+ u16 rate;
+} __packed __aligned(2);
+
+#define MT_VIF_WCID(_n) (254 - ((_n) & 7))
+#define MT_MAX_VIFS 8
+
+#define MT_PKTID_RATE GENMASK(4, 0)
+#define MT_PKTID_AC GENMASK(6, 5)
+
+struct mt76x02_vif {
+ struct mt76_wcid group_wcid; /* must be first */
+ u8 idx;
+};
+
+DECLARE_EWMA(pktlen, 8, 8);
+
+struct mt76x02_sta {
+ struct mt76_wcid wcid; /* must be first */
+
+ struct mt76x02_vif *vif;
+ struct mt76x02_tx_status status;
+ int n_frames;
+
+ struct ewma_pktlen pktlen;
+};
+
+#define MT_RXINFO_BA BIT(0)
+#define MT_RXINFO_DATA BIT(1)
+#define MT_RXINFO_NULL BIT(2)
+#define MT_RXINFO_FRAG BIT(3)
+#define MT_RXINFO_UNICAST BIT(4)
+#define MT_RXINFO_MULTICAST BIT(5)
+#define MT_RXINFO_BROADCAST BIT(6)
+#define MT_RXINFO_MYBSS BIT(7)
+#define MT_RXINFO_CRCERR BIT(8)
+#define MT_RXINFO_ICVERR BIT(9)
+#define MT_RXINFO_MICERR BIT(10)
+#define MT_RXINFO_AMSDU BIT(11)
+#define MT_RXINFO_HTC BIT(12)
+#define MT_RXINFO_RSSI BIT(13)
+#define MT_RXINFO_L2PAD BIT(14)
+#define MT_RXINFO_AMPDU BIT(15)
+#define MT_RXINFO_DECRYPT BIT(16)
+#define MT_RXINFO_BSSIDX3 BIT(17)
+#define MT_RXINFO_WAPI_KEY BIT(18)
+#define MT_RXINFO_PN_LEN GENMASK(21, 19)
+#define MT_RXINFO_SW_FTYPE0 BIT(22)
+#define MT_RXINFO_SW_FTYPE1 BIT(23)
+#define MT_RXINFO_PROBE_RESP BIT(24)
+#define MT_RXINFO_BEACON BIT(25)
+#define MT_RXINFO_DISASSOC BIT(26)
+#define MT_RXINFO_DEAUTH BIT(27)
+#define MT_RXINFO_ACTION BIT(28)
+#define MT_RXINFO_TCP_SUM_ERR BIT(30)
+#define MT_RXINFO_IP_SUM_ERR BIT(31)
+
+#define MT_RXWI_CTL_WCID GENMASK(7, 0)
+#define MT_RXWI_CTL_KEY_IDX GENMASK(9, 8)
+#define MT_RXWI_CTL_BSS_IDX GENMASK(12, 10)
+#define MT_RXWI_CTL_UDF GENMASK(15, 13)
+#define MT_RXWI_CTL_MPDU_LEN GENMASK(29, 16)
+#define MT_RXWI_CTL_EOF BIT(31)
+
+#define MT_RXWI_TID GENMASK(3, 0)
+#define MT_RXWI_SN GENMASK(15, 4)
+
+#define MT_RXWI_RATE_INDEX GENMASK(5, 0)
+#define MT_RXWI_RATE_LDPC BIT(6)
+#define MT_RXWI_RATE_BW GENMASK(8, 7)
+#define MT_RXWI_RATE_SGI BIT(9)
+#define MT_RXWI_RATE_STBC BIT(10)
+#define MT_RXWI_RATE_LDPC_EXSYM BIT(11)
+#define MT_RXWI_RATE_PHY GENMASK(15, 13)
+
+#define MT_RATE_INDEX_VHT_IDX GENMASK(3, 0)
+#define MT_RATE_INDEX_VHT_NSS GENMASK(5, 4)
+
+struct mt76x02_rxwi {
+ __le32 rxinfo;
+
+ __le32 ctl;
+
+ __le16 tid_sn;
+ __le16 rate;
+
+ u8 rssi[4];
+
+ __le32 bbp_rxinfo[4];
+};
+
+#define MT_TX_PWR_ADJ GENMASK(3, 0)
+
+enum mt76x2_phy_bandwidth {
+ MT_PHY_BW_20,
+ MT_PHY_BW_40,
+ MT_PHY_BW_80,
+};
+
+#define MT_TXWI_FLAGS_FRAG BIT(0)
+#define MT_TXWI_FLAGS_MMPS BIT(1)
+#define MT_TXWI_FLAGS_CFACK BIT(2)
+#define MT_TXWI_FLAGS_TS BIT(3)
+#define MT_TXWI_FLAGS_AMPDU BIT(4)
+#define MT_TXWI_FLAGS_MPDU_DENSITY GENMASK(7, 5)
+#define MT_TXWI_FLAGS_TXOP GENMASK(9, 8)
+#define MT_TXWI_FLAGS_NDPS BIT(10)
+#define MT_TXWI_FLAGS_RTSBWSIG BIT(11)
+#define MT_TXWI_FLAGS_NDP_BW GENMASK(13, 12)
+#define MT_TXWI_FLAGS_SOUND BIT(14)
+#define MT_TXWI_FLAGS_TX_RATE_LUT BIT(15)
+
+#define MT_TXWI_ACK_CTL_REQ BIT(0)
+#define MT_TXWI_ACK_CTL_NSEQ BIT(1)
+#define MT_TXWI_ACK_CTL_BA_WINDOW GENMASK(7, 2)
+
+struct mt76x02_txwi {
+ __le16 flags;
+ __le16 rate;
+ u8 ack_ctl;
+ u8 wcid;
+ __le16 len_ctl;
+ __le32 iv;
+ __le32 eiv;
+ u8 aid;
+ u8 txstream;
+ u8 ctl2;
+ u8 pktid;
+} __packed __aligned(4);
+
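+/* Poll MAC_CSR0 until the chip answers with something other than 0 or ~0,
+ * retrying for up to 500 iterations of 5-10 ms each.
+ */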
+static inline bool mt76x02_wait_for_mac(struct mt76_dev *dev)
+{
+ const u32 MAC_CSR0 = 0x1000;
+ int i;
+
+ for (i = 0; i < 500; i++) {
+ if (test_bit(MT76_REMOVED, &dev->phy.state))
+ return false;
+
+ switch (dev->bus->rr(dev, MAC_CSR0)) {
+ case 0:
+ case ~0:
+ break;
+ default:
+ return true;
+ }
+ usleep_range(5000, 10000);
+ }
+ return false;
+}
+
+void mt76x02_mac_reset_counters(struct mt76x02_dev *dev);
+void mt76x02_mac_set_short_preamble(struct mt76x02_dev *dev, bool enable);
+int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
+ u8 key_idx, struct ieee80211_key_conf *key);
+int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
+ struct ieee80211_key_conf *key);
+void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx,
+ struct ieee80211_key_conf *key);
+void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx, u8 vif_idx,
+ u8 *mac);
+void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop);
+void mt76x02_mac_wcid_set_rate(struct mt76x02_dev *dev, struct mt76_wcid *wcid,
+ const struct ieee80211_tx_rate *rate);
+bool mt76x02_mac_load_tx_status(struct mt76x02_dev *dev,
+ struct mt76x02_tx_status *stat);
+void mt76x02_send_tx_status(struct mt76x02_dev *dev,
+ struct mt76x02_tx_status *stat, u8 *update);
+int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
+ void *rxi);
+void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, bool legacy_prot,
+ int ht_mode);
+void mt76x02_mac_set_rts_thresh(struct mt76x02_dev *dev, u32 val);
+void mt76x02_mac_setaddr(struct mt76x02_dev *dev, const u8 *addr);
+void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta, int len);
+void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq);
+void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
+void mt76x02_update_channel(struct mt76_dev *mdev);
+void mt76x02_mac_work(struct work_struct *work);
+
+void mt76x02_mac_cc_reset(struct mt76x02_dev *dev);
+void mt76x02_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr);
+void mt76x02_mac_set_beacon(struct mt76x02_dev *dev, struct sk_buff *skb);
+void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev,
+ struct ieee80211_vif *vif, bool enable);
+
+void mt76x02_edcca_init(struct mt76x02_dev *dev);
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
new file mode 100644
index 000000000..267058086
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+
+#include "mt76x02_mcu.h"
+
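+/* Send an MCU command on the MCU TX queue, tagging it with a non-zero 4-bit
+ * sequence number, and optionally wait for the response whose FCE header
+ * carries the same sequence.
+ */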
+int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
+ int len, bool wait_resp)
+{
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+ unsigned long expires = jiffies + HZ;
+ struct sk_buff *skb;
+ u32 tx_info;
+ int ret;
+ u8 seq;
+
+ if (dev->mcu_timeout)
+ return -EIO;
+
+ skb = mt76_mcu_msg_alloc(mdev, data, len);
+ if (!skb)
+ return -ENOMEM;
+
+ mutex_lock(&mdev->mcu.mutex);
+
+ seq = ++mdev->mcu.msg_seq & 0xf;
+ if (!seq)
+ seq = ++mdev->mcu.msg_seq & 0xf;
+
+ tx_info = MT_MCU_MSG_TYPE_CMD |
+ FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
+ FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
+ FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
+ FIELD_PREP(MT_MCU_MSG_LEN, skb->len);
+
+ ret = mt76_tx_queue_skb_raw(dev, MT_TXQ_MCU, skb, tx_info);
+ if (ret)
+ goto out;
+
+ while (wait_resp) {
+ u32 *rxfce;
+ bool check_seq = false;
+
+ skb = mt76_mcu_get_response(&dev->mt76, expires);
+ if (!skb) {
+ dev_err(mdev->dev,
+ "MCU message %d (seq %d) timed out\n", cmd,
+ seq);
+ ret = -ETIMEDOUT;
+ dev->mcu_timeout = 1;
+ break;
+ }
+
+ rxfce = (u32 *)skb->cb;
+
+ if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, *rxfce))
+ check_seq = true;
+
+ dev_kfree_skb(skb);
+ if (check_seq)
+ break;
+ }
+
+out:
+ mutex_unlock(&mdev->mcu.mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76x02_mcu_msg_send);
+
+int mt76x02_mcu_function_select(struct mt76x02_dev *dev, enum mcu_function func,
+ u32 val)
+{
+ struct {
+ __le32 id;
+ __le32 value;
+ } __packed __aligned(4) msg = {
+ .id = cpu_to_le32(func),
+ .value = cpu_to_le32(val),
+ };
+ bool wait = false;
+
+ if (func != Q_SELECT)
+ wait = true;
+
+ return mt76_mcu_send_msg(dev, CMD_FUN_SET_OP, &msg, sizeof(msg), wait);
+}
+EXPORT_SYMBOL_GPL(mt76x02_mcu_function_select);
+
+int mt76x02_mcu_set_radio_state(struct mt76x02_dev *dev, bool on)
+{
+ struct {
+ __le32 mode;
+ __le32 level;
+ } __packed __aligned(4) msg = {
+ .mode = cpu_to_le32(on ? RADIO_ON : RADIO_OFF),
+ .level = cpu_to_le32(0),
+ };
+
+ return mt76_mcu_send_msg(dev, CMD_POWER_SAVING_OP, &msg, sizeof(msg),
+ false);
+}
+EXPORT_SYMBOL_GPL(mt76x02_mcu_set_radio_state);
+
+int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type, u32 param)
+{
+ struct {
+ __le32 id;
+ __le32 value;
+ } __packed __aligned(4) msg = {
+ .id = cpu_to_le32(type),
+ .value = cpu_to_le32(param),
+ };
+ bool is_mt76x2e = mt76_is_mmio(&dev->mt76) && is_mt76x2(dev);
+ int ret;
+
+ if (is_mt76x2e)
+ mt76_rmw(dev, MT_MCU_COM_REG0, BIT(31), 0);
+
+ ret = mt76_mcu_send_msg(dev, CMD_CALIBRATION_OP, &msg, sizeof(msg),
+ true);
+ if (ret)
+ return ret;
+
+ if (is_mt76x2e &&
+ WARN_ON(!mt76_poll_msec(dev, MT_MCU_COM_REG0,
+ BIT(31), BIT(31), 100)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_mcu_calibrate);
+
+int mt76x02_mcu_cleanup(struct mt76x02_dev *dev)
+{
+ struct sk_buff *skb;
+
+ mt76_wr(dev, MT_MCU_INT_LEVEL, 1);
+ usleep_range(20000, 30000);
+
+ while ((skb = skb_dequeue(&dev->mt76.mcu.res_q)) != NULL)
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_mcu_cleanup);
+
+void mt76x02_set_ethtool_fwver(struct mt76x02_dev *dev,
+ const struct mt76x02_fw_header *h)
+{
+ u16 bld = le16_to_cpu(h->build_ver);
+ u16 ver = le16_to_cpu(h->fw_ver);
+
+ snprintf(dev->mt76.hw->wiphy->fw_version,
+ sizeof(dev->mt76.hw->wiphy->fw_version),
+ "%d.%d.%02d-b%x",
+ (ver >> 12) & 0xf, (ver >> 8) & 0xf, ver & 0xf, bld);
+}
+EXPORT_SYMBOL_GPL(mt76x02_set_ethtool_fwver);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h
new file mode 100644
index 000000000..5fba1266c
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#ifndef __MT76x02_MCU_H
+#define __MT76x02_MCU_H
+
+#include "mt76x02.h"
+
+#define MT_MCU_RESET_CTL 0x070C
+#define MT_MCU_INT_LEVEL 0x0718
+#define MT_MCU_COM_REG0 0x0730
+#define MT_MCU_COM_REG1 0x0734
+#define MT_MCU_COM_REG2 0x0738
+#define MT_MCU_COM_REG3 0x073C
+
+#define MT_INBAND_PACKET_MAX_LEN 192
+#define MT_MCU_MEMMAP_WLAN 0x410000
+
+#define MT_MCU_PCIE_REMAP_BASE4 0x074C
+
+#define MT_MCU_SEMAPHORE_00 0x07B0
+#define MT_MCU_SEMAPHORE_01 0x07B4
+#define MT_MCU_SEMAPHORE_02 0x07B8
+#define MT_MCU_SEMAPHORE_03 0x07BC
+
+#define MT_MCU_ILM_ADDR 0x80000
+
+enum mcu_cmd {
+ CMD_FUN_SET_OP = 1,
+ CMD_LOAD_CR = 2,
+ CMD_INIT_GAIN_OP = 3,
+ CMD_DYNC_VGA_OP = 6,
+ CMD_TDLS_CH_SW = 7,
+ CMD_BURST_WRITE = 8,
+ CMD_READ_MODIFY_WRITE = 9,
+ CMD_RANDOM_READ = 10,
+ CMD_BURST_READ = 11,
+ CMD_RANDOM_WRITE = 12,
+ CMD_LED_MODE_OP = 16,
+ CMD_POWER_SAVING_OP = 20,
+ CMD_WOW_CONFIG = 21,
+ CMD_WOW_QUERY = 22,
+ CMD_WOW_FEATURE = 24,
+ CMD_CARRIER_DETECT_OP = 28,
+ CMD_RADOR_DETECT_OP = 29,
+ CMD_SWITCH_CHANNEL_OP = 30,
+ CMD_CALIBRATION_OP = 31,
+ CMD_BEACON_OP = 32,
+ CMD_ANTENNA_OP = 33,
+};
+
+enum mcu_power_mode {
+ RADIO_OFF = 0x30,
+ RADIO_ON = 0x31,
+ RADIO_OFF_AUTO_WAKEUP = 0x32,
+ RADIO_OFF_ADVANCE = 0x33,
+ RADIO_ON_ADVANCE = 0x34,
+};
+
+enum mcu_function {
+ Q_SELECT = 1,
+ BW_SETTING = 2,
+ USB2_SW_DISCONNECT = 2,
+ USB3_SW_DISCONNECT = 3,
+ LOG_FW_DEBUG_MSG = 4,
+ GET_FW_VERSION = 5,
+};
+
+struct mt76x02_fw_header {
+ __le32 ilm_len;
+ __le32 dlm_len;
+ __le16 build_ver;
+ __le16 fw_ver;
+ u8 pad[4];
+ char build_time[16];
+};
+
+struct mt76x02_patch_header {
+ char build_time[16];
+ char platform[4];
+ char hw_version[4];
+ char patch_version[4];
+ u8 pad[2];
+};
+
+int mt76x02_mcu_cleanup(struct mt76x02_dev *dev);
+int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type, u32 param);
+int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
+ int len, bool wait_resp);
+int mt76x02_mcu_function_select(struct mt76x02_dev *dev, enum mcu_function func,
+ u32 val);
+int mt76x02_mcu_set_radio_state(struct mt76x02_dev *dev, bool on);
+void mt76x02_set_ethtool_fwver(struct mt76x02_dev *dev,
+ const struct mt76x02_fw_header *h);
+
+#endif /* __MT76x02_MCU_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
new file mode 100644
index 000000000..cf68731bd
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -0,0 +1,569 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/irq.h>
+
+#include "mt76x02.h"
+#include "mt76x02_mcu.h"
+#include "trace.h"
+
+static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
+{
+ struct mt76x02_dev *dev = (struct mt76x02_dev *)arg;
+ struct mt76_queue *q = dev->mt76.q_tx[MT_TXQ_PSD];
+ struct beacon_bc_data data = {};
+ struct sk_buff *skb;
+ int i;
+
+ if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ return;
+
+ mt76x02_resync_beacon_timer(dev);
+
+ /* Prevent corrupt transmissions during update */
+ mt76_set(dev, MT_BCN_BYPASS_MASK, 0xffff);
+ dev->beacon_data_count = 0;
+
+ ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt76x02_update_beacon_iter, dev);
+
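+	/* Re-enable transmission for the slots that now hold beacon data,
+	 * keeping the bypass bits set for the unused ones.
+	 */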
+ mt76_wr(dev, MT_BCN_BYPASS_MASK,
+ 0xff00 | ~(0xff00 >> dev->beacon_data_count));
+
+ mt76_csa_check(&dev->mt76);
+
+ if (dev->mt76.csa_complete)
+ return;
+
+ mt76x02_enqueue_buffered_bc(dev, &data, 8);
+
+ if (!skb_queue_len(&data.q))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(data.tail); i++) {
+ if (!data.tail[i])
+ continue;
+
+ mt76_skb_set_moredata(data.tail[i], false);
+ }
+
+ spin_lock_bh(&q->lock);
+ while ((skb = __skb_dequeue(&data.q)) != NULL) {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
+
+ mt76_tx_queue_skb(dev, MT_TXQ_PSD, skb, &mvif->group_wcid,
+ NULL);
+ }
+ spin_unlock_bh(&q->lock);
+}
+
+static void mt76x02e_pre_tbtt_enable(struct mt76x02_dev *dev, bool en)
+{
+ if (en)
+ tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
+ else
+ tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
+}
+
+static void mt76x02e_beacon_enable(struct mt76x02_dev *dev, bool en)
+{
+ mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en);
+ if (en)
+ mt76x02_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
+ else
+ mt76x02_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
+}
+
+void mt76x02e_init_beacon_config(struct mt76x02_dev *dev)
+{
+ static const struct mt76x02_beacon_ops beacon_ops = {
+ .nslots = 8,
+ .slot_size = 1024,
+ .pre_tbtt_enable = mt76x02e_pre_tbtt_enable,
+ .beacon_enable = mt76x02e_beacon_enable,
+ };
+
+ dev->beacon_ops = &beacon_ops;
+
+ /* Fire a pre-TBTT interrupt 8 ms before TBTT */
+ mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT,
+ 8 << 4);
+ mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER,
+ MT_DFS_GP_INTERVAL);
+ mt76_wr(dev, MT_INT_TIMER_EN, 0);
+
+ mt76x02_init_beacon_config(dev);
+}
+EXPORT_SYMBOL_GPL(mt76x02e_init_beacon_config);
+
+static int
+mt76x02_init_tx_queue(struct mt76x02_dev *dev, int qid, int idx, int n_desc)
+{
+ struct mt76_queue *hwq;
+ int err;
+
+ hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
+ if (!hwq)
+ return -ENOMEM;
+
+ err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);
+ if (err < 0)
+ return err;
+
+ dev->mt76.q_tx[qid] = hwq;
+
+ mt76x02_irq_enable(dev, MT_INT_TX_DONE(idx));
+
+ return 0;
+}
+
+static int
+mt76x02_init_rx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
+ int idx, int n_desc, int bufsize)
+{
+ int err;
+
+ err = mt76_queue_alloc(dev, q, idx, n_desc, bufsize,
+ MT_RX_RING_BASE);
+ if (err < 0)
+ return err;
+
+ mt76x02_irq_enable(dev, MT_INT_RX_DONE(idx));
+
+ return 0;
+}
+
+static void mt76x02_process_tx_status_fifo(struct mt76x02_dev *dev)
+{
+ struct mt76x02_tx_status stat;
+ u8 update = 1;
+
+ while (kfifo_get(&dev->txstatus_fifo, &stat))
+ mt76x02_send_tx_status(dev, &stat, &update);
+}
+
+static void mt76x02_tx_worker(struct mt76_worker *w)
+{
+ struct mt76x02_dev *dev;
+
+ dev = container_of(w, struct mt76x02_dev, mt76.tx_worker);
+
+ mt76x02_mac_poll_tx_status(dev, false);
+ mt76x02_process_tx_status_fifo(dev);
+
+ mt76_txq_schedule_all(&dev->mphy);
+}
+
+static int mt76x02_poll_tx(struct napi_struct *napi, int budget)
+{
+ struct mt76x02_dev *dev = container_of(napi, struct mt76x02_dev,
+ mt76.tx_napi);
+ int i;
+
+ mt76x02_mac_poll_tx_status(dev, false);
+
+ for (i = MT_TXQ_MCU; i >= 0; i--)
+ mt76_queue_tx_cleanup(dev, i, false);
+
+ if (napi_complete_done(napi, 0))
+ mt76x02_irq_enable(dev, MT_INT_TX_DONE_ALL);
+
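+	/* Clean up once more in case new completions arrived while the
+	 * interrupt was being re-enabled.
+	 */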
+ for (i = MT_TXQ_MCU; i >= 0; i--)
+ mt76_queue_tx_cleanup(dev, i, false);
+
+ mt76_worker_schedule(&dev->mt76.tx_worker);
+
+ return 0;
+}
+
+int mt76x02_dma_init(struct mt76x02_dev *dev)
+{
+ struct mt76_txwi_cache __maybe_unused *t;
+ int i, ret, fifo_size;
+ struct mt76_queue *q;
+ void *status_fifo;
+
+ BUILD_BUG_ON(sizeof(struct mt76x02_rxwi) > MT_RX_HEADROOM);
+
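+	/* kfifo needs a power-of-two size; reserve room for 32 TX status entries */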
+ fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76x02_tx_status));
+ status_fifo = devm_kzalloc(dev->mt76.dev, fifo_size, GFP_KERNEL);
+ if (!status_fifo)
+ return -ENOMEM;
+
+ dev->mt76.tx_worker.fn = mt76x02_tx_worker;
+ tasklet_init(&dev->mt76.pre_tbtt_tasklet, mt76x02_pre_tbtt_tasklet,
+ (unsigned long)dev);
+
+ spin_lock_init(&dev->txstatus_fifo_lock);
+ kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);
+
+ mt76_dma_attach(&dev->mt76);
+
+ mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ ret = mt76x02_init_tx_queue(dev, i, mt76_ac_to_hwq(i),
+ MT76x02_TX_RING_SIZE);
+ if (ret)
+ return ret;
+ }
+
+ ret = mt76x02_init_tx_queue(dev, MT_TXQ_PSD,
+ MT_TX_HW_QUEUE_MGMT, MT76x02_PSD_RING_SIZE);
+ if (ret)
+ return ret;
+
+ ret = mt76x02_init_tx_queue(dev, MT_TXQ_MCU,
+ MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE);
+ if (ret)
+ return ret;
+
+ ret = mt76x02_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
+ MT_MCU_RING_SIZE, MT_RX_BUF_SIZE);
+ if (ret)
+ return ret;
+
+ q = &dev->mt76.q_rx[MT_RXQ_MAIN];
+ q->buf_offset = MT_RX_HEADROOM - sizeof(struct mt76x02_rxwi);
+ ret = mt76x02_init_rx_queue(dev, q, 0, MT76X02_RX_RING_SIZE,
+ MT_RX_BUF_SIZE);
+ if (ret)
+ return ret;
+
+ ret = mt76_init_queues(dev);
+ if (ret)
+ return ret;
+
+ netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi,
+ mt76x02_poll_tx, NAPI_POLL_WEIGHT);
+ napi_enable(&dev->mt76.tx_napi);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_dma_init);
+
+void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
+{
+ struct mt76x02_dev *dev;
+
+ dev = container_of(mdev, struct mt76x02_dev, mt76);
+ mt76x02_irq_enable(dev, MT_INT_RX_DONE(q));
+}
+EXPORT_SYMBOL_GPL(mt76x02_rx_poll_complete);
+
+irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
+{
+ struct mt76x02_dev *dev = dev_instance;
+ u32 intr, mask;
+
+ intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+ intr &= dev->mt76.mmio.irqmask;
+ mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
+
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
+ return IRQ_NONE;
+
+ trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
+
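+	/* Mask the interrupts that will be serviced from NAPI or tasklet context */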
+ mask = intr & (MT_INT_RX_DONE_ALL | MT_INT_GPTIMER);
+ if (intr & (MT_INT_TX_DONE_ALL | MT_INT_TX_STAT))
+ mask |= MT_INT_TX_DONE_ALL;
+
+ mt76x02_irq_disable(dev, mask);
+
+ if (intr & MT_INT_RX_DONE(0))
+ napi_schedule(&dev->mt76.napi[0]);
+
+ if (intr & MT_INT_RX_DONE(1))
+ napi_schedule(&dev->mt76.napi[1]);
+
+ if (intr & MT_INT_PRE_TBTT)
+ tasklet_schedule(&dev->mt76.pre_tbtt_tasklet);
+
+ /* send buffered multicast frames now */
+ if (intr & MT_INT_TBTT) {
+ if (dev->mt76.csa_complete)
+ mt76_csa_finish(&dev->mt76);
+ else
+ mt76_queue_kick(dev, dev->mt76.q_tx[MT_TXQ_PSD]);
+ }
+
+ if (intr & MT_INT_TX_STAT)
+ mt76x02_mac_poll_tx_status(dev, true);
+
+ if (intr & (MT_INT_TX_STAT | MT_INT_TX_DONE_ALL))
+ napi_schedule(&dev->mt76.tx_napi);
+
+ if (intr & MT_INT_GPTIMER)
+ tasklet_schedule(&dev->dfs_pd.dfs_tasklet);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_GPL(mt76x02_irq_handler);
+
+static void mt76x02_dma_enable(struct mt76x02_dev *dev)
+{
+ u32 val;
+
+ mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
+ mt76x02_wait_for_wpdma(&dev->mt76, 1000);
+ usleep_range(50, 100);
+
+ val = FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3) |
+ MT_WPDMA_GLO_CFG_TX_DMA_EN |
+ MT_WPDMA_GLO_CFG_RX_DMA_EN;
+ mt76_set(dev, MT_WPDMA_GLO_CFG, val);
+ mt76_clear(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
+}
+
+void mt76x02_dma_disable(struct mt76x02_dev *dev)
+{
+ u32 val = mt76_rr(dev, MT_WPDMA_GLO_CFG);
+
+ val &= MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
+ MT_WPDMA_GLO_CFG_BIG_ENDIAN |
+ MT_WPDMA_GLO_CFG_HDR_SEG_LEN;
+ val |= MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE;
+ mt76_wr(dev, MT_WPDMA_GLO_CFG, val);
+}
+EXPORT_SYMBOL_GPL(mt76x02_dma_disable);
+
+void mt76x02_mac_start(struct mt76x02_dev *dev)
+{
+ mt76x02_mac_reset_counters(dev);
+ mt76x02_dma_enable(dev);
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
+ mt76_wr(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_TX |
+ MT_MAC_SYS_CTRL_ENABLE_RX);
+ mt76x02_irq_enable(dev,
+ MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
+ MT_INT_TX_STAT);
+}
+EXPORT_SYMBOL_GPL(mt76x02_mac_start);
+
+static bool mt76x02_tx_hang(struct mt76x02_dev *dev)
+{
+ u32 dma_idx, prev_dma_idx;
+ struct mt76_queue *q;
+ int i;
+
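+	/* A non-empty AC queue whose DMA index has not moved since the last
+	 * check indicates a stalled TX path.
+	 */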
+ for (i = 0; i < 4; i++) {
+ q = dev->mt76.q_tx[i];
+
+ if (!q->queued)
+ continue;
+
+ prev_dma_idx = dev->mt76.tx_dma_idx[i];
+ dma_idx = readl(&q->regs->dma_idx);
+ dev->mt76.tx_dma_idx[i] = dma_idx;
+
+ if (prev_dma_idx == dma_idx)
+ break;
+ }
+
+ return i < 4;
+}
+
+static void mt76x02_key_sync(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key, void *data)
+{
+ struct mt76x02_dev *dev = hw->priv;
+ struct mt76_wcid *wcid;
+
+ if (!sta)
+ return;
+
+ wcid = (struct mt76_wcid *)sta->drv_priv;
+
+ if (wcid->hw_key_idx != key->keyidx || wcid->sw_iv)
+ return;
+
+ mt76x02_mac_wcid_sync_pn(dev, wcid->idx, key);
+}
+
+static void mt76x02_reset_state(struct mt76x02_dev *dev)
+{
+ int i;
+
+ lockdep_assert_held(&dev->mt76.mutex);
+
+ clear_bit(MT76_STATE_RUNNING, &dev->mphy.state);
+
+ rcu_read_lock();
+ ieee80211_iter_keys_rcu(dev->mt76.hw, NULL, mt76x02_key_sync, NULL);
+ rcu_read_unlock();
+
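+	/* Detach all station entries; mac80211 re-creates them after the
+	 * hardware restart.
+	 */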
+ for (i = 0; i < MT76x02_N_WCIDS; i++) {
+ struct ieee80211_sta *sta;
+ struct ieee80211_vif *vif;
+ struct mt76x02_sta *msta;
+ struct mt76_wcid *wcid;
+ void *priv;
+
+ wcid = rcu_dereference_protected(dev->mt76.wcid[i],
+ lockdep_is_held(&dev->mt76.mutex));
+ if (!wcid)
+ continue;
+
+ rcu_assign_pointer(dev->mt76.wcid[i], NULL);
+
+ priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
+ sta = container_of(priv, struct ieee80211_sta, drv_priv);
+
+ priv = msta->vif;
+ vif = container_of(priv, struct ieee80211_vif, drv_priv);
+
+ __mt76_sta_remove(&dev->mt76, vif, sta);
+ memset(msta, 0, sizeof(*msta));
+ }
+
+ dev->mphy.vif_mask = 0;
+ dev->mt76.beacon_mask = 0;
+}
+
+static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
+{
+ u32 mask = dev->mt76.mmio.irqmask;
+ bool restart = dev->mt76.mcu_ops->mcu_restart;
+ int i;
+
+ ieee80211_stop_queues(dev->mt76.hw);
+ set_bit(MT76_RESET, &dev->mphy.state);
+
+ tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
+ mt76_worker_disable(&dev->mt76.tx_worker);
+ napi_disable(&dev->mt76.tx_napi);
+
+ mt76_for_each_q_rx(&dev->mt76, i) {
+ napi_disable(&dev->mt76.napi[i]);
+ }
+
+ mutex_lock(&dev->mt76.mutex);
+
+ dev->mcu_timeout = 0;
+ if (restart)
+ mt76x02_reset_state(dev);
+
+ if (dev->mt76.beacon_mask)
+ mt76_clear(dev, MT_BEACON_TIME_CFG,
+ MT_BEACON_TIME_CFG_BEACON_TX |
+ MT_BEACON_TIME_CFG_TBTT_EN);
+
+ mt76x02_irq_disable(dev, mask);
+
+ /* perform device reset */
+ mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
+ mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
+ mt76_clear(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_EN | MT_WPDMA_GLO_CFG_RX_DMA_EN);
+ usleep_range(5000, 10000);
+ mt76_wr(dev, MT_INT_SOURCE_CSR, 0xffffffff);
+
+ /* let fw reset DMA */
+ mt76_set(dev, 0x734, 0x3);
+
+ if (restart)
+ mt76_mcu_restart(dev);
+
+ for (i = 0; i < __MT_TXQ_MAX; i++)
+ mt76_queue_tx_cleanup(dev, i, true);
+
+ mt76_for_each_q_rx(&dev->mt76, i) {
+ mt76_queue_rx_reset(dev, i);
+ }
+
+ mt76x02_mac_start(dev);
+
+ if (dev->ed_monitor)
+ mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
+
+ if (dev->mt76.beacon_mask && !restart)
+ mt76_set(dev, MT_BEACON_TIME_CFG,
+ MT_BEACON_TIME_CFG_BEACON_TX |
+ MT_BEACON_TIME_CFG_TBTT_EN);
+
+ mt76x02_irq_enable(dev, mask);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ clear_bit(MT76_RESET, &dev->mphy.state);
+
+ mt76_worker_enable(&dev->mt76.tx_worker);
+ napi_enable(&dev->mt76.tx_napi);
+ napi_schedule(&dev->mt76.tx_napi);
+
+ tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
+
+ mt76_for_each_q_rx(&dev->mt76, i) {
+ napi_enable(&dev->mt76.napi[i]);
+ napi_schedule(&dev->mt76.napi[i]);
+ }
+
+ if (restart) {
+ set_bit(MT76_RESTART, &dev->mphy.state);
+ mt76x02_mcu_function_select(dev, Q_SELECT, 1);
+ ieee80211_restart_hw(dev->mt76.hw);
+ } else {
+ ieee80211_wake_queues(dev->mt76.hw);
+ mt76_txq_schedule_all(&dev->mphy);
+ }
+}
+
+void mt76x02_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type)
+{
+ struct mt76x02_dev *dev = hw->priv;
+
+ if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
+ return;
+
+ clear_bit(MT76_RESTART, &dev->mphy.state);
+}
+EXPORT_SYMBOL_GPL(mt76x02_reconfig_complete);
+
+static void mt76x02_check_tx_hang(struct mt76x02_dev *dev)
+{
+ if (test_bit(MT76_RESTART, &dev->mphy.state))
+ return;
+
+ if (mt76x02_tx_hang(dev)) {
+ if (++dev->tx_hang_check >= MT_TX_HANG_TH)
+ goto restart;
+ } else {
+ dev->tx_hang_check = 0;
+ }
+
+ if (dev->mcu_timeout)
+ goto restart;
+
+ return;
+
+restart:
+ mt76x02_watchdog_reset(dev);
+
+ dev->tx_hang_reset++;
+ dev->tx_hang_check = 0;
+ memset(dev->mt76.tx_dma_idx, 0xff,
+ sizeof(dev->mt76.tx_dma_idx));
+}
+
+void mt76x02_wdt_work(struct work_struct *work)
+{
+ struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
+ wdt_work.work);
+
+ mt76x02_check_tx_hang(dev);
+
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->wdt_work,
+ MT_WATCHDOG_TIME);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
new file mode 100644
index 000000000..aaadc15ea
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include <linux/kernel.h>
+
+#include "mt76x02.h"
+#include "mt76x02_phy.h"
+
+void mt76x02_phy_set_rxpath(struct mt76x02_dev *dev)
+{
+ u32 val;
+
+ val = mt76_rr(dev, MT_BBP(AGC, 0));
+ val &= ~BIT(4);
+
+ switch (dev->chainmask & 0xf) {
+ case 2:
+ val |= BIT(3);
+ break;
+ default:
+ val &= ~BIT(3);
+ break;
+ }
+
+ mt76_wr(dev, MT_BBP(AGC, 0), val);
+ mb();
+ val = mt76_rr(dev, MT_BBP(AGC, 0));
+}
+EXPORT_SYMBOL_GPL(mt76x02_phy_set_rxpath);
+
+void mt76x02_phy_set_txdac(struct mt76x02_dev *dev)
+{
+ int txpath;
+
+ txpath = (dev->chainmask >> 8) & 0xf;
+ switch (txpath) {
+ case 2:
+ mt76_set(dev, MT_BBP(TXBE, 5), 0x3);
+ break;
+ default:
+ mt76_clear(dev, MT_BBP(TXBE, 5), 0x3);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(mt76x02_phy_set_txdac);
+
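+/* Pack four 6-bit per-rate power values into the byte lanes of one register */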
+static u32
+mt76x02_tx_power_mask(u8 v1, u8 v2, u8 v3, u8 v4)
+{
+ u32 val = 0;
+
+ val |= (v1 & (BIT(6) - 1)) << 0;
+ val |= (v2 & (BIT(6) - 1)) << 8;
+ val |= (v3 & (BIT(6) - 1)) << 16;
+ val |= (v4 & (BIT(6) - 1)) << 24;
+ return val;
+}
+
+int mt76x02_get_max_rate_power(struct mt76_rate_power *r)
+{
+ s8 ret = 0;
+ int i;
+
+ for (i = 0; i < sizeof(r->all); i++)
+ ret = max(ret, r->all[i]);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76x02_get_max_rate_power);
+
+void mt76x02_limit_rate_power(struct mt76_rate_power *r, int limit)
+{
+ int i;
+
+ for (i = 0; i < sizeof(r->all); i++)
+ if (r->all[i] > limit)
+ r->all[i] = limit;
+}
+EXPORT_SYMBOL_GPL(mt76x02_limit_rate_power);
+
+void mt76x02_add_rate_power_offset(struct mt76_rate_power *r, int offset)
+{
+ int i;
+
+ for (i = 0; i < sizeof(r->all); i++)
+ r->all[i] += offset;
+}
+EXPORT_SYMBOL_GPL(mt76x02_add_rate_power_offset);
+
+void mt76x02_phy_set_txpower(struct mt76x02_dev *dev, int txp_0, int txp_1)
+{
+ struct mt76_rate_power *t = &dev->mt76.rate_power;
+
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, txp_0);
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, txp_1);
+
+ mt76_wr(dev, MT_TX_PWR_CFG_0,
+ mt76x02_tx_power_mask(t->cck[0], t->cck[2], t->ofdm[0],
+ t->ofdm[2]));
+ mt76_wr(dev, MT_TX_PWR_CFG_1,
+ mt76x02_tx_power_mask(t->ofdm[4], t->ofdm[6], t->ht[0],
+ t->ht[2]));
+ mt76_wr(dev, MT_TX_PWR_CFG_2,
+ mt76x02_tx_power_mask(t->ht[4], t->ht[6], t->ht[8],
+ t->ht[10]));
+ mt76_wr(dev, MT_TX_PWR_CFG_3,
+ mt76x02_tx_power_mask(t->ht[12], t->ht[14], t->stbc[0],
+ t->stbc[2]));
+ mt76_wr(dev, MT_TX_PWR_CFG_4,
+ mt76x02_tx_power_mask(t->stbc[4], t->stbc[6], 0, 0));
+ mt76_wr(dev, MT_TX_PWR_CFG_7,
+ mt76x02_tx_power_mask(t->ofdm[7], t->vht[8], t->ht[7],
+ t->vht[9]));
+ mt76_wr(dev, MT_TX_PWR_CFG_8,
+ mt76x02_tx_power_mask(t->ht[14], 0, t->vht[8], t->vht[9]));
+ mt76_wr(dev, MT_TX_PWR_CFG_9,
+ mt76x02_tx_power_mask(t->ht[7], 0, t->stbc[8], t->stbc[9]));
+}
+EXPORT_SYMBOL_GPL(mt76x02_phy_set_txpower);
+
+void mt76x02_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl)
+{
+ int core_val, agc_val;
+
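+	/* Translate the channel width into the BBP core and AGC bandwidth codes */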
+ switch (width) {
+ case NL80211_CHAN_WIDTH_80:
+ core_val = 3;
+ agc_val = 7;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ core_val = 2;
+ agc_val = 3;
+ break;
+ default:
+ core_val = 0;
+ agc_val = 1;
+ break;
+ }
+
+ mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
+ mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
+ mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
+ mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
+}
+EXPORT_SYMBOL_GPL(mt76x02_phy_set_bw);
+
+void mt76x02_phy_set_band(struct mt76x02_dev *dev, int band,
+ bool primary_upper)
+{
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+ mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+ break;
+ case NL80211_BAND_5GHZ:
+ mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+ mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+ break;
+ }
+
+ mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
+ primary_upper);
+}
+EXPORT_SYMBOL_GPL(mt76x02_phy_set_band);
+
+bool mt76x02_phy_adjust_vga_gain(struct mt76x02_dev *dev)
+{
+ u8 limit = dev->cal.low_gain > 0 ? 16 : 4;
+ bool ret = false;
+ u32 false_cca;
+
+ false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS,
+ mt76_rr(dev, MT_RX_STAT_1));
+ dev->cal.false_cca = false_cca;
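+	/* Many false CCA events: increase the gain backoff; once they drop
+	 * again, step the backoff back down.
+	 */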
+ if (false_cca > 800 && dev->cal.agc_gain_adjust < limit) {
+ dev->cal.agc_gain_adjust += 2;
+ ret = true;
+ } else if ((false_cca < 10 && dev->cal.agc_gain_adjust > 0) ||
+ (dev->cal.agc_gain_adjust >= limit && false_cca < 500)) {
+ dev->cal.agc_gain_adjust -= 2;
+ ret = true;
+ }
+
+ dev->cal.agc_lowest_gain = dev->cal.agc_gain_adjust >= limit;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76x02_phy_adjust_vga_gain);
+
+void mt76x02_init_agc_gain(struct mt76x02_dev *dev)
+{
+ dev->cal.agc_gain_init[0] = mt76_get_field(dev, MT_BBP(AGC, 8),
+ MT_BBP_AGC_GAIN);
+ dev->cal.agc_gain_init[1] = mt76_get_field(dev, MT_BBP(AGC, 9),
+ MT_BBP_AGC_GAIN);
+ memcpy(dev->cal.agc_gain_cur, dev->cal.agc_gain_init,
+ sizeof(dev->cal.agc_gain_cur));
+ dev->cal.low_gain = -1;
+ dev->cal.gain_init_done = true;
+}
+EXPORT_SYMBOL_GPL(mt76x02_init_agc_gain);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h
new file mode 100644
index 000000000..1def25bf7
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#ifndef __MT76x02_PHY_H
+#define __MT76x02_PHY_H
+
+#include "mt76x02_regs.h"
+
+static inline int
+mt76x02_get_rssi_gain_thresh(struct mt76x02_dev *dev)
+{
+ switch (dev->mphy.chandef.width) {
+ case NL80211_CHAN_WIDTH_80:
+ return -62;
+ case NL80211_CHAN_WIDTH_40:
+ return -65;
+ default:
+ return -68;
+ }
+}
+
+static inline int
+mt76x02_get_low_rssi_gain_thresh(struct mt76x02_dev *dev)
+{
+ switch (dev->mphy.chandef.width) {
+ case NL80211_CHAN_WIDTH_80:
+ return -76;
+ case NL80211_CHAN_WIDTH_40:
+ return -79;
+ default:
+ return -82;
+ }
+}
+
+void mt76x02_add_rate_power_offset(struct mt76_rate_power *r, int offset);
+void mt76x02_phy_set_txpower(struct mt76x02_dev *dev, int txp_0, int txp_2);
+void mt76x02_limit_rate_power(struct mt76_rate_power *r, int limit);
+int mt76x02_get_max_rate_power(struct mt76_rate_power *r);
+void mt76x02_phy_set_rxpath(struct mt76x02_dev *dev);
+void mt76x02_phy_set_txdac(struct mt76x02_dev *dev);
+void mt76x02_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl);
+void mt76x02_phy_set_band(struct mt76x02_dev *dev, int band,
+ bool primary_upper);
+bool mt76x02_phy_adjust_vga_gain(struct mt76x02_dev *dev);
+void mt76x02_init_agc_gain(struct mt76x02_dev *dev);
+
+#endif /* __MT76x02_PHY_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
new file mode 100644
index 000000000..3e722276b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
@@ -0,0 +1,706 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#ifndef __MT76X02_REGS_H
+#define __MT76X02_REGS_H
+
+#define MT_ASIC_VERSION 0x0000
+
+#define MT76XX_REV_E3 0x22
+#define MT76XX_REV_E4 0x33
+
+#define MT_CMB_CTRL 0x0020
+#define MT_CMB_CTRL_XTAL_RDY BIT(22)
+#define MT_CMB_CTRL_PLL_LD BIT(23)
+
+#define MT_EFUSE_CTRL 0x0024
+#define MT_EFUSE_CTRL_AOUT GENMASK(5, 0)
+#define MT_EFUSE_CTRL_MODE GENMASK(7, 6)
+#define MT_EFUSE_CTRL_LDO_OFF_TIME GENMASK(13, 8)
+#define MT_EFUSE_CTRL_LDO_ON_TIME GENMASK(15, 14)
+#define MT_EFUSE_CTRL_AIN GENMASK(25, 16)
+#define MT_EFUSE_CTRL_KICK BIT(30)
+#define MT_EFUSE_CTRL_SEL BIT(31)
+
+#define MT_EFUSE_DATA_BASE 0x0028
+#define MT_EFUSE_DATA(_n) (MT_EFUSE_DATA_BASE + ((_n) << 2))
+
+#define MT_COEXCFG0 0x0040
+#define MT_COEXCFG0_COEX_EN BIT(0)
+
+#define MT_WLAN_FUN_CTRL 0x0080
+#define MT_WLAN_FUN_CTRL_WLAN_EN BIT(0)
+#define MT_WLAN_FUN_CTRL_WLAN_CLK_EN BIT(1)
+#define MT_WLAN_FUN_CTRL_WLAN_RESET_RF BIT(2)
+
+#define MT_COEXCFG3 0x004c
+
+#define MT_LDO_CTRL_0 0x006c
+#define MT_LDO_CTRL_1 0x0070
+
+#define MT_WLAN_FUN_CTRL_WLAN_RESET BIT(3) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_CSR_F20M_CKEN BIT(3) /* MT76x2 */
+
+#define MT_WLAN_FUN_CTRL_PCIE_CLK_REQ BIT(4)
+#define MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL BIT(5)
+#define MT_WLAN_FUN_CTRL_INV_ANT_SEL BIT(6)
+#define MT_WLAN_FUN_CTRL_WAKE_HOST BIT(7)
+
+#define MT_WLAN_FUN_CTRL_THERM_RST BIT(8) /* MT76x2 */
+#define MT_WLAN_FUN_CTRL_THERM_CKEN BIT(9) /* MT76x2 */
+
+#define MT_WLAN_FUN_CTRL_GPIO_IN GENMASK(15, 8) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_GPIO_OUT GENMASK(23, 16) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_GPIO_OUT_EN GENMASK(31, 24) /* MT76x0 */
+
+/* MT76x0 */
+#define MT_CSR_EE_CFG1 0x0104
+
+#define MT_XO_CTRL0 0x0100
+#define MT_XO_CTRL1 0x0104
+#define MT_XO_CTRL2 0x0108
+#define MT_XO_CTRL3 0x010c
+#define MT_XO_CTRL4 0x0110
+
+#define MT_XO_CTRL5 0x0114
+#define MT_XO_CTRL5_C2_VAL GENMASK(14, 8)
+
+#define MT_XO_CTRL6 0x0118
+#define MT_XO_CTRL6_C2_CTRL GENMASK(14, 8)
+
+#define MT_XO_CTRL7 0x011c
+
+#define MT_IOCFG_6 0x0124
+
+#define MT_USB_U3DMA_CFG 0x9018
+#define MT_USB_DMA_CFG_RX_BULK_AGG_TOUT GENMASK(7, 0)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_LMT GENMASK(15, 8)
+#define MT_USB_DMA_CFG_UDMA_TX_WL_DROP BIT(16)
+#define MT_USB_DMA_CFG_WAKE_UP_EN BIT(17)
+#define MT_USB_DMA_CFG_RX_DROP_OR_PAD BIT(18)
+#define MT_USB_DMA_CFG_TX_CLR BIT(19)
+#define MT_USB_DMA_CFG_TXOP_HALT BIT(20)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_EN BIT(21)
+#define MT_USB_DMA_CFG_RX_BULK_EN BIT(22)
+#define MT_USB_DMA_CFG_TX_BULK_EN BIT(23)
+#define MT_USB_DMA_CFG_EP_OUT_VALID GENMASK(29, 24)
+#define MT_USB_DMA_CFG_RX_BUSY BIT(30)
+#define MT_USB_DMA_CFG_TX_BUSY BIT(31)
+
+#define MT_WLAN_MTC_CTRL 0x10148
+#define MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP BIT(0)
+#define MT_WLAN_MTC_CTRL_PWR_ACK BIT(12)
+#define MT_WLAN_MTC_CTRL_PWR_ACK_S BIT(13)
+#define MT_WLAN_MTC_CTRL_BBP_MEM_PD GENMASK(19, 16)
+#define MT_WLAN_MTC_CTRL_PBF_MEM_PD BIT(20)
+#define MT_WLAN_MTC_CTRL_FCE_MEM_PD BIT(21)
+#define MT_WLAN_MTC_CTRL_TSO_MEM_PD BIT(22)
+#define MT_WLAN_MTC_CTRL_BBP_MEM_RB BIT(24)
+#define MT_WLAN_MTC_CTRL_PBF_MEM_RB BIT(25)
+#define MT_WLAN_MTC_CTRL_FCE_MEM_RB BIT(26)
+#define MT_WLAN_MTC_CTRL_TSO_MEM_RB BIT(27)
+#define MT_WLAN_MTC_CTRL_STATE_UP BIT(28)
+
+#define MT_INT_SOURCE_CSR 0x0200
+#define MT_INT_MASK_CSR 0x0204
+
+#define MT_INT_RX_DONE(_n) BIT(_n)
+#define MT_INT_RX_DONE_ALL GENMASK(1, 0)
+#define MT_INT_TX_DONE_ALL GENMASK(13, 4)
+#define MT_INT_TX_DONE(_n) BIT((_n) + 4)
+#define MT_INT_RX_COHERENT BIT(16)
+#define MT_INT_TX_COHERENT BIT(17)
+#define MT_INT_ANY_COHERENT BIT(18)
+#define MT_INT_MCU_CMD BIT(19)
+#define MT_INT_TBTT BIT(20)
+#define MT_INT_PRE_TBTT BIT(21)
+#define MT_INT_TX_STAT BIT(22)
+#define MT_INT_AUTO_WAKEUP BIT(23)
+#define MT_INT_GPTIMER BIT(24)
+#define MT_INT_RXDELAYINT BIT(26)
+#define MT_INT_TXDELAYINT BIT(27)
+
+#define MT_WPDMA_GLO_CFG 0x0208
+#define MT_WPDMA_GLO_CFG_TX_DMA_EN BIT(0)
+#define MT_WPDMA_GLO_CFG_TX_DMA_BUSY BIT(1)
+#define MT_WPDMA_GLO_CFG_RX_DMA_EN BIT(2)
+#define MT_WPDMA_GLO_CFG_RX_DMA_BUSY BIT(3)
+#define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE GENMASK(5, 4)
+#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE BIT(6)
+#define MT_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7)
+#define MT_WPDMA_GLO_CFG_HDR_SEG_LEN GENMASK(15, 8)
+#define MT_WPDMA_GLO_CFG_CLK_GATE_DIS BIT(30)
+#define MT_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31)
+
+#define MT_WPDMA_RST_IDX 0x020c
+
+#define MT_WPDMA_DELAY_INT_CFG 0x0210
+
+#define MT_WMM_AIFSN 0x0214
+#define MT_WMM_AIFSN_MASK GENMASK(3, 0)
+#define MT_WMM_AIFSN_SHIFT(_n) ((_n) * 4)
+
+#define MT_WMM_CWMIN 0x0218
+#define MT_WMM_CWMIN_MASK GENMASK(3, 0)
+#define MT_WMM_CWMIN_SHIFT(_n) ((_n) * 4)
+
+#define MT_WMM_CWMAX 0x021c
+#define MT_WMM_CWMAX_MASK GENMASK(3, 0)
+#define MT_WMM_CWMAX_SHIFT(_n) ((_n) * 4)
+
+#define MT_WMM_TXOP_BASE 0x0220
+#define MT_WMM_TXOP(_n) (MT_WMM_TXOP_BASE + (((_n) / 2) << 2))
+#define MT_WMM_TXOP_SHIFT(_n) (((_n) & 1) * 16)
+#define MT_WMM_TXOP_MASK GENMASK(15, 0)
+
+#define MT_WMM_CTRL 0x0230 /* MT76x0 */
+#define MT_FCE_DMA_ADDR 0x0230
+#define MT_FCE_DMA_LEN 0x0234
+#define MT_USB_DMA_CFG 0x0238
+
+#define MT_TSO_CTRL 0x0250
+#define MT_HEADER_TRANS_CTRL_REG 0x0260
+
+#define MT_US_CYC_CFG 0x02a4
+#define MT_US_CYC_CNT GENMASK(7, 0)
+
+#define MT_TX_RING_BASE 0x0300
+#define MT_RX_RING_BASE 0x03c0
+
+#define MT_TX_HW_QUEUE_MCU 8
+#define MT_TX_HW_QUEUE_MGMT 9
+
+#define MT_PBF_SYS_CTRL 0x0400
+#define MT_PBF_SYS_CTRL_MCU_RESET BIT(0)
+#define MT_PBF_SYS_CTRL_DMA_RESET BIT(1)
+#define MT_PBF_SYS_CTRL_MAC_RESET BIT(2)
+#define MT_PBF_SYS_CTRL_PBF_RESET BIT(3)
+#define MT_PBF_SYS_CTRL_ASY_RESET BIT(4)
+
+#define MT_PBF_CFG 0x0404
+#define MT_PBF_CFG_TX0Q_EN BIT(0)
+#define MT_PBF_CFG_TX1Q_EN BIT(1)
+#define MT_PBF_CFG_TX2Q_EN BIT(2)
+#define MT_PBF_CFG_TX3Q_EN BIT(3)
+#define MT_PBF_CFG_RX0Q_EN BIT(4)
+#define MT_PBF_CFG_RX_DROP_EN BIT(8)
+
+#define MT_PBF_TX_MAX_PCNT 0x0408
+#define MT_PBF_RX_MAX_PCNT 0x040c
+
+#define MT_BCN_OFFSET_BASE 0x041c
+#define MT_BCN_OFFSET(_n) (MT_BCN_OFFSET_BASE + ((_n) << 2))
+
+#define MT_RXQ_STA 0x0430
+#define MT_TXQ_STA 0x0434
+#define MT_RF_CSR_CFG 0x0500
+#define MT_RF_CSR_CFG_DATA GENMASK(7, 0)
+#define MT_RF_CSR_CFG_REG_ID GENMASK(14, 8)
+#define MT_RF_CSR_CFG_REG_BANK GENMASK(17, 15)
+#define MT_RF_CSR_CFG_WR BIT(30)
+#define MT_RF_CSR_CFG_KICK BIT(31)
+
+#define MT_RF_BYPASS_0 0x0504
+#define MT_RF_BYPASS_1 0x0508
+#define MT_RF_SETTING_0 0x050c
+
+#define MT_RF_MISC 0x0518
+#define MT_RF_DATA_WRITE 0x0524
+
+#define MT_RF_CTRL 0x0528
+#define MT_RF_CTRL_ADDR GENMASK(11, 0)
+#define MT_RF_CTRL_WRITE BIT(12)
+#define MT_RF_CTRL_BUSY BIT(13)
+#define MT_RF_CTRL_IDX BIT(16)
+
+#define MT_RF_DATA_READ 0x052c
+
+#define MT_COM_REG0 0x0730
+#define MT_COM_REG1 0x0734
+#define MT_COM_REG2 0x0738
+#define MT_COM_REG3 0x073C
+
+#define MT_LED_CTRL 0x0770
+#define MT_LED_CTRL_REPLAY(_n) BIT(0 + (8 * (_n)))
+#define MT_LED_CTRL_POLARITY(_n) BIT(1 + (8 * (_n)))
+#define MT_LED_CTRL_TX_BLINK_MODE(_n) BIT(2 + (8 * (_n)))
+#define MT_LED_CTRL_KICK(_n) BIT(7 + (8 * (_n)))
+
+#define MT_LED_TX_BLINK_0 0x0774
+#define MT_LED_TX_BLINK_1 0x0778
+
+#define MT_LED_S0_BASE 0x077C
+#define MT_LED_S0(_n) (MT_LED_S0_BASE + 8 * (_n))
+#define MT_LED_S1_BASE 0x0780
+#define MT_LED_S1(_n) (MT_LED_S1_BASE + 8 * (_n))
+#define MT_LED_STATUS_OFF GENMASK(31, 24)
+#define MT_LED_STATUS_ON GENMASK(23, 16)
+#define MT_LED_STATUS_DURATION GENMASK(15, 8)
+
+#define MT_FCE_PSE_CTRL 0x0800
+#define MT_FCE_PARAMETERS 0x0804
+#define MT_FCE_CSO 0x0808
+
+#define MT_FCE_L2_STUFF 0x080c
+#define MT_FCE_L2_STUFF_HT_L2_EN BIT(0)
+#define MT_FCE_L2_STUFF_QOS_L2_EN BIT(1)
+#define MT_FCE_L2_STUFF_RX_STUFF_EN BIT(2)
+#define MT_FCE_L2_STUFF_TX_STUFF_EN BIT(3)
+#define MT_FCE_L2_STUFF_WR_MPDU_LEN_EN BIT(4)
+#define MT_FCE_L2_STUFF_MVINV_BSWAP BIT(5)
+#define MT_FCE_L2_STUFF_TS_CMD_QSEL_EN GENMASK(15, 8)
+#define MT_FCE_L2_STUFF_TS_LEN_EN GENMASK(23, 16)
+#define MT_FCE_L2_STUFF_OTHER_PORT GENMASK(25, 24)
+
+#define MT_FCE_WLAN_FLOW_CONTROL1 0x0824
+
+#define MT_TX_CPU_FROM_FCE_BASE_PTR 0x09a0
+#define MT_TX_CPU_FROM_FCE_MAX_COUNT 0x09a4
+#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX 0x09a8
+#define MT_FCE_PDMA_GLOBAL_CONF 0x09c4
+#define MT_FCE_SKIP_FS 0x0a6c
+
+#define MT_PAUSE_ENABLE_CONTROL1 0x0a38
+
+#define MT_MAC_CSR0 0x1000
+
+#define MT_MAC_SYS_CTRL 0x1004
+#define MT_MAC_SYS_CTRL_RESET_CSR BIT(0)
+#define MT_MAC_SYS_CTRL_RESET_BBP BIT(1)
+#define MT_MAC_SYS_CTRL_ENABLE_TX BIT(2)
+#define MT_MAC_SYS_CTRL_ENABLE_RX BIT(3)
+
+#define MT_MAC_ADDR_DW0 0x1008
+#define MT_MAC_ADDR_DW1 0x100c
+#define MT_MAC_ADDR_DW1_U2ME_MASK GENMASK(23, 16)
+
+#define MT_MAC_BSSID_DW0 0x1010
+#define MT_MAC_BSSID_DW1 0x1014
+#define MT_MAC_BSSID_DW1_ADDR GENMASK(15, 0)
+#define MT_MAC_BSSID_DW1_MBSS_MODE GENMASK(17, 16)
+#define MT_MAC_BSSID_DW1_MBEACON_N GENMASK(20, 18)
+#define MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT BIT(21)
+#define MT_MAC_BSSID_DW1_MBSS_MODE_B2 BIT(22)
+#define MT_MAC_BSSID_DW1_MBEACON_N_B3 BIT(23)
+#define MT_MAC_BSSID_DW1_MBSS_IDX_BYTE GENMASK(26, 24)
+
+#define MT_MAX_LEN_CFG 0x1018
+#define MT_MAX_LEN_CFG_AMPDU GENMASK(13, 12)
+
+#define MT_LED_CFG 0x102c
+
+#define MT_AMPDU_MAX_LEN_20M1S 0x1030
+#define MT_AMPDU_MAX_LEN_20M2S 0x1034
+#define MT_AMPDU_MAX_LEN_40M1S 0x1038
+#define MT_AMPDU_MAX_LEN_40M2S 0x103c
+#define MT_AMPDU_MAX_LEN 0x1040
+
+#define MT_WCID_DROP_BASE 0x106c
+#define MT_WCID_DROP(_n) (MT_WCID_DROP_BASE + ((_n) >> 5) * 4)
+#define MT_WCID_DROP_MASK(_n) BIT((_n) % 32)
+
+#define MT_BCN_BYPASS_MASK 0x108c
+
+#define MT_MAC_APC_BSSID_BASE 0x1090
+#define MT_MAC_APC_BSSID_L(_n) (MT_MAC_APC_BSSID_BASE + ((_n) * 8))
+#define MT_MAC_APC_BSSID_H(_n) (MT_MAC_APC_BSSID_BASE + ((_n) * 8 + 4))
+#define MT_MAC_APC_BSSID_H_ADDR GENMASK(15, 0)
+#define MT_MAC_APC_BSSID0_H_EN BIT(16)
+
+#define MT_XIFS_TIME_CFG 0x1100
+#define MT_XIFS_TIME_CFG_CCK_SIFS GENMASK(7, 0)
+#define MT_XIFS_TIME_CFG_OFDM_SIFS GENMASK(15, 8)
+#define MT_XIFS_TIME_CFG_OFDM_XIFS GENMASK(19, 16)
+#define MT_XIFS_TIME_CFG_EIFS GENMASK(28, 20)
+#define MT_XIFS_TIME_CFG_BB_RXEND_EN BIT(29)
+
+#define MT_BKOFF_SLOT_CFG 0x1104
+#define MT_BKOFF_SLOT_CFG_SLOTTIME GENMASK(7, 0)
+#define MT_BKOFF_SLOT_CFG_CC_DELAY GENMASK(11, 8)
+
+#define MT_CH_TIME_CFG 0x110c
+#define MT_CH_TIME_CFG_TIMER_EN BIT(0)
+#define MT_CH_TIME_CFG_TX_AS_BUSY BIT(1)
+#define MT_CH_TIME_CFG_RX_AS_BUSY BIT(2)
+#define MT_CH_TIME_CFG_NAV_AS_BUSY BIT(3)
+#define MT_CH_TIME_CFG_EIFS_AS_BUSY BIT(4)
+#define MT_CH_TIME_CFG_MDRDY_CNT_EN BIT(5)
+#define MT_CH_CCA_RC_EN BIT(6)
+#define MT_CH_TIME_CFG_CH_TIMER_CLR GENMASK(9, 8)
+#define MT_CH_TIME_CFG_MDRDY_CLR GENMASK(11, 10)
+
+#define MT_PBF_LIFE_TIMER 0x1110
+
+#define MT_BEACON_TIME_CFG 0x1114
+#define MT_BEACON_TIME_CFG_INTVAL GENMASK(15, 0)
+#define MT_BEACON_TIME_CFG_TIMER_EN BIT(16)
+#define MT_BEACON_TIME_CFG_SYNC_MODE GENMASK(18, 17)
+#define MT_BEACON_TIME_CFG_TBTT_EN BIT(19)
+#define MT_BEACON_TIME_CFG_BEACON_TX BIT(20)
+#define MT_BEACON_TIME_CFG_TSF_COMP GENMASK(31, 24)
+
+#define MT_TBTT_SYNC_CFG 0x1118
+#define MT_TSF_TIMER_DW0 0x111c
+#define MT_TSF_TIMER_DW1 0x1120
+#define MT_TBTT_TIMER 0x1124
+#define MT_TBTT_TIMER_VAL GENMASK(16, 0)
+
+#define MT_INT_TIMER_CFG 0x1128
+#define MT_INT_TIMER_CFG_PRE_TBTT GENMASK(15, 0)
+#define MT_INT_TIMER_CFG_GP_TIMER GENMASK(31, 16)
+
+#define MT_INT_TIMER_EN 0x112c
+#define MT_INT_TIMER_EN_PRE_TBTT_EN BIT(0)
+#define MT_INT_TIMER_EN_GP_TIMER_EN BIT(1)
+
+#define MT_CH_IDLE 0x1130
+#define MT_CH_BUSY 0x1134
+#define MT_EXT_CH_BUSY 0x1138
+#define MT_ED_CCA_TIMER 0x1140
+
+#define MT_MAC_STATUS 0x1200
+#define MT_MAC_STATUS_TX BIT(0)
+#define MT_MAC_STATUS_RX BIT(1)
+
+#define MT_PWR_PIN_CFG 0x1204
+#define MT_AUX_CLK_CFG 0x120c
+
+#define MT_BB_PA_MODE_CFG0 0x1214
+#define MT_BB_PA_MODE_CFG1 0x1218
+#define MT_RF_PA_MODE_CFG0 0x121c
+#define MT_RF_PA_MODE_CFG1 0x1220
+
+#define MT_RF_PA_MODE_ADJ0 0x1228
+#define MT_RF_PA_MODE_ADJ1 0x122c
+
+#define MT_DACCLK_EN_DLY_CFG 0x1264
+
+#define MT_EDCA_CFG_BASE 0x1300
+#define MT_EDCA_CFG_AC(_n) (MT_EDCA_CFG_BASE + ((_n) << 2))
+#define MT_EDCA_CFG_TXOP GENMASK(7, 0)
+#define MT_EDCA_CFG_AIFSN GENMASK(11, 8)
+#define MT_EDCA_CFG_CWMIN GENMASK(15, 12)
+#define MT_EDCA_CFG_CWMAX GENMASK(19, 16)
+
+#define MT_TX_PWR_CFG_0 0x1314
+#define MT_TX_PWR_CFG_1 0x1318
+#define MT_TX_PWR_CFG_2 0x131c
+#define MT_TX_PWR_CFG_3 0x1320
+#define MT_TX_PWR_CFG_4 0x1324
+#define MT_TX_PIN_CFG 0x1328
+#define MT_TX_PIN_CFG_TXANT GENMASK(3, 0)
+#define MT_TX_PIN_CFG_RXANT GENMASK(11, 8)
+#define MT_TX_PIN_RFTR_EN BIT(16)
+#define MT_TX_PIN_TRSW_EN BIT(18)
+
+#define MT_TX_BAND_CFG 0x132c
+#define MT_TX_BAND_CFG_UPPER_40M BIT(0)
+#define MT_TX_BAND_CFG_5G BIT(1)
+#define MT_TX_BAND_CFG_2G BIT(2)
+
+#define MT_HT_FBK_TO_LEGACY 0x1384
+#define MT_TX_MPDU_ADJ_INT 0x1388
+
+#define MT_TX_PWR_CFG_7 0x13d4
+#define MT_TX_PWR_CFG_8 0x13d8
+#define MT_TX_PWR_CFG_9 0x13dc
+
+#define MT_TX_SW_CFG0 0x1330
+#define MT_TX_SW_CFG1 0x1334
+#define MT_TX_SW_CFG2 0x1338
+
+#define MT_TXOP_CTRL_CFG 0x1340
+#define MT_TXOP_TRUN_EN GENMASK(5, 0)
+#define MT_TXOP_EXT_CCA_DLY GENMASK(15, 8)
+#define MT_TXOP_ED_CCA_EN BIT(20)
+
+#define MT_TX_RTS_CFG 0x1344
+#define MT_TX_RTS_CFG_RETRY_LIMIT GENMASK(7, 0)
+#define MT_TX_RTS_CFG_THRESH GENMASK(23, 8)
+#define MT_TX_RTS_FALLBACK BIT(24)
+
+#define MT_TX_TIMEOUT_CFG 0x1348
+#define MT_TX_TIMEOUT_CFG_ACKTO GENMASK(15, 8)
+
+#define MT_TX_RETRY_CFG 0x134c
+#define MT_TX_LINK_CFG 0x1350
+#define MT_TX_CFACK_EN BIT(12)
+#define MT_VHT_HT_FBK_CFG0 0x1354
+#define MT_VHT_HT_FBK_CFG1 0x1358
+#define MT_LG_FBK_CFG0 0x135c
+#define MT_LG_FBK_CFG1 0x1360
+
+#define MT_PROT_CFG_RATE GENMASK(15, 0)
+#define MT_PROT_CFG_CTRL GENMASK(17, 16)
+#define MT_PROT_CFG_NAV GENMASK(19, 18)
+#define MT_PROT_CFG_TXOP_ALLOW GENMASK(25, 20)
+#define MT_PROT_CFG_RTS_THRESH BIT(26)
+
+#define MT_CCK_PROT_CFG 0x1364
+#define MT_OFDM_PROT_CFG 0x1368
+#define MT_MM20_PROT_CFG 0x136c
+#define MT_MM40_PROT_CFG 0x1370
+#define MT_GF20_PROT_CFG 0x1374
+#define MT_GF40_PROT_CFG 0x1378
+
+#define MT_PROT_RATE GENMASK(15, 0)
+#define MT_PROT_CTRL_RTS_CTS BIT(16)
+#define MT_PROT_CTRL_CTS2SELF BIT(17)
+#define MT_PROT_NAV_SHORT BIT(18)
+#define MT_PROT_NAV_LONG BIT(19)
+#define MT_PROT_TXOP_ALLOW_CCK BIT(20)
+#define MT_PROT_TXOP_ALLOW_OFDM BIT(21)
+#define MT_PROT_TXOP_ALLOW_MM20 BIT(22)
+#define MT_PROT_TXOP_ALLOW_MM40 BIT(23)
+#define MT_PROT_TXOP_ALLOW_GF20 BIT(24)
+#define MT_PROT_TXOP_ALLOW_GF40 BIT(25)
+#define MT_PROT_RTS_THR_EN BIT(26)
+#define MT_PROT_RATE_CCK_11 0x0003
+#define MT_PROT_RATE_OFDM_6 0x2000
+#define MT_PROT_RATE_OFDM_24 0x2004
+#define MT_PROT_RATE_DUP_OFDM_24 0x2084
+#define MT_PROT_RATE_SGI_OFDM_24 0x2104
+#define MT_PROT_TXOP_ALLOW_ALL GENMASK(25, 20)
+#define MT_PROT_TXOP_ALLOW_BW20 (MT_PROT_TXOP_ALLOW_ALL & \
+ ~MT_PROT_TXOP_ALLOW_MM40 & \
+ ~MT_PROT_TXOP_ALLOW_GF40)
+
+#define MT_EXP_ACK_TIME 0x1380
+
+#define MT_TX_PWR_CFG_0_EXT 0x1390
+#define MT_TX_PWR_CFG_1_EXT 0x1394
+
+#define MT_TX_FBK_LIMIT 0x1398
+#define MT_TX_FBK_LIMIT_MPDU_FBK GENMASK(7, 0)
+#define MT_TX_FBK_LIMIT_AMPDU_FBK GENMASK(15, 8)
+#define MT_TX_FBK_LIMIT_MPDU_UP_CLEAR BIT(16)
+#define MT_TX_FBK_LIMIT_AMPDU_UP_CLEAR BIT(17)
+#define MT_TX_FBK_LIMIT_RATE_LUT BIT(18)
+
+#define MT_TX0_RF_GAIN_CORR 0x13a0
+#define MT_TX1_RF_GAIN_CORR 0x13a4
+#define MT_TX0_RF_GAIN_ATTEN 0x13a8
+#define MT_TX0_RF_GAIN_ATTEN 0x13a8 /* MT76x0 */
+
+#define MT_TX_ALC_CFG_0 0x13b0
+#define MT_TX_ALC_CFG_0_CH_INIT_0 GENMASK(5, 0)
+#define MT_TX_ALC_CFG_0_CH_INIT_1 GENMASK(13, 8)
+#define MT_TX_ALC_CFG_0_LIMIT_0 GENMASK(21, 16)
+#define MT_TX_ALC_CFG_0_LIMIT_1 GENMASK(29, 24)
+
+#define MT_TX_ALC_CFG_1 0x13b4
+#define MT_TX_ALC_CFG_1_TEMP_COMP GENMASK(5, 0)
+
+#define MT_TX_ALC_CFG_2 0x13a8
+#define MT_TX_ALC_CFG_2_TEMP_COMP GENMASK(5, 0)
+
+#define MT_TX_ALC_CFG_3 0x13ac
+#define MT_TX_ALC_CFG_4 0x13c0
+#define MT_TX_ALC_CFG_4_LOWGAIN_CH_EN BIT(31)
+#define MT_TX0_BB_GAIN_ATTEN 0x13c0 /* MT76x0 */
+
+#define MT_TX_ALC_VGA3 0x13c8
+
+#define MT_TX_PROT_CFG6 0x13e0
+#define MT_TX_PROT_CFG7 0x13e4
+#define MT_TX_PROT_CFG8 0x13e8
+
+#define MT_PIFS_TX_CFG 0x13ec
+
+#define MT_RX_FILTR_CFG 0x1400
+
+#define MT_RX_FILTR_CFG_CRC_ERR BIT(0)
+#define MT_RX_FILTR_CFG_PHY_ERR BIT(1)
+#define MT_RX_FILTR_CFG_PROMISC BIT(2)
+#define MT_RX_FILTR_CFG_OTHER_BSS BIT(3)
+#define MT_RX_FILTR_CFG_VER_ERR BIT(4)
+#define MT_RX_FILTR_CFG_MCAST BIT(5)
+#define MT_RX_FILTR_CFG_BCAST BIT(6)
+#define MT_RX_FILTR_CFG_DUP BIT(7)
+#define MT_RX_FILTR_CFG_CFACK BIT(8)
+#define MT_RX_FILTR_CFG_CFEND BIT(9)
+#define MT_RX_FILTR_CFG_ACK BIT(10)
+#define MT_RX_FILTR_CFG_CTS BIT(11)
+#define MT_RX_FILTR_CFG_RTS BIT(12)
+#define MT_RX_FILTR_CFG_PSPOLL BIT(13)
+#define MT_RX_FILTR_CFG_BA BIT(14)
+#define MT_RX_FILTR_CFG_BAR BIT(15)
+#define MT_RX_FILTR_CFG_CTRL_RSV BIT(16)
+
+#define MT_AUTO_RSP_CFG 0x1404
+#define MT_AUTO_RSP_EN BIT(0)
+#define MT_AUTO_RSP_PREAMB_SHORT BIT(4)
+#define MT_LEGACY_BASIC_RATE 0x1408
+#define MT_HT_BASIC_RATE 0x140c
+
+#define MT_HT_CTRL_CFG 0x1410
+#define MT_RX_PARSER_CFG 0x1418
+#define MT_RX_PARSER_RX_SET_NAV_ALL BIT(0)
+
+#define MT_EXT_CCA_CFG 0x141c
+#define MT_EXT_CCA_CFG_CCA0 GENMASK(1, 0)
+#define MT_EXT_CCA_CFG_CCA1 GENMASK(3, 2)
+#define MT_EXT_CCA_CFG_CCA2 GENMASK(5, 4)
+#define MT_EXT_CCA_CFG_CCA3 GENMASK(7, 6)
+#define MT_EXT_CCA_CFG_CCA_MASK GENMASK(11, 8)
+#define MT_EXT_CCA_CFG_ED_CCA_MASK GENMASK(15, 12)
+
+#define MT_TX_SW_CFG3 0x1478
+
+#define MT_PN_PAD_MODE 0x150c
+
+#define MT_TXOP_HLDR_ET 0x1608
+#define MT_TXOP_HLDR_TX40M_BLK_EN BIT(1)
+
+#define MT_PROT_AUTO_TX_CFG 0x1648
+#define MT_PROT_AUTO_TX_CFG_PROT_PADJ GENMASK(11, 8)
+#define MT_PROT_AUTO_TX_CFG_AUTO_PADJ GENMASK(27, 24)
+
+#define MT_RX_STAT_0 0x1700
+#define MT_RX_STAT_0_CRC_ERRORS GENMASK(15, 0)
+#define MT_RX_STAT_0_PHY_ERRORS GENMASK(31, 16)
+
+#define MT_RX_STAT_1 0x1704
+#define MT_RX_STAT_1_CCA_ERRORS GENMASK(15, 0)
+#define MT_RX_STAT_1_PLCP_ERRORS GENMASK(31, 16)
+
+#define MT_RX_STAT_2 0x1708
+#define MT_RX_STAT_2_DUP_ERRORS GENMASK(15, 0)
+#define MT_RX_STAT_2_OVERFLOW_ERRORS GENMASK(31, 16)
+
+#define MT_TX_STA_0 0x170c
+#define MT_TX_STA_1 0x1710
+#define MT_TX_STA_2 0x1714
+
+#define MT_TX_STAT_FIFO 0x1718
+#define MT_TX_STAT_FIFO_VALID BIT(0)
+#define MT_TX_STAT_FIFO_SUCCESS BIT(5)
+#define MT_TX_STAT_FIFO_AGGR BIT(6)
+#define MT_TX_STAT_FIFO_ACKREQ BIT(7)
+#define MT_TX_STAT_FIFO_WCID GENMASK(15, 8)
+#define MT_TX_STAT_FIFO_RATE GENMASK(31, 16)
+
+#define MT_TX_AGG_STAT 0x171c
+
+#define MT_TX_AGG_CNT_BASE0 0x1720
+#define MT_MPDU_DENSITY_CNT 0x1740
+#define MT_TX_AGG_CNT_BASE1 0x174c
+
+#define MT_TX_AGG_CNT(_id) ((_id) < 8 ? \
+ MT_TX_AGG_CNT_BASE0 + ((_id) << 2) : \
+ MT_TX_AGG_CNT_BASE1 + (((_id) - 8) << 2))
+
+#define MT_TX_STAT_FIFO_EXT 0x1798
+#define MT_TX_STAT_FIFO_EXT_RETRY GENMASK(7, 0)
+#define MT_TX_STAT_FIFO_EXT_PKTID GENMASK(15, 8)
+
+#define MT_WCID_TX_RATE_BASE 0x1c00
+#define MT_WCID_TX_RATE(_i) (MT_WCID_TX_RATE_BASE + ((_i) << 3))
+
+#define MT_BBP_CORE_BASE 0x2000
+#define MT_BBP_IBI_BASE 0x2100
+#define MT_BBP_AGC_BASE 0x2300
+#define MT_BBP_TXC_BASE 0x2400
+#define MT_BBP_RXC_BASE 0x2500
+#define MT_BBP_TXO_BASE 0x2600
+#define MT_BBP_TXBE_BASE 0x2700
+#define MT_BBP_RXFE_BASE 0x2800
+#define MT_BBP_RXO_BASE 0x2900
+#define MT_BBP_DFS_BASE 0x2a00
+#define MT_BBP_TR_BASE 0x2b00
+#define MT_BBP_CAL_BASE 0x2c00
+#define MT_BBP_DSC_BASE 0x2e00
+#define MT_BBP_PFMU_BASE 0x2f00
+
+#define MT_BBP(_type, _n) (MT_BBP_##_type##_BASE + ((_n) << 2))
+
+#define MT_BBP_CORE_R1_BW GENMASK(4, 3)
+
+#define MT_BBP_AGC_R0_CTRL_CHAN GENMASK(9, 8)
+#define MT_BBP_AGC_R0_BW GENMASK(14, 12)
+
+/* AGC, R4/R5 */
+#define MT_BBP_AGC_LNA_HIGH_GAIN GENMASK(21, 16)
+#define MT_BBP_AGC_LNA_MID_GAIN GENMASK(13, 8)
+#define MT_BBP_AGC_LNA_LOW_GAIN GENMASK(5, 0)
+
+/* AGC, R6/R7 */
+#define MT_BBP_AGC_LNA_ULOW_GAIN GENMASK(5, 0)
+
+/* AGC, R8/R9 */
+#define MT_BBP_AGC_LNA_GAIN_MODE GENMASK(7, 6)
+#define MT_BBP_AGC_GAIN GENMASK(14, 8)
+
+#define MT_BBP_AGC20_RSSI0 GENMASK(7, 0)
+#define MT_BBP_AGC20_RSSI1 GENMASK(15, 8)
+
+#define MT_BBP_TXBE_R0_CTRL_CHAN GENMASK(1, 0)
+
+#define MT_WCID_ADDR_BASE 0x1800
+#define MT_WCID_ADDR(_n) (MT_WCID_ADDR_BASE + (_n) * 8)
+
+#define MT_SRAM_BASE 0x4000
+
+#define MT_WCID_KEY_BASE 0x8000
+#define MT_WCID_KEY(_n) (MT_WCID_KEY_BASE + (_n) * 32)
+
+#define MT_WCID_IV_BASE 0xa000
+#define MT_WCID_IV(_n) (MT_WCID_IV_BASE + (_n) * 8)
+
+#define MT_WCID_ATTR_BASE 0xa800
+#define MT_WCID_ATTR(_n) (MT_WCID_ATTR_BASE + (_n) * 4)
+
+#define MT_WCID_ATTR_PAIRWISE BIT(0)
+#define MT_WCID_ATTR_PKEY_MODE GENMASK(3, 1)
+#define MT_WCID_ATTR_BSS_IDX GENMASK(6, 4)
+#define MT_WCID_ATTR_RXWI_UDF GENMASK(9, 7)
+#define MT_WCID_ATTR_PKEY_MODE_EXT BIT(10)
+#define MT_WCID_ATTR_BSS_IDX_EXT BIT(11)
+#define MT_WCID_ATTR_WAPI_MCBC BIT(15)
+#define MT_WCID_ATTR_WAPI_KEYID GENMASK(31, 24)
+
+#define MT_SKEY_BASE_0 0xac00
+#define MT_SKEY_BASE_1 0xb400
+#define MT_SKEY_0(_bss, _idx) (MT_SKEY_BASE_0 + (4 * (_bss) + (_idx)) * 32)
+#define MT_SKEY_1(_bss, _idx) (MT_SKEY_BASE_1 + (4 * ((_bss) & 7) + (_idx)) * 32)
+#define MT_SKEY(_bss, _idx) (((_bss) & 8) ? MT_SKEY_1(_bss, _idx) : MT_SKEY_0(_bss, _idx))
+
+#define MT_SKEY_MODE_BASE_0 0xb000
+#define MT_SKEY_MODE_BASE_1 0xb3f0
+#define MT_SKEY_MODE_0(_bss) (MT_SKEY_MODE_BASE_0 + (((_bss) / 2) << 2))
+#define MT_SKEY_MODE_1(_bss) (MT_SKEY_MODE_BASE_1 + ((((_bss) & 7) / 2) << 2))
+#define MT_SKEY_MODE(_bss) (((_bss) & 8) ? MT_SKEY_MODE_1(_bss) : MT_SKEY_MODE_0(_bss))
+#define MT_SKEY_MODE_MASK GENMASK(3, 0)
+#define MT_SKEY_MODE_SHIFT(_bss, _idx) (4 * ((_idx) + 4 * ((_bss) & 1)))
+
+#define MT_BEACON_BASE 0xc000
+
+#define MT_TEMP_SENSOR 0x1d000
+#define MT_TEMP_SENSOR_VAL GENMASK(6, 0)
+
+struct mt76_wcid_addr {
+ u8 macaddr[6];
+ __le16 ba_mask;
+} __packed __aligned(4);
+
+struct mt76_wcid_key {
+ u8 key[16];
+ u8 tx_mic[8];
+ u8 rx_mic[8];
+} __packed __aligned(4);
+
+enum mt76x02_cipher_type {
+ MT_CIPHER_NONE,
+ MT_CIPHER_WEP40,
+ MT_CIPHER_WEP104,
+ MT_CIPHER_TKIP,
+ MT_CIPHER_AES_CCMP,
+ MT_CIPHER_CKIP40,
+ MT_CIPHER_CKIP104,
+ MT_CIPHER_CKIP128,
+ MT_CIPHER_WAPI,
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_trace.c b/drivers/net/wireless/mediatek/mt76/mt76x02_trace.c
new file mode 100644
index 000000000..a812c3a1e
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_trace.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "mt76x02_trace.h"
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_trace.h b/drivers/net/wireless/mediatek/mt76/mt76x02_trace.h
new file mode 100644
index 000000000..6a98092e9
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_trace.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#if !defined(__MT76x02_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MT76x02_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "mt76x02.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mt76x02
+
+#define MAXNAME 32
+#define DEV_ENTRY __array(char, wiphy_name, 32)
+#define DEV_ASSIGN strlcpy(__entry->wiphy_name, \
+ wiphy_name(mt76_hw(dev)->wiphy), MAXNAME)
+#define DEV_PR_FMT "%s"
+#define DEV_PR_ARG __entry->wiphy_name
+
+#define TXID_ENTRY __field(u8, wcid) __field(u8, pktid)
+#define TXID_PR_FMT " [%d:%d]"
+#define TXID_PR_ARG __entry->wcid, __entry->pktid
+
+DECLARE_EVENT_CLASS(dev_evt,
+ TP_PROTO(struct mt76x02_dev *dev),
+ TP_ARGS(dev),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ ),
+ TP_printk(DEV_PR_FMT, DEV_PR_ARG)
+);
+
+DEFINE_EVENT(dev_evt, mac_txstat_poll,
+ TP_PROTO(struct mt76x02_dev *dev),
+ TP_ARGS(dev)
+);
+
+TRACE_EVENT(mac_txstat_fetch,
+ TP_PROTO(struct mt76x02_dev *dev,
+ struct mt76x02_tx_status *stat),
+
+ TP_ARGS(dev, stat),
+
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ TXID_ENTRY
+ __field(bool, success)
+ __field(bool, aggr)
+ __field(bool, ack_req)
+ __field(u16, rate)
+ __field(u8, retry)
+ ),
+
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->success = stat->success;
+ __entry->aggr = stat->aggr;
+ __entry->ack_req = stat->ack_req;
+ __entry->wcid = stat->wcid;
+ __entry->pktid = stat->pktid;
+ __entry->rate = stat->rate;
+ __entry->retry = stat->retry;
+ ),
+
+ TP_printk(
+ DEV_PR_FMT TXID_PR_FMT
+ " success:%d aggr:%d ack_req:%d"
+ " rate:%04x retry:%d",
+ DEV_PR_ARG, TXID_PR_ARG,
+ __entry->success, __entry->aggr, __entry->ack_req,
+ __entry->rate, __entry->retry
+ )
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE mt76x02_trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
new file mode 100644
index 000000000..96fdf423a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include <linux/kernel.h>
+
+#include "mt76x02.h"
+
+void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct mt76x02_dev *dev = hw->priv;
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+
+ if (control->sta) {
+ struct mt76x02_sta *msta;
+
+ msta = (struct mt76x02_sta *)control->sta->drv_priv;
+ wcid = &msta->wcid;
+ } else if (vif) {
+ struct mt76x02_vif *mvif;
+
+ mvif = (struct mt76x02_vif *)vif->drv_priv;
+ wcid = &mvif->group_wcid;
+ }
+
+ mt76_tx(&dev->mphy, control->sta, wcid, skb);
+}
+EXPORT_SYMBOL_GPL(mt76x02_tx);
+
+void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ struct sk_buff *skb)
+{
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+ void *rxwi = skb->data;
+
+ if (q == MT_RXQ_MCU) {
+ mt76_mcu_rx_event(&dev->mt76, skb);
+ return;
+ }
+
+ skb_pull(skb, sizeof(struct mt76x02_rxwi));
+ if (mt76x02_mac_process_rx(dev, skb, rxwi)) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ mt76_rx(mdev, q, skb);
+}
+EXPORT_SYMBOL_GPL(mt76x02_queue_rx_skb);
+
+s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev,
+ const struct ieee80211_tx_rate *rate)
+{
+ s8 max_txpwr;
+
+ if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+ u8 mcs = ieee80211_rate_get_vht_mcs(rate);
+
+ if (mcs == 8 || mcs == 9) {
+ max_txpwr = dev->mt76.rate_power.vht[8];
+ } else {
+ u8 nss, idx;
+
+ nss = ieee80211_rate_get_vht_nss(rate);
+ idx = ((nss - 1) << 3) + mcs;
+ max_txpwr = dev->mt76.rate_power.ht[idx & 0xf];
+ }
+ } else if (rate->flags & IEEE80211_TX_RC_MCS) {
+ max_txpwr = dev->mt76.rate_power.ht[rate->idx & 0xf];
+ } else {
+ enum nl80211_band band = dev->mphy.chandef.chan->band;
+
+ if (band == NL80211_BAND_2GHZ) {
+ const struct ieee80211_rate *r;
+ struct wiphy *wiphy = dev->mt76.hw->wiphy;
+ struct mt76_rate_power *rp = &dev->mt76.rate_power;
+
+ r = &wiphy->bands[band]->bitrates[rate->idx];
+ if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE)
+ max_txpwr = rp->cck[r->hw_value & 0x3];
+ else
+ max_txpwr = rp->ofdm[r->hw_value & 0x7];
+ } else {
+ max_txpwr = dev->mt76.rate_power.ofdm[rate->idx & 0x7];
+ }
+ }
+
+ return max_txpwr;
+}
+
+s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr, s8 max_txpwr_adj)
+{
+ txpwr = min_t(s8, txpwr, dev->txpower_conf);
+ txpwr -= (dev->target_power + dev->target_power_delta[0]);
+ txpwr = min_t(s8, txpwr, max_txpwr_adj);
+
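+	/* With TPC disabled report no adjustment; otherwise map the signed
+	 * delta into the encoding expected by the hardware.
+	 */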
+ if (!dev->enable_tpc)
+ return 0;
+ else if (txpwr >= 0)
+ return min_t(s8, txpwr, 7);
+ else
+ return (txpwr < -16) ? 8 : (txpwr + 32) / 2;
+}
+
+void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr)
+{
+ s8 txpwr_adj;
+
+ txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, txpwr,
+ dev->mt76.rate_power.ofdm[4]);
+ mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
+ MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj);
+ mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
+ MT_PROT_AUTO_TX_CFG_AUTO_PADJ, txpwr_adj);
+}
+EXPORT_SYMBOL_GPL(mt76x02_tx_set_txpwr_auto);
+
+bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update)
+{
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+ struct mt76x02_tx_status stat;
+
+ if (!mt76x02_mac_load_tx_status(dev, &stat))
+ return false;
+
+ mt76x02_send_tx_status(dev, &stat, update);
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(mt76x02_tx_status_data);
+
+int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info)
+{
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
+ struct mt76x02_txwi *txwi = txwi_ptr;
+ bool ampdu = IEEE80211_SKB_CB(tx_info->skb)->flags & IEEE80211_TX_CTL_AMPDU;
+ int hdrlen, len, pid, qsel = MT_QSEL_EDCA;
+
+ if (qid == MT_TXQ_PSD && wcid && wcid->idx < 128)
+ mt76x02_mac_wcid_set_drop(dev, wcid->idx, false);
+
+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ len = tx_info->skb->len - (hdrlen & 2);
+ mt76x02_mac_write_txwi(dev, txwi, tx_info->skb, wcid, sta, len);
+
+ pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
+
+ /* encode packet rate for no-skb packet id to fix up status reporting */
+ if (pid == MT_PACKET_ID_NO_SKB)
+ pid = MT_PACKET_ID_HAS_RATE |
+ (le16_to_cpu(txwi->rate) & MT_RXWI_RATE_INDEX) |
+ FIELD_PREP(MT_PKTID_AC,
+ skb_get_queue_mapping(tx_info->skb));
+
+ txwi->pktid = pid;
+
+ if (mt76_is_skb_pktid(pid) && ampdu)
+ qsel = MT_QSEL_MGMT;
+
+ tx_info->info = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
+ MT_TXD_INFO_80211;
+
+ if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
+ tx_info->info |= MT_TXD_INFO_WIV;
+
+ if (sta) {
+ struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv;
+
+ ewma_pktlen_add(&msta->pktlen, tx_info->skb->len);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_tx_prepare_skb);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h b/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h
new file mode 100644
index 000000000..b5be884b3
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#ifndef __MT76x02_USB_H
+#define __MT76x02_USB_H
+
+#include "mt76x02.h"
+
+int mt76x02u_mac_start(struct mt76x02_dev *dev);
+void mt76x02u_init_mcu(struct mt76_dev *dev);
+void mt76x02u_mcu_fw_reset(struct mt76x02_dev *dev);
+int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data,
+ int data_len, u32 max_payload, u32 offset);
+
+int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags);
+int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info);
+void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
+void mt76x02u_init_beacon_config(struct mt76x02_dev *dev);
+void mt76x02u_exit_beacon_config(struct mt76x02_dev *dev);
+#endif /* __MT76x02_USB_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
new file mode 100644
index 000000000..2c2f56112
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include "mt76x02_usb.h"
+
+static void mt76x02u_remove_dma_hdr(struct sk_buff *skb)
+{
+ int hdr_len;
+
+ skb_pull(skb, sizeof(struct mt76x02_txwi) + MT_DMA_HDR_LEN);
+ hdr_len = ieee80211_get_hdrlen_from_skb(skb);
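+	/* Headers that are not a multiple of 4 bytes were padded on TX;
+	 * strip the 2-byte alignment pad again.
+	 */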
+ if (hdr_len % 4)
+ mt76x02_remove_hdr_pad(skb, 2);
+}
+
+void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
+{
+ mt76x02u_remove_dma_hdr(e->skb);
+ mt76_tx_complete_skb(mdev, e->wcid, e->skb);
+}
+EXPORT_SYMBOL_GPL(mt76x02u_tx_complete_skb);
+
+int mt76x02u_mac_start(struct mt76x02_dev *dev)
+{
+ mt76x02_mac_reset_counters(dev);
+
+ mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
+ if (!mt76x02_wait_for_wpdma(&dev->mt76, 200000))
+ return -ETIMEDOUT;
+
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
+
+ mt76_wr(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_TX |
+ MT_MAC_SYS_CTRL_ENABLE_RX);
+
+ if (!mt76x02_wait_for_wpdma(&dev->mt76, 50))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02u_mac_start);
+
+int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
+{
+ u32 info, pad;
+
+ /* Buffer layout:
+ * | 4B | xfer len | pad | 4B |
+ * | TXINFO | pkt/cmd | zero pad to 4B | zero |
+ *
+ * length field of TXINFO should be set to 'xfer len'.
+ */
+ info = FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
+ FIELD_PREP(MT_TXD_INFO_DPORT, port) | flags;
+ put_unaligned_le32(info, skb_push(skb, sizeof(info)));
+
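+	/* Pad to a 4-byte boundary and leave room for the trailing 4 zero
+	 * bytes described in the layout above.
+	 */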
+ pad = round_up(skb->len, 4) + 4 - skb->len;
+ return mt76_skb_adjust_pad(skb, pad);
+}
+
+int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info)
+{
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+ int pid, len = tx_info->skb->len, ep = q2ep(mdev->q_tx[qid]->hw_idx);
+ struct mt76x02_txwi *txwi;
+ bool ampdu = IEEE80211_SKB_CB(tx_info->skb)->flags & IEEE80211_TX_CTL_AMPDU;
+ enum mt76_qsel qsel;
+ u32 flags;
+
+ mt76_insert_hdr_pad(tx_info->skb);
+
+ txwi = (struct mt76x02_txwi *)(tx_info->skb->data - sizeof(*txwi));
+ mt76x02_mac_write_txwi(dev, txwi, tx_info->skb, wcid, sta, len);
+ skb_push(tx_info->skb, sizeof(*txwi));
+
+ pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
+
+ /* encode packet rate for no-skb packet id to fix up status reporting */
+ if (pid == MT_PACKET_ID_NO_SKB)
+ pid = MT_PACKET_ID_HAS_RATE |
+ (le16_to_cpu(txwi->rate) & MT_PKTID_RATE) |
+ FIELD_PREP(MT_PKTID_AC,
+ skb_get_queue_mapping(tx_info->skb));
+
+ txwi->pktid = pid;
+
+ if ((mt76_is_skb_pktid(pid) && ampdu) || ep == MT_EP_OUT_HCCA)
+ qsel = MT_QSEL_MGMT;
+ else
+ qsel = MT_QSEL_EDCA;
+
+ flags = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
+ MT_TXD_INFO_80211;
+ if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
+ flags |= MT_TXD_INFO_WIV;
+
+ if (sta) {
+ struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv;
+
+ ewma_pktlen_add(&msta->pktlen, tx_info->skb->len);
+ }
+
+ return mt76x02u_skb_dma_info(tx_info->skb, WLAN_PORT, flags);
+}
+EXPORT_SYMBOL_GPL(mt76x02u_tx_prepare_skb);
+
+/* Trigger pre-TBTT event 8 ms before TBTT */
+#define PRE_TBTT_USEC 8000
+
+/* Beacon SRAM is limited to 8 kB. PS-buffered frames (which can be up to
+ * 1500 bytes) must also be sent via beacon memory, which limits the number
+ * of slots to 5. TODO: dynamically calculate offsets in beacon SRAM.
+ */
+#define N_BCN_SLOTS 5
+
+static void mt76x02u_start_pre_tbtt_timer(struct mt76x02_dev *dev)
+{
+ u64 time;
+ u32 tbtt;
+
+ /* Get remaining TBTT in usec */
+ tbtt = mt76_get_field(dev, MT_TBTT_TIMER, MT_TBTT_TIMER_VAL);
+ tbtt *= 32;
+
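+	/* If TBTT is already within the pre-TBTT lead time, run the work now */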
+ if (tbtt <= PRE_TBTT_USEC) {
+ queue_work(system_highpri_wq, &dev->pre_tbtt_work);
+ return;
+ }
+
+ time = (tbtt - PRE_TBTT_USEC) * 1000ull;
+ hrtimer_start(&dev->pre_tbtt_timer, time, HRTIMER_MODE_REL);
+}
+
+static void mt76x02u_restart_pre_tbtt_timer(struct mt76x02_dev *dev)
+{
+ u32 tbtt, dw0, dw1;
+ u64 tsf, time;
+
+ /* Get remaining TBTT in usec */
+ tbtt = mt76_get_field(dev, MT_TBTT_TIMER, MT_TBTT_TIMER_VAL);
+ tbtt *= 32;
+
+ dw0 = mt76_rr(dev, MT_TSF_TIMER_DW0);
+ dw1 = mt76_rr(dev, MT_TSF_TIMER_DW1);
+ tsf = (u64)dw0 << 32 | dw1;
+ dev_dbg(dev->mt76.dev, "TSF: %llu us TBTT %u us\n", tsf, tbtt);
+
+ /* Convert beacon interval in TU (1024 usec) to nsec */
+ time = ((1000000000ull * dev->mt76.beacon_int) >> 10);
+
+ /* Adjust time to trigger hrtimer 8ms before TBTT */
+ if (tbtt < PRE_TBTT_USEC)
+ time -= (PRE_TBTT_USEC - tbtt) * 1000ull;
+ else
+ time += (tbtt - PRE_TBTT_USEC) * 1000ull;
+
+ hrtimer_start(&dev->pre_tbtt_timer, time, HRTIMER_MODE_REL);
+}
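To illustrate the adjustment branch above, with example remaining-TBTT values:

/* 3000 us left until TBTT (less than the 8000 us lead time):
 *   time -= (8000 - 3000) * 1000 == 5,000,000 ns
 * 12000 us left until TBTT:
 *   time += (12000 - 8000) * 1000 == 4,000,000 ns
 * so the work item keeps firing roughly 8 ms before each TBTT.
 */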
+
+static void mt76x02u_stop_pre_tbtt_timer(struct mt76x02_dev *dev)
+{
+ do {
+ hrtimer_cancel(&dev->pre_tbtt_timer);
+ cancel_work_sync(&dev->pre_tbtt_work);
+ /* Timer can be rearmed by work. */
+ } while (hrtimer_active(&dev->pre_tbtt_timer));
+}
+
+static void mt76x02u_pre_tbtt_work(struct work_struct *work)
+{
+ struct mt76x02_dev *dev =
+ container_of(work, struct mt76x02_dev, pre_tbtt_work);
+ struct beacon_bc_data data = {};
+ struct sk_buff *skb;
+ int nbeacons;
+
+ if (!dev->mt76.beacon_mask)
+ return;
+
+ if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ return;
+
+ mt76x02_resync_beacon_timer(dev);
+
+ /* Prevent corrupt transmissions during update */
+ mt76_set(dev, MT_BCN_BYPASS_MASK, 0xffff);
+ dev->beacon_data_count = 0;
+
+ ieee80211_iterate_active_interfaces(mt76_hw(dev),
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt76x02_update_beacon_iter, dev);
+
+ mt76_csa_check(&dev->mt76);
+
+ if (dev->mt76.csa_complete) {
+ mt76_csa_finish(&dev->mt76);
+ goto out;
+ }
+
+ nbeacons = hweight8(dev->mt76.beacon_mask);
+ mt76x02_enqueue_buffered_bc(dev, &data, N_BCN_SLOTS - nbeacons);
+
+ while ((skb = __skb_dequeue(&data.q)) != NULL)
+ mt76x02_mac_set_beacon(dev, skb);
+
+out:
+ mt76_wr(dev, MT_BCN_BYPASS_MASK,
+ 0xff00 | ~(0xff00 >> dev->beacon_data_count));
+
+ mt76x02u_restart_pre_tbtt_timer(dev);
+}
+
+static enum hrtimer_restart mt76x02u_pre_tbtt_interrupt(struct hrtimer *timer)
+{
+ struct mt76x02_dev *dev =
+ container_of(timer, struct mt76x02_dev, pre_tbtt_timer);
+
+ queue_work(system_highpri_wq, &dev->pre_tbtt_work);
+
+ return HRTIMER_NORESTART;
+}
+
+static void mt76x02u_pre_tbtt_enable(struct mt76x02_dev *dev, bool en)
+{
+ if (en && dev->mt76.beacon_mask &&
+ !hrtimer_active(&dev->pre_tbtt_timer))
+ mt76x02u_start_pre_tbtt_timer(dev);
+ if (!en)
+ mt76x02u_stop_pre_tbtt_timer(dev);
+}
+
+static void mt76x02u_beacon_enable(struct mt76x02_dev *dev, bool en)
+{
+ if (WARN_ON_ONCE(!dev->mt76.beacon_int))
+ return;
+
+ if (en)
+ mt76x02u_start_pre_tbtt_timer(dev);
+}
+
+void mt76x02u_init_beacon_config(struct mt76x02_dev *dev)
+{
+ static const struct mt76x02_beacon_ops beacon_ops = {
+ .nslots = N_BCN_SLOTS,
+ .slot_size = (8192 / N_BCN_SLOTS) & ~63,
+ .pre_tbtt_enable = mt76x02u_pre_tbtt_enable,
+ .beacon_enable = mt76x02u_beacon_enable,
+ };
+ dev->beacon_ops = &beacon_ops;
+
+ hrtimer_init(&dev->pre_tbtt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ dev->pre_tbtt_timer.function = mt76x02u_pre_tbtt_interrupt;
+ INIT_WORK(&dev->pre_tbtt_work, mt76x02u_pre_tbtt_work);
+
+ mt76x02_init_beacon_config(dev);
+}
+EXPORT_SYMBOL_GPL(mt76x02u_init_beacon_config);
+
+void mt76x02u_exit_beacon_config(struct mt76x02_dev *dev)
+{
+ if (!test_bit(MT76_REMOVED, &dev->mphy.state))
+ mt76_clear(dev, MT_BEACON_TIME_CFG,
+ MT_BEACON_TIME_CFG_TIMER_EN |
+ MT_BEACON_TIME_CFG_SYNC_MODE |
+ MT_BEACON_TIME_CFG_TBTT_EN |
+ MT_BEACON_TIME_CFG_BEACON_TX);
+
+ mt76x02u_stop_pre_tbtt_timer(dev);
+}
+EXPORT_SYMBOL_GPL(mt76x02u_exit_beacon_config);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
new file mode 100644
index 000000000..2dad61fd4
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include <linux/module.h>
+#include <linux/firmware.h>
+
+#include "mt76x02.h"
+#include "mt76x02_mcu.h"
+#include "mt76x02_usb.h"
+
+#define MT_CMD_HDR_LEN 4
+
+#define MT_FCE_DMA_ADDR 0x0230
+#define MT_FCE_DMA_LEN 0x0234
+
+#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX 0x09a8
+
+static void
+mt76x02u_multiple_mcu_reads(struct mt76_dev *dev, u8 *data, int len)
+{
+ struct mt76_usb *usb = &dev->usb;
+ u32 reg, val;
+ int i;
+
+ if (usb->mcu.burst) {
+ WARN_ON_ONCE(len / 4 != usb->mcu.rp_len);
+
+ reg = usb->mcu.rp[0].reg - usb->mcu.base;
+ for (i = 0; i < usb->mcu.rp_len; i++) {
+ val = get_unaligned_le32(data + 4 * i);
+ usb->mcu.rp[i].reg = reg++;
+ usb->mcu.rp[i].value = val;
+ }
+ } else {
+ WARN_ON_ONCE(len / 8 != usb->mcu.rp_len);
+
+ for (i = 0; i < usb->mcu.rp_len; i++) {
+ reg = get_unaligned_le32(data + 8 * i) -
+ usb->mcu.base;
+ val = get_unaligned_le32(data + 8 * i + 4);
+
+ WARN_ON_ONCE(usb->mcu.rp[i].reg != reg);
+ usb->mcu.rp[i].value = val;
+ }
+ }
+}
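The two response layouts decoded above can be sketched as follows (byte offsets derived directly from the loops):

/* burst read:     len == 4 * rp_len; the payload is a run of consecutive
 *                 LE32 values starting at rp[0].reg, one per register.
 * non-burst read: len == 8 * rp_len; the payload is a list of
 *                 (LE32 register, LE32 value) pairs, 8 bytes per entry.
 */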
+
+static int mt76x02u_mcu_wait_resp(struct mt76_dev *dev, u8 seq)
+{
+ struct mt76_usb *usb = &dev->usb;
+ u8 *data = usb->mcu.data;
+ int i, len, ret;
+ u32 rxfce;
+
+ for (i = 0; i < 5; i++) {
+ ret = mt76u_bulk_msg(dev, data, MCU_RESP_URB_SIZE, &len,
+ 300, MT_EP_IN_CMD_RESP);
+ if (ret == -ETIMEDOUT)
+ continue;
+ if (ret)
+ goto out;
+
+ if (usb->mcu.rp)
+ mt76x02u_multiple_mcu_reads(dev, data + 4, len - 8);
+
+ rxfce = get_unaligned_le32(data);
+ if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce) &&
+ FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce) == EVT_CMD_DONE)
+ return 0;
+
+ dev_err(dev->dev, "error: MCU resp evt:%lx seq:%hhx-%lx\n",
+ FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce),
+ seq, FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce));
+ }
+out:
+ dev_err(dev->dev, "error: %s failed with %d\n", __func__, ret);
+ return ret;
+}
+
+static int
+__mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
+ int cmd, bool wait_resp)
+{
+ u8 seq = 0;
+ u32 info;
+ int ret;
+
+ if (test_bit(MT76_REMOVED, &dev->phy.state)) {
+ ret = 0;
+ goto out;
+ }
+
+ if (wait_resp) {
+ seq = ++dev->mcu.msg_seq & 0xf;
+ if (!seq)
+ seq = ++dev->mcu.msg_seq & 0xf;
+ }
+
+ info = FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
+ FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
+ MT_MCU_MSG_TYPE_CMD;
+ ret = mt76x02u_skb_dma_info(skb, CPU_TX_PORT, info);
+ if (ret)
+ return ret;
+
+ ret = mt76u_bulk_msg(dev, skb->data, skb->len, NULL, 500,
+ MT_EP_OUT_INBAND_CMD);
+ if (ret)
+ goto out;
+
+ if (wait_resp)
+ ret = mt76x02u_mcu_wait_resp(dev, seq);
+
+out:
+ consume_skb(skb);
+
+ return ret;
+}
+
+static int
+mt76x02u_mcu_send_msg(struct mt76_dev *dev, int cmd, const void *data,
+ int len, bool wait_resp)
+{
+ struct sk_buff *skb;
+ int err;
+
+ skb = mt76_mcu_msg_alloc(dev, data, len);
+ if (!skb)
+ return -ENOMEM;
+
+ mutex_lock(&dev->mcu.mutex);
+ err = __mt76x02u_mcu_send_msg(dev, skb, cmd, wait_resp);
+ mutex_unlock(&dev->mcu.mutex);
+
+ return err;
+}
+
+static inline void skb_put_le32(struct sk_buff *skb, u32 val)
+{
+ put_unaligned_le32(val, skb_put(skb, 4));
+}
+
+static int
+mt76x02u_mcu_wr_rp(struct mt76_dev *dev, u32 base,
+ const struct mt76_reg_pair *data, int n)
+{
+ const int max_vals_per_cmd = MT_INBAND_PACKET_MAX_LEN / 8;
+ const int CMD_RANDOM_WRITE = 12;
+ struct sk_buff *skb;
+ int cnt, i, ret;
+
+ if (!n)
+ return 0;
+
+ cnt = min(max_vals_per_cmd, n);
+
+ skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+
+ for (i = 0; i < cnt; i++) {
+ skb_put_le32(skb, base + data[i].reg);
+ skb_put_le32(skb, data[i].value);
+ }
+
+ mutex_lock(&dev->mcu.mutex);
+ ret = __mt76x02u_mcu_send_msg(dev, skb, CMD_RANDOM_WRITE, cnt == n);
+ mutex_unlock(&dev->mcu.mutex);
+ if (ret)
+ return ret;
+
+ return mt76x02u_mcu_wr_rp(dev, base, data + cnt, n - cnt);
+}
+
+static int
+mt76x02u_mcu_rd_rp(struct mt76_dev *dev, u32 base,
+ struct mt76_reg_pair *data, int n)
+{
+ const int CMD_RANDOM_READ = 10;
+ const int max_vals_per_cmd = MT_INBAND_PACKET_MAX_LEN / 8;
+ struct mt76_usb *usb = &dev->usb;
+ struct sk_buff *skb;
+ int cnt, i, ret;
+
+ if (!n)
+ return 0;
+
+ cnt = min(max_vals_per_cmd, n);
+ if (cnt != n)
+ return -EINVAL;
+
+ skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+
+ for (i = 0; i < cnt; i++) {
+ skb_put_le32(skb, base + data[i].reg);
+ skb_put_le32(skb, data[i].value);
+ }
+
+ mutex_lock(&dev->mcu.mutex);
+
+ usb->mcu.rp = data;
+ usb->mcu.rp_len = n;
+ usb->mcu.base = base;
+ usb->mcu.burst = false;
+
+ ret = __mt76x02u_mcu_send_msg(dev, skb, CMD_RANDOM_READ, true);
+
+ usb->mcu.rp = NULL;
+
+ mutex_unlock(&dev->mcu.mutex);
+
+ return ret;
+}
+
+void mt76x02u_mcu_fw_reset(struct mt76x02_dev *dev)
+{
+ mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ 0x1, 0, NULL, 0);
+}
+EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_reset);
+
+static int
+__mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, u8 *data,
+ const void *fw_data, int len, u32 dst_addr)
+{
+ __le32 info;
+ u32 val;
+ int err, data_len;
+
+ info = cpu_to_le32(FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
+ FIELD_PREP(MT_MCU_MSG_LEN, len) |
+ MT_MCU_MSG_TYPE_CMD);
+
+ memcpy(data, &info, sizeof(info));
+ memcpy(data + sizeof(info), fw_data, len);
+ memset(data + sizeof(info) + len, 0, 4);
+
+ mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE,
+ MT_FCE_DMA_ADDR, dst_addr);
+ len = roundup(len, 4);
+ mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE,
+ MT_FCE_DMA_LEN, len << 16);
+
+ data_len = MT_CMD_HDR_LEN + len + sizeof(info);
+
+ err = mt76u_bulk_msg(&dev->mt76, data, data_len, NULL, 1000,
+ MT_EP_OUT_INBAND_CMD);
+ if (err) {
+ dev_err(dev->mt76.dev, "firmware upload failed: %d\n", err);
+ return err;
+ }
+
+ val = mt76_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
+ val++;
+ mt76_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);
+
+ return 0;
+}
+
+int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data,
+ int data_len, u32 max_payload, u32 offset)
+{
+ int len, err = 0, pos = 0, max_len = max_payload - 8;
+ u8 *buf;
+
+ buf = kmalloc(max_payload, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ while (data_len > 0) {
+ len = min_t(int, data_len, max_len);
+ err = __mt76x02u_mcu_fw_send_data(dev, buf, data + pos,
+ len, offset + pos);
+ if (err < 0)
+ break;
+
+ data_len -= len;
+ pos += len;
+ usleep_range(5000, 10000);
+ }
+ kfree(buf);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_send_data);
+
+void mt76x02u_init_mcu(struct mt76_dev *dev)
+{
+ static const struct mt76_mcu_ops mt76x02u_mcu_ops = {
+ .headroom = MT_CMD_HDR_LEN,
+ .tailroom = 8,
+ .mcu_send_msg = mt76x02u_mcu_send_msg,
+ .mcu_wr_rp = mt76x02u_mcu_wr_rp,
+ .mcu_rd_rp = mt76x02u_mcu_rd_rp,
+ };
+
+ dev->mcu_ops = &mt76x02u_mcu_ops;
+}
+EXPORT_SYMBOL_GPL(mt76x02u_init_mcu);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
new file mode 100644
index 000000000..0f191bd28
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -0,0 +1,697 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/module.h>
+#include "mt76x02.h"
+
+#define CCK_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE, \
+ .hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx), \
+ .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + (_idx)), \
+}
+
+#define OFDM_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
+ .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
+}
+
+struct ieee80211_rate mt76x02_rates[] = {
+ CCK_RATE(0, 10),
+ CCK_RATE(1, 20),
+ CCK_RATE(2, 55),
+ CCK_RATE(3, 110),
+ OFDM_RATE(0, 60),
+ OFDM_RATE(1, 90),
+ OFDM_RATE(2, 120),
+ OFDM_RATE(3, 180),
+ OFDM_RATE(4, 240),
+ OFDM_RATE(5, 360),
+ OFDM_RATE(6, 480),
+ OFDM_RATE(7, 540),
+};
+EXPORT_SYMBOL_GPL(mt76x02_rates);
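Decoding one entry of the table with the macros above, keeping the MT_PHY_TYPE_* values symbolic:

/* mt76x02_rates[3] == CCK_RATE(3, 110): 11 Mbit/s (bitrate is in
 * 100 kbit/s units), hw_value == (MT_PHY_TYPE_CCK << 8) | 3 and
 * hw_value_short == (MT_PHY_TYPE_CCK << 8) | 11, i.e. the same index
 * offset by 8 to select the short-preamble variant.
 * OFDM entries use the same value for hw_value and hw_value_short.
 */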
+
+static const struct ieee80211_iface_limit mt76x02_if_limits[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_ADHOC)
+ }, {
+ .max = 8,
+ .types = BIT(NL80211_IFTYPE_STATION) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_AP)
+ },
+};
+
+static const struct ieee80211_iface_limit mt76x02u_if_limits[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_ADHOC)
+ }, {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_STATION) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_AP)
+ },
+};
+
+static const struct ieee80211_iface_combination mt76x02_if_comb[] = {
+ {
+ .limits = mt76x02_if_limits,
+ .n_limits = ARRAY_SIZE(mt76x02_if_limits),
+ .max_interfaces = 8,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80),
+ }
+};
+
+static const struct ieee80211_iface_combination mt76x02u_if_comb[] = {
+ {
+ .limits = mt76x02u_if_limits,
+ .n_limits = ARRAY_SIZE(mt76x02u_if_limits),
+ .max_interfaces = 2,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+ }
+};
+
+static void
+mt76x02_led_set_config(struct mt76_dev *mdev, u8 delay_on,
+ u8 delay_off)
+{
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev,
+ mt76);
+ u32 val;
+
+ val = FIELD_PREP(MT_LED_STATUS_DURATION, 0xff) |
+ FIELD_PREP(MT_LED_STATUS_OFF, delay_off) |
+ FIELD_PREP(MT_LED_STATUS_ON, delay_on);
+
+ mt76_wr(dev, MT_LED_S0(mdev->led_pin), val);
+ mt76_wr(dev, MT_LED_S1(mdev->led_pin), val);
+
+ val = MT_LED_CTRL_REPLAY(mdev->led_pin) |
+ MT_LED_CTRL_KICK(mdev->led_pin);
+ if (mdev->led_al)
+ val |= MT_LED_CTRL_POLARITY(mdev->led_pin);
+ mt76_wr(dev, MT_LED_CTRL, val);
+}
+
+static int
+mt76x02_led_set_blink(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct mt76_dev *mdev = container_of(led_cdev, struct mt76_dev,
+ led_cdev);
+ u8 delta_on, delta_off;
+
+ delta_off = max_t(u8, *delay_off / 10, 1);
+ delta_on = max_t(u8, *delay_on / 10, 1);
+
+ mt76x02_led_set_config(mdev, delta_on, delta_off);
+
+ return 0;
+}
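For example, with illustrative delay values, a 300 ms on / 700 ms off blink request from the LED core becomes:

/* delta_on  = max(300 / 10, 1) == 30
 * delta_off = max(700 / 10, 1) == 70
 * i.e. the MT_LED_STATUS_ON/OFF fields are u8 values programmed in
 * roughly 10 ms units.
 */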
+
+static void
+mt76x02_led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct mt76_dev *mdev = container_of(led_cdev, struct mt76_dev,
+ led_cdev);
+
+ if (!brightness)
+ mt76x02_led_set_config(mdev, 0, 0xff);
+ else
+ mt76x02_led_set_config(mdev, 0xff, 0);
+}
+
+void mt76x02_init_device(struct mt76x02_dev *dev)
+{
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ struct wiphy *wiphy = hw->wiphy;
+
+ INIT_DELAYED_WORK(&dev->mt76.mac_work, mt76x02_mac_work);
+
+ hw->queues = 4;
+ hw->max_rates = 1;
+ hw->max_report_rates = 7;
+ hw->max_rate_tries = 1;
+ hw->extra_tx_headroom = 2;
+
+ if (mt76_is_usb(&dev->mt76)) {
+ hw->extra_tx_headroom += sizeof(struct mt76x02_txwi) +
+ MT_DMA_HDR_LEN;
+ wiphy->iface_combinations = mt76x02u_if_comb;
+ wiphy->n_iface_combinations = ARRAY_SIZE(mt76x02u_if_comb);
+ } else {
+ INIT_DELAYED_WORK(&dev->wdt_work, mt76x02_wdt_work);
+
+ mt76x02_dfs_init_detector(dev);
+
+ wiphy->reg_notifier = mt76x02_regd_notifier;
+ wiphy->iface_combinations = mt76x02_if_comb;
+ wiphy->n_iface_combinations = ARRAY_SIZE(mt76x02_if_comb);
+
+ /* init led callbacks */
+ if (IS_ENABLED(CONFIG_MT76_LEDS)) {
+ dev->mt76.led_cdev.brightness_set =
+ mt76x02_led_set_brightness;
+ dev->mt76.led_cdev.blink_set = mt76x02_led_set_blink;
+ }
+ }
+
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
+
+ hw->sta_data_size = sizeof(struct mt76x02_sta);
+ hw->vif_data_size = sizeof(struct mt76x02_vif);
+
+ ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
+ ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
+
+ dev->mt76.global_wcid.idx = 255;
+ dev->mt76.global_wcid.hw_key_idx = -1;
+ dev->slottime = 9;
+
+ if (is_mt76x2(dev)) {
+ dev->mphy.sband_2g.sband.ht_cap.cap |=
+ IEEE80211_HT_CAP_LDPC_CODING;
+ dev->mphy.sband_5g.sband.ht_cap.cap |=
+ IEEE80211_HT_CAP_LDPC_CODING;
+ dev->chainmask = 0x202;
+ dev->mphy.antenna_mask = 3;
+ } else {
+ dev->chainmask = 0x101;
+ dev->mphy.antenna_mask = 1;
+ }
+}
+EXPORT_SYMBOL_GPL(mt76x02_init_device);
+
+void mt76x02_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast)
+{
+ struct mt76x02_dev *dev = hw->priv;
+ u32 flags = 0;
+
+#define MT76_FILTER(_flag, _hw) do { \
+ flags |= *total_flags & FIF_##_flag; \
+ dev->mt76.rxfilter &= ~(_hw); \
+ dev->mt76.rxfilter |= !(flags & FIF_##_flag) * (_hw); \
+ } while (0)
+
+ mutex_lock(&dev->mt76.mutex);
+
+ dev->mt76.rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
+
+ MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
+ MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
+ MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK |
+ MT_RX_FILTR_CFG_CTS |
+ MT_RX_FILTR_CFG_CFEND |
+ MT_RX_FILTR_CFG_CFACK |
+ MT_RX_FILTR_CFG_BA |
+ MT_RX_FILTR_CFG_CTRL_RSV);
+ MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
+
+ *total_flags = flags;
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
+
+ mutex_unlock(&dev->mt76.mutex);
+}
+EXPORT_SYMBOL_GPL(mt76x02_configure_filter);
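A short sketch of what the MT76_FILTER() macro above does for a single flag, read directly from the macro body:

/* MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR):
 *   - if mac80211 requested FIF_FCSFAIL, the CRC_ERR "drop" bit is
 *     cleared from rxfilter, so frames with a bad FCS are passed up;
 *   - otherwise the drop bit is set again and hardware keeps
 *     discarding them.
 * flags accumulates only the FIF_* bits that were honoured and is
 * written back through *total_flags.
 */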
+
+int mt76x02_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+ struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv;
+ struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
+ int idx = 0;
+
+ memset(msta, 0, sizeof(*msta));
+
+ idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT76x02_N_WCIDS);
+ if (idx < 0)
+ return -ENOSPC;
+
+ msta->vif = mvif;
+ msta->wcid.sta = 1;
+ msta->wcid.idx = idx;
+ msta->wcid.hw_key_idx = -1;
+ mt76x02_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
+ mt76x02_mac_wcid_set_drop(dev, idx, false);
+ ewma_pktlen_init(&msta->pktlen);
+
+ if (vif->type == NL80211_IFTYPE_AP)
+ set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_sta_add);
+
+void mt76x02_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+ struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
+ int idx = wcid->idx;
+
+ mt76x02_mac_wcid_set_drop(dev, idx, true);
+ mt76x02_mac_wcid_setup(dev, idx, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(mt76x02_sta_remove);
+
+static void
+mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
+ unsigned int idx)
+{
+ struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
+ struct mt76_txq *mtxq;
+
+ memset(mvif, 0, sizeof(*mvif));
+
+ mvif->idx = idx;
+ mvif->group_wcid.idx = MT_VIF_WCID(idx);
+ mvif->group_wcid.hw_key_idx = -1;
+ mtxq = (struct mt76_txq *)vif->txq->drv_priv;
+ mtxq->wcid = &mvif->group_wcid;
+}
+
+int
+mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt76x02_dev *dev = hw->priv;
+ unsigned int idx = 0;
+
+ /* Allow changing the MAC address in HW when creating the first interface. */
+ if (!dev->mphy.vif_mask &&
+ (((vif->addr[0] ^ dev->mt76.macaddr[0]) & ~GENMASK(4, 1)) ||
+ memcmp(vif->addr + 1, dev->mt76.macaddr + 1, ETH_ALEN - 1)))
+ mt76x02_mac_setaddr(dev, vif->addr);
+
+ if (vif->addr[0] & BIT(1))
+ idx = 1 + (((dev->mt76.macaddr[0] ^ vif->addr[0]) >> 2) & 7);
+
+ /*
+ * Client mode typically only has one configurable BSSID register,
+ * which is used for bssidx=0. This is linked to the MAC address.
+ * Since mac80211 allows changing interface types, and we cannot
+ * force the use of the primary MAC address for a station mode
+ * interface, we need some other way of configuring a per-interface
+ * remote BSSID.
+ * The hardware provides an AP-Client feature, where bssidx 0-7 are
+ * used for AP mode and bssidx 8-15 for client mode.
+ * We shift the station interface bss index by 8 to force the
+ * hardware to recognize the BSSID.
+ * The resulting bssidx mismatch for unicast frames is ignored by hw.
+ */
+ if (vif->type == NL80211_IFTYPE_STATION)
+ idx += 8;
+
+ /* index already in use, or a non-station interface got idx > 7 (AP/Mesh/...) */
+ if (dev->mphy.vif_mask & BIT(idx) ||
+ (vif->type != NL80211_IFTYPE_STATION && idx > 7))
+ return -EBUSY;
+
+ dev->mphy.vif_mask |= BIT(idx);
+
+ mt76x02_vif_init(dev, vif, idx);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_add_interface);
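A worked example of the index selection above, using made-up MAC addresses:

/* dev->mt76.macaddr[0] == 0x00, vif->addr[0] == 0x06 (locally
 * administered, BIT(1) set):
 *   idx = 1 + (((0x00 ^ 0x06) >> 2) & 7) = 1 + 1 = 2
 * For an AP interface this selects bssidx 2; for a station interface
 * idx += 8 gives 10, one of the client-mode bssidx 8-15 slots
 * described in the comment above.
 */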
+
+void mt76x02_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt76x02_dev *dev = hw->priv;
+ struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
+
+ dev->mphy.vif_mask &= ~BIT(mvif->idx);
+}
+EXPORT_SYMBOL_GPL(mt76x02_remove_interface);
+
+int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ struct ieee80211_sta *sta = params->sta;
+ struct mt76x02_dev *dev = hw->priv;
+ struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv;
+ struct ieee80211_txq *txq = sta->txq[params->tid];
+ u16 tid = params->tid;
+ u16 ssn = params->ssn;
+ struct mt76_txq *mtxq;
+ int ret = 0;
+
+ if (!txq)
+ return -EINVAL;
+
+ mtxq = (struct mt76_txq *)txq->drv_priv;
+
+ mutex_lock(&dev->mt76.mutex);
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid,
+ ssn, params->buf_size);
+ mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
+ mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4,
+ BIT(16 + tid));
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ mtxq->aggr = true;
+ mtxq->send_bar = false;
+ ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ mtxq->aggr = false;
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(ssn);
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
+ break;
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ mtxq->aggr = false;
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ }
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76x02_ampdu_action);
+
+int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct mt76x02_dev *dev = hw->priv;
+ struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
+ struct mt76x02_sta *msta;
+ struct mt76_wcid *wcid;
+ int idx = key->keyidx;
+ int ret;
+
+ /* fall back to sw encryption for unsupported ciphers */
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /*
+ * The hardware does not support per-STA RX GTK; fall back
+ * to software mode for these.
+ */
+ if ((vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) &&
+ (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
+ key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return -EOPNOTSUPP;
+
+ /*
+ * In USB AP mode, broadcast/multicast frames are set up in the beacon
+ * data registers and sent via the HW beacon engine, so they must
+ * already be encrypted.
+ */
+ if (mt76_is_usb(&dev->mt76) &&
+ vif->type == NL80211_IFTYPE_AP &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return -EOPNOTSUPP;
+
+ /* MT76x0 GTK offloading does not work with more than one VIF */
+ if (is_mt76x0(dev) && !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return -EOPNOTSUPP;
+
+ msta = sta ? (struct mt76x02_sta *)sta->drv_priv : NULL;
+ wcid = msta ? &msta->wcid : &mvif->group_wcid;
+
+ if (cmd == SET_KEY) {
+ key->hw_key_idx = wcid->idx;
+ wcid->hw_key_idx = idx;
+ if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+ wcid->sw_iv = true;
+ }
+ } else {
+ if (idx == wcid->hw_key_idx) {
+ wcid->hw_key_idx = -1;
+ wcid->sw_iv = false;
+ }
+
+ key = NULL;
+ }
+ mt76_wcid_key_setup(&dev->mt76, wcid, key);
+
+ if (!msta) {
+ if (key || wcid->hw_key_idx == idx) {
+ ret = mt76x02_mac_wcid_set_key(dev, wcid->idx, key);
+ if (ret)
+ return ret;
+ }
+
+ return mt76x02_mac_shared_key_setup(dev, mvif->idx, idx, key);
+ }
+
+ return mt76x02_mac_wcid_set_key(dev, msta->wcid.idx, key);
+}
+EXPORT_SYMBOL_GPL(mt76x02_set_key);
+
+int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params)
+{
+ struct mt76x02_dev *dev = hw->priv;
+ u8 cw_min = 5, cw_max = 10, qid;
+ u32 val;
+
+ qid = dev->mt76.q_tx[queue]->hw_idx;
+
+ if (params->cw_min)
+ cw_min = fls(params->cw_min);
+ if (params->cw_max)
+ cw_max = fls(params->cw_max);
+
+ val = FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop) |
+ FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
+ FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
+ FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
+ mt76_wr(dev, MT_EDCA_CFG_AC(qid), val);
+
+ val = mt76_rr(dev, MT_WMM_TXOP(qid));
+ val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(qid));
+ val |= params->txop << MT_WMM_TXOP_SHIFT(qid);
+ mt76_wr(dev, MT_WMM_TXOP(qid), val);
+
+ val = mt76_rr(dev, MT_WMM_AIFSN);
+ val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(qid));
+ val |= params->aifs << MT_WMM_AIFSN_SHIFT(qid);
+ mt76_wr(dev, MT_WMM_AIFSN, val);
+
+ val = mt76_rr(dev, MT_WMM_CWMIN);
+ val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(qid));
+ val |= cw_min << MT_WMM_CWMIN_SHIFT(qid);
+ mt76_wr(dev, MT_WMM_CWMIN, val);
+
+ val = mt76_rr(dev, MT_WMM_CWMAX);
+ val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(qid));
+ val |= cw_max << MT_WMM_CWMAX_SHIFT(qid);
+ mt76_wr(dev, MT_WMM_CWMAX, val);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_conf_tx);
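The contention-window fields are stored via fls(), i.e. effectively as exponents; for example, with typical WMM best-effort values:

/* params->cw_min == 15   -> cw_min = fls(15)   == 4  (2^4  - 1 == 15)
 * params->cw_max == 1023 -> cw_max = fls(1023) == 10 (2^10 - 1 == 1023)
 * so MT_EDCA_CFG_CWMIN/CWMAX hold the exponent, not the window size.
 */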
+
+void mt76x02_set_tx_ackto(struct mt76x02_dev *dev)
+{
+ u8 ackto, sifs, slottime = dev->slottime;
+
+ /* As defined by IEEE 802.11-2007 17.3.8.6 */
+ slottime += 3 * dev->coverage_class;
+ mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG,
+ MT_BKOFF_SLOT_CFG_SLOTTIME, slottime);
+
+ sifs = mt76_get_field(dev, MT_XIFS_TIME_CFG,
+ MT_XIFS_TIME_CFG_OFDM_SIFS);
+
+ ackto = slottime + sifs;
+ mt76_rmw_field(dev, MT_TX_TIMEOUT_CFG,
+ MT_TX_TIMEOUT_CFG_ACKTO, ackto);
+}
+EXPORT_SYMBOL_GPL(mt76x02_set_tx_ackto);
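A numeric sketch of the timeout above; the SIFS value is only an example, since it is read back from MT_XIFS_TIME_CFG:

/* dev->slottime == 9 us, coverage_class == 2:
 *   slottime = 9 + 3 * 2 = 15 us
 * with an OFDM SIFS of 16 us read from the register:
 *   ackto = 15 + 16 = 31 us, written to MT_TX_TIMEOUT_CFG_ACKTO.
 */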
+
+void mt76x02_set_coverage_class(struct ieee80211_hw *hw,
+ s16 coverage_class)
+{
+ struct mt76x02_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mt76.mutex);
+ dev->coverage_class = max_t(s16, coverage_class, 0);
+ mt76x02_set_tx_ackto(dev);
+ mutex_unlock(&dev->mt76.mutex);
+}
+EXPORT_SYMBOL_GPL(mt76x02_set_coverage_class);
+
+int mt76x02_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
+{
+ struct mt76x02_dev *dev = hw->priv;
+
+ if (val != ~0 && val > 0xffff)
+ return -EINVAL;
+
+ mutex_lock(&dev->mt76.mutex);
+ mt76x02_mac_set_rts_thresh(dev, val);
+ mutex_unlock(&dev->mt76.mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_set_rts_threshold);
+
+void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76x02_dev *dev = hw->priv;
+ struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv;
+ struct ieee80211_sta_rates *rates = rcu_dereference(sta->rates);
+ struct ieee80211_tx_rate rate = {};
+
+ if (!rates)
+ return;
+
+ rate.idx = rates->rate[0].idx;
+ rate.flags = rates->rate[0].flags;
+ mt76x02_mac_wcid_set_rate(dev, &msta->wcid, &rate);
+}
+EXPORT_SYMBOL_GPL(mt76x02_sta_rate_tbl_update);
+
+void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len)
+{
+ int hdrlen;
+
+ if (!len)
+ return;
+
+ hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+ memmove(skb->data + len, skb->data, hdrlen);
+ skb_pull(skb, len);
+}
+EXPORT_SYMBOL_GPL(mt76x02_remove_hdr_pad);
+
+void mt76x02_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt76x02_dev *dev = hw->priv;
+
+ clear_bit(MT76_SCANNING, &dev->mphy.state);
+ if (dev->cal.gain_init_done) {
+ /* Restore AGC gain and resume calibration after scanning. */
+ dev->cal.low_gain = -1;
+ ieee80211_queue_delayed_work(hw, &dev->cal_work, 0);
+ }
+}
+EXPORT_SYMBOL_GPL(mt76x02_sw_scan_complete);
+
+void mt76x02_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta,
+ bool ps)
+{
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+ struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv;
+ int idx = msta->wcid.idx;
+
+ mt76_stop_tx_queues(&dev->mt76, sta, true);
+ if (mt76_is_mmio(mdev))
+ mt76x02_mac_wcid_set_drop(dev, idx, ps);
+}
+EXPORT_SYMBOL_GPL(mt76x02_sta_ps);
+
+void mt76x02_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u32 changed)
+{
+ struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
+ struct mt76x02_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ if (changed & BSS_CHANGED_BSSID)
+ mt76x02_mac_set_bssid(dev, mvif->idx, info->bssid);
+
+ if (changed & BSS_CHANGED_HT || changed & BSS_CHANGED_ERP_CTS_PROT)
+ mt76x02_mac_set_tx_protection(dev, info->use_cts_prot,
+ info->ht_operation_mode);
+
+ if (changed & BSS_CHANGED_BEACON_INT) {
+ mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
+ MT_BEACON_TIME_CFG_INTVAL,
+ info->beacon_int << 4);
+ dev->mt76.beacon_int = info->beacon_int;
+ }
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED)
+ mt76x02_mac_set_beacon_enable(dev, vif, info->enable_beacon);
+
+ if (changed & BSS_CHANGED_ERP_PREAMBLE)
+ mt76x02_mac_set_short_preamble(dev, info->use_short_preamble);
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ int slottime = info->use_short_slot ? 9 : 20;
+
+ dev->slottime = slottime;
+ mt76x02_set_tx_ackto(dev);
+ }
+
+ mutex_unlock(&dev->mt76.mutex);
+}
+EXPORT_SYMBOL_GPL(mt76x02_bss_info_changed);
+
+void mt76x02_config_mac_addr_list(struct mt76x02_dev *dev)
+{
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ struct wiphy *wiphy = hw->wiphy;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev->macaddr_list); i++) {
+ u8 *addr = dev->macaddr_list[i].addr;
+
+ memcpy(addr, dev->mt76.macaddr, ETH_ALEN);
+
+ if (!i)
+ continue;
+
+ addr[0] |= BIT(1);
+ addr[0] ^= ((i - 1) << 2);
+ }
+ wiphy->addresses = dev->macaddr_list;
+ wiphy->n_addresses = ARRAY_SIZE(dev->macaddr_list);
+}
+EXPORT_SYMBOL_GPL(mt76x02_config_mac_addr_list);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig b/drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig
new file mode 100644
index 000000000..5fd4973e3
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config MT76x2_COMMON
+ tristate
+ select MT76x02_LIB
+
+config MT76x2E
+ tristate "MediaTek MT76x2E (PCIe) support"
+ select MT76x2_COMMON
+ depends on MAC80211
+ depends on PCI
+ help
+ This adds support for MT7612/MT7602/MT7662-based wireless PCIe
+ devices, which comply with the IEEE 802.11ac standard and support
+ up to two spatial streams and an 866 Mbit/s PHY rate.
+
+ To compile this driver as a module, choose M here.
+
+config MT76x2U
+ tristate "MediaTek MT76x2U (USB) support"
+ select MT76x2_COMMON
+ select MT76x02_USB
+ depends on MAC80211
+ depends on USB
+ help
+ This adds support for MT7612U-based wireless USB 3.0 dongles,
+ which comply with the IEEE 802.11ac standard and support up to
+ two spatial streams and an 866 Mbit/s PHY rate.
+
+ To compile this driver as a module, choose M here.
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/Makefile b/drivers/net/wireless/mediatek/mt76/mt76x2/Makefile
new file mode 100644
index 000000000..caf089538
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_MT76x2_COMMON) += mt76x2-common.o
+obj-$(CONFIG_MT76x2E) += mt76x2e.o
+obj-$(CONFIG_MT76x2U) += mt76x2u.o
+
+mt76x2-common-y := \
+ eeprom.o mac.o init.o phy.o mcu.o
+
+mt76x2e-y := \
+ pci.o pci_main.o pci_init.o pci_mcu.o \
+ pci_phy.o
+
+mt76x2u-y := \
+ usb.o usb_init.o usb_main.o usb_mac.o usb_mcu.o \
+ usb_phy.o
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
new file mode 100644
index 000000000..604781160
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
@@ -0,0 +1,521 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <asm/unaligned.h>
+#include "mt76x2.h"
+#include "eeprom.h"
+
+#define EE_FIELD(_name, _value) [MT_EE_##_name] = (_value) | 1
+
+static int
+mt76x2_eeprom_get_macaddr(struct mt76x02_dev *dev)
+{
+ void *src = dev->mt76.eeprom.data + MT_EE_MAC_ADDR;
+
+ memcpy(dev->mt76.macaddr, src, ETH_ALEN);
+ return 0;
+}
+
+static bool
+mt76x2_has_cal_free_data(struct mt76x02_dev *dev, u8 *efuse)
+{
+ u16 *efuse_w = (u16 *)efuse;
+
+ if (efuse_w[MT_EE_NIC_CONF_0] != 0)
+ return false;
+
+ if (efuse_w[MT_EE_XTAL_TRIM_1] == 0xffff)
+ return false;
+
+ if (efuse_w[MT_EE_TX_POWER_DELTA_BW40] != 0)
+ return false;
+
+ if (efuse_w[MT_EE_TX_POWER_0_START_2G] == 0xffff)
+ return false;
+
+ if (efuse_w[MT_EE_TX_POWER_0_GRP3_TX_POWER_DELTA] != 0)
+ return false;
+
+ if (efuse_w[MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE] == 0xffff)
+ return false;
+
+ return true;
+}
+
+static void
+mt76x2_apply_cal_free_data(struct mt76x02_dev *dev, u8 *efuse)
+{
+#define GROUP_5G(_id) \
+ MT_EE_TX_POWER_0_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id), \
+ MT_EE_TX_POWER_0_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id) + 1, \
+ MT_EE_TX_POWER_1_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id), \
+ MT_EE_TX_POWER_1_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id) + 1
+
+ static const u8 cal_free_bytes[] = {
+ MT_EE_XTAL_TRIM_1,
+ MT_EE_TX_POWER_EXT_PA_5G + 1,
+ MT_EE_TX_POWER_0_START_2G,
+ MT_EE_TX_POWER_0_START_2G + 1,
+ MT_EE_TX_POWER_1_START_2G,
+ MT_EE_TX_POWER_1_START_2G + 1,
+ GROUP_5G(0),
+ GROUP_5G(1),
+ GROUP_5G(2),
+ GROUP_5G(3),
+ GROUP_5G(4),
+ GROUP_5G(5),
+ MT_EE_RF_2G_TSSI_OFF_TXPOWER,
+ MT_EE_RF_2G_RX_HIGH_GAIN + 1,
+ MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN,
+ MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN + 1,
+ MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN,
+ MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN + 1,
+ MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN,
+ MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN + 1,
+ };
+ struct device_node *np = dev->mt76.dev->of_node;
+ u8 *eeprom = dev->mt76.eeprom.data;
+ u8 prev_grp0[4] = {
+ eeprom[MT_EE_TX_POWER_0_START_5G],
+ eeprom[MT_EE_TX_POWER_0_START_5G + 1],
+ eeprom[MT_EE_TX_POWER_1_START_5G],
+ eeprom[MT_EE_TX_POWER_1_START_5G + 1]
+ };
+ u16 val;
+ int i;
+
+ if (!np || !of_property_read_bool(np, "mediatek,eeprom-merge-otp"))
+ return;
+
+ if (!mt76x2_has_cal_free_data(dev, efuse))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(cal_free_bytes); i++) {
+ int offset = cal_free_bytes[i];
+
+ eeprom[offset] = efuse[offset];
+ }
+
+ if (!(efuse[MT_EE_TX_POWER_0_START_5G] |
+ efuse[MT_EE_TX_POWER_0_START_5G + 1]))
+ memcpy(eeprom + MT_EE_TX_POWER_0_START_5G, prev_grp0, 2);
+ if (!(efuse[MT_EE_TX_POWER_1_START_5G] |
+ efuse[MT_EE_TX_POWER_1_START_5G + 1]))
+ memcpy(eeprom + MT_EE_TX_POWER_1_START_5G, prev_grp0 + 2, 2);
+
+ val = get_unaligned_le16(efuse + MT_EE_BT_RCAL_RESULT);
+ if (val != 0xffff)
+ eeprom[MT_EE_BT_RCAL_RESULT] = val & 0xff;
+
+ val = get_unaligned_le16(efuse + MT_EE_BT_VCDL_CALIBRATION);
+ if (val != 0xffff)
+ eeprom[MT_EE_BT_VCDL_CALIBRATION + 1] = val >> 8;
+
+ val = get_unaligned_le16(efuse + MT_EE_BT_PMUCFG);
+ if (val != 0xffff)
+ eeprom[MT_EE_BT_PMUCFG] = val & 0xff;
+}
+
+static int mt76x2_check_eeprom(struct mt76x02_dev *dev)
+{
+ u16 val = get_unaligned_le16(dev->mt76.eeprom.data);
+
+ if (!val)
+ val = get_unaligned_le16(dev->mt76.eeprom.data + MT_EE_PCI_ID);
+
+ switch (val) {
+ case 0x7662:
+ case 0x7612:
+ return 0;
+ default:
+ dev_err(dev->mt76.dev, "EEPROM data check failed: %04x\n", val);
+ return -EINVAL;
+ }
+}
+
+static int
+mt76x2_eeprom_load(struct mt76x02_dev *dev)
+{
+ void *efuse;
+ bool found;
+ int ret;
+
+ ret = mt76_eeprom_init(&dev->mt76, MT7662_EEPROM_SIZE);
+ if (ret < 0)
+ return ret;
+
+ found = ret;
+ if (found)
+ found = !mt76x2_check_eeprom(dev);
+
+ dev->mt76.otp.data = devm_kzalloc(dev->mt76.dev, MT7662_EEPROM_SIZE,
+ GFP_KERNEL);
+ dev->mt76.otp.size = MT7662_EEPROM_SIZE;
+ if (!dev->mt76.otp.data)
+ return -ENOMEM;
+
+ efuse = dev->mt76.otp.data;
+
+ if (mt76x02_get_efuse_data(dev, 0, efuse, MT7662_EEPROM_SIZE,
+ MT_EE_READ))
+ goto out;
+
+ if (found) {
+ mt76x2_apply_cal_free_data(dev, efuse);
+ } else {
+ /* FIXME: check if efuse data is complete */
+ found = true;
+ memcpy(dev->mt76.eeprom.data, efuse, MT7662_EEPROM_SIZE);
+ }
+
+out:
+ if (!found)
+ return -ENOENT;
+
+ return 0;
+}
+
+static void
+mt76x2_set_rx_gain_group(struct mt76x02_dev *dev, u8 val)
+{
+ s8 *dest = dev->cal.rx.high_gain;
+
+ if (!mt76x02_field_valid(val)) {
+ dest[0] = 0;
+ dest[1] = 0;
+ return;
+ }
+
+ dest[0] = mt76x02_sign_extend(val, 4);
+ dest[1] = mt76x02_sign_extend(val >> 4, 4);
+}
+
+static void
+mt76x2_set_rssi_offset(struct mt76x02_dev *dev, int chain, u8 val)
+{
+ s8 *dest = dev->cal.rx.rssi_offset;
+
+ if (!mt76x02_field_valid(val)) {
+ dest[chain] = 0;
+ return;
+ }
+
+ dest[chain] = mt76x02_sign_extend_optional(val, 7);
+}
+
+static enum mt76x2_cal_channel_group
+mt76x2_get_cal_channel_group(int channel)
+{
+ if (channel >= 184 && channel <= 196)
+ return MT_CH_5G_JAPAN;
+ if (channel <= 48)
+ return MT_CH_5G_UNII_1;
+ if (channel <= 64)
+ return MT_CH_5G_UNII_2;
+ if (channel <= 114)
+ return MT_CH_5G_UNII_2E_1;
+ if (channel <= 144)
+ return MT_CH_5G_UNII_2E_2;
+ return MT_CH_5G_UNII_3;
+}
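A few example mappings from the ranges above (channel numbers only, no new logic):

/* channel  36 -> MT_CH_5G_UNII_1    (<= 48)
 * channel 100 -> MT_CH_5G_UNII_2E_1 (<= 114)
 * channel 149 -> MT_CH_5G_UNII_3    (default)
 * channel 184 -> MT_CH_5G_JAPAN     (184..196, checked first)
 */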
+
+static u8
+mt76x2_get_5g_rx_gain(struct mt76x02_dev *dev, u8 channel)
+{
+ enum mt76x2_cal_channel_group group;
+
+ group = mt76x2_get_cal_channel_group(channel);
+ switch (group) {
+ case MT_CH_5G_JAPAN:
+ return mt76x02_eeprom_get(dev,
+ MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN);
+ case MT_CH_5G_UNII_1:
+ return mt76x02_eeprom_get(dev,
+ MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN) >> 8;
+ case MT_CH_5G_UNII_2:
+ return mt76x02_eeprom_get(dev,
+ MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN);
+ case MT_CH_5G_UNII_2E_1:
+ return mt76x02_eeprom_get(dev,
+ MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN) >> 8;
+ case MT_CH_5G_UNII_2E_2:
+ return mt76x02_eeprom_get(dev,
+ MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN);
+ default:
+ return mt76x02_eeprom_get(dev,
+ MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN) >> 8;
+ }
+}
+
+void mt76x2_read_rx_gain(struct mt76x02_dev *dev)
+{
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
+ int channel = chan->hw_value;
+ s8 lna_5g[3], lna_2g;
+ bool use_lna;
+ u8 lna = 0;
+ u16 val;
+
+ if (chan->band == NL80211_BAND_2GHZ)
+ val = mt76x02_eeprom_get(dev, MT_EE_RF_2G_RX_HIGH_GAIN) >> 8;
+ else
+ val = mt76x2_get_5g_rx_gain(dev, channel);
+
+ mt76x2_set_rx_gain_group(dev, val);
+
+ mt76x02_get_rx_gain(dev, chan->band, &val, &lna_2g, lna_5g);
+ mt76x2_set_rssi_offset(dev, 0, val);
+ mt76x2_set_rssi_offset(dev, 1, val >> 8);
+
+ dev->cal.rx.mcu_gain = (lna_2g & 0xff);
+ dev->cal.rx.mcu_gain |= (lna_5g[0] & 0xff) << 8;
+ dev->cal.rx.mcu_gain |= (lna_5g[1] & 0xff) << 16;
+ dev->cal.rx.mcu_gain |= (lna_5g[2] & 0xff) << 24;
+
+ val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
+ if (chan->band == NL80211_BAND_2GHZ)
+ use_lna = !(val & MT_EE_NIC_CONF_1_LNA_EXT_2G);
+ else
+ use_lna = !(val & MT_EE_NIC_CONF_1_LNA_EXT_5G);
+
+ if (use_lna)
+ lna = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan);
+
+ dev->cal.rx.lna_gain = mt76x02_sign_extend(lna, 8);
+}
+EXPORT_SYMBOL_GPL(mt76x2_read_rx_gain);
+
+void mt76x2_get_rate_power(struct mt76x02_dev *dev, struct mt76_rate_power *t,
+ struct ieee80211_channel *chan)
+{
+ bool is_5ghz;
+ u16 val;
+
+ is_5ghz = chan->band == NL80211_BAND_5GHZ;
+
+ memset(t, 0, sizeof(*t));
+
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_CCK);
+ t->cck[0] = t->cck[1] = mt76x02_rate_power_val(val);
+ t->cck[2] = t->cck[3] = mt76x02_rate_power_val(val >> 8);
+
+ if (is_5ghz)
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_OFDM_5G_6M);
+ else
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_OFDM_2G_6M);
+ t->ofdm[0] = t->ofdm[1] = mt76x02_rate_power_val(val);
+ t->ofdm[2] = t->ofdm[3] = mt76x02_rate_power_val(val >> 8);
+
+ if (is_5ghz)
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_OFDM_5G_24M);
+ else
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_OFDM_2G_24M);
+ t->ofdm[4] = t->ofdm[5] = mt76x02_rate_power_val(val);
+ t->ofdm[6] = t->ofdm[7] = mt76x02_rate_power_val(val >> 8);
+
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS0);
+ t->ht[0] = t->ht[1] = mt76x02_rate_power_val(val);
+ t->ht[2] = t->ht[3] = mt76x02_rate_power_val(val >> 8);
+
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS4);
+ t->ht[4] = t->ht[5] = mt76x02_rate_power_val(val);
+ t->ht[6] = t->ht[7] = mt76x02_rate_power_val(val >> 8);
+
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS8);
+ t->ht[8] = t->ht[9] = mt76x02_rate_power_val(val);
+ t->ht[10] = t->ht[11] = mt76x02_rate_power_val(val >> 8);
+
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS12);
+ t->ht[12] = t->ht[13] = mt76x02_rate_power_val(val);
+ t->ht[14] = t->ht[15] = mt76x02_rate_power_val(val >> 8);
+
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS0);
+ t->vht[0] = t->vht[1] = mt76x02_rate_power_val(val);
+ t->vht[2] = t->vht[3] = mt76x02_rate_power_val(val >> 8);
+
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS4);
+ t->vht[4] = t->vht[5] = mt76x02_rate_power_val(val);
+ t->vht[6] = t->vht[7] = mt76x02_rate_power_val(val >> 8);
+
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS8);
+ if (!is_5ghz)
+ val >>= 8;
+ t->vht[8] = t->vht[9] = mt76x02_rate_power_val(val >> 8);
+
+ memcpy(t->stbc, t->ht, sizeof(t->stbc[0]) * 8);
+ t->stbc[8] = t->vht[8];
+ t->stbc[9] = t->vht[9];
+}
+EXPORT_SYMBOL_GPL(mt76x2_get_rate_power);
+
+static void
+mt76x2_get_power_info_2g(struct mt76x02_dev *dev,
+ struct mt76x2_tx_power_info *t,
+ struct ieee80211_channel *chan,
+ int chain, int offset)
+{
+ int channel = chan->hw_value;
+ int delta_idx;
+ u8 data[6];
+ u16 val;
+
+ if (channel < 6)
+ delta_idx = 3;
+ else if (channel < 11)
+ delta_idx = 4;
+ else
+ delta_idx = 5;
+
+ mt76x02_eeprom_copy(dev, offset, data, sizeof(data));
+
+ t->chain[chain].tssi_slope = data[0];
+ t->chain[chain].tssi_offset = data[1];
+ t->chain[chain].target_power = data[2];
+ t->chain[chain].delta =
+ mt76x02_sign_extend_optional(data[delta_idx], 7);
+
+ val = mt76x02_eeprom_get(dev, MT_EE_RF_2G_TSSI_OFF_TXPOWER);
+ t->target_power = val >> 8;
+}
+
+static void
+mt76x2_get_power_info_5g(struct mt76x02_dev *dev,
+ struct mt76x2_tx_power_info *t,
+ struct ieee80211_channel *chan,
+ int chain, int offset)
+{
+ int channel = chan->hw_value;
+ enum mt76x2_cal_channel_group group;
+ int delta_idx;
+ u16 val;
+ u8 data[5];
+
+ group = mt76x2_get_cal_channel_group(channel);
+ offset += group * MT_TX_POWER_GROUP_SIZE_5G;
+
+ if (channel >= 192)
+ delta_idx = 4;
+ else if (channel >= 184)
+ delta_idx = 3;
+ else if (channel < 44)
+ delta_idx = 3;
+ else if (channel < 52)
+ delta_idx = 4;
+ else if (channel < 58)
+ delta_idx = 3;
+ else if (channel < 98)
+ delta_idx = 4;
+ else if (channel < 106)
+ delta_idx = 3;
+ else if (channel < 116)
+ delta_idx = 4;
+ else if (channel < 130)
+ delta_idx = 3;
+ else if (channel < 149)
+ delta_idx = 4;
+ else if (channel < 157)
+ delta_idx = 3;
+ else
+ delta_idx = 4;
+
+ mt76x02_eeprom_copy(dev, offset, data, sizeof(data));
+
+ t->chain[chain].tssi_slope = data[0];
+ t->chain[chain].tssi_offset = data[1];
+ t->chain[chain].target_power = data[2];
+ t->chain[chain].delta =
+ mt76x02_sign_extend_optional(data[delta_idx], 7);
+
+ val = mt76x02_eeprom_get(dev, MT_EE_RF_2G_RX_HIGH_GAIN);
+ t->target_power = val & 0xff;
+}
+
+void mt76x2_get_power_info(struct mt76x02_dev *dev,
+ struct mt76x2_tx_power_info *t,
+ struct ieee80211_channel *chan)
+{
+ u16 bw40, bw80;
+
+ memset(t, 0, sizeof(*t));
+
+ bw40 = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW40);
+ bw80 = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW80);
+
+ if (chan->band == NL80211_BAND_5GHZ) {
+ bw40 >>= 8;
+ mt76x2_get_power_info_5g(dev, t, chan, 0,
+ MT_EE_TX_POWER_0_START_5G);
+ mt76x2_get_power_info_5g(dev, t, chan, 1,
+ MT_EE_TX_POWER_1_START_5G);
+ } else {
+ mt76x2_get_power_info_2g(dev, t, chan, 0,
+ MT_EE_TX_POWER_0_START_2G);
+ mt76x2_get_power_info_2g(dev, t, chan, 1,
+ MT_EE_TX_POWER_1_START_2G);
+ }
+
+ if (mt76x2_tssi_enabled(dev) ||
+ !mt76x02_field_valid(t->target_power))
+ t->target_power = t->chain[0].target_power;
+
+ t->delta_bw40 = mt76x02_rate_power_val(bw40);
+ t->delta_bw80 = mt76x02_rate_power_val(bw80);
+}
+EXPORT_SYMBOL_GPL(mt76x2_get_power_info);
+
+int mt76x2_get_temp_comp(struct mt76x02_dev *dev, struct mt76x2_temp_comp *t)
+{
+ enum nl80211_band band = dev->mphy.chandef.chan->band;
+ u16 val, slope;
+ u8 bounds;
+
+ memset(t, 0, sizeof(*t));
+
+ if (!mt76x2_temp_tx_alc_enabled(dev))
+ return -EINVAL;
+
+ if (!mt76x02_ext_pa_enabled(dev, band))
+ return -EINVAL;
+
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G) >> 8;
+ t->temp_25_ref = val & 0x7f;
+ if (band == NL80211_BAND_5GHZ) {
+ slope = mt76x02_eeprom_get(dev, MT_EE_RF_TEMP_COMP_SLOPE_5G);
+ bounds = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G);
+ } else {
+ slope = mt76x02_eeprom_get(dev, MT_EE_RF_TEMP_COMP_SLOPE_2G);
+ bounds = mt76x02_eeprom_get(dev,
+ MT_EE_TX_POWER_DELTA_BW80) >> 8;
+ }
+
+ t->high_slope = slope & 0xff;
+ t->low_slope = slope >> 8;
+ t->lower_bound = 0 - (bounds & 0xf);
+ t->upper_bound = (bounds >> 4) & 0xf;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x2_get_temp_comp);
+
+int mt76x2_eeprom_init(struct mt76x02_dev *dev)
+{
+ int ret;
+
+ ret = mt76x2_eeprom_load(dev);
+ if (ret)
+ return ret;
+
+ mt76x02_eeprom_parse_hw_cap(dev);
+ mt76x2_eeprom_get_macaddr(dev);
+ mt76_eeprom_override(&dev->mt76);
+ dev->mt76.macaddr[0] &= ~BIT(1);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x2_eeprom_init);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h
new file mode 100644
index 000000000..3755632e6
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#ifndef __MT76x2_EEPROM_H
+#define __MT76x2_EEPROM_H
+
+#include "../mt76x02_eeprom.h"
+
+enum mt76x2_cal_channel_group {
+ MT_CH_5G_JAPAN,
+ MT_CH_5G_UNII_1,
+ MT_CH_5G_UNII_2,
+ MT_CH_5G_UNII_2E_1,
+ MT_CH_5G_UNII_2E_2,
+ MT_CH_5G_UNII_3,
+ __MT_CH_MAX
+};
+
+struct mt76x2_tx_power_info {
+ u8 target_power;
+
+ s8 delta_bw40;
+ s8 delta_bw80;
+
+ struct {
+ s8 tssi_slope;
+ s8 tssi_offset;
+ s8 target_power;
+ s8 delta;
+ } chain[MT_MAX_CHAINS];
+};
+
+struct mt76x2_temp_comp {
+ u8 temp_25_ref;
+ int lower_bound; /* J */
+ int upper_bound; /* J */
+ unsigned int high_slope; /* J / dB */
+ unsigned int low_slope; /* J / dB */
+};
+
+void mt76x2_get_rate_power(struct mt76x02_dev *dev, struct mt76_rate_power *t,
+ struct ieee80211_channel *chan);
+void mt76x2_get_power_info(struct mt76x02_dev *dev,
+ struct mt76x2_tx_power_info *t,
+ struct ieee80211_channel *chan);
+int mt76x2_get_temp_comp(struct mt76x02_dev *dev, struct mt76x2_temp_comp *t);
+void mt76x2_read_rx_gain(struct mt76x02_dev *dev);
+
+static inline bool
+mt76x2_has_ext_lna(struct mt76x02_dev *dev)
+{
+ u32 val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
+
+ if (dev->mphy.chandef.chan->band == NL80211_BAND_2GHZ)
+ return val & MT_EE_NIC_CONF_1_LNA_EXT_2G;
+ else
+ return val & MT_EE_NIC_CONF_1_LNA_EXT_5G;
+}
+
+static inline bool
+mt76x2_temp_tx_alc_enabled(struct mt76x02_dev *dev)
+{
+ u16 val;
+
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G);
+ if (!(val & BIT(15)))
+ return false;
+
+ return mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) &
+ MT_EE_NIC_CONF_1_TEMP_TX_ALC;
+}
+
+static inline bool
+mt76x2_tssi_enabled(struct mt76x02_dev *dev)
+{
+ return !mt76x2_temp_tx_alc_enabled(dev) &&
+ (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) &
+ MT_EE_NIC_CONF_1_TX_ALC_EN);
+}
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
new file mode 100644
index 000000000..a92a479ae
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include "mt76x2.h"
+#include "eeprom.h"
+#include "../mt76x02_phy.h"
+
+static void
+mt76x2_set_wlan_state(struct mt76x02_dev *dev, bool enable)
+{
+ u32 val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
+
+ if (enable)
+ val |= (MT_WLAN_FUN_CTRL_WLAN_EN |
+ MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
+ else
+ val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN |
+ MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
+
+ mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+}
+
+void mt76x2_reset_wlan(struct mt76x02_dev *dev, bool enable)
+{
+ u32 val;
+
+ if (!enable)
+ goto out;
+
+ val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
+
+ val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL;
+
+ if (val & MT_WLAN_FUN_CTRL_WLAN_EN) {
+ val |= MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
+ mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+
+ val &= ~MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
+ }
+
+ mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+
+out:
+ mt76x2_set_wlan_state(dev, enable);
+}
+EXPORT_SYMBOL_GPL(mt76x2_reset_wlan);
+
+void mt76_write_mac_initvals(struct mt76x02_dev *dev)
+{
+#define DEFAULT_PROT_CFG_CCK \
+ (FIELD_PREP(MT_PROT_CFG_RATE, 0x3) | \
+ FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
+ FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f) | \
+ MT_PROT_CFG_RTS_THRESH)
+
+#define DEFAULT_PROT_CFG_OFDM \
+ (FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) | \
+ FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
+ FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f) | \
+ MT_PROT_CFG_RTS_THRESH)
+
+#define DEFAULT_PROT_CFG_20 \
+ (FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) | \
+ FIELD_PREP(MT_PROT_CFG_CTRL, 1) | \
+ FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
+ FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x17))
+
+#define DEFAULT_PROT_CFG_40 \
+ (FIELD_PREP(MT_PROT_CFG_RATE, 0x2084) | \
+ FIELD_PREP(MT_PROT_CFG_CTRL, 1) | \
+ FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
+ FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f))
+
+ static const struct mt76_reg_pair vals[] = {
+ /* Copied from MediaTek reference source */
+ { MT_PBF_SYS_CTRL, 0x00080c00 },
+ { MT_PBF_CFG, 0x1efebcff },
+ { MT_FCE_PSE_CTRL, 0x00000001 },
+ { MT_MAC_SYS_CTRL, 0x00000000 },
+ { MT_MAX_LEN_CFG, 0x003e3f00 },
+ { MT_AMPDU_MAX_LEN_20M1S, 0xaaa99887 },
+ { MT_AMPDU_MAX_LEN_20M2S, 0x000000aa },
+ { MT_XIFS_TIME_CFG, 0x33a40d0a },
+ { MT_BKOFF_SLOT_CFG, 0x00000209 },
+ { MT_TBTT_SYNC_CFG, 0x00422010 },
+ { MT_PWR_PIN_CFG, 0x00000000 },
+ { 0x1238, 0x001700c8 },
+ { MT_TX_SW_CFG0, 0x00101001 },
+ { MT_TX_SW_CFG1, 0x00010000 },
+ { MT_TX_SW_CFG2, 0x00000000 },
+ { MT_TXOP_CTRL_CFG, 0x0400583f },
+ { MT_TX_RTS_CFG, 0x00ffff20 },
+ { MT_TX_TIMEOUT_CFG, 0x000a2290 },
+ { MT_TX_RETRY_CFG, 0x47f01f0f },
+ { MT_EXP_ACK_TIME, 0x002c00dc },
+ { MT_TX_PROT_CFG6, 0xe3f42004 },
+ { MT_TX_PROT_CFG7, 0xe3f42084 },
+ { MT_TX_PROT_CFG8, 0xe3f42104 },
+ { MT_PIFS_TX_CFG, 0x00060fff },
+ { MT_RX_FILTR_CFG, 0x00015f97 },
+ { MT_LEGACY_BASIC_RATE, 0x0000017f },
+ { MT_HT_BASIC_RATE, 0x00004003 },
+ { MT_PN_PAD_MODE, 0x00000003 },
+ { MT_TXOP_HLDR_ET, 0x00000002 },
+ { 0xa44, 0x00000000 },
+ { MT_HEADER_TRANS_CTRL_REG, 0x00000000 },
+ { MT_TSO_CTRL, 0x00000000 },
+ { MT_AUX_CLK_CFG, 0x00000000 },
+ { MT_DACCLK_EN_DLY_CFG, 0x00000000 },
+ { MT_TX_ALC_CFG_4, 0x00000000 },
+ { MT_TX_ALC_VGA3, 0x00000000 },
+ { MT_TX_PWR_CFG_0, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_1, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_2, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_3, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_4, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_7, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_8, 0x0000003a },
+ { MT_TX_PWR_CFG_9, 0x0000003a },
+ { MT_EFUSE_CTRL, 0x0000d000 },
+ { MT_PAUSE_ENABLE_CONTROL1, 0x0000000a },
+ { MT_FCE_WLAN_FLOW_CONTROL1, 0x60401c18 },
+ { MT_WPDMA_DELAY_INT_CFG, 0x94ff0000 },
+ { MT_TX_SW_CFG3, 0x00000004 },
+ { MT_HT_FBK_TO_LEGACY, 0x00001818 },
+ { MT_VHT_HT_FBK_CFG1, 0xedcba980 },
+ { MT_PROT_AUTO_TX_CFG, 0x00830083 },
+ { MT_HT_CTRL_CFG, 0x000001ff },
+ { MT_TX_LINK_CFG, 0x00001020 },
+ };
+ struct mt76_reg_pair prot_vals[] = {
+ { MT_CCK_PROT_CFG, DEFAULT_PROT_CFG_CCK },
+ { MT_OFDM_PROT_CFG, DEFAULT_PROT_CFG_OFDM },
+ { MT_MM20_PROT_CFG, DEFAULT_PROT_CFG_20 },
+ { MT_MM40_PROT_CFG, DEFAULT_PROT_CFG_40 },
+ { MT_GF20_PROT_CFG, DEFAULT_PROT_CFG_20 },
+ { MT_GF40_PROT_CFG, DEFAULT_PROT_CFG_40 },
+ };
+
+ mt76_wr_rp(dev, 0, vals, ARRAY_SIZE(vals));
+ mt76_wr_rp(dev, 0, prot_vals, ARRAY_SIZE(prot_vals));
+}
+EXPORT_SYMBOL_GPL(mt76_write_mac_initvals);
+
+void mt76x2_init_txpower(struct mt76x02_dev *dev,
+ struct ieee80211_supported_band *sband)
+{
+ struct ieee80211_channel *chan;
+ struct mt76x2_tx_power_info txp;
+ struct mt76_rate_power t = {};
+ int i;
+
+ for (i = 0; i < sband->n_channels; i++) {
+ chan = &sband->channels[i];
+
+ mt76x2_get_power_info(dev, &txp, chan);
+ mt76x2_get_rate_power(dev, &t, chan);
+
+ chan->orig_mpwr = mt76x02_get_max_rate_power(&t) +
+ txp.target_power;
+ chan->orig_mpwr = DIV_ROUND_UP(chan->orig_mpwr, 2);
+
+ /* convert to combined output power on 2x2 devices */
+ chan->orig_mpwr += 3;
+ chan->max_power = min_t(int, chan->max_reg_power,
+ chan->orig_mpwr);
+ }
+}
+EXPORT_SYMBOL_GPL(mt76x2_init_txpower);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.c
new file mode 100644
index 000000000..e08740ca3
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include "mt76x2.h"
+
+void mt76x2_mac_stop(struct mt76x02_dev *dev, bool force)
+{
+ bool stopped = false;
+ u32 rts_cfg;
+ int i;
+
+ mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
+ mt76_clear(dev, MT_TXOP_HLDR_ET, MT_TXOP_HLDR_TX40M_BLK_EN);
+
+ mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
+
+ rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
+ mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT);
+
+ /* Wait for MAC to become idle */
+ for (i = 0; i < 300; i++) {
+ if ((mt76_rr(dev, MT_MAC_STATUS) &
+ (MT_MAC_STATUS_RX | MT_MAC_STATUS_TX)) ||
+ mt76_rr(dev, MT_BBP(IBI, 12))) {
+ udelay(1);
+ continue;
+ }
+
+ stopped = true;
+ break;
+ }
+
+ if (force && !stopped) {
+ mt76_set(dev, MT_BBP(CORE, 4), BIT(1));
+ mt76_clear(dev, MT_BBP(CORE, 4), BIT(1));
+
+ mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
+ mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
+ }
+
+ mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_stop);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h
new file mode 100644
index 000000000..d5c3d26b9
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#ifndef __MT76x2_MAC_H
+#define __MT76x2_MAC_H
+
+#include "mt76x2.h"
+
+struct mt76x02_dev;
+struct mt76x2_sta;
+struct mt76x02_vif;
+
+void mt76x2_mac_stop(struct mt76x02_dev *dev, bool force);
+
+static inline void mt76x2_mac_resume(struct mt76x02_dev *dev)
+{
+ mt76_wr(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_TX |
+ MT_MAC_SYS_CTRL_ENABLE_RX);
+}
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c
new file mode 100644
index 000000000..9635c04ce
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+
+#include "mt76x2.h"
+#include "mcu.h"
+#include "eeprom.h"
+
+int mt76x2_mcu_set_channel(struct mt76x02_dev *dev, u8 channel, u8 bw,
+ u8 bw_index, bool scan)
+{
+ struct {
+ u8 idx;
+ u8 scan;
+ u8 bw;
+ u8 _pad0;
+
+ __le16 chainmask;
+ u8 ext_chan;
+ u8 _pad1;
+
+ } __packed __aligned(4) msg = {
+ .idx = channel,
+ .scan = scan,
+ .bw = bw,
+ .chainmask = cpu_to_le16(dev->chainmask),
+ };
+
+ /* first set the channel without the extension channel info */
+ mt76_mcu_send_msg(dev, CMD_SWITCH_CHANNEL_OP, &msg, sizeof(msg), true);
+
+ usleep_range(5000, 10000);
+
+ msg.ext_chan = 0xe0 + bw_index;
+ return mt76_mcu_send_msg(dev, CMD_SWITCH_CHANNEL_OP, &msg, sizeof(msg),
+ true);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mcu_set_channel);
+
+int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level,
+ u8 channel)
+{
+ struct {
+ u8 cr_mode;
+ u8 temp;
+ u8 ch;
+ u8 _pad0;
+
+ __le32 cfg;
+ } __packed __aligned(4) msg = {
+ .cr_mode = type,
+ .temp = temp_level,
+ .ch = channel,
+ };
+ u32 val;
+
+ val = BIT(31);
+ val |= (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff;
+ val |= (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) << 8) & 0xff00;
+ msg.cfg = cpu_to_le32(val);
+
+	/* load the requested CR (RF/BBP) values for this channel and temperature level */
+ return mt76_mcu_send_msg(dev, CMD_LOAD_CR, &msg, sizeof(msg), true);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mcu_load_cr);
+
+int mt76x2_mcu_init_gain(struct mt76x02_dev *dev, u8 channel, u32 gain,
+ bool force)
+{
+ struct {
+ __le32 channel;
+ __le32 gain_val;
+ } __packed __aligned(4) msg = {
+ .channel = cpu_to_le32(channel),
+ .gain_val = cpu_to_le32(gain),
+ };
+
+ if (force)
+ msg.channel |= cpu_to_le32(BIT(31));
+
+ return mt76_mcu_send_msg(dev, CMD_INIT_GAIN_OP, &msg, sizeof(msg),
+ true);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mcu_init_gain);
+
+int mt76x2_mcu_tssi_comp(struct mt76x02_dev *dev,
+ struct mt76x2_tssi_comp *tssi_data)
+{
+ struct {
+ __le32 id;
+ struct mt76x2_tssi_comp data;
+ } __packed __aligned(4) msg = {
+ .id = cpu_to_le32(MCU_CAL_TSSI_COMP),
+ .data = *tssi_data,
+ };
+
+ return mt76_mcu_send_msg(dev, CMD_CALIBRATION_OP, &msg, sizeof(msg),
+ true);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mcu_tssi_comp);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h
new file mode 100644
index 000000000..41fd66563
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#ifndef __MT76x2_MCU_H
+#define __MT76x2_MCU_H
+
+#include "../mt76x02_mcu.h"
+
+/* Register definitions */
+#define MT_MCU_CPU_CTL 0x0704
+#define MT_MCU_CLOCK_CTL 0x0708
+#define MT_MCU_PCIE_REMAP_BASE1 0x0740
+#define MT_MCU_PCIE_REMAP_BASE2 0x0744
+#define MT_MCU_PCIE_REMAP_BASE3 0x0748
+
+#define MT_MCU_ROM_PATCH_OFFSET 0x80000
+#define MT_MCU_ROM_PATCH_ADDR 0x90000
+
+#define MT_MCU_ILM_OFFSET 0x80000
+
+#define MT_MCU_DLM_OFFSET 0x100000
+#define MT_MCU_DLM_ADDR 0x90000
+#define MT_MCU_DLM_ADDR_E3 0x90800
+
+enum mcu_calibration {
+ MCU_CAL_R = 1,
+ MCU_CAL_TEMP_SENSOR,
+ MCU_CAL_RXDCOC,
+ MCU_CAL_RC,
+ MCU_CAL_SX_LOGEN,
+ MCU_CAL_LC,
+ MCU_CAL_TX_LOFT,
+ MCU_CAL_TXIQ,
+ MCU_CAL_TSSI,
+ MCU_CAL_TSSI_COMP,
+ MCU_CAL_DPD,
+ MCU_CAL_RXIQC_FI,
+ MCU_CAL_RXIQC_FD,
+ MCU_CAL_PWRON,
+ MCU_CAL_TX_SHAPING,
+};
+
+enum mt76x2_mcu_cr_mode {
+ MT_RF_CR,
+ MT_BBP_CR,
+ MT_RF_BBP_CR,
+ MT_HL_TEMP_CR_UPDATE,
+};
+
+struct mt76x2_tssi_comp {
+ u8 pa_mode;
+ u8 cal_mode;
+ u16 pad;
+
+ u8 slope0;
+ u8 slope1;
+ u8 offset0;
+ u8 offset1;
+} __packed __aligned(4);
+
+int mt76x2_mcu_tssi_comp(struct mt76x02_dev *dev,
+ struct mt76x2_tssi_comp *tssi_data);
+int mt76x2_mcu_init_gain(struct mt76x02_dev *dev, u8 channel, u32 gain,
+ bool force);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
new file mode 100644
index 000000000..d01f47c83
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#ifndef __MT76x2_H
+#define __MT76x2_H
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/bitops.h>
+
+#define MT7662_FIRMWARE "mt7662.bin"
+#define MT7662_ROM_PATCH "mt7662_rom_patch.bin"
+#define MT7662_EEPROM_SIZE 512
+
+#include "../mt76x02.h"
+#include "mac.h"
+
+static inline bool is_mt7612(struct mt76x02_dev *dev)
+{
+ return mt76_chip(&dev->mt76) == 0x7612;
+}
+
+static inline bool mt76x2_channel_silent(struct mt76x02_dev *dev)
+{
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
+
+ return ((chan->flags & IEEE80211_CHAN_RADAR) &&
+ chan->dfs_state != NL80211_DFS_AVAILABLE);
+}
+
+extern const struct ieee80211_ops mt76x2_ops;
+
+int mt76x2_register_device(struct mt76x02_dev *dev);
+int mt76x2_resume_device(struct mt76x02_dev *dev);
+
+void mt76x2_phy_power_on(struct mt76x02_dev *dev);
+void mt76x2_stop_hardware(struct mt76x02_dev *dev);
+int mt76x2_eeprom_init(struct mt76x02_dev *dev);
+int mt76x2_apply_calibration_data(struct mt76x02_dev *dev, int channel);
+
+void mt76x2_phy_set_antenna(struct mt76x02_dev *dev);
+int mt76x2_phy_start(struct mt76x02_dev *dev);
+int mt76x2_phy_set_channel(struct mt76x02_dev *dev,
+ struct cfg80211_chan_def *chandef);
+void mt76x2_phy_calibrate(struct work_struct *work);
+void mt76x2_phy_set_txpower(struct mt76x02_dev *dev);
+
+int mt76x2_mcu_init(struct mt76x02_dev *dev);
+int mt76x2_mcu_set_channel(struct mt76x02_dev *dev, u8 channel, u8 bw,
+ u8 bw_index, bool scan);
+int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level,
+ u8 channel);
+
+void mt76x2_cleanup(struct mt76x02_dev *dev);
+
+int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard);
+void mt76x2_reset_wlan(struct mt76x02_dev *dev, bool enable);
+void mt76x2_init_txpower(struct mt76x02_dev *dev,
+ struct ieee80211_supported_band *sband);
+void mt76_write_mac_initvals(struct mt76x02_dev *dev);
+
+void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev);
+void mt76x2_phy_set_txpower_regs(struct mt76x02_dev *dev,
+ enum nl80211_band band);
+void mt76x2_configure_tx_delay(struct mt76x02_dev *dev,
+ enum nl80211_band band, u8 bw);
+void mt76x2_apply_gain_adj(struct mt76x02_dev *dev);
+void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h
new file mode 100644
index 000000000..f9d37c6cf
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#ifndef __MT76x2U_H
+#define __MT76x2U_H
+
+#include <linux/device.h>
+
+#include "mt76x2.h"
+#include "mcu.h"
+
+#define MT7612U_EEPROM_SIZE 512
+
+#define MT_USB_AGGR_SIZE_LIMIT 21 /* 1024B unit */
+#define MT_USB_AGGR_TIMEOUT 0x80 /* 33ns unit */
+
+extern const struct ieee80211_ops mt76x2u_ops;
+
+int mt76x2u_register_device(struct mt76x02_dev *dev);
+int mt76x2u_init_hardware(struct mt76x02_dev *dev);
+void mt76x2u_cleanup(struct mt76x02_dev *dev);
+void mt76x2u_stop_hw(struct mt76x02_dev *dev);
+
+int mt76x2u_mac_reset(struct mt76x02_dev *dev);
+int mt76x2u_mac_stop(struct mt76x02_dev *dev);
+
+int mt76x2u_phy_set_channel(struct mt76x02_dev *dev,
+ struct cfg80211_chan_def *chandef);
+void mt76x2u_phy_calibrate(struct work_struct *work);
+
+void mt76x2u_mcu_complete_urb(struct urb *urb);
+int mt76x2u_mcu_init(struct mt76x02_dev *dev);
+int mt76x2u_mcu_fw_init(struct mt76x02_dev *dev);
+
+int mt76x2u_alloc_queues(struct mt76x02_dev *dev);
+void mt76x2u_queues_deinit(struct mt76x02_dev *dev);
+void mt76x2u_stop_queues(struct mt76x02_dev *dev);
+int mt76x2u_skb_dma_info(struct sk_buff *skb, enum dma_msg_port port,
+ u32 flags);
+
+#endif /* __MT76x2U_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
new file mode 100644
index 000000000..e57e49a72
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "mt76x2.h"
+
+static const struct pci_device_id mt76x2e_device_table[] = {
+ { PCI_DEVICE(0x14c3, 0x7662) },
+ { PCI_DEVICE(0x14c3, 0x7612) },
+ { PCI_DEVICE(0x14c3, 0x7602) },
+ { },
+};
+
+static int
+mt76x2e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ static const struct mt76_driver_ops drv_ops = {
+ .txwi_size = sizeof(struct mt76x02_txwi),
+ .drv_flags = MT_DRV_TX_ALIGNED4_SKBS |
+ MT_DRV_SW_RX_AIRTIME,
+ .survey_flags = SURVEY_INFO_TIME_TX,
+ .update_survey = mt76x02_update_channel,
+ .tx_prepare_skb = mt76x02_tx_prepare_skb,
+ .tx_complete_skb = mt76x02_tx_complete_skb,
+ .rx_skb = mt76x02_queue_rx_skb,
+ .rx_poll_complete = mt76x02_rx_poll_complete,
+ .sta_ps = mt76x02_sta_ps,
+ .sta_add = mt76x02_sta_add,
+ .sta_remove = mt76x02_sta_remove,
+ };
+ struct mt76x02_dev *dev;
+ struct mt76_dev *mdev;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt76x2_ops,
+ &drv_ops);
+ if (!mdev)
+ return -ENOMEM;
+
+ dev = container_of(mdev, struct mt76x02_dev, mt76);
+ mt76_mmio_init(mdev, pcim_iomap_table(pdev)[0]);
+ mt76x2_reset_wlan(dev, false);
+
+ mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
+ dev_info(mdev->dev, "ASIC revision: %08x\n", mdev->rev);
+
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+
+ ret = devm_request_irq(mdev->dev, pdev->irq, mt76x02_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+ if (ret)
+ goto error;
+
+ ret = mt76x2_register_device(dev);
+ if (ret)
+ goto error;
+
+ /* Fix up ASPM configuration */
+
+ /* RG_SSUSB_G1_CDR_BIR_LTR = 0x9 */
+ mt76_rmw_field(dev, 0x15a10, 0x1f << 16, 0x9);
+
+ /* RG_SSUSB_G1_CDR_BIC_LTR = 0xf */
+ mt76_rmw_field(dev, 0x15a0c, 0xfU << 28, 0xf);
+
+ /* RG_SSUSB_CDR_BR_PE1D = 0x3 */
+ mt76_rmw_field(dev, 0x15c58, 0x3 << 6, 0x3);
+
+ mt76_pci_disable_aspm(pdev);
+
+ return 0;
+
+error:
+ mt76_free_device(&dev->mt76);
+
+ return ret;
+}
+
+static void
+mt76x2e_remove(struct pci_dev *pdev)
+{
+ struct mt76_dev *mdev = pci_get_drvdata(pdev);
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+
+ mt76_unregister_device(mdev);
+ mt76x2_cleanup(dev);
+ mt76_free_device(mdev);
+}
+
+static int __maybe_unused
+mt76x2e_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct mt76_dev *mdev = pci_get_drvdata(pdev);
+ int i, err;
+
+ napi_disable(&mdev->tx_napi);
+ tasklet_kill(&mdev->pre_tbtt_tasklet);
+ mt76_worker_disable(&mdev->tx_worker);
+
+ mt76_for_each_q_rx(mdev, i)
+ napi_disable(&mdev->napi[i]);
+
+ pci_enable_wake(pdev, pci_choose_state(pdev, state), true);
+ pci_save_state(pdev);
+ err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ if (err)
+ goto restore;
+
+ return 0;
+
+restore:
+ mt76_for_each_q_rx(mdev, i)
+ napi_enable(&mdev->napi[i]);
+ napi_enable(&mdev->tx_napi);
+
+ return err;
+}
+
+static int __maybe_unused
+mt76x2e_resume(struct pci_dev *pdev)
+{
+ struct mt76_dev *mdev = pci_get_drvdata(pdev);
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+ int i, err;
+
+ err = pci_set_power_state(pdev, PCI_D0);
+ if (err)
+ return err;
+
+ pci_restore_state(pdev);
+
+ mt76_worker_enable(&mdev->tx_worker);
+ mt76_for_each_q_rx(mdev, i) {
+ napi_enable(&mdev->napi[i]);
+ napi_schedule(&mdev->napi[i]);
+ }
+ napi_enable(&mdev->tx_napi);
+ napi_schedule(&mdev->tx_napi);
+
+ return mt76x2_resume_device(dev);
+}
+
+MODULE_DEVICE_TABLE(pci, mt76x2e_device_table);
+MODULE_FIRMWARE(MT7662_FIRMWARE);
+MODULE_FIRMWARE(MT7662_ROM_PATCH);
+MODULE_LICENSE("Dual BSD/GPL");
+
+static struct pci_driver mt76pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mt76x2e_device_table,
+ .probe = mt76x2e_probe,
+ .remove = mt76x2e_remove,
+#ifdef CONFIG_PM
+ .suspend = mt76x2e_suspend,
+ .resume = mt76x2e_resume,
+#endif /* CONFIG_PM */
+};
+
+module_pci_driver(mt76pci_driver);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
new file mode 100644
index 000000000..48a3ebc98
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/delay.h>
+#include "mt76x2.h"
+#include "eeprom.h"
+#include "mcu.h"
+#include "../mt76x02_mac.h"
+
+static void
+mt76x2_mac_pbf_init(struct mt76x02_dev *dev)
+{
+ u32 val;
+
+ val = MT_PBF_SYS_CTRL_MCU_RESET |
+ MT_PBF_SYS_CTRL_DMA_RESET |
+ MT_PBF_SYS_CTRL_MAC_RESET |
+ MT_PBF_SYS_CTRL_PBF_RESET |
+ MT_PBF_SYS_CTRL_ASY_RESET;
+
+ mt76_set(dev, MT_PBF_SYS_CTRL, val);
+ mt76_clear(dev, MT_PBF_SYS_CTRL, val);
+
+ mt76_wr(dev, MT_PBF_TX_MAX_PCNT, 0xefef3f1f);
+ mt76_wr(dev, MT_PBF_RX_MAX_PCNT, 0xfebf);
+}
+
+static void
+mt76x2_fixup_xtal(struct mt76x02_dev *dev)
+{
+ u16 eep_val;
+ s8 offset = 0;
+
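+	/* MT_EE_XTAL_TRIM_2: the low byte holds a sign-magnitude frequency offset
+	 * (0xff means none), the high byte the crystal trim value; fall back to
+	 * MT_EE_XTAL_TRIM_1 and finally to the default 0x14 when unset.
+	 */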
+ eep_val = mt76x02_eeprom_get(dev, MT_EE_XTAL_TRIM_2);
+
+ offset = eep_val & 0x7f;
+ if ((eep_val & 0xff) == 0xff)
+ offset = 0;
+ else if (eep_val & 0x80)
+ offset = 0 - offset;
+
+ eep_val >>= 8;
+ if (eep_val == 0x00 || eep_val == 0xff) {
+ eep_val = mt76x02_eeprom_get(dev, MT_EE_XTAL_TRIM_1);
+ eep_val &= 0xff;
+
+ if (eep_val == 0x00 || eep_val == 0xff)
+ eep_val = 0x14;
+ }
+
+ eep_val &= 0x7f;
+ mt76_rmw_field(dev, MT_XO_CTRL5, MT_XO_CTRL5_C2_VAL, eep_val + offset);
+ mt76_set(dev, MT_XO_CTRL6, MT_XO_CTRL6_C2_CTRL);
+
+ eep_val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_2);
+ switch (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, eep_val)) {
+ case 0:
+ mt76_wr(dev, MT_XO_CTRL7, 0x5c1fee80);
+ break;
+ case 1:
+ mt76_wr(dev, MT_XO_CTRL7, 0x5c1feed0);
+ break;
+ default:
+ break;
+ }
+}
+
+int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
+{
+ const u8 *macaddr = dev->mt76.macaddr;
+ u32 val;
+ int i, k;
+
+ if (!mt76x02_wait_for_mac(&dev->mt76))
+ return -ETIMEDOUT;
+
+ val = mt76_rr(dev, MT_WPDMA_GLO_CFG);
+
+ val &= ~(MT_WPDMA_GLO_CFG_TX_DMA_EN |
+ MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_EN |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_DMA_BURST_SIZE);
+ val |= FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3);
+
+ mt76_wr(dev, MT_WPDMA_GLO_CFG, val);
+
+ mt76x2_mac_pbf_init(dev);
+ mt76_write_mac_initvals(dev);
+ mt76x2_fixup_xtal(dev);
+
+ mt76_clear(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_RESET_CSR |
+ MT_MAC_SYS_CTRL_RESET_BBP);
+
+ if (is_mt7612(dev))
+ mt76_clear(dev, MT_COEXCFG0, MT_COEXCFG0_COEX_EN);
+
+ mt76_set(dev, MT_EXT_CCA_CFG, 0x0000f000);
+ mt76_clear(dev, MT_TX_ALC_CFG_4, BIT(31));
+
+ mt76_wr(dev, MT_RF_BYPASS_0, 0x06000000);
+ mt76_wr(dev, MT_RF_SETTING_0, 0x08800000);
+ usleep_range(5000, 10000);
+ mt76_wr(dev, MT_RF_BYPASS_0, 0x00000000);
+
+ mt76_wr(dev, MT_MCU_CLOCK_CTL, 0x1401);
+ mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);
+
+ mt76x02_mac_setaddr(dev, macaddr);
+ mt76x02e_init_beacon_config(dev);
+ if (!hard)
+ return 0;
+
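+	/* a hard reset also clears the WCID and shared key tables and drains
+	 * the TX status FIFO
+	 */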
+ for (i = 0; i < 256 / 32; i++)
+ mt76_wr(dev, MT_WCID_DROP_BASE + i * 4, 0);
+
+ for (i = 0; i < 256; i++) {
+ mt76x02_mac_wcid_setup(dev, i, 0, NULL);
+ mt76_wr(dev, MT_WCID_TX_RATE(i), 0);
+ mt76_wr(dev, MT_WCID_TX_RATE(i) + 4, 0);
+ }
+
+ for (i = 0; i < MT_MAX_VIFS; i++)
+ mt76x02_mac_wcid_setup(dev, MT_VIF_WCID(i), i, NULL);
+
+ for (i = 0; i < 16; i++)
+ for (k = 0; k < 4; k++)
+ mt76x02_mac_shared_key_setup(dev, i, k, NULL);
+
+ for (i = 0; i < 16; i++)
+ mt76_rr(dev, MT_TX_STAT_FIFO);
+
+ mt76x02_set_tx_ackto(dev);
+
+ return 0;
+}
+
+static void
+mt76x2_power_on_rf_patch(struct mt76x02_dev *dev)
+{
+ mt76_set(dev, 0x10130, BIT(0) | BIT(16));
+ udelay(1);
+
+ mt76_clear(dev, 0x1001c, 0xff);
+ mt76_set(dev, 0x1001c, 0x30);
+
+ mt76_wr(dev, 0x10014, 0x484f);
+ udelay(1);
+
+ mt76_set(dev, 0x10130, BIT(17));
+ udelay(125);
+
+ mt76_clear(dev, 0x10130, BIT(16));
+ udelay(50);
+
+ mt76_set(dev, 0x1014c, BIT(19) | BIT(20));
+}
+
+static void
+mt76x2_power_on_rf(struct mt76x02_dev *dev, int unit)
+{
+ int shift = unit ? 8 : 0;
+
+ /* Enable RF BG */
+ mt76_set(dev, 0x10130, BIT(0) << shift);
+ udelay(10);
+
+ /* Enable RFDIG LDO/AFE/ABB/ADDA */
+ mt76_set(dev, 0x10130, (BIT(1) | BIT(3) | BIT(4) | BIT(5)) << shift);
+ udelay(10);
+
+ /* Switch RFDIG power to internal LDO */
+ mt76_clear(dev, 0x10130, BIT(2) << shift);
+ udelay(10);
+
+ mt76x2_power_on_rf_patch(dev);
+
+ mt76_set(dev, 0x530, 0xf);
+}
+
+static void
+mt76x2_power_on(struct mt76x02_dev *dev)
+{
+ u32 val;
+
+ /* Turn on WL MTCMOS */
+ mt76_set(dev, MT_WLAN_MTC_CTRL, MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP);
+
+ val = MT_WLAN_MTC_CTRL_STATE_UP |
+ MT_WLAN_MTC_CTRL_PWR_ACK |
+ MT_WLAN_MTC_CTRL_PWR_ACK_S;
+
+ mt76_poll(dev, MT_WLAN_MTC_CTRL, val, val, 1000);
+
+ mt76_clear(dev, MT_WLAN_MTC_CTRL, 0x7f << 16);
+ udelay(10);
+
+ mt76_clear(dev, MT_WLAN_MTC_CTRL, 0xf << 24);
+ udelay(10);
+
+ mt76_set(dev, MT_WLAN_MTC_CTRL, 0xf << 24);
+ mt76_clear(dev, MT_WLAN_MTC_CTRL, 0xfff);
+
+	/* Release AD/DA power down */
+ mt76_clear(dev, 0x11204, BIT(3));
+
+ /* WLAN function enable */
+ mt76_set(dev, 0x10080, BIT(0));
+
+ /* Release BBP software reset */
+ mt76_clear(dev, 0x10064, BIT(18));
+
+ mt76x2_power_on_rf(dev, 0);
+ mt76x2_power_on_rf(dev, 1);
+}
+
+int mt76x2_resume_device(struct mt76x02_dev *dev)
+{
+ int err;
+
+ mt76x02_dma_disable(dev);
+ mt76x2_reset_wlan(dev, true);
+ mt76x2_power_on(dev);
+
+ err = mt76x2_mac_reset(dev, true);
+ if (err)
+ return err;
+
+ mt76x02_mac_start(dev);
+
+ return mt76x2_mcu_init(dev);
+}
+
+static int mt76x2_init_hardware(struct mt76x02_dev *dev)
+{
+ int ret;
+
+ mt76x02_dma_disable(dev);
+ mt76x2_reset_wlan(dev, true);
+ mt76x2_power_on(dev);
+
+ ret = mt76x2_eeprom_init(dev);
+ if (ret)
+ return ret;
+
+ ret = mt76x2_mac_reset(dev, true);
+ if (ret)
+ return ret;
+
+ dev->mt76.rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);
+
+ ret = mt76x02_dma_init(dev);
+ if (ret)
+ return ret;
+
+ set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
+ mt76x02_mac_start(dev);
+
+ ret = mt76x2_mcu_init(dev);
+ if (ret)
+ return ret;
+
+ mt76x2_mac_stop(dev, false);
+
+ return 0;
+}
+
+void mt76x2_stop_hardware(struct mt76x02_dev *dev)
+{
+ cancel_delayed_work_sync(&dev->cal_work);
+ cancel_delayed_work_sync(&dev->mt76.mac_work);
+ cancel_delayed_work_sync(&dev->wdt_work);
+ clear_bit(MT76_RESTART, &dev->mphy.state);
+ mt76x02_mcu_set_radio_state(dev, false);
+ mt76x2_mac_stop(dev, false);
+}
+
+void mt76x2_cleanup(struct mt76x02_dev *dev)
+{
+ tasklet_disable(&dev->dfs_pd.dfs_tasklet);
+ tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
+ mt76x2_stop_hardware(dev);
+ mt76_dma_cleanup(&dev->mt76);
+ mt76x02_mcu_cleanup(dev);
+}
+
+int mt76x2_register_device(struct mt76x02_dev *dev)
+{
+ int ret;
+
+ INIT_DELAYED_WORK(&dev->cal_work, mt76x2_phy_calibrate);
+
+ mt76x02_init_device(dev);
+
+ ret = mt76x2_init_hardware(dev);
+ if (ret)
+ return ret;
+
+ mt76x02_config_mac_addr_list(dev);
+
+ ret = mt76_register_device(&dev->mt76, true, mt76x02_rates,
+ ARRAY_SIZE(mt76x02_rates));
+ if (ret)
+ goto fail;
+
+ mt76x02_init_debugfs(dev);
+ mt76x2_init_txpower(dev, &dev->mphy.sband_2g.sband);
+ mt76x2_init_txpower(dev, &dev->mphy.sband_5g.sband);
+
+ return 0;
+
+fail:
+ mt76x2_stop_hardware(dev);
+ return ret;
+}
+
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
new file mode 100644
index 000000000..98f4cf398
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#include "mt76x2.h"
+#include "../mt76x02_mac.h"
+
+static int
+mt76x2_start(struct ieee80211_hw *hw)
+{
+ struct mt76x02_dev *dev = hw->priv;
+
+ mt76x02_mac_start(dev);
+ mt76x2_phy_start(dev);
+
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
+ MT_MAC_WORK_INTERVAL);
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->wdt_work,
+ MT_WATCHDOG_TIME);
+
+ set_bit(MT76_STATE_RUNNING, &dev->mphy.state);
+ return 0;
+}
+
+static void
+mt76x2_stop(struct ieee80211_hw *hw)
+{
+ struct mt76x02_dev *dev = hw->priv;
+
+ clear_bit(MT76_STATE_RUNNING, &dev->mphy.state);
+ mt76x2_stop_hardware(dev);
+}
+
+static void
+mt76x2_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
+{
+ cancel_delayed_work_sync(&dev->cal_work);
+ tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
+ tasklet_disable(&dev->dfs_pd.dfs_tasklet);
+
+ mutex_lock(&dev->mt76.mutex);
+ set_bit(MT76_RESET, &dev->mphy.state);
+
+ mt76_set_channel(&dev->mphy);
+
+ mt76x2_mac_stop(dev, true);
+ mt76x2_phy_set_channel(dev, chandef);
+
+ mt76x02_mac_cc_reset(dev);
+ mt76x02_dfs_init_params(dev);
+
+ mt76x2_mac_resume(dev);
+
+ clear_bit(MT76_RESET, &dev->mphy.state);
+ mutex_unlock(&dev->mt76.mutex);
+
+ tasklet_enable(&dev->dfs_pd.dfs_tasklet);
+ tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
+
+ mt76_txq_schedule_all(&dev->mphy);
+}
+
+static int
+mt76x2_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct mt76x02_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
+ dev->mt76.rxfilter |= MT_RX_FILTR_CFG_PROMISC;
+ else
+ dev->mt76.rxfilter &= ~MT_RX_FILTR_CFG_PROMISC;
+
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
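+		/* mac80211 reports the power level in dBm; internal values use .5 dB steps */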
+ dev->txpower_conf = hw->conf.power_level * 2;
+
+ /* convert to per-chain power for 2x2 devices */
+ dev->txpower_conf -= 6;
+
+ if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) {
+ mt76x2_phy_set_txpower(dev);
+ mt76x02_tx_set_txpwr_auto(dev, dev->txpower_conf);
+ }
+ }
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ ieee80211_stop_queues(hw);
+ mt76x2_set_channel(dev, &hw->conf.chandef);
+ ieee80211_wake_queues(hw);
+ }
+
+ return 0;
+}
+
+static void
+mt76x2_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+}
+
+static int mt76x2_set_antenna(struct ieee80211_hw *hw, u32 tx_ant,
+ u32 rx_ant)
+{
+ struct mt76x02_dev *dev = hw->priv;
+
+ if (!tx_ant || tx_ant > 3 || tx_ant != rx_ant)
+ return -EINVAL;
+
+ mutex_lock(&dev->mt76.mutex);
+
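+	/* 0x202 enables both chains (2x2), 0x101 a single chain */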
+ dev->chainmask = (tx_ant == 3) ? 0x202 : 0x101;
+ dev->mphy.antenna_mask = tx_ant;
+
+ mt76_set_stream_caps(&dev->mphy, true);
+ mt76x2_phy_set_antenna(dev);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ return 0;
+}
+
+const struct ieee80211_ops mt76x2_ops = {
+ .tx = mt76x02_tx,
+ .start = mt76x2_start,
+ .stop = mt76x2_stop,
+ .add_interface = mt76x02_add_interface,
+ .remove_interface = mt76x02_remove_interface,
+ .config = mt76x2_config,
+ .configure_filter = mt76x02_configure_filter,
+ .bss_info_changed = mt76x02_bss_info_changed,
+ .sta_state = mt76_sta_state,
+ .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
+ .set_key = mt76x02_set_key,
+ .conf_tx = mt76x02_conf_tx,
+ .sw_scan_start = mt76_sw_scan,
+ .sw_scan_complete = mt76x02_sw_scan_complete,
+ .flush = mt76x2_flush,
+ .ampdu_action = mt76x02_ampdu_action,
+ .get_txpower = mt76_get_txpower,
+ .wake_tx_queue = mt76_wake_tx_queue,
+ .sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
+ .release_buffered_frames = mt76_release_buffered_frames,
+ .set_coverage_class = mt76x02_set_coverage_class,
+ .get_survey = mt76_get_survey,
+ .set_tim = mt76_set_tim,
+ .set_antenna = mt76x2_set_antenna,
+ .get_antenna = mt76_get_antenna,
+ .set_rts_threshold = mt76x02_set_rts_threshold,
+ .reconfig_complete = mt76x02_reconfig_complete,
+};
+
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
new file mode 100644
index 000000000..ca6f96841
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+
+#include "mt76x2.h"
+#include "mcu.h"
+#include "eeprom.h"
+
+static int
+mt76pci_load_rom_patch(struct mt76x02_dev *dev)
+{
+ const struct firmware *fw = NULL;
+ struct mt76x02_patch_header *hdr;
+ bool rom_protect = !is_mt7612(dev);
+ int len, ret = 0;
+ __le32 *cur;
+ u32 patch_mask, patch_reg;
+
+ if (rom_protect && !mt76_poll(dev, MT_MCU_SEMAPHORE_03, 1, 1, 600)) {
+ dev_err(dev->mt76.dev,
+ "Could not get hardware semaphore for ROM PATCH\n");
+ return -ETIMEDOUT;
+ }
+
+ if (mt76xx_rev(dev) >= MT76XX_REV_E3) {
+ patch_mask = BIT(0);
+ patch_reg = MT_MCU_CLOCK_CTL;
+ } else {
+ patch_mask = BIT(1);
+ patch_reg = MT_MCU_COM_REG0;
+ }
+
+ if (rom_protect && (mt76_rr(dev, patch_reg) & patch_mask)) {
+ dev_info(dev->mt76.dev, "ROM patch already applied\n");
+ goto out;
+ }
+
+ ret = request_firmware(&fw, MT7662_ROM_PATCH, dev->mt76.dev);
+ if (ret)
+ goto out;
+
+ if (!fw || !fw->data || fw->size <= sizeof(*hdr)) {
+ ret = -EIO;
+ dev_err(dev->mt76.dev, "Failed to load firmware\n");
+ goto out;
+ }
+
+ hdr = (struct mt76x02_patch_header *)fw->data;
+ dev_info(dev->mt76.dev, "ROM patch build: %.15s\n", hdr->build_time);
+
+ mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ROM_PATCH_OFFSET);
+
+ cur = (__le32 *)(fw->data + sizeof(*hdr));
+ len = fw->size - sizeof(*hdr);
+ mt76_wr_copy(dev, MT_MCU_ROM_PATCH_ADDR, cur, len);
+
+ mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);
+
+ /* Trigger ROM */
+ mt76_wr(dev, MT_MCU_INT_LEVEL, 4);
+
+ if (!mt76_poll_msec(dev, patch_reg, patch_mask, patch_mask, 2000)) {
+ dev_err(dev->mt76.dev, "Failed to load ROM patch\n");
+ ret = -ETIMEDOUT;
+ }
+
+out:
+ /* release semaphore */
+ if (rom_protect)
+ mt76_wr(dev, MT_MCU_SEMAPHORE_03, 1);
+ release_firmware(fw);
+ return ret;
+}
+
+static int
+mt76pci_load_firmware(struct mt76x02_dev *dev)
+{
+ const struct firmware *fw;
+ const struct mt76x02_fw_header *hdr;
+ int len, ret;
+ __le32 *cur;
+ u32 offset, val;
+
+ ret = request_firmware(&fw, MT7662_FIRMWARE, dev->mt76.dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr))
+ goto error;
+
+ hdr = (const struct mt76x02_fw_header *)fw->data;
+
+ len = sizeof(*hdr);
+ len += le32_to_cpu(hdr->ilm_len);
+ len += le32_to_cpu(hdr->dlm_len);
+
+ if (fw->size != len)
+ goto error;
+
+ val = le16_to_cpu(hdr->fw_ver);
+ dev_info(dev->mt76.dev, "Firmware Version: %d.%d.%02d\n",
+ (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf);
+
+ val = le16_to_cpu(hdr->build_ver);
+ dev_info(dev->mt76.dev, "Build: %x\n", val);
+ dev_info(dev->mt76.dev, "Build Time: %.16s\n", hdr->build_time);
+
+ cur = (__le32 *)(fw->data + sizeof(*hdr));
+ len = le32_to_cpu(hdr->ilm_len);
+
+ mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ILM_OFFSET);
+ mt76_wr_copy(dev, MT_MCU_ILM_ADDR, cur, len);
+
+ cur += len / sizeof(*cur);
+ len = le32_to_cpu(hdr->dlm_len);
+
+ if (mt76xx_rev(dev) >= MT76XX_REV_E3)
+ offset = MT_MCU_DLM_ADDR_E3;
+ else
+ offset = MT_MCU_DLM_ADDR;
+
+ mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_DLM_OFFSET);
+ mt76_wr_copy(dev, offset, cur, len);
+
+ mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);
+
+ val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_2);
+ if (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, val) == 1)
+ mt76_set(dev, MT_MCU_COM_REG0, BIT(30));
+
+ /* trigger firmware */
+ mt76_wr(dev, MT_MCU_INT_LEVEL, 2);
+ if (!mt76_poll_msec(dev, MT_MCU_COM_REG0, 1, 1, 200)) {
+ dev_err(dev->mt76.dev, "Firmware failed to start\n");
+ release_firmware(fw);
+ return -ETIMEDOUT;
+ }
+
+ mt76x02_set_ethtool_fwver(dev, hdr);
+ dev_info(dev->mt76.dev, "Firmware running!\n");
+
+ release_firmware(fw);
+
+ return ret;
+
+error:
+ dev_err(dev->mt76.dev, "Invalid firmware\n");
+ release_firmware(fw);
+ return -ENOENT;
+}
+
+static int
+mt76pci_mcu_restart(struct mt76_dev *mdev)
+{
+ struct mt76x02_dev *dev;
+ int ret;
+
+ dev = container_of(mdev, struct mt76x02_dev, mt76);
+
+ mt76x02_mcu_cleanup(dev);
+ mt76x2_mac_reset(dev, true);
+
+ ret = mt76pci_load_firmware(dev);
+ if (ret)
+ return ret;
+
+ mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
+
+ return 0;
+}
+
+int mt76x2_mcu_init(struct mt76x02_dev *dev)
+{
+ static const struct mt76_mcu_ops mt76x2_mcu_ops = {
+ .mcu_restart = mt76pci_mcu_restart,
+ .mcu_send_msg = mt76x02_mcu_msg_send,
+ };
+ int ret;
+
+ dev->mt76.mcu_ops = &mt76x2_mcu_ops;
+
+ ret = mt76pci_load_rom_patch(dev);
+ if (ret)
+ return ret;
+
+ ret = mt76pci_load_firmware(dev);
+ if (ret)
+ return ret;
+
+ mt76x02_mcu_function_select(dev, Q_SELECT, 1);
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c
new file mode 100644
index 000000000..8831337df
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/delay.h>
+#include "mt76x2.h"
+#include "mcu.h"
+#include "eeprom.h"
+#include "../mt76x02_phy.h"
+
+static bool
+mt76x2_phy_tssi_init_cal(struct mt76x02_dev *dev)
+{
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
+ u32 flag = 0;
+
+ if (!mt76x2_tssi_enabled(dev))
+ return false;
+
+ if (mt76x2_channel_silent(dev))
+ return false;
+
+ if (chan->band == NL80211_BAND_5GHZ)
+ flag |= BIT(0);
+
+ if (mt76x02_ext_pa_enabled(dev, chan->band))
+ flag |= BIT(8);
+
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TSSI, flag);
+ dev->cal.tssi_cal_done = true;
+ return true;
+}
+
+static void
+mt76x2_phy_channel_calibrate(struct mt76x02_dev *dev, bool mac_stopped)
+{
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
+ bool is_5ghz = chan->band == NL80211_BAND_5GHZ;
+
+ if (dev->cal.channel_cal_done)
+ return;
+
+ if (mt76x2_channel_silent(dev))
+ return;
+
+ if (!dev->cal.tssi_cal_done)
+ mt76x2_phy_tssi_init_cal(dev);
+
+ if (!mac_stopped)
+ mt76x2_mac_stop(dev, false);
+
+ if (is_5ghz)
+ mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 0);
+
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TX_SHAPING, 0);
+
+ if (!mac_stopped)
+ mt76x2_mac_resume(dev);
+
+ mt76x2_apply_gain_adj(dev);
+ mt76x02_edcca_init(dev);
+
+ dev->cal.channel_cal_done = true;
+}
+
+void mt76x2_phy_set_antenna(struct mt76x02_dev *dev)
+{
+ u32 val;
+
+ val = mt76_rr(dev, MT_BBP(AGC, 0));
+ val &= ~(BIT(4) | BIT(1));
+ switch (dev->mphy.antenna_mask) {
+ case 1:
+ /* disable mac DAC control */
+ mt76_clear(dev, MT_BBP(IBI, 9), BIT(11));
+ mt76_clear(dev, MT_BBP(TXBE, 5), 3);
+ mt76_rmw_field(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_TXANT, 0x3);
+ mt76_rmw_field(dev, MT_BBP(CORE, 32), GENMASK(21, 20), 2);
+ /* disable DAC 1 */
+ mt76_rmw_field(dev, MT_BBP(CORE, 33), GENMASK(12, 9), 4);
+
+ val &= ~(BIT(3) | BIT(0));
+ break;
+ case 2:
+ /* disable mac DAC control */
+ mt76_clear(dev, MT_BBP(IBI, 9), BIT(11));
+ mt76_rmw_field(dev, MT_BBP(TXBE, 5), 3, 1);
+ mt76_rmw_field(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_TXANT, 0xc);
+ mt76_rmw_field(dev, MT_BBP(CORE, 32), GENMASK(21, 20), 1);
+ /* disable DAC 0 */
+ mt76_rmw_field(dev, MT_BBP(CORE, 33), GENMASK(12, 9), 1);
+
+ val &= ~BIT(3);
+ val |= BIT(0);
+ break;
+ case 3:
+ default:
+ /* enable mac DAC control */
+ mt76_set(dev, MT_BBP(IBI, 9), BIT(11));
+ mt76_set(dev, MT_BBP(TXBE, 5), 3);
+ mt76_rmw_field(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_TXANT, 0xf);
+ mt76_clear(dev, MT_BBP(CORE, 32), GENMASK(21, 20));
+ mt76_clear(dev, MT_BBP(CORE, 33), GENMASK(12, 9));
+
+ val &= ~BIT(0);
+ val |= BIT(3);
+ break;
+ }
+ mt76_wr(dev, MT_BBP(AGC, 0), val);
+}
+
+int mt76x2_phy_set_channel(struct mt76x02_dev *dev,
+ struct cfg80211_chan_def *chandef)
+{
+ struct ieee80211_channel *chan = chandef->chan;
+ bool scan = test_bit(MT76_SCANNING, &dev->mphy.state);
+ enum nl80211_band band = chan->band;
+ u8 channel;
+
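+	/* extension CCA configuration, indexed by the position of the primary
+	 * channel within the 40/80 MHz segment
+	 */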
+ u32 ext_cca_chan[4] = {
+ [0] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(0)),
+ [1] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(1)),
+ [2] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(2)),
+ [3] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(3)),
+ };
+ int ch_group_index;
+ u8 bw, bw_index;
+ int freq, freq1;
+ int ret;
+
+ dev->cal.channel_cal_done = false;
+ freq = chandef->chan->center_freq;
+ freq1 = chandef->center_freq1;
+ channel = chan->hw_value;
+
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_40:
+ bw = 1;
+ if (freq1 > freq) {
+ bw_index = 1;
+ ch_group_index = 0;
+ } else {
+ bw_index = 3;
+ ch_group_index = 1;
+ }
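+		/* shift to the center channel of the 40 MHz pair */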
+ channel += 2 - ch_group_index * 4;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ ch_group_index = (freq - freq1 + 30) / 20;
+ if (WARN_ON(ch_group_index < 0 || ch_group_index > 3))
+ ch_group_index = 0;
+ bw = 2;
+ bw_index = ch_group_index;
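+		/* shift to the center channel of the 80 MHz segment */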
+ channel += 6 - ch_group_index * 4;
+ break;
+ default:
+ bw = 0;
+ bw_index = 0;
+ ch_group_index = 0;
+ break;
+ }
+
+ mt76x2_read_rx_gain(dev);
+ mt76x2_phy_set_txpower_regs(dev, band);
+ mt76x2_configure_tx_delay(dev, band, bw);
+ mt76x2_phy_set_txpower(dev);
+
+ mt76x02_phy_set_band(dev, chan->band, ch_group_index & 1);
+ mt76x02_phy_set_bw(dev, chandef->width, ch_group_index);
+
+ mt76_rmw(dev, MT_EXT_CCA_CFG,
+ (MT_EXT_CCA_CFG_CCA0 |
+ MT_EXT_CCA_CFG_CCA1 |
+ MT_EXT_CCA_CFG_CCA2 |
+ MT_EXT_CCA_CFG_CCA3 |
+ MT_EXT_CCA_CFG_CCA_MASK),
+ ext_cca_chan[ch_group_index]);
+
+ ret = mt76x2_mcu_set_channel(dev, channel, bw, bw_index, scan);
+ if (ret)
+ return ret;
+
+ mt76x2_mcu_init_gain(dev, channel, dev->cal.rx.mcu_gain, true);
+
+ mt76x2_phy_set_antenna(dev);
+
+ /* Enable LDPC Rx */
+ if (mt76xx_rev(dev) >= MT76XX_REV_E3)
+ mt76_set(dev, MT_BBP(RXO, 13), BIT(10));
+
+ if (!dev->cal.init_cal_done) {
+ u8 val = mt76x02_eeprom_get(dev, MT_EE_BT_RCAL_RESULT);
+
+ if (val != 0xff)
+ mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0);
+ }
+
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel);
+
+ /* Rx LPF calibration */
+ if (!dev->cal.init_cal_done)
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RC, 0);
+
+ dev->cal.init_cal_done = true;
+
+ mt76_wr(dev, MT_BBP(AGC, 61), 0xFF64A4E2);
+ mt76_wr(dev, MT_BBP(AGC, 7), 0x08081010);
+ mt76_wr(dev, MT_BBP(AGC, 11), 0x00000404);
+ mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070);
+ mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x04101B3F);
+
+ if (scan)
+ return 0;
+
+ mt76x2_phy_channel_calibrate(dev, true);
+ mt76x02_init_agc_gain(dev);
+
+ /* init default values for temp compensation */
+ if (mt76x2_tssi_enabled(dev)) {
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
+ 0x38);
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_2, MT_TX_ALC_CFG_2_TEMP_COMP,
+ 0x38);
+ }
+
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+
+ return 0;
+}
+
+static void
+mt76x2_phy_temp_compensate(struct mt76x02_dev *dev)
+{
+ struct mt76x2_temp_comp t;
+ int temp, db_diff;
+
+ if (mt76x2_get_temp_comp(dev, &t))
+ return;
+
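+	/* scale the raw sensor delta (~1.789 degrees per step) around the
+	 * 25 degree calibration reference
+	 */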
+ temp = mt76_get_field(dev, MT_TEMP_SENSOR, MT_TEMP_SENSOR_VAL);
+ temp -= t.temp_25_ref;
+ temp = (temp * 1789) / 1000 + 25;
+ dev->cal.temp = temp;
+
+ if (temp > 25)
+ db_diff = (temp - 25) / t.high_slope;
+ else
+ db_diff = (25 - temp) / t.low_slope;
+
+ db_diff = min(db_diff, t.upper_bound);
+ db_diff = max(db_diff, t.lower_bound);
+
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
+ db_diff * 2);
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_2, MT_TX_ALC_CFG_2_TEMP_COMP,
+ db_diff * 2);
+}
+
+void mt76x2_phy_calibrate(struct work_struct *work)
+{
+ struct mt76x02_dev *dev;
+
+ dev = container_of(work, struct mt76x02_dev, cal_work.work);
+
+ mutex_lock(&dev->mt76.mutex);
+
+ mt76x2_phy_channel_calibrate(dev, false);
+ mt76x2_phy_tssi_compensate(dev);
+ mt76x2_phy_temp_compensate(dev);
+ mt76x2_phy_update_channel_gain(dev);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+}
+
+int mt76x2_phy_start(struct mt76x02_dev *dev)
+{
+ int ret;
+
+ ret = mt76x02_mcu_set_radio_state(dev, true);
+ if (ret)
+ return ret;
+
+ mt76x2_mcu_load_cr(dev, MT_RF_BBP_CR, 0, 0);
+
+ return ret;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
new file mode 100644
index 000000000..ed2dcb05d
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include "mt76x2.h"
+#include "eeprom.h"
+#include "mcu.h"
+#include "../mt76x02_phy.h"
+
+static void
+mt76x2_adjust_high_lna_gain(struct mt76x02_dev *dev, int reg, s8 offset)
+{
+ s8 gain;
+
+ gain = FIELD_GET(MT_BBP_AGC_LNA_HIGH_GAIN,
+ mt76_rr(dev, MT_BBP(AGC, reg)));
+ gain -= offset / 2;
+ mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_LNA_HIGH_GAIN, gain);
+}
+
+static void
+mt76x2_adjust_agc_gain(struct mt76x02_dev *dev, int reg, s8 offset)
+{
+ s8 gain;
+
+ gain = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
+ gain += offset;
+ mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_GAIN, gain);
+}
+
+void mt76x2_apply_gain_adj(struct mt76x02_dev *dev)
+{
+ s8 *gain_adj = dev->cal.rx.high_gain;
+
+ mt76x2_adjust_high_lna_gain(dev, 4, gain_adj[0]);
+ mt76x2_adjust_high_lna_gain(dev, 5, gain_adj[1]);
+
+ mt76x2_adjust_agc_gain(dev, 8, gain_adj[0]);
+ mt76x2_adjust_agc_gain(dev, 9, gain_adj[1]);
+}
+EXPORT_SYMBOL_GPL(mt76x2_apply_gain_adj);
+
+void mt76x2_phy_set_txpower_regs(struct mt76x02_dev *dev,
+ enum nl80211_band band)
+{
+ u32 pa_mode[2];
+ u32 pa_mode_adj;
+
+ if (band == NL80211_BAND_2GHZ) {
+ pa_mode[0] = 0x010055ff;
+ pa_mode[1] = 0x00550055;
+
+ mt76_wr(dev, MT_TX_ALC_CFG_2, 0x35160a00);
+ mt76_wr(dev, MT_TX_ALC_CFG_3, 0x35160a06);
+
+ if (mt76x02_ext_pa_enabled(dev, band)) {
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0x0000ec00);
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0x0000ec00);
+ } else {
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0xf4000200);
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0xfa000200);
+ }
+ } else {
+ pa_mode[0] = 0x0000ffff;
+ pa_mode[1] = 0x00ff00ff;
+
+ if (mt76x02_ext_pa_enabled(dev, band)) {
+ mt76_wr(dev, MT_TX_ALC_CFG_2, 0x2f0f0400);
+ mt76_wr(dev, MT_TX_ALC_CFG_3, 0x2f0f0476);
+ } else {
+ mt76_wr(dev, MT_TX_ALC_CFG_2, 0x1b0f0400);
+ mt76_wr(dev, MT_TX_ALC_CFG_3, 0x1b0f0476);
+ }
+
+ if (mt76x02_ext_pa_enabled(dev, band))
+ pa_mode_adj = 0x04000000;
+ else
+ pa_mode_adj = 0;
+
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ0, pa_mode_adj);
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ1, pa_mode_adj);
+ }
+
+ mt76_wr(dev, MT_BB_PA_MODE_CFG0, pa_mode[0]);
+ mt76_wr(dev, MT_BB_PA_MODE_CFG1, pa_mode[1]);
+ mt76_wr(dev, MT_RF_PA_MODE_CFG0, pa_mode[0]);
+ mt76_wr(dev, MT_RF_PA_MODE_CFG1, pa_mode[1]);
+
+ if (mt76x02_ext_pa_enabled(dev, band)) {
+ u32 val;
+
+ if (band == NL80211_BAND_2GHZ)
+ val = 0x3c3c023c;
+ else
+ val = 0x363c023c;
+
+ mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
+ mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
+ mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00001818);
+ } else {
+ if (band == NL80211_BAND_2GHZ) {
+ u32 val = 0x0f3c3c3c;
+
+ mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
+ mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
+ mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00000606);
+ } else {
+ mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x383c023c);
+ mt76_wr(dev, MT_TX1_RF_GAIN_CORR, 0x24282e28);
+ mt76_wr(dev, MT_TX_ALC_CFG_4, 0);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(mt76x2_phy_set_txpower_regs);
+
+static int
+mt76x2_get_min_rate_power(struct mt76_rate_power *r)
+{
+ int i;
+ s8 ret = 0;
+
+ for (i = 0; i < sizeof(r->all); i++) {
+ if (!r->all[i])
+ continue;
+
+ if (ret)
+ ret = min(ret, r->all[i]);
+ else
+ ret = r->all[i];
+ }
+
+ return ret;
+}
+
+void mt76x2_phy_set_txpower(struct mt76x02_dev *dev)
+{
+ enum nl80211_chan_width width = dev->mphy.chandef.width;
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
+ struct mt76x2_tx_power_info txp;
+ int txp_0, txp_1, delta = 0;
+ struct mt76_rate_power t = {};
+ int base_power, gain;
+
+ mt76x2_get_power_info(dev, &txp, chan);
+
+ if (width == NL80211_CHAN_WIDTH_40)
+ delta = txp.delta_bw40;
+ else if (width == NL80211_CHAN_WIDTH_80)
+ delta = txp.delta_bw80;
+
+ mt76x2_get_rate_power(dev, &t, chan);
+ mt76x02_add_rate_power_offset(&t, txp.target_power + delta);
+ mt76x02_limit_rate_power(&t, dev->txpower_conf);
+ dev->mphy.txpower_cur = mt76x02_get_max_rate_power(&t);
+
+ base_power = mt76x2_get_min_rate_power(&t);
+ delta = base_power - txp.target_power;
+ txp_0 = txp.chain[0].target_power + txp.chain[0].delta + delta;
+ txp_1 = txp.chain[1].target_power + txp.chain[1].delta + delta;
+
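+	/* clamp the per-chain power to the 0..0x2f register range and fold any
+	 * excess into the per-rate power offsets
+	 */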
+ gain = min(txp_0, txp_1);
+ if (gain < 0) {
+ base_power -= gain;
+ txp_0 -= gain;
+ txp_1 -= gain;
+ } else if (gain > 0x2f) {
+ base_power -= gain - 0x2f;
+ txp_0 = 0x2f;
+ txp_1 = 0x2f;
+ }
+
+ mt76x02_add_rate_power_offset(&t, -base_power);
+ dev->target_power = txp.target_power;
+ dev->target_power_delta[0] = txp_0 - txp.chain[0].target_power;
+ dev->target_power_delta[1] = txp_1 - txp.chain[0].target_power;
+ dev->mt76.rate_power = t;
+
+ mt76x02_phy_set_txpower(dev, txp_0, txp_1);
+}
+EXPORT_SYMBOL_GPL(mt76x2_phy_set_txpower);
+
+void mt76x2_configure_tx_delay(struct mt76x02_dev *dev,
+ enum nl80211_band band, u8 bw)
+{
+ u32 cfg0, cfg1;
+
+ if (mt76x02_ext_pa_enabled(dev, band)) {
+ cfg0 = bw ? 0x000b0c01 : 0x00101101;
+ cfg1 = 0x00011414;
+ } else {
+ cfg0 = bw ? 0x000b0b01 : 0x00101001;
+ cfg1 = 0x00021414;
+ }
+ mt76_wr(dev, MT_TX_SW_CFG0, cfg0);
+ mt76_wr(dev, MT_TX_SW_CFG1, cfg1);
+
+ mt76_rmw_field(dev, MT_XIFS_TIME_CFG, MT_XIFS_TIME_CFG_OFDM_SIFS, 15);
+}
+EXPORT_SYMBOL_GPL(mt76x2_configure_tx_delay);
+
+void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev)
+{
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
+ struct mt76x2_tx_power_info txp;
+ struct mt76x2_tssi_comp t = {};
+
+ if (!dev->cal.tssi_cal_done)
+ return;
+
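+	/* TSSI compensation runs in two steps: first trigger a measurement, then
+	 * program the per-chain slope/offset values once the hardware is done
+	 */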
+ if (!dev->cal.tssi_comp_pending) {
+ /* TSSI trigger */
+ t.cal_mode = BIT(0);
+ mt76x2_mcu_tssi_comp(dev, &t);
+ dev->cal.tssi_comp_pending = true;
+ } else {
+ if (mt76_rr(dev, MT_BBP(CORE, 34)) & BIT(4))
+ return;
+
+ dev->cal.tssi_comp_pending = false;
+ mt76x2_get_power_info(dev, &txp, chan);
+
+ if (mt76x02_ext_pa_enabled(dev, chan->band))
+ t.pa_mode = 1;
+
+ t.cal_mode = BIT(1);
+ t.slope0 = txp.chain[0].tssi_slope;
+ t.offset0 = txp.chain[0].tssi_offset;
+ t.slope1 = txp.chain[1].tssi_slope;
+ t.offset1 = txp.chain[1].tssi_offset;
+ mt76x2_mcu_tssi_comp(dev, &t);
+
+ if (t.pa_mode || dev->cal.dpd_cal_done || dev->ed_tx_blocked)
+ return;
+
+ usleep_range(10000, 20000);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_DPD, chan->hw_value);
+ dev->cal.dpd_cal_done = true;
+ }
+}
+EXPORT_SYMBOL_GPL(mt76x2_phy_tssi_compensate);
+
+static void
+mt76x2_phy_set_gain_val(struct mt76x02_dev *dev)
+{
+ u32 val;
+ u8 gain_val[2];
+
+ gain_val[0] = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust;
+ gain_val[1] = dev->cal.agc_gain_cur[1] - dev->cal.agc_gain_adjust;
+
+ val = 0x1836 << 16;
+ if (!mt76x2_has_ext_lna(dev) &&
+ dev->mphy.chandef.width >= NL80211_CHAN_WIDTH_40)
+ val = 0x1e42 << 16;
+
+ if (mt76x2_has_ext_lna(dev) &&
+ dev->mphy.chandef.chan->band == NL80211_BAND_2GHZ &&
+ dev->mphy.chandef.width < NL80211_CHAN_WIDTH_40)
+ val = 0x0f36 << 16;
+
+ val |= 0xf8;
+
+ mt76_wr(dev, MT_BBP(AGC, 8),
+ val | FIELD_PREP(MT_BBP_AGC_GAIN, gain_val[0]));
+ mt76_wr(dev, MT_BBP(AGC, 9),
+ val | FIELD_PREP(MT_BBP_AGC_GAIN, gain_val[1]));
+
+ if (dev->mphy.chandef.chan->flags & IEEE80211_CHAN_RADAR)
+ mt76x02_phy_dfs_adjust_agc(dev);
+}
+
+void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
+{
+ u8 *gain = dev->cal.agc_gain_init;
+ u8 low_gain_delta, gain_delta;
+ u32 agc_35, agc_37;
+ bool gain_change;
+ int low_gain;
+ u32 val;
+
+ dev->cal.avg_rssi_all = mt76_get_min_avg_rssi(&dev->mt76, false);
+ if (!dev->cal.avg_rssi_all)
+ dev->cal.avg_rssi_all = -75;
+
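+	/* low_gain is 0, 1 or 2 depending on the average RSSI; a full gain
+	 * reprogram is only needed when entering or leaving the strongest state
+	 */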
+ low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) +
+ (dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev));
+
+ gain_change = dev->cal.low_gain < 0 ||
+ (dev->cal.low_gain & 2) ^ (low_gain & 2);
+ dev->cal.low_gain = low_gain;
+
+ if (!gain_change) {
+ if (mt76x02_phy_adjust_vga_gain(dev))
+ mt76x2_phy_set_gain_val(dev);
+ return;
+ }
+
+ if (dev->mphy.chandef.width == NL80211_CHAN_WIDTH_80) {
+ mt76_wr(dev, MT_BBP(RXO, 14), 0x00560211);
+ val = mt76_rr(dev, MT_BBP(AGC, 26)) & ~0xf;
+ if (low_gain == 2)
+ val |= 0x3;
+ else
+ val |= 0x5;
+ mt76_wr(dev, MT_BBP(AGC, 26), val);
+ } else {
+ mt76_wr(dev, MT_BBP(RXO, 14), 0x00560423);
+ }
+
+ if (mt76x2_has_ext_lna(dev))
+ low_gain_delta = 10;
+ else
+ low_gain_delta = 14;
+
+ agc_37 = 0x2121262c;
+ if (dev->mphy.chandef.chan->band == NL80211_BAND_2GHZ)
+ agc_35 = 0x11111516;
+ else if (low_gain == 2)
+ agc_35 = agc_37 = 0x08080808;
+ else if (dev->mphy.chandef.width == NL80211_CHAN_WIDTH_80)
+ agc_35 = 0x10101014;
+ else
+ agc_35 = 0x11111116;
+
+ if (low_gain == 2) {
+ mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990);
+ mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808);
+ mt76_wr(dev, MT_BBP(AGC, 37), 0x08080808);
+ gain_delta = low_gain_delta;
+ dev->cal.agc_gain_adjust = 0;
+ } else {
+ mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991);
+ gain_delta = 0;
+ dev->cal.agc_gain_adjust = low_gain_delta;
+ }
+
+ mt76_wr(dev, MT_BBP(AGC, 35), agc_35);
+ mt76_wr(dev, MT_BBP(AGC, 37), agc_37);
+
+ dev->cal.agc_gain_cur[0] = gain[0] - gain_delta;
+ dev->cal.agc_gain_cur[1] = gain[1] - gain_delta;
+ mt76x2_phy_set_gain_val(dev);
+
+ /* clear false CCA counters */
+ mt76_rr(dev, MT_RX_STAT_1);
+}
+EXPORT_SYMBOL_GPL(mt76x2_phy_update_channel_gain);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
new file mode 100644
index 000000000..4e003c7b6
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "../mt76x02_usb.h"
+#include "mt76x2u.h"
+
+static const struct usb_device_id mt76x2u_device_table[] = {
+ { USB_DEVICE(0x0b05, 0x1833) }, /* Asus USB-AC54 */
+ { USB_DEVICE(0x0b05, 0x17eb) }, /* Asus USB-AC55 */
+ { USB_DEVICE(0x0b05, 0x180b) }, /* Asus USB-N53 B1 */
+ { USB_DEVICE(0x0e8d, 0x7612) }, /* Aukey USBAC1200 - Alfa AWUS036ACM */
+ { USB_DEVICE(0x057c, 0x8503) }, /* Avm FRITZ!WLAN AC860 */
+ { USB_DEVICE(0x7392, 0xb711) }, /* Edimax EW 7722 UAC */
+ { USB_DEVICE(0x0e8d, 0x7632) }, /* HC-M7662BU1 */
+ { USB_DEVICE(0x2c4e, 0x0103) }, /* Mercury UD13 */
+ { USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */
+ { USB_DEVICE(0x045e, 0x02e6) }, /* XBox One Wireless Adapter */
+ { USB_DEVICE(0x045e, 0x02fe) }, /* XBox One Wireless Adapter */
+ { },
+};
+
+static int mt76x2u_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ static const struct mt76_driver_ops drv_ops = {
+ .drv_flags = MT_DRV_SW_RX_AIRTIME,
+ .survey_flags = SURVEY_INFO_TIME_TX,
+ .update_survey = mt76x02_update_channel,
+ .tx_prepare_skb = mt76x02u_tx_prepare_skb,
+ .tx_complete_skb = mt76x02u_tx_complete_skb,
+ .tx_status_data = mt76x02_tx_status_data,
+ .rx_skb = mt76x02_queue_rx_skb,
+ .sta_ps = mt76x02_sta_ps,
+ .sta_add = mt76x02_sta_add,
+ .sta_remove = mt76x02_sta_remove,
+ };
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct mt76x02_dev *dev;
+ struct mt76_dev *mdev;
+ int err;
+
+ mdev = mt76_alloc_device(&intf->dev, sizeof(*dev), &mt76x2u_ops,
+ &drv_ops);
+ if (!mdev)
+ return -ENOMEM;
+
+ dev = container_of(mdev, struct mt76x02_dev, mt76);
+
+ udev = usb_get_dev(udev);
+ usb_reset_device(udev);
+
+ usb_set_intfdata(intf, dev);
+
+ mt76x02u_init_mcu(mdev);
+ err = mt76u_init(mdev, intf, false);
+ if (err < 0)
+ goto err;
+
+ mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
+ dev_info(mdev->dev, "ASIC revision: %08x\n", mdev->rev);
+ if (!is_mt76x2(dev)) {
+ err = -ENODEV;
+ goto err;
+ }
+
+ err = mt76x2u_register_device(dev);
+ if (err < 0)
+ goto err;
+
+ return 0;
+
+err:
+ mt76_free_device(&dev->mt76);
+ usb_set_intfdata(intf, NULL);
+ usb_put_dev(udev);
+
+ return err;
+}
+
+static void mt76x2u_disconnect(struct usb_interface *intf)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct mt76x02_dev *dev = usb_get_intfdata(intf);
+ struct ieee80211_hw *hw = mt76_hw(dev);
+
+ set_bit(MT76_REMOVED, &dev->mphy.state);
+ ieee80211_unregister_hw(hw);
+ mt76x2u_cleanup(dev);
+ mt76_free_device(&dev->mt76);
+ usb_set_intfdata(intf, NULL);
+ usb_put_dev(udev);
+}
+
+static int __maybe_unused mt76x2u_suspend(struct usb_interface *intf,
+ pm_message_t state)
+{
+ struct mt76x02_dev *dev = usb_get_intfdata(intf);
+
+ mt76u_stop_rx(&dev->mt76);
+
+ return 0;
+}
+
+static int __maybe_unused mt76x2u_resume(struct usb_interface *intf)
+{
+ struct mt76x02_dev *dev = usb_get_intfdata(intf);
+ int err;
+
+ err = mt76u_resume_rx(&dev->mt76);
+ if (err < 0)
+ goto err;
+
+ err = mt76x2u_init_hardware(dev);
+ if (err < 0)
+ goto err;
+
+ return 0;
+
+err:
+ mt76x2u_cleanup(dev);
+ return err;
+}
+
+MODULE_DEVICE_TABLE(usb, mt76x2u_device_table);
+MODULE_FIRMWARE(MT7662_FIRMWARE);
+MODULE_FIRMWARE(MT7662_ROM_PATCH);
+
+static struct usb_driver mt76x2u_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mt76x2u_device_table,
+ .probe = mt76x2u_probe,
+ .disconnect = mt76x2u_disconnect,
+#ifdef CONFIG_PM
+ .suspend = mt76x2u_suspend,
+ .resume = mt76x2u_resume,
+ .reset_resume = mt76x2u_resume,
+#endif /* CONFIG_PM */
+ .soft_unbind = 1,
+ .disable_hub_initiated_lpm = 1,
+};
+module_usb_driver(mt76x2u_driver);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
new file mode 100644
index 000000000..ffc2deba2
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
@@ -0,0 +1,248 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include <linux/delay.h>
+
+#include "mt76x2u.h"
+#include "eeprom.h"
+#include "../mt76x02_phy.h"
+#include "../mt76x02_usb.h"
+
+static void mt76x2u_init_dma(struct mt76x02_dev *dev)
+{
+ u32 val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG));
+
+ val |= MT_USB_DMA_CFG_RX_DROP_OR_PAD |
+ MT_USB_DMA_CFG_RX_BULK_EN |
+ MT_USB_DMA_CFG_TX_BULK_EN;
+
+ /* disable AGGR_BULK_RX in order to receive one
+ * frame in each rx urb and avoid copies
+ */
+ val &= ~MT_USB_DMA_CFG_RX_BULK_AGG_EN;
+ mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
+}
+
+static void mt76x2u_power_on_rf_patch(struct mt76x02_dev *dev)
+{
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(0) | BIT(16));
+ udelay(1);
+
+ mt76_clear(dev, MT_VEND_ADDR(CFG, 0x1c), 0xff);
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x1c), 0x30);
+
+ mt76_wr(dev, MT_VEND_ADDR(CFG, 0x14), 0x484f);
+ udelay(1);
+
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(17));
+ usleep_range(150, 200);
+
+ mt76_clear(dev, MT_VEND_ADDR(CFG, 0x130), BIT(16));
+ usleep_range(50, 100);
+
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x14c), BIT(19) | BIT(20));
+}
+
+static void mt76x2u_power_on_rf(struct mt76x02_dev *dev, int unit)
+{
+ int shift = unit ? 8 : 0;
+ u32 val = (BIT(1) | BIT(3) | BIT(4) | BIT(5)) << shift;
+
+ /* Enable RF BG */
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(0) << shift);
+ usleep_range(10, 20);
+
+ /* Enable RFDIG LDO/AFE/ABB/ADDA */
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), val);
+ usleep_range(10, 20);
+
+ /* Switch RFDIG power to internal LDO */
+ mt76_clear(dev, MT_VEND_ADDR(CFG, 0x130), BIT(2) << shift);
+ usleep_range(10, 20);
+
+ mt76x2u_power_on_rf_patch(dev);
+
+ mt76_set(dev, 0x530, 0xf);
+}
+
+static void mt76x2u_power_on(struct mt76x02_dev *dev)
+{
+ u32 val;
+
+ /* Turn on WL MTCMOS */
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x148),
+ MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP);
+
+ val = MT_WLAN_MTC_CTRL_STATE_UP |
+ MT_WLAN_MTC_CTRL_PWR_ACK |
+ MT_WLAN_MTC_CTRL_PWR_ACK_S;
+
+ mt76_poll(dev, MT_VEND_ADDR(CFG, 0x148), val, val, 1000);
+
+ mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0x7f << 16);
+ usleep_range(10, 20);
+
+ mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0xf << 24);
+ usleep_range(10, 20);
+
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x148), 0xf << 24);
+ mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0xfff);
+
+	/* Release AD/DA power down */
+ mt76_clear(dev, MT_VEND_ADDR(CFG, 0x1204), BIT(3));
+
+ /* WLAN function enable */
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x80), BIT(0));
+
+ /* Release BBP software reset */
+ mt76_clear(dev, MT_VEND_ADDR(CFG, 0x64), BIT(18));
+
+ mt76x2u_power_on_rf(dev, 0);
+ mt76x2u_power_on_rf(dev, 1);
+}
+
+static int mt76x2u_init_eeprom(struct mt76x02_dev *dev)
+{
+ u32 val, i;
+
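+	/* the EEPROM content is exposed through vendor registers on USB; copy it
+	 * into the local buffer 32 bits at a time
+	 */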
+ dev->mt76.eeprom.data = devm_kzalloc(dev->mt76.dev,
+ MT7612U_EEPROM_SIZE,
+ GFP_KERNEL);
+ dev->mt76.eeprom.size = MT7612U_EEPROM_SIZE;
+ if (!dev->mt76.eeprom.data)
+ return -ENOMEM;
+
+ for (i = 0; i + 4 <= MT7612U_EEPROM_SIZE; i += 4) {
+ val = mt76_rr(dev, MT_VEND_ADDR(EEPROM, i));
+ put_unaligned_le32(val, dev->mt76.eeprom.data + i);
+ }
+
+ mt76x02_eeprom_parse_hw_cap(dev);
+ return 0;
+}
+
+int mt76x2u_init_hardware(struct mt76x02_dev *dev)
+{
+ int i, k, err;
+
+ mt76x2_reset_wlan(dev, true);
+ mt76x2u_power_on(dev);
+
+ if (!mt76x02_wait_for_mac(&dev->mt76))
+ return -ETIMEDOUT;
+
+ err = mt76x2u_mcu_fw_init(dev);
+ if (err < 0)
+ return err;
+
+ if (!mt76_poll_msec(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 100))
+ return -EIO;
+
+	/* wait for the ASIC to become ready after firmware load */
+ if (!mt76x02_wait_for_mac(&dev->mt76))
+ return -ETIMEDOUT;
+
+ mt76x2u_init_dma(dev);
+
+ err = mt76x2u_mcu_init(dev);
+ if (err < 0)
+ return err;
+
+ err = mt76x2u_mac_reset(dev);
+ if (err < 0)
+ return err;
+
+ mt76x02_mac_setaddr(dev, dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
+ dev->mt76.rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);
+
+ if (!mt76x02_wait_for_txrx_idle(&dev->mt76))
+ return -ETIMEDOUT;
+
+ /* reset wcid table */
+ for (i = 0; i < 256; i++)
+ mt76x02_mac_wcid_setup(dev, i, 0, NULL);
+
+ /* reset shared key table and pairwise key table */
+ for (i = 0; i < 16; i++) {
+ for (k = 0; k < 4; k++)
+ mt76x02_mac_shared_key_setup(dev, i, k, NULL);
+ }
+
+ mt76x02u_init_beacon_config(dev);
+
+ mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
+ mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x583f);
+
+ err = mt76x2_mcu_load_cr(dev, MT_RF_BBP_CR, 0, 0);
+ if (err < 0)
+ return err;
+
+ mt76x02_phy_set_rxpath(dev);
+ mt76x02_phy_set_txdac(dev);
+
+ return mt76x2u_mac_stop(dev);
+}
+
+int mt76x2u_register_device(struct mt76x02_dev *dev)
+{
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ struct mt76_usb *usb = &dev->mt76.usb;
+ int err;
+
+ INIT_DELAYED_WORK(&dev->cal_work, mt76x2u_phy_calibrate);
+ mt76x02_init_device(dev);
+
+ err = mt76x2u_init_eeprom(dev);
+ if (err < 0)
+ return err;
+
+ usb->mcu.data = devm_kmalloc(dev->mt76.dev, MCU_RESP_URB_SIZE,
+ GFP_KERNEL);
+ if (!usb->mcu.data)
+ return -ENOMEM;
+
+ err = mt76u_alloc_queues(&dev->mt76);
+ if (err < 0)
+ goto fail;
+
+ err = mt76x2u_init_hardware(dev);
+ if (err < 0)
+ goto fail;
+
+ /* check hw sg support in order to enable AMSDU */
+ hw->max_tx_fragments = dev->mt76.usb.sg_en ? MT_TX_SG_MAX_SIZE : 1;
+ err = mt76_register_device(&dev->mt76, true, mt76x02_rates,
+ ARRAY_SIZE(mt76x02_rates));
+ if (err)
+ goto fail;
+
+ set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
+
+ mt76x02_init_debugfs(dev);
+ mt76x2_init_txpower(dev, &dev->mphy.sband_2g.sband);
+ mt76x2_init_txpower(dev, &dev->mphy.sband_5g.sband);
+
+ return 0;
+
+fail:
+ mt76x2u_cleanup(dev);
+ return err;
+}
+
+void mt76x2u_stop_hw(struct mt76x02_dev *dev)
+{
+ cancel_delayed_work_sync(&dev->cal_work);
+ cancel_delayed_work_sync(&dev->mt76.mac_work);
+ mt76x2u_mac_stop(dev);
+}
+
+void mt76x2u_cleanup(struct mt76x02_dev *dev)
+{
+ mt76x02_mcu_set_radio_state(dev, false);
+ mt76x2u_stop_hw(dev);
+ mt76u_queues_deinit(&dev->mt76);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
new file mode 100644
index 000000000..eaa622833
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include "mt76x2u.h"
+#include "eeprom.h"
+
+static void mt76x2u_mac_fixup_xtal(struct mt76x02_dev *dev)
+{
+ s8 offset = 0;
+ u16 eep_val;
+
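+ /* XTAL trim is stored sign-magnitude: bits [6:0] hold the value, bit 7 the
+  * sign, and 0xff means the field is unprogrammed (no trim applied).
+  */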
+ eep_val = mt76x02_eeprom_get(dev, MT_EE_XTAL_TRIM_2);
+
+ offset = eep_val & 0x7f;
+ if ((eep_val & 0xff) == 0xff)
+ offset = 0;
+ else if (eep_val & 0x80)
+ offset = 0 - offset;
+
+ eep_val >>= 8;
+ if (eep_val == 0x00 || eep_val == 0xff) {
+ eep_val = mt76x02_eeprom_get(dev, MT_EE_XTAL_TRIM_1);
+ eep_val &= 0xff;
+
+ if (eep_val == 0x00 || eep_val == 0xff)
+ eep_val = 0x14;
+ }
+
+ eep_val &= 0x7f;
+ mt76_rmw_field(dev, MT_VEND_ADDR(CFG, MT_XO_CTRL5),
+ MT_XO_CTRL5_C2_VAL, eep_val + offset);
+ mt76_set(dev, MT_VEND_ADDR(CFG, MT_XO_CTRL6), MT_XO_CTRL6_C2_CTRL);
+
+ mt76_wr(dev, 0x504, 0x06000000);
+ mt76_wr(dev, 0x50c, 0x08800000);
+ mdelay(5);
+ mt76_wr(dev, 0x504, 0x0);
+
+ /* decrease SIFS from 16us to 13us */
+ mt76_rmw_field(dev, MT_XIFS_TIME_CFG,
+ MT_XIFS_TIME_CFG_OFDM_SIFS, 0xd);
+ mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG, MT_BKOFF_SLOT_CFG_CC_DELAY, 1);
+
+ /* init fce */
+ mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);
+
+ eep_val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_2);
+ switch (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, eep_val)) {
+ case 0:
+ mt76_wr(dev, MT_XO_CTRL7, 0x5c1fee80);
+ break;
+ case 1:
+ mt76_wr(dev, MT_XO_CTRL7, 0x5c1feed0);
+ break;
+ default:
+ break;
+ }
+}
+
+int mt76x2u_mac_reset(struct mt76x02_dev *dev)
+{
+ mt76_wr(dev, MT_WPDMA_GLO_CFG, BIT(4) | BIT(5));
+
+ /* init pbf regs */
+ mt76_wr(dev, MT_PBF_TX_MAX_PCNT, 0xefef3f1f);
+ mt76_wr(dev, MT_PBF_RX_MAX_PCNT, 0xfebf);
+
+ mt76_write_mac_initvals(dev);
+
+ mt76_wr(dev, MT_TX_LINK_CFG, 0x1020);
+ mt76_wr(dev, MT_AUTO_RSP_CFG, 0x13);
+ mt76_wr(dev, MT_MAX_LEN_CFG, 0x2f00);
+
+ mt76_wr(dev, MT_WMM_AIFSN, 0x2273);
+ mt76_wr(dev, MT_WMM_CWMIN, 0x2344);
+ mt76_wr(dev, MT_WMM_CWMAX, 0x34aa);
+
+ mt76_clear(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_RESET_CSR |
+ MT_MAC_SYS_CTRL_RESET_BBP);
+
+ if (is_mt7612(dev))
+ mt76_clear(dev, MT_COEXCFG0, MT_COEXCFG0_COEX_EN);
+
+ mt76_set(dev, MT_EXT_CCA_CFG, 0xf000);
+ mt76_clear(dev, MT_TX_ALC_CFG_4, BIT(31));
+
+ mt76x2u_mac_fixup_xtal(dev);
+
+ return 0;
+}
+
+int mt76x2u_mac_stop(struct mt76x02_dev *dev)
+{
+ int i, count = 0, val;
+ bool stopped = false;
+ u32 rts_cfg;
+
+ if (test_bit(MT76_REMOVED, &dev->mphy.state))
+ return -EIO;
+
+ rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
+ mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT);
+
+ mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
+ mt76_clear(dev, MT_TXOP_HLDR_ET, MT_TXOP_HLDR_TX40M_BLK_EN);
+
+ /* wait tx dma to stop */
+ for (i = 0; i < 2000; i++) {
+ val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG));
+ if (!(val & MT_USB_DMA_CFG_TX_BUSY) && i > 10)
+ break;
+ usleep_range(50, 100);
+ }
+
+ /* page count on TxQ */
+ for (i = 0; i < 200; i++) {
+ if (!(mt76_rr(dev, 0x0438) & 0xffffffff) &&
+ !(mt76_rr(dev, 0x0a30) & 0x000000ff) &&
+ !(mt76_rr(dev, 0x0a34) & 0xff00ff00))
+ break;
+ usleep_range(10, 20);
+ }
+
+ /* disable tx-rx */
+ mt76_clear(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_RX |
+ MT_MAC_SYS_CTRL_ENABLE_TX);
+
+ /* Wait for MAC to become idle */
+ for (i = 0; i < 1000; i++) {
+ if (!(mt76_rr(dev, MT_MAC_STATUS) & MT_MAC_STATUS_TX) &&
+ !mt76_rr(dev, MT_BBP(IBI, 12))) {
+ stopped = true;
+ break;
+ }
+ usleep_range(10, 20);
+ }
+
+ if (!stopped) {
+ mt76_set(dev, MT_BBP(CORE, 4), BIT(1));
+ mt76_clear(dev, MT_BBP(CORE, 4), BIT(1));
+
+ mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
+ mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
+ }
+
+ /* page count on RxQ */
+ for (i = 0; i < 200; i++) {
+ if (!(mt76_rr(dev, 0x0430) & 0x00ff0000) &&
+ !(mt76_rr(dev, 0x0a30) & 0xffffffff) &&
+ !(mt76_rr(dev, 0x0a34) & 0xffffffff) &&
+ ++count > 10)
+ break;
+ msleep(50);
+ }
+
+ if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_RX, 0, 2000))
+ dev_warn(dev->mt76.dev, "MAC RX failed to stop\n");
+
+ /* wait rx dma to stop */
+ for (i = 0; i < 2000; i++) {
+ val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG));
+ if (!(val & MT_USB_DMA_CFG_RX_BUSY) && i > 10)
+ break;
+ usleep_range(50, 100);
+ }
+
+ mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
new file mode 100644
index 000000000..bab4e6e19
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include "mt76x2u.h"
+#include "../mt76x02_usb.h"
+
+static int mt76x2u_start(struct ieee80211_hw *hw)
+{
+ struct mt76x02_dev *dev = hw->priv;
+ int ret;
+
+ ret = mt76x02u_mac_start(dev);
+ if (ret)
+ return ret;
+
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
+ MT_MAC_WORK_INTERVAL);
+ set_bit(MT76_STATE_RUNNING, &dev->mphy.state);
+
+ return 0;
+}
+
+static void mt76x2u_stop(struct ieee80211_hw *hw)
+{
+ struct mt76x02_dev *dev = hw->priv;
+
+ clear_bit(MT76_STATE_RUNNING, &dev->mphy.state);
+ mt76u_stop_tx(&dev->mt76);
+ mt76x2u_stop_hw(dev);
+}
+
+static int
+mt76x2u_set_channel(struct mt76x02_dev *dev,
+ struct cfg80211_chan_def *chandef)
+{
+ int err;
+
+ cancel_delayed_work_sync(&dev->cal_work);
+ mt76x02_pre_tbtt_enable(dev, false);
+
+ mutex_lock(&dev->mt76.mutex);
+ set_bit(MT76_RESET, &dev->mphy.state);
+
+ mt76_set_channel(&dev->mphy);
+
+ mt76x2_mac_stop(dev, false);
+
+ err = mt76x2u_phy_set_channel(dev, chandef);
+
+ mt76x02_mac_cc_reset(dev);
+ mt76x2_mac_resume(dev);
+
+ clear_bit(MT76_RESET, &dev->mphy.state);
+ mutex_unlock(&dev->mt76.mutex);
+
+ mt76x02_pre_tbtt_enable(dev, true);
+ mt76_txq_schedule_all(&dev->mphy);
+
+ return err;
+}
+
+static int
+mt76x2u_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct mt76x02_dev *dev = hw->priv;
+ int err = 0;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
+ dev->mt76.rxfilter |= MT_RX_FILTR_CFG_PROMISC;
+ else
+ dev->mt76.rxfilter &= ~MT_RX_FILTR_CFG_PROMISC;
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
+ dev->txpower_conf = hw->conf.power_level * 2;
+
+ /* convert to per-chain power for 2x2 devices */
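+ /* txpower_conf is in 0.5 dB units, so subtracting 6 drops 3 dB (half the power per chain) */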
+ dev->txpower_conf -= 6;
+
+ if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
+ mt76x2_phy_set_txpower(dev);
+ }
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ ieee80211_stop_queues(hw);
+ err = mt76x2u_set_channel(dev, &hw->conf.chandef);
+ ieee80211_wake_queues(hw);
+ }
+
+ return err;
+}
+
+const struct ieee80211_ops mt76x2u_ops = {
+ .tx = mt76x02_tx,
+ .start = mt76x2u_start,
+ .stop = mt76x2u_stop,
+ .add_interface = mt76x02_add_interface,
+ .remove_interface = mt76x02_remove_interface,
+ .sta_state = mt76_sta_state,
+ .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
+ .set_key = mt76x02_set_key,
+ .ampdu_action = mt76x02_ampdu_action,
+ .config = mt76x2u_config,
+ .wake_tx_queue = mt76_wake_tx_queue,
+ .bss_info_changed = mt76x02_bss_info_changed,
+ .configure_filter = mt76x02_configure_filter,
+ .conf_tx = mt76x02_conf_tx,
+ .sw_scan_start = mt76_sw_scan,
+ .sw_scan_complete = mt76x02_sw_scan_complete,
+ .sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
+ .get_txpower = mt76_get_txpower,
+ .get_survey = mt76_get_survey,
+ .set_tim = mt76_set_tim,
+ .release_buffered_frames = mt76_release_buffered_frames,
+ .get_antenna = mt76_get_antenna,
+};
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c
new file mode 100644
index 000000000..dd22d8af0
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c
@@ -0,0 +1,255 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include <linux/firmware.h>
+
+#include "mt76x2u.h"
+#include "eeprom.h"
+#include "../mt76x02_usb.h"
+
+#define MT_CMD_HDR_LEN 4
+
+#define MCU_FW_URB_MAX_PAYLOAD 0x3900
+#define MCU_ROM_PATCH_MAX_PAYLOAD 2048
+
+#define MT76U_MCU_ILM_OFFSET 0x80000
+#define MT76U_MCU_DLM_OFFSET 0x110000
+#define MT76U_MCU_ROM_PATCH_OFFSET 0x90000
+
+static void mt76x2u_mcu_load_ivb(struct mt76x02_dev *dev)
+{
+ mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ 0x12, 0, NULL, 0);
+}
+
+static void mt76x2u_mcu_enable_patch(struct mt76x02_dev *dev)
+{
+ struct mt76_usb *usb = &dev->mt76.usb;
+ static const u8 data[] = {
+ 0x6f, 0xfc, 0x08, 0x01,
+ 0x20, 0x04, 0x00, 0x00,
+ 0x00, 0x09, 0x00,
+ };
+
+ memcpy(usb->data, data, sizeof(data));
+ mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
+ USB_DIR_OUT | USB_TYPE_CLASS,
+ 0x12, 0, usb->data, sizeof(data));
+}
+
+static void mt76x2u_mcu_reset_wmt(struct mt76x02_dev *dev)
+{
+ struct mt76_usb *usb = &dev->mt76.usb;
+ u8 data[] = {
+ 0x6f, 0xfc, 0x05, 0x01,
+ 0x07, 0x01, 0x00, 0x04
+ };
+
+ memcpy(usb->data, data, sizeof(data));
+ mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
+ USB_DIR_OUT | USB_TYPE_CLASS,
+ 0x12, 0, usb->data, sizeof(data));
+}
+
+static int mt76x2u_mcu_load_rom_patch(struct mt76x02_dev *dev)
+{
+ bool rom_protect = !is_mt7612(dev);
+ struct mt76x02_patch_header *hdr;
+ u32 val, patch_mask, patch_reg;
+ const struct firmware *fw;
+ int err;
+
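+ /* on everything but MT7612 the ROM patch region is protected, so grab the
+  * hardware semaphore before touching it and release it once done.
+  */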
+ if (rom_protect &&
+ !mt76_poll_msec(dev, MT_MCU_SEMAPHORE_03, 1, 1, 600)) {
+ dev_err(dev->mt76.dev,
+ "could not get hardware semaphore for ROM PATCH\n");
+ return -ETIMEDOUT;
+ }
+
+ if (mt76xx_rev(dev) >= MT76XX_REV_E3) {
+ patch_mask = BIT(0);
+ patch_reg = MT_MCU_CLOCK_CTL;
+ } else {
+ patch_mask = BIT(1);
+ patch_reg = MT_MCU_COM_REG0;
+ }
+
+ if (rom_protect && (mt76_rr(dev, patch_reg) & patch_mask)) {
+ dev_info(dev->mt76.dev, "ROM patch already applied\n");
+ return 0;
+ }
+
+ err = request_firmware(&fw, MT7662_ROM_PATCH, dev->mt76.dev);
+ if (err < 0)
+ return err;
+
+ if (!fw || !fw->data || fw->size <= sizeof(*hdr)) {
+ dev_err(dev->mt76.dev, "failed to load firmware\n");
+ err = -EIO;
+ goto out;
+ }
+
+ hdr = (struct mt76x02_patch_header *)fw->data;
+ dev_info(dev->mt76.dev, "ROM patch build: %.15s\n", hdr->build_time);
+
+ /* enable USB_DMA_CFG */
+ val = MT_USB_DMA_CFG_RX_BULK_EN |
+ MT_USB_DMA_CFG_TX_BULK_EN |
+ FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20);
+ mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
+
+ /* vendor reset */
+ mt76x02u_mcu_fw_reset(dev);
+ usleep_range(5000, 10000);
+
+ /* enable FCE to send in-band cmd */
+ mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1);
+ /* FCE tx_fs_base_ptr */
+ mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
+ /* FCE tx_fs_max_cnt */
+ mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 0x1);
+ /* FCE pdma enable */
+ mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
+ /* FCE skip_fs_en */
+ mt76_wr(dev, MT_FCE_SKIP_FS, 0x3);
+
+ err = mt76x02u_mcu_fw_send_data(dev, fw->data + sizeof(*hdr),
+ fw->size - sizeof(*hdr),
+ MCU_ROM_PATCH_MAX_PAYLOAD,
+ MT76U_MCU_ROM_PATCH_OFFSET);
+ if (err < 0) {
+ err = -EIO;
+ goto out;
+ }
+
+ mt76x2u_mcu_enable_patch(dev);
+ mt76x2u_mcu_reset_wmt(dev);
+ mdelay(20);
+
+ if (!mt76_poll_msec(dev, patch_reg, patch_mask, patch_mask, 100)) {
+ dev_err(dev->mt76.dev, "failed to load ROM patch\n");
+ err = -ETIMEDOUT;
+ }
+
+out:
+ if (rom_protect)
+ mt76_wr(dev, MT_MCU_SEMAPHORE_03, 1);
+ release_firmware(fw);
+ return err;
+}
+
+static int mt76x2u_mcu_load_firmware(struct mt76x02_dev *dev)
+{
+ u32 val, dlm_offset = MT76U_MCU_DLM_OFFSET;
+ const struct mt76x02_fw_header *hdr;
+ int err, len, ilm_len, dlm_len;
+ const struct firmware *fw;
+
+ err = request_firmware(&fw, MT7662_FIRMWARE, dev->mt76.dev);
+ if (err < 0)
+ return err;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+ err = -EINVAL;
+ goto out;
+ }
+
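+ /* image layout: mt76x02_fw_header followed by the ILM and DLM sections */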
+ hdr = (const struct mt76x02_fw_header *)fw->data;
+ ilm_len = le32_to_cpu(hdr->ilm_len);
+ dlm_len = le32_to_cpu(hdr->dlm_len);
+ len = sizeof(*hdr) + ilm_len + dlm_len;
+ if (fw->size != len) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ val = le16_to_cpu(hdr->fw_ver);
+ dev_info(dev->mt76.dev, "Firmware Version: %d.%d.%02d\n",
+ (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf);
+
+ val = le16_to_cpu(hdr->build_ver);
+ dev_info(dev->mt76.dev, "Build: %x\n", val);
+ dev_info(dev->mt76.dev, "Build Time: %.16s\n", hdr->build_time);
+
+ /* vendor reset */
+ mt76x02u_mcu_fw_reset(dev);
+ usleep_range(5000, 10000);
+
+ /* enable USB_DMA_CFG */
+ val = MT_USB_DMA_CFG_RX_BULK_EN |
+ MT_USB_DMA_CFG_TX_BULK_EN |
+ FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20);
+ mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
+ /* enable FCE to send in-band cmd */
+ mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1);
+ /* FCE tx_fs_base_ptr */
+ mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
+ /* FCE tx_fs_max_cnt */
+ mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 0x1);
+ /* FCE pdma enable */
+ mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
+ /* FCE skip_fs_en */
+ mt76_wr(dev, MT_FCE_SKIP_FS, 0x3);
+
+ /* load ILM */
+ err = mt76x02u_mcu_fw_send_data(dev, fw->data + sizeof(*hdr),
+ ilm_len, MCU_FW_URB_MAX_PAYLOAD,
+ MT76U_MCU_ILM_OFFSET);
+ if (err < 0) {
+ err = -EIO;
+ goto out;
+ }
+
+ /* load DLM */
+ if (mt76xx_rev(dev) >= MT76XX_REV_E3)
+ dlm_offset += 0x800;
+ err = mt76x02u_mcu_fw_send_data(dev, fw->data + sizeof(*hdr) + ilm_len,
+ dlm_len, MCU_FW_URB_MAX_PAYLOAD,
+ dlm_offset);
+ if (err < 0) {
+ err = -EIO;
+ goto out;
+ }
+
+ mt76x2u_mcu_load_ivb(dev);
+ if (!mt76_poll_msec(dev, MT_MCU_COM_REG0, 1, 1, 100)) {
+ dev_err(dev->mt76.dev, "firmware failed to start\n");
+ err = -ETIMEDOUT;
+ goto out;
+ }
+
+ mt76_set(dev, MT_MCU_COM_REG0, BIT(1));
+ /* enable FCE to send in-band cmd */
+ mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1);
+ mt76x02_set_ethtool_fwver(dev, hdr);
+ dev_dbg(dev->mt76.dev, "firmware running\n");
+
+out:
+ release_firmware(fw);
+ return err;
+}
+
+int mt76x2u_mcu_fw_init(struct mt76x02_dev *dev)
+{
+ int err;
+
+ err = mt76x2u_mcu_load_rom_patch(dev);
+ if (err < 0)
+ return err;
+
+ return mt76x2u_mcu_load_firmware(dev);
+}
+
+int mt76x2u_mcu_init(struct mt76x02_dev *dev)
+{
+ int err;
+
+ err = mt76x02_mcu_function_select(dev, Q_SELECT, 1);
+ if (err < 0)
+ return err;
+
+ return mt76x02_mcu_set_radio_state(dev, true);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c
new file mode 100644
index 000000000..a04a98f5c
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include "mt76x2u.h"
+#include "eeprom.h"
+#include "../mt76x02_phy.h"
+
+static void
+mt76x2u_phy_channel_calibrate(struct mt76x02_dev *dev, bool mac_stopped)
+{
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
+ bool is_5ghz = chan->band == NL80211_BAND_5GHZ;
+
+ if (dev->cal.channel_cal_done)
+ return;
+
+ if (mt76x2_channel_silent(dev))
+ return;
+
+ if (!mac_stopped)
+ mt76x2u_mac_stop(dev);
+
+ if (is_5ghz)
+ mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 0);
+
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TX_SHAPING, 0);
+
+ if (!mac_stopped)
+ mt76x2_mac_resume(dev);
+ mt76x2_apply_gain_adj(dev);
+ mt76x02_edcca_init(dev);
+
+ dev->cal.channel_cal_done = true;
+}
+
+void mt76x2u_phy_calibrate(struct work_struct *work)
+{
+ struct mt76x02_dev *dev;
+
+ dev = container_of(work, struct mt76x02_dev, cal_work.work);
+
+ mutex_lock(&dev->mt76.mutex);
+
+ mt76x2u_phy_channel_calibrate(dev, false);
+ mt76x2_phy_tssi_compensate(dev);
+ mt76x2_phy_update_channel_gain(dev);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+}
+
+int mt76x2u_phy_set_channel(struct mt76x02_dev *dev,
+ struct cfg80211_chan_def *chandef)
+{
+ u32 ext_cca_chan[4] = {
+ [0] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(0)),
+ [1] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(1)),
+ [2] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(2)),
+ [3] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(3)),
+ };
+ bool scan = test_bit(MT76_SCANNING, &dev->mphy.state);
+ struct ieee80211_channel *chan = chandef->chan;
+ u8 channel = chan->hw_value, bw, bw_index;
+ int ch_group_index, freq, freq1, ret;
+
+ dev->cal.channel_cal_done = false;
+ freq = chandef->chan->center_freq;
+ freq1 = chandef->center_freq1;
+
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_40:
+ bw = 1;
+ if (freq1 > freq) {
+ bw_index = 1;
+ ch_group_index = 0;
+ } else {
+ bw_index = 3;
+ ch_group_index = 1;
+ }
+ channel += 2 - ch_group_index * 4;
+ break;
+ case NL80211_CHAN_WIDTH_80:
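+ /* position of the primary 20 MHz channel inside the 80 MHz block (0..3) */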
+ ch_group_index = (freq - freq1 + 30) / 20;
+ if (WARN_ON(ch_group_index < 0 || ch_group_index > 3))
+ ch_group_index = 0;
+ bw = 2;
+ bw_index = ch_group_index;
+ channel += 6 - ch_group_index * 4;
+ break;
+ default:
+ bw = 0;
+ bw_index = 0;
+ ch_group_index = 0;
+ break;
+ }
+
+ mt76x2_read_rx_gain(dev);
+ mt76x2_phy_set_txpower_regs(dev, chan->band);
+ mt76x2_configure_tx_delay(dev, chan->band, bw);
+ mt76x2_phy_set_txpower(dev);
+
+ mt76x02_phy_set_band(dev, chan->band, ch_group_index & 1);
+ mt76x02_phy_set_bw(dev, chandef->width, ch_group_index);
+
+ mt76_rmw(dev, MT_EXT_CCA_CFG,
+ (MT_EXT_CCA_CFG_CCA0 |
+ MT_EXT_CCA_CFG_CCA1 |
+ MT_EXT_CCA_CFG_CCA2 |
+ MT_EXT_CCA_CFG_CCA3 |
+ MT_EXT_CCA_CFG_CCA_MASK),
+ ext_cca_chan[ch_group_index]);
+
+ ret = mt76x2_mcu_set_channel(dev, channel, bw, bw_index, scan);
+ if (ret)
+ return ret;
+
+ mt76x2_mcu_init_gain(dev, channel, dev->cal.rx.mcu_gain, true);
+
+ /* Enable LDPC Rx */
+ if (mt76xx_rev(dev) >= MT76XX_REV_E3)
+ mt76_set(dev, MT_BBP(RXO, 13), BIT(10));
+
+ if (!dev->cal.init_cal_done) {
+ u8 val = mt76x02_eeprom_get(dev, MT_EE_BT_RCAL_RESULT);
+
+ if (val != 0xff)
+ mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0);
+ }
+
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel);
+
+ /* Rx LPF calibration */
+ if (!dev->cal.init_cal_done)
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RC, 0);
+ dev->cal.init_cal_done = true;
+
+ mt76_wr(dev, MT_BBP(AGC, 61), 0xff64a4e2);
+ mt76_wr(dev, MT_BBP(AGC, 7), 0x08081010);
+ mt76_wr(dev, MT_BBP(AGC, 11), 0x00000404);
+ mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070);
+ mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x04101b3f);
+
+ mt76_set(dev, MT_BBP(TXO, 4), BIT(25));
+ mt76_set(dev, MT_BBP(RXO, 13), BIT(8));
+
+ if (scan)
+ return 0;
+
+ mt76x2u_phy_channel_calibrate(dev, true);
+ mt76x02_init_agc_gain(dev);
+
+ if (mt76x2_tssi_enabled(dev)) {
+ /* init default values for temp compensation */
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
+ 0x38);
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_2, MT_TX_ALC_CFG_2_TEMP_COMP,
+ 0x38);
+
+ /* init tssi calibration */
+ if (!mt76x2_channel_silent(dev)) {
+ struct ieee80211_channel *chan;
+ u32 flag = 0;
+
+ chan = dev->mphy.chandef.chan;
+ if (chan->band == NL80211_BAND_5GHZ)
+ flag |= BIT(0);
+ if (mt76x02_ext_pa_enabled(dev, chan->band))
+ flag |= BIT(8);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TSSI, flag);
+ dev->cal.tssi_cal_done = true;
+ }
+ }
+
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7915/Kconfig
new file mode 100644
index 000000000..d98225da6
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: ISC
+config MT7915E
+ tristate "MediaTek MT7915E (PCIe) support"
+ select MT76_CORE
+ depends on MAC80211
+ depends on PCI
+ help
+ This adds support for MT7915-based wireless PCIe devices,
+ which support concurrent dual-band IEEE 802.11ax operation
+ on 2.4 GHz and 5 GHz with 4x4:4SS, 1024-QAM, 160 MHz channels,
+ OFDMA, spatial reuse and dual carrier modulation.
+
+ To compile this driver as a module, choose M here.
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/Makefile b/drivers/net/wireless/mediatek/mt76/mt7915/Makefile
new file mode 100644
index 000000000..57fe726cc
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: ISC
+
+obj-$(CONFIG_MT7915E) += mt7915e.o
+
+mt7915e-y := pci.o init.o dma.o eeprom.o main.o mcu.o mac.o \
+ debugfs.o
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
new file mode 100644
index 000000000..e4d7eb33a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
@@ -0,0 +1,473 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#include "mt7915.h"
+#include "eeprom.h"
+
+/** global debugfs **/
+
+/* test knob of system layer 1/2 error recovery */
+static int mt7915_ser_trigger_set(void *data, u64 val)
+{
+ enum {
+ SER_SET_RECOVER_L1 = 1,
+ SER_SET_RECOVER_L2,
+ SER_ENABLE = 2,
+ SER_RECOVER
+ };
+ struct mt7915_dev *dev = data;
+ int ret = 0;
+
+ switch (val) {
+ case SER_SET_RECOVER_L1:
+ case SER_SET_RECOVER_L2:
+ ret = mt7915_mcu_set_ser(dev, SER_ENABLE, BIT(val), 0);
+ if (ret)
+ return ret;
+
+ return mt7915_mcu_set_ser(dev, SER_RECOVER, val, 0);
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_ser_trigger, NULL,
+ mt7915_ser_trigger_set, "%lld\n");
+
+static int
+mt7915_radar_trigger(void *data, u64 val)
+{
+ struct mt7915_dev *dev = data;
+
+ return mt7915_mcu_rdd_cmd(dev, RDD_RADAR_EMULATE, 1, 0, 0);
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_radar_trigger, NULL,
+ mt7915_radar_trigger, "%lld\n");
+
+static int
+mt7915_dbdc_set(void *data, u64 val)
+{
+ struct mt7915_dev *dev = data;
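+ /* copy the EEPROM/efuse content 32 bits at a time through the vendor register window */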
+
+ if (val)
+ mt7915_register_ext_phy(dev);
+ else
+ mt7915_unregister_ext_phy(dev);
+
+ return 0;
+}
+
+static int
+mt7915_dbdc_get(void *data, u64 *val)
+{
+ struct mt7915_dev *dev = data;
+
+ *val = !!mt7915_ext_phy(dev);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_dbdc, mt7915_dbdc_get,
+ mt7915_dbdc_set, "%lld\n");
+
+static int
+mt7915_fw_debug_set(void *data, u64 val)
+{
+ struct mt7915_dev *dev = data;
+ enum {
+ DEBUG_TXCMD = 62,
+ DEBUG_CMD_RPT_TX,
+ DEBUG_CMD_RPT_TRIG,
+ DEBUG_SPL,
+ DEBUG_RPT_RX,
+ } debug;
+
+ dev->fw_debug = !!val;
+
+ mt7915_mcu_fw_log_2_host(dev, dev->fw_debug ? 2 : 0);
+
+ for (debug = DEBUG_TXCMD; debug <= DEBUG_RPT_RX; debug++)
+ mt7915_mcu_fw_dbg_ctrl(dev, debug, dev->fw_debug);
+
+ return 0;
+}
+
+static int
+mt7915_fw_debug_get(void *data, u64 *val)
+{
+ struct mt7915_dev *dev = data;
+
+ *val = dev->fw_debug;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_fw_debug, mt7915_fw_debug_get,
+ mt7915_fw_debug_set, "%lld\n");
+
+static void
+mt7915_ampdu_stat_read_phy(struct mt7915_phy *phy,
+ struct seq_file *file)
+{
+ struct mt7915_dev *dev = file->private;
+ bool ext_phy = phy != &dev->phy;
+ int bound[15], range[4], i, n;
+
+ if (!phy)
+ return;
+
+ /* Tx ampdu stat */
+ for (i = 0; i < ARRAY_SIZE(range); i++)
+ range[i] = mt76_rr(dev, MT_MIB_ARNG(ext_phy, i));
+
+ for (i = 0; i < ARRAY_SIZE(bound); i++)
+ bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i % 4) + 1;
+
+ seq_printf(file, "\nPhy %d\n", ext_phy);
+
+ seq_printf(file, "Length: %8d | ", bound[0]);
+ for (i = 0; i < ARRAY_SIZE(bound) - 1; i++)
+ seq_printf(file, "%3d -%3d | ",
+ bound[i] + 1, bound[i + 1]);
+
+ seq_puts(file, "\nCount: ");
+ n = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
+ for (i = 0; i < ARRAY_SIZE(bound); i++)
+ seq_printf(file, "%8d | ", dev->mt76.aggr_stats[i + n]);
+ seq_puts(file, "\n");
+
+ seq_printf(file, "BA miss count: %d\n", phy->mib.ba_miss_cnt);
+}
+
+static void
+mt7915_txbf_stat_read_phy(struct mt7915_phy *phy, struct seq_file *s)
+{
+ struct mt7915_dev *dev = s->private;
+ bool ext_phy = phy != &dev->phy;
+ int cnt;
+
+ if (!phy)
+ return;
+
+ /* Tx Beamformer monitor */
+ seq_puts(s, "\nTx Beamformer applied PPDU counts: ");
+
+ cnt = mt76_rr(dev, MT_ETBF_TX_APP_CNT(ext_phy));
+ seq_printf(s, "iBF: %ld, eBF: %ld\n",
+ FIELD_GET(MT_ETBF_TX_IBF_CNT, cnt),
+ FIELD_GET(MT_ETBF_TX_EBF_CNT, cnt));
+
+ /* Tx Beamformer Rx feedback monitor */
+ seq_puts(s, "Tx Beamformer Rx feedback statistics: ");
+
+ cnt = mt76_rr(dev, MT_ETBF_RX_FB_CNT(ext_phy));
+ seq_printf(s, "All: %ld, HE: %ld, VHT: %ld, HT: %ld\n",
+ FIELD_GET(MT_ETBF_RX_FB_ALL, cnt),
+ FIELD_GET(MT_ETBF_RX_FB_HE, cnt),
+ FIELD_GET(MT_ETBF_RX_FB_VHT, cnt),
+ FIELD_GET(MT_ETBF_RX_FB_HT, cnt));
+
+ /* Tx Beamformee Rx NDPA & Tx feedback report */
+ cnt = mt76_rr(dev, MT_ETBF_TX_NDP_BFRP(ext_phy));
+ seq_printf(s, "Tx Beamformee successful feedback frames: %ld\n",
+ FIELD_GET(MT_ETBF_TX_FB_CPL, cnt));
+ seq_printf(s, "Tx Beamformee feedback triggered counts: %ld\n",
+ FIELD_GET(MT_ETBF_TX_FB_TRI, cnt));
+
+ /* Tx SU & MU counters */
+ cnt = mt76_rr(dev, MT_MIB_SDR34(ext_phy));
+ seq_printf(s, "Tx multi-user Beamforming counts: %ld\n",
+ FIELD_GET(MT_MIB_MU_BF_TX_CNT, cnt));
+ cnt = mt76_rr(dev, MT_MIB_DR8(ext_phy));
+ seq_printf(s, "Tx multi-user MPDU counts: %d\n", cnt);
+ cnt = mt76_rr(dev, MT_MIB_DR9(ext_phy));
+ seq_printf(s, "Tx multi-user successful MPDU counts: %d\n", cnt);
+ cnt = mt76_rr(dev, MT_MIB_DR11(ext_phy));
+ seq_printf(s, "Tx single-user successful MPDU counts: %d\n", cnt);
+
+ seq_puts(s, "\n");
+}
+
+static int
+mt7915_tx_stats_read(struct seq_file *file, void *data)
+{
+ struct mt7915_dev *dev = file->private;
+ int stat[8], i, n;
+
+ mt7915_ampdu_stat_read_phy(&dev->phy, file);
+ mt7915_txbf_stat_read_phy(&dev->phy, file);
+
+ mt7915_ampdu_stat_read_phy(mt7915_ext_phy(dev), file);
+ mt7915_txbf_stat_read_phy(mt7915_ext_phy(dev), file);
+
+ /* Tx amsdu info */
+ seq_puts(file, "Tx MSDU stat:\n");
+ for (i = 0, n = 0; i < ARRAY_SIZE(stat); i++) {
+ stat[i] = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
+ n += stat[i];
+ }
+
+ for (i = 0; i < ARRAY_SIZE(stat); i++) {
+ seq_printf(file, "AMSDU pack count of %d MSDU in TXD: 0x%x ",
+ i + 1, stat[i]);
+ if (n != 0)
+ seq_printf(file, "(%d%%)\n", stat[i] * 100 / n);
+ else
+ seq_puts(file, "\n");
+ }
+
+ return 0;
+}
+
+static int
+mt7915_tx_stats_open(struct inode *inode, struct file *f)
+{
+ return single_open(f, mt7915_tx_stats_read, inode->i_private);
+}
+
+static const struct file_operations fops_tx_stats = {
+ .open = mt7915_tx_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int mt7915_read_temperature(struct seq_file *s, void *data)
+{
+ struct mt7915_dev *dev = dev_get_drvdata(s->private);
+ int temp;
+
+ /* cpu */
+ temp = mt7915_mcu_get_temperature(dev, 0);
+ seq_printf(s, "Temperature: %d\n", temp);
+
+ return 0;
+}
+
+static int
+mt7915_queues_acq(struct seq_file *s, void *data)
+{
+ struct mt7915_dev *dev = dev_get_drvdata(s->private);
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ int j, acs = i / 4, index = i % 4;
+ u32 ctrl, val, qlen = 0;
+
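+ /* QEMPTY is a bitmap of empty hardware queues; select each busy queue via
+  * MT_PLE_FL_Q0_CTRL and read its depth back from MT_PLE_FL_Q3_CTRL.
+  */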
+ val = mt76_rr(dev, MT_PLE_AC_QEMPTY(acs, index));
+ ctrl = BIT(31) | BIT(15) | (acs << 8);
+
+ for (j = 0; j < 32; j++) {
+ if (val & BIT(j))
+ continue;
+
+ mt76_wr(dev, MT_PLE_FL_Q0_CTRL,
+ ctrl | (j + (index << 5)));
+ qlen += mt76_get_field(dev, MT_PLE_FL_Q3_CTRL,
+ GENMASK(11, 0));
+ }
+ seq_printf(s, "AC%d%d: queued=%d\n", acs, index, qlen);
+ }
+
+ return 0;
+}
+
+static int
+mt7915_queues_read(struct seq_file *s, void *data)
+{
+ struct mt7915_dev *dev = dev_get_drvdata(s->private);
+ static const struct {
+ char *queue;
+ int id;
+ } queue_map[] = {
+ { "WFDMA0", MT_TXQ_BE },
+ { "MCUWM", MT_TXQ_MCU },
+ { "MCUWA", MT_TXQ_MCU_WA },
+ { "MCUFWQ", MT_TXQ_FWDL },
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(queue_map); i++) {
+ struct mt76_queue *q = dev->mt76.q_tx[queue_map[i].id];
+
+ if (!q)
+ continue;
+
+ seq_printf(s,
+ "%s: queued=%d head=%d tail=%d\n",
+ queue_map[i].queue, q->queued, q->head,
+ q->tail);
+ }
+
+ return 0;
+}
+
+static void
+mt7915_puts_rate_txpower(struct seq_file *s, s8 *delta,
+ s8 txpower_cur, int band)
+{
+ static const char * const sku_group_name[] = {
+ "CCK", "OFDM", "HT20", "HT40",
+ "VHT20", "VHT40", "VHT80", "VHT160",
+ "RU26", "RU52", "RU106", "RU242/SU20",
+ "RU484/SU40", "RU996/SU80", "RU2x996/SU160"
+ };
+ s8 txpower[MT7915_SKU_RATE_NUM];
+ int i, idx = 0;
+
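+ /* base power and deltas are in 0.5 dB units; report whole dB values */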
+ for (i = 0; i < MT7915_SKU_RATE_NUM; i++)
+ txpower[i] = DIV_ROUND_UP(txpower_cur + delta[i], 2);
+
+ for (i = 0; i < MAX_SKU_RATE_GROUP_NUM; i++) {
+ const struct sku_group *sku = &mt7915_sku_groups[i];
+ u32 offset = sku->offset[band];
+
+ if (!offset) {
+ idx += sku->len;
+ continue;
+ }
+
+ mt76_seq_puts_array(s, sku_group_name[i],
+ txpower + idx, sku->len);
+ idx += sku->len;
+ }
+}
+
+static int
+mt7915_read_rate_txpower(struct seq_file *s, void *data)
+{
+ struct mt7915_dev *dev = dev_get_drvdata(s->private);
+ struct mt76_phy *mphy = &dev->mphy;
+ enum nl80211_band band = mphy->chandef.chan->band;
+ s8 *delta = dev->rate_power[band];
+ s8 txpower_base = mphy->txpower_cur - delta[MT7915_SKU_MAX_DELTA_IDX];
+
+ seq_puts(s, "Band 0:\n");
+ mt7915_puts_rate_txpower(s, delta, txpower_base, band);
+
+ if (dev->mt76.phy2) {
+ mphy = dev->mt76.phy2;
+ band = mphy->chandef.chan->band;
+ delta = dev->rate_power[band];
+ txpower_base = mphy->txpower_cur -
+ delta[MT7915_SKU_MAX_DELTA_IDX];
+
+ seq_puts(s, "Band 1:\n");
+ mt7915_puts_rate_txpower(s, delta, txpower_base, band);
+ }
+
+ return 0;
+}
+
+int mt7915_init_debugfs(struct mt7915_dev *dev)
+{
+ struct dentry *dir;
+
+ dir = mt76_register_debugfs(&dev->mt76);
+ if (!dir)
+ return -ENOMEM;
+
+ debugfs_create_devm_seqfile(dev->mt76.dev, "queues", dir,
+ mt7915_queues_read);
+ debugfs_create_devm_seqfile(dev->mt76.dev, "acq", dir,
+ mt7915_queues_acq);
+ debugfs_create_file("tx_stats", 0400, dir, dev, &fops_tx_stats);
+ debugfs_create_file("dbdc", 0600, dir, dev, &fops_dbdc);
+ debugfs_create_file("fw_debug", 0600, dir, dev, &fops_fw_debug);
+ debugfs_create_u32("dfs_hw_pattern", 0400, dir, &dev->hw_pattern);
+ /* test knobs */
+ debugfs_create_file("radar_trigger", 0200, dir, dev,
+ &fops_radar_trigger);
+ debugfs_create_file("ser_trigger", 0200, dir, dev, &fops_ser_trigger);
+ debugfs_create_devm_seqfile(dev->mt76.dev, "temperature", dir,
+ mt7915_read_temperature);
+ debugfs_create_devm_seqfile(dev->mt76.dev, "txpower_sku", dir,
+ mt7915_read_rate_txpower);
+
+ return 0;
+}
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+/** per-station debugfs **/
+
+/* usage: <tx mode> <ldpc> <stbc> <bw> <gi> <nss> <mcs> */
+static int mt7915_sta_fixed_rate_set(void *data, u64 rate)
+{
+ struct ieee80211_sta *sta = data;
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+
+ return mt7915_mcu_set_fixed_rate(msta->vif->phy->dev, sta, rate);
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_fixed_rate, NULL,
+ mt7915_sta_fixed_rate_set, "%llx\n");
+
+static int
+mt7915_sta_stats_read(struct seq_file *s, void *data)
+{
+ struct ieee80211_sta *sta = s->private;
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct mt7915_sta_stats *stats = &msta->stats;
+ struct rate_info *rate = &stats->prob_rate;
+ static const char * const bw[] = {
+ "BW20", "BW5", "BW10", "BW40",
+ "BW80", "BW160", "BW_HE_RU"
+ };
+
+ if (!rate->legacy && !rate->flags)
+ return 0;
+
+ seq_puts(s, "Probing rate - ");
+ if (rate->flags & RATE_INFO_FLAGS_MCS)
+ seq_puts(s, "HT ");
+ else if (rate->flags & RATE_INFO_FLAGS_VHT_MCS)
+ seq_puts(s, "VHT ");
+ else if (rate->flags & RATE_INFO_FLAGS_HE_MCS)
+ seq_puts(s, "HE ");
+ else
+ seq_printf(s, "Bitrate %d\n", rate->legacy);
+
+ if (rate->flags) {
+ seq_printf(s, "%s NSS%d MCS%d ",
+ bw[rate->bw], rate->nss, rate->mcs);
+
+ if (rate->flags & RATE_INFO_FLAGS_SHORT_GI)
+ seq_puts(s, "SGI ");
+ else if (rate->he_gi)
+ seq_puts(s, "HE GI ");
+
+ if (rate->he_dcm)
+ seq_puts(s, "DCM ");
+ }
+
+ seq_printf(s, "\nPPDU PER: %ld.%1ld%%\n",
+ stats->per / 10, stats->per % 10);
+
+ return 0;
+}
+
+static int
+mt7915_sta_stats_open(struct inode *inode, struct file *f)
+{
+ return single_open(f, mt7915_sta_stats_read, inode->i_private);
+}
+
+static const struct file_operations fops_sta_stats = {
+ .open = mt7915_sta_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+void mt7915_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct dentry *dir)
+{
+ debugfs_create_file("fixed_rate", 0600, dir, sta, &fops_fixed_rate);
+ debugfs_create_file("stats", 0400, dir, sta, &fops_sta_stats);
+}
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
new file mode 100644
index 000000000..cfa12c4c6
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#include "mt7915.h"
+#include "../dma.h"
+#include "mac.h"
+
+static int
+mt7915_init_tx_queues(struct mt7915_dev *dev, int n_desc)
+{
+ struct mt76_queue *hwq;
+ int err, i;
+
+ hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
+ if (!hwq)
+ return -ENOMEM;
+
+ err = mt76_queue_alloc(dev, hwq, MT7915_TXQ_BAND0, n_desc, 0,
+ MT_TX_RING_BASE);
+ if (err < 0)
+ return err;
+
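+ /* all per-AC data queues share the single band 0 hardware ring */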
+ for (i = 0; i < MT_TXQ_MCU; i++)
+ dev->mt76.q_tx[i] = hwq;
+
+ return 0;
+}
+
+static int
+mt7915_init_mcu_queue(struct mt7915_dev *dev, int qid, int idx, int n_desc)
+{
+ struct mt76_queue *hwq;
+ int err;
+
+ hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
+ if (!hwq)
+ return -ENOMEM;
+
+ err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);
+ if (err < 0)
+ return err;
+
+ dev->mt76.q_tx[qid] = hwq;
+
+ return 0;
+}
+
+void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ struct sk_buff *skb)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ __le32 *rxd = (__le32 *)skb->data;
+ enum rx_pkt_type type;
+
+ type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
+
+ switch (type) {
+ case PKT_TYPE_TXRX_NOTIFY:
+ mt7915_mac_tx_free(dev, skb);
+ break;
+ case PKT_TYPE_RX_EVENT:
+ mt7915_mcu_rx_event(dev, skb);
+ break;
+ case PKT_TYPE_NORMAL:
+ if (!mt7915_mac_fill_rx(dev, skb)) {
+ mt76_rx(&dev->mt76, q, skb);
+ return;
+ }
+ fallthrough;
+ default:
+ dev_kfree_skb(skb);
+ break;
+ }
+}
+
+static void
+mt7915_tx_cleanup(struct mt7915_dev *dev)
+{
+ mt76_queue_tx_cleanup(dev, MT_TXQ_MCU, false);
+ mt76_queue_tx_cleanup(dev, MT_TXQ_MCU_WA, false);
+}
+
+static int mt7915_poll_tx(struct napi_struct *napi, int budget)
+{
+ struct mt7915_dev *dev;
+
+ dev = container_of(napi, struct mt7915_dev, mt76.tx_napi);
+
+ mt7915_tx_cleanup(dev);
+
+ if (napi_complete_done(napi, 0))
+ mt7915_irq_enable(dev, MT_INT_TX_DONE_MCU);
+
+ return 0;
+}
+
+void mt7915_dma_prefetch(struct mt7915_dev *dev)
+{
+#define PREFETCH(base, depth) ((base) << 16 | (depth))
+
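+ /* each ring's prefetch control packs the FIFO base offset into bits [31:16] and the depth into bits [15:0] */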
+ mt76_wr(dev, MT_WFDMA0_RX_RING0_EXT_CTRL, PREFETCH(0x0, 0x4));
+ mt76_wr(dev, MT_WFDMA0_RX_RING1_EXT_CTRL, PREFETCH(0x40, 0x4));
+ mt76_wr(dev, MT_WFDMA0_RX_RING2_EXT_CTRL, PREFETCH(0x80, 0x0));
+
+ mt76_wr(dev, MT_WFDMA1_TX_RING0_EXT_CTRL, PREFETCH(0x80, 0x4));
+ mt76_wr(dev, MT_WFDMA1_TX_RING1_EXT_CTRL, PREFETCH(0xc0, 0x4));
+ mt76_wr(dev, MT_WFDMA1_TX_RING2_EXT_CTRL, PREFETCH(0x100, 0x4));
+ mt76_wr(dev, MT_WFDMA1_TX_RING3_EXT_CTRL, PREFETCH(0x140, 0x4));
+ mt76_wr(dev, MT_WFDMA1_TX_RING4_EXT_CTRL, PREFETCH(0x180, 0x4));
+ mt76_wr(dev, MT_WFDMA1_TX_RING5_EXT_CTRL, PREFETCH(0x1c0, 0x4));
+ mt76_wr(dev, MT_WFDMA1_TX_RING6_EXT_CTRL, PREFETCH(0x200, 0x4));
+ mt76_wr(dev, MT_WFDMA1_TX_RING7_EXT_CTRL, PREFETCH(0x240, 0x4));
+
+ mt76_wr(dev, MT_WFDMA1_TX_RING16_EXT_CTRL, PREFETCH(0x280, 0x4));
+ mt76_wr(dev, MT_WFDMA1_TX_RING17_EXT_CTRL, PREFETCH(0x2c0, 0x4));
+ mt76_wr(dev, MT_WFDMA1_TX_RING18_EXT_CTRL, PREFETCH(0x300, 0x4));
+ mt76_wr(dev, MT_WFDMA1_TX_RING19_EXT_CTRL, PREFETCH(0x340, 0x4));
+ mt76_wr(dev, MT_WFDMA1_TX_RING20_EXT_CTRL, PREFETCH(0x380, 0x4));
+ mt76_wr(dev, MT_WFDMA1_TX_RING21_EXT_CTRL, PREFETCH(0x3c0, 0x0));
+
+ mt76_wr(dev, MT_WFDMA1_RX_RING0_EXT_CTRL, PREFETCH(0x3c0, 0x4));
+ mt76_wr(dev, MT_WFDMA1_RX_RING1_EXT_CTRL, PREFETCH(0x400, 0x4));
+ mt76_wr(dev, MT_WFDMA1_RX_RING2_EXT_CTRL, PREFETCH(0x440, 0x4));
+ mt76_wr(dev, MT_WFDMA1_RX_RING3_EXT_CTRL, PREFETCH(0x480, 0x0));
+}
+
+static u32 __mt7915_reg_addr(struct mt7915_dev *dev, u32 addr)
+{
+ static const struct {
+ u32 phys;
+ u32 mapped;
+ u32 size;
+ } fixed_map[] = {
+ { 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
+ { 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
+ { 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
+ { 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
+ { 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
+ { 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
+ { 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
+ { 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
+ { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
+ { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
+ { 0x820cc000, 0x0e000, 0x2000 }, /* WF_UMAC_TOP (PP) */
+ { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
+ { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
+ { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
+ { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
+ { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
+ { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
+ { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
+ { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
+ { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
+ { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
+ { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
+ { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
+ { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
+ { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
+ { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
+ { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
+ { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
+ { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
+ { 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
+ { 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
+ { 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
+ { 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
+ { 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
+ { 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
+ { 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
+ { 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
+ { 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
+ };
+ int i;
+
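+ /* addresses below 0x100000 are already bus offsets; anything else is
+  * translated through the fixed map above or, failing that, the dynamic
+  * L1/L2 remap windows.
+  */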
+ if (addr < 0x100000)
+ return addr;
+
+ for (i = 0; i < ARRAY_SIZE(fixed_map); i++) {
+ u32 ofs;
+
+ if (addr < fixed_map[i].phys)
+ continue;
+
+ ofs = addr - fixed_map[i].phys;
+ if (ofs > fixed_map[i].size)
+ continue;
+
+ return fixed_map[i].mapped + ofs;
+ }
+
+ if ((addr >= 0x18000000 && addr < 0x18c00000) ||
+ (addr >= 0x70000000 && addr < 0x78000000) ||
+ (addr >= 0x7c000000 && addr < 0x7c400000))
+ return mt7915_reg_map_l1(dev, addr);
+
+ return mt7915_reg_map_l2(dev, addr);
+}
+
+static u32 mt7915_rr(struct mt76_dev *mdev, u32 offset)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ u32 addr = __mt7915_reg_addr(dev, offset);
+
+ return dev->bus_ops->rr(mdev, addr);
+}
+
+static void mt7915_wr(struct mt76_dev *mdev, u32 offset, u32 val)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ u32 addr = __mt7915_reg_addr(dev, offset);
+
+ dev->bus_ops->wr(mdev, addr, val);
+}
+
+static u32 mt7915_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ u32 addr = __mt7915_reg_addr(dev, offset);
+
+ return dev->bus_ops->rmw(mdev, addr, mask, val);
+}
+
+int mt7915_dma_init(struct mt7915_dev *dev)
+{
+ /* Increase buffer size to receive large VHT/HE MPDUs */
+ struct mt76_bus_ops *bus_ops;
+ int rx_buf_size = MT_RX_BUF_SIZE * 2;
+ int ret;
+
+ dev->bus_ops = dev->mt76.bus;
+ bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
+ GFP_KERNEL);
+ if (!bus_ops)
+ return -ENOMEM;
+
+ bus_ops->rr = mt7915_rr;
+ bus_ops->wr = mt7915_wr;
+ bus_ops->rmw = mt7915_rmw;
+ dev->mt76.bus = bus_ops;
+
+ mt76_dma_attach(&dev->mt76);
+
+ /* configure global settings */
+ mt76_set(dev, MT_WFDMA1_GLO_CFG,
+ MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
+
+ /* configure prefetch settings */
+ mt7915_dma_prefetch(dev);
+
+ /* reset dma idx */
+ mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
+ mt76_wr(dev, MT_WFDMA1_RST_DTX_PTR, ~0);
+
+ /* configure delay interrupt */
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);
+ mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0, 0);
+
+ /* init tx queue */
+ ret = mt7915_init_tx_queues(dev, MT7915_TX_RING_SIZE);
+ if (ret)
+ return ret;
+
+ /* command to WM */
+ ret = mt7915_init_mcu_queue(dev, MT_TXQ_MCU, MT7915_TXQ_MCU_WM,
+ MT7915_TX_MCU_RING_SIZE);
+ if (ret)
+ return ret;
+
+ /* command to WA */
+ ret = mt7915_init_mcu_queue(dev, MT_TXQ_MCU_WA, MT7915_TXQ_MCU_WA,
+ MT7915_TX_MCU_RING_SIZE);
+ if (ret)
+ return ret;
+
+ /* firmware download */
+ ret = mt7915_init_mcu_queue(dev, MT_TXQ_FWDL, MT7915_TXQ_FWDL,
+ MT7915_TX_FWDL_RING_SIZE);
+ if (ret)
+ return ret;
+
+ /* event from WM */
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
+ MT7915_RXQ_MCU_WM, MT7915_RX_MCU_RING_SIZE,
+ rx_buf_size, MT_RX_EVENT_RING_BASE);
+ if (ret)
+ return ret;
+
+ /* event from WA */
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
+ MT7915_RXQ_MCU_WA, MT7915_RX_MCU_RING_SIZE,
+ rx_buf_size, MT_RX_EVENT_RING_BASE);
+ if (ret)
+ return ret;
+
+ /* rx data */
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], 0,
+ MT7915_RX_RING_SIZE, rx_buf_size,
+ MT_RX_DATA_RING_BASE);
+ if (ret)
+ return ret;
+
+ ret = mt76_init_queues(dev);
+ if (ret < 0)
+ return ret;
+
+ netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi,
+ mt7915_poll_tx, NAPI_POLL_WEIGHT);
+ napi_enable(&dev->mt76.tx_napi);
+
+ /* hif wait WFDMA idle */
+ mt76_set(dev, MT_WFDMA0_BUSY_ENA,
+ MT_WFDMA0_BUSY_ENA_TX_FIFO0 |
+ MT_WFDMA0_BUSY_ENA_TX_FIFO1 |
+ MT_WFDMA0_BUSY_ENA_RX_FIFO);
+
+ mt76_set(dev, MT_WFDMA1_BUSY_ENA,
+ MT_WFDMA1_BUSY_ENA_TX_FIFO0 |
+ MT_WFDMA1_BUSY_ENA_TX_FIFO1 |
+ MT_WFDMA1_BUSY_ENA_RX_FIFO);
+
+ mt76_set(dev, MT_WFDMA0_PCIE1_BUSY_ENA,
+ MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 |
+ MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 |
+ MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO);
+
+ mt76_set(dev, MT_WFDMA1_PCIE1_BUSY_ENA,
+ MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO0 |
+ MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO1 |
+ MT_WFDMA1_PCIE1_BUSY_ENA_RX_FIFO);
+
+ mt76_poll(dev, MT_WFDMA_EXT_CSR_HIF_MISC,
+ MT_WFDMA_EXT_CSR_HIF_MISC_BUSY, 0, 1000);
+
+ /* set WFDMA Tx/Rx */
+ mt76_set(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+ mt76_set(dev, MT_WFDMA1_GLO_CFG,
+ MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN);
+
+ /* enable interrupts for TX/RX rings */
+ mt7915_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_MCU |
+ MT_INT_MCU_CMD);
+
+ return 0;
+}
+
+void mt7915_dma_cleanup(struct mt7915_dev *dev)
+{
+ /* disable */
+ mt76_clear(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+ mt76_clear(dev, MT_WFDMA1_GLO_CFG,
+ MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_RX_DMA_EN);
+
+ /* reset */
+ mt76_clear(dev, MT_WFDMA1_RST,
+ MT_WFDMA1_RST_DMASHDL_ALL_RST |
+ MT_WFDMA1_RST_LOGIC_RST);
+
+ mt76_set(dev, MT_WFDMA1_RST,
+ MT_WFDMA1_RST_DMASHDL_ALL_RST |
+ MT_WFDMA1_RST_LOGIC_RST);
+
+ mt76_clear(dev, MT_WFDMA0_RST,
+ MT_WFDMA0_RST_DMASHDL_ALL_RST |
+ MT_WFDMA0_RST_LOGIC_RST);
+
+ mt76_set(dev, MT_WFDMA0_RST,
+ MT_WFDMA0_RST_DMASHDL_ALL_RST |
+ MT_WFDMA0_RST_LOGIC_RST);
+
+ mt76_dma_cleanup(&dev->mt76);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
new file mode 100644
index 000000000..5f6c52761
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#include "mt7915.h"
+#include "eeprom.h"
+
+static inline bool mt7915_efuse_valid(u8 val)
+{
+ return !(val == 0xff);
+}
+
+u32 mt7915_eeprom_read(struct mt7915_dev *dev, u32 offset)
+{
+ u8 *data = dev->mt76.eeprom.data;
+
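+ /* the local copy starts out as all 0xff; fetch missing bytes from the MCU on demand */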
+ if (!mt7915_efuse_valid(data[offset]))
+ mt7915_mcu_get_eeprom(dev, offset);
+
+ return data[offset];
+}
+
+static int mt7915_eeprom_load(struct mt7915_dev *dev)
+{
+ int ret;
+
+ ret = mt76_eeprom_init(&dev->mt76, MT7915_EEPROM_SIZE);
+ if (ret < 0)
+ return ret;
+
+ memset(dev->mt76.eeprom.data, -1, MT7915_EEPROM_SIZE);
+
+ return 0;
+}
+
+static int mt7915_check_eeprom(struct mt7915_dev *dev)
+{
+ u16 val;
+ u8 *eeprom = dev->mt76.eeprom.data;
+
+ mt7915_eeprom_read(dev, 0);
+ val = get_unaligned_le16(eeprom);
+
+ switch (val) {
+ case 0x7915:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void mt7915_eeprom_parse_hw_cap(struct mt7915_dev *dev)
+{
+ u8 *eeprom = dev->mt76.eeprom.data;
+ u8 tx_mask, max_nss = 4;
+ u32 val = mt7915_eeprom_read(dev, MT_EE_WIFI_CONF);
+
+ val = FIELD_GET(MT_EE_WIFI_CONF_BAND_SEL, val);
+ switch (val) {
+ case MT_EE_5GHZ:
+ dev->mphy.cap.has_5ghz = true;
+ break;
+ case MT_EE_2GHZ:
+ dev->mphy.cap.has_2ghz = true;
+ break;
+ default:
+ dev->mphy.cap.has_2ghz = true;
+ dev->mphy.cap.has_5ghz = true;
+ break;
+ }
+
+ /* read tx mask from eeprom */
+ tx_mask = FIELD_GET(MT_EE_WIFI_CONF_TX_MASK,
+ eeprom[MT_EE_WIFI_CONF]);
+ if (!tx_mask || tx_mask > max_nss)
+ tx_mask = max_nss;
+
+ dev->chainmask = BIT(tx_mask) - 1;
+ dev->mphy.antenna_mask = dev->chainmask;
+ dev->phy.chainmask = dev->chainmask;
+}
+
+int mt7915_eeprom_init(struct mt7915_dev *dev)
+{
+ int ret;
+
+ ret = mt7915_eeprom_load(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = mt7915_check_eeprom(dev);
+ if (ret)
+ return ret;
+
+ mt7915_eeprom_parse_hw_cap(dev);
+ memcpy(dev->mt76.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
+ ETH_ALEN);
+
+ mt76_eeprom_override(&dev->mt76);
+
+ return 0;
+}
+
+int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
+ struct ieee80211_channel *chan,
+ u8 chain_idx)
+{
+ int index, target_power;
+ bool tssi_on;
+
+ if (chain_idx > 3)
+ return -EINVAL;
+
+ tssi_on = mt7915_tssi_enabled(dev, chan->band);
+
+ if (chan->band == NL80211_BAND_2GHZ) {
+ index = MT_EE_TX0_POWER_2G + chain_idx * 3;
+ target_power = mt7915_eeprom_read(dev, index);
+
+ if (!tssi_on)
+ target_power += mt7915_eeprom_read(dev, index + 1);
+ } else {
+ int group = mt7915_get_channel_group(chan->hw_value);
+
+ index = MT_EE_TX0_POWER_5G + chain_idx * 12;
+ target_power = mt7915_eeprom_read(dev, index + group);
+
+ if (!tssi_on)
+ target_power += mt7915_eeprom_read(dev, index + 8);
+ }
+
+ return target_power;
+}
+
+static const u8 sku_cck_delta_map[] = {
+ SKU_CCK_GROUP0,
+ SKU_CCK_GROUP0,
+ SKU_CCK_GROUP1,
+ SKU_CCK_GROUP1,
+};
+
+static const u8 sku_ofdm_delta_map[] = {
+ SKU_OFDM_GROUP0,
+ SKU_OFDM_GROUP0,
+ SKU_OFDM_GROUP1,
+ SKU_OFDM_GROUP1,
+ SKU_OFDM_GROUP2,
+ SKU_OFDM_GROUP2,
+ SKU_OFDM_GROUP3,
+ SKU_OFDM_GROUP4,
+};
+
+static const u8 sku_mcs_delta_map[] = {
+ SKU_MCS_GROUP0,
+ SKU_MCS_GROUP1,
+ SKU_MCS_GROUP1,
+ SKU_MCS_GROUP2,
+ SKU_MCS_GROUP2,
+ SKU_MCS_GROUP3,
+ SKU_MCS_GROUP4,
+ SKU_MCS_GROUP5,
+ SKU_MCS_GROUP6,
+ SKU_MCS_GROUP7,
+ SKU_MCS_GROUP8,
+ SKU_MCS_GROUP9,
+};
+
+#define SKU_GROUP(_mode, _len, _ofs_2g, _ofs_5g, _map) \
+ [_mode] = { \
+ .len = _len, \
+ .offset = { \
+ _ofs_2g, \
+ _ofs_5g, \
+ }, \
+ .delta_map = _map \
+}
+
+const struct sku_group mt7915_sku_groups[] = {
+ SKU_GROUP(SKU_CCK, 4, 0x252, 0, sku_cck_delta_map),
+ SKU_GROUP(SKU_OFDM, 8, 0x254, 0x29d, sku_ofdm_delta_map),
+
+ SKU_GROUP(SKU_HT_BW20, 8, 0x259, 0x2a2, sku_mcs_delta_map),
+ SKU_GROUP(SKU_HT_BW40, 9, 0x262, 0x2ab, sku_mcs_delta_map),
+ SKU_GROUP(SKU_VHT_BW20, 12, 0x259, 0x2a2, sku_mcs_delta_map),
+ SKU_GROUP(SKU_VHT_BW40, 12, 0x262, 0x2ab, sku_mcs_delta_map),
+ SKU_GROUP(SKU_VHT_BW80, 12, 0, 0x2b4, sku_mcs_delta_map),
+ SKU_GROUP(SKU_VHT_BW160, 12, 0, 0, sku_mcs_delta_map),
+
+ SKU_GROUP(SKU_HE_RU26, 12, 0x27f, 0x2dd, sku_mcs_delta_map),
+ SKU_GROUP(SKU_HE_RU52, 12, 0x289, 0x2e7, sku_mcs_delta_map),
+ SKU_GROUP(SKU_HE_RU106, 12, 0x293, 0x2f1, sku_mcs_delta_map),
+ SKU_GROUP(SKU_HE_RU242, 12, 0x26b, 0x2bf, sku_mcs_delta_map),
+ SKU_GROUP(SKU_HE_RU484, 12, 0x275, 0x2c9, sku_mcs_delta_map),
+ SKU_GROUP(SKU_HE_RU996, 12, 0, 0x2d3, sku_mcs_delta_map),
+ SKU_GROUP(SKU_HE_RU2x996, 12, 0, 0, sku_mcs_delta_map),
+};
+
+static s8
+mt7915_get_sku_delta(struct mt7915_dev *dev, u32 addr)
+{
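+ /* delta byte layout: bit 7 = enable, bit 6 = sign (set means add), bits [5:0] = magnitude */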
+ u32 val = mt7915_eeprom_read(dev, addr);
+ s8 delta = FIELD_GET(SKU_DELTA_VAL, val);
+
+ if (!(val & SKU_DELTA_EN))
+ return 0;
+
+ return val & SKU_DELTA_ADD ? delta : -delta;
+}
+
+static void
+mt7915_eeprom_init_sku_band(struct mt7915_dev *dev,
+ struct ieee80211_supported_band *sband)
+{
+ int i, band = sband->band;
+ s8 *rate_power = dev->rate_power[band], max_delta = 0;
+ u8 idx = 0;
+
+ for (i = 0; i < ARRAY_SIZE(mt7915_sku_groups); i++) {
+ const struct sku_group *sku = &mt7915_sku_groups[i];
+ u32 offset = sku->offset[band];
+ int j;
+
+ if (!offset) {
+ idx += sku->len;
+ continue;
+ }
+
+ rate_power[idx++] = mt7915_get_sku_delta(dev, offset);
+ if (rate_power[idx - 1] > max_delta)
+ max_delta = rate_power[idx - 1];
+
+ if (i == SKU_HT_BW20 || i == SKU_VHT_BW20)
+ offset += 1;
+
+ for (j = 1; j < sku->len; j++) {
+ u32 addr = offset + sku->delta_map[j];
+
+ rate_power[idx++] = mt7915_get_sku_delta(dev, addr);
+ if (rate_power[idx - 1] > max_delta)
+ max_delta = rate_power[idx - 1];
+ }
+ }
+
+ rate_power[idx] = max_delta;
+}
+
+void mt7915_eeprom_init_sku(struct mt7915_dev *dev)
+{
+ mt7915_eeprom_init_sku_band(dev, &dev->mphy.sband_2g.sband);
+ mt7915_eeprom_init_sku_band(dev, &dev->mphy.sband_5g.sband);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
new file mode 100644
index 000000000..4e31d6ab4
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#ifndef __MT7915_EEPROM_H
+#define __MT7915_EEPROM_H
+
+#include "mt7915.h"
+
+struct cal_data {
+ u8 count;
+ u16 offset[60];
+};
+
+enum mt7915_eeprom_field {
+ MT_EE_CHIP_ID = 0x000,
+ MT_EE_VERSION = 0x002,
+ MT_EE_MAC_ADDR = 0x004,
+ MT_EE_DDIE_FT_VERSION = 0x050,
+ MT_EE_WIFI_CONF = 0x190,
+ MT_EE_TX0_POWER_2G = 0x2fc,
+ MT_EE_TX0_POWER_5G = 0x34b,
+ MT_EE_ADIE_FT_VERSION = 0x9a0,
+
+ __MT_EE_MAX = 0xe00
+};
+
+#define MT_EE_WIFI_CONF_TX_MASK GENMASK(2, 0)
+#define MT_EE_WIFI_CONF_BAND_SEL GENMASK(7, 6)
+#define MT_EE_WIFI_CONF_TSSI0_2G BIT(0)
+#define MT_EE_WIFI_CONF_TSSI0_5G BIT(2)
+#define MT_EE_WIFI_CONF_TSSI1_5G BIT(4)
+
+enum mt7915_eeprom_band {
+ MT_EE_DUAL_BAND,
+ MT_EE_5GHZ,
+ MT_EE_2GHZ,
+ MT_EE_DBDC,
+};
+
+#define SKU_DELTA_VAL GENMASK(5, 0)
+#define SKU_DELTA_ADD BIT(6)
+#define SKU_DELTA_EN BIT(7)
+
+enum mt7915_sku_delta_group {
+ SKU_CCK_GROUP0,
+ SKU_CCK_GROUP1,
+
+ SKU_OFDM_GROUP0 = 0,
+ SKU_OFDM_GROUP1,
+ SKU_OFDM_GROUP2,
+ SKU_OFDM_GROUP3,
+ SKU_OFDM_GROUP4,
+
+ SKU_MCS_GROUP0 = 0,
+ SKU_MCS_GROUP1,
+ SKU_MCS_GROUP2,
+ SKU_MCS_GROUP3,
+ SKU_MCS_GROUP4,
+ SKU_MCS_GROUP5,
+ SKU_MCS_GROUP6,
+ SKU_MCS_GROUP7,
+ SKU_MCS_GROUP8,
+ SKU_MCS_GROUP9,
+};
+
+enum mt7915_sku_rate_group {
+ SKU_CCK,
+ SKU_OFDM,
+ SKU_HT_BW20,
+ SKU_HT_BW40,
+ SKU_VHT_BW20,
+ SKU_VHT_BW40,
+ SKU_VHT_BW80,
+ SKU_VHT_BW160,
+ SKU_HE_RU26,
+ SKU_HE_RU52,
+ SKU_HE_RU106,
+ SKU_HE_RU242,
+ SKU_HE_RU484,
+ SKU_HE_RU996,
+ SKU_HE_RU2x996,
+ MAX_SKU_RATE_GROUP_NUM,
+};
+
+struct sku_group {
+ u8 len;
+ u16 offset[2];
+ const u8 *delta_map;
+};
+
+static inline int
+mt7915_get_channel_group(int channel)
+{
+ if (channel >= 184 && channel <= 196)
+ return 0;
+ if (channel <= 48)
+ return 1;
+ if (channel <= 64)
+ return 2;
+ if (channel <= 96)
+ return 3;
+ if (channel <= 112)
+ return 4;
+ if (channel <= 128)
+ return 5;
+ if (channel <= 144)
+ return 6;
+ return 7;
+}
+
+static inline bool
+mt7915_tssi_enabled(struct mt7915_dev *dev, enum nl80211_band band)
+{
+ u8 *eep = dev->mt76.eeprom.data;
+
+ /* TODO: DBDC */
+ if (band == NL80211_BAND_5GHZ)
+ return eep[MT_EE_WIFI_CONF + 7] & MT_EE_WIFI_CONF_TSSI0_5G;
+ else
+ return eep[MT_EE_WIFI_CONF + 7] & MT_EE_WIFI_CONF_TSSI0_2G;
+}
+
+extern const struct sku_group mt7915_sku_groups[];
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
new file mode 100644
index 000000000..99683688a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
@@ -0,0 +1,723 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#include <linux/etherdevice.h>
+#include "mt7915.h"
+#include "mac.h"
+#include "eeprom.h"
+
+static void
+mt7915_mac_init_band(struct mt7915_dev *dev, u8 band)
+{
+ u32 mask, set;
+
+ mt76_rmw_field(dev, MT_TMAC_CTCR0(band),
+ MT_TMAC_CTCR0_INS_DDLMT_REFTIME, 0x3f);
+ mt76_set(dev, MT_TMAC_CTCR0(band),
+ MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN |
+ MT_TMAC_CTCR0_INS_DDLMT_EN);
+
+ mask = MT_MDP_RCFR0_MCU_RX_MGMT |
+ MT_MDP_RCFR0_MCU_RX_CTL_NON_BAR |
+ MT_MDP_RCFR0_MCU_RX_CTL_BAR;
+ set = FIELD_PREP(MT_MDP_RCFR0_MCU_RX_MGMT, MT_MDP_TO_HIF) |
+ FIELD_PREP(MT_MDP_RCFR0_MCU_RX_CTL_NON_BAR, MT_MDP_TO_HIF) |
+ FIELD_PREP(MT_MDP_RCFR0_MCU_RX_CTL_BAR, MT_MDP_TO_HIF);
+ mt76_rmw(dev, MT_MDP_BNRCFR0(band), mask, set);
+
+ mask = MT_MDP_RCFR1_MCU_RX_BYPASS |
+ MT_MDP_RCFR1_RX_DROPPED_UCAST |
+ MT_MDP_RCFR1_RX_DROPPED_MCAST;
+ set = FIELD_PREP(MT_MDP_RCFR1_MCU_RX_BYPASS, MT_MDP_TO_HIF) |
+ FIELD_PREP(MT_MDP_RCFR1_RX_DROPPED_UCAST, MT_MDP_TO_HIF) |
+ FIELD_PREP(MT_MDP_RCFR1_RX_DROPPED_MCAST, MT_MDP_TO_HIF);
+ mt76_rmw(dev, MT_MDP_BNRCFR1(band), mask, set);
+
+ mt76_set(dev, MT_WF_RMAC_MIB_TIME0(band), MT_WF_RMAC_MIB_RXTIME_EN);
+ mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band), MT_WF_RMAC_MIB_RXTIME_EN);
+}
+
+static void mt7915_mac_init(struct mt7915_dev *dev)
+{
+ int i;
+
+ mt76_rmw_field(dev, MT_DMA_DCR0, MT_DMA_DCR0_MAX_RX_LEN, 1536);
+ mt76_rmw_field(dev, MT_MDP_DCR1, MT_MDP_DCR1_MAX_RX_LEN, 1536);
+ /* enable rx rate report */
+ mt76_set(dev, MT_DMA_DCR0, MT_DMA_DCR0_RXD_G5_EN);
+ /* disable hardware de-agg */
+ mt76_clear(dev, MT_MDP_DCR0, MT_MDP_DCR0_DAMSDU_EN);
+
+ for (i = 0; i < MT7915_WTBL_SIZE; i++)
+ mt7915_mac_wtbl_update(dev, i,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+
+ mt7915_mac_init_band(dev, 0);
+ mt7915_mac_init_band(dev, 1);
+ mt7915_mcu_set_rts_thresh(&dev->phy, 0x92b);
+}
+
+static int mt7915_txbf_init(struct mt7915_dev *dev)
+{
+ int ret;
+
+ /*
+ * TODO: DBDC & check whether iBF phase calibration data has
+ * been stored in eeprom offset 0x651~0x7b8, then write down
+ * 0x1111 into 0x651 and 0x651 to trigger iBF.
+ */
+
+ /* trigger sounding packets */
+ ret = mt7915_mcu_set_txbf_sounding(dev);
+ if (ret)
+ return ret;
+
+ /* enable iBF & eBF */
+ return mt7915_mcu_set_txbf_type(dev);
+}
+
+static void
+mt7915_init_txpower_band(struct mt7915_dev *dev,
+ struct ieee80211_supported_band *sband)
+{
+ int i, n_chains = hweight8(dev->mphy.antenna_mask);
+
+ for (i = 0; i < sband->n_channels; i++) {
+ struct ieee80211_channel *chan = &sband->channels[i];
+ u32 target_power = 0;
+ int j;
+
+ for (j = 0; j < n_chains; j++) {
+ u32 val;
+
+ val = mt7915_eeprom_get_target_power(dev, chan, j);
+ target_power = max(target_power, val);
+ }
+
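+ /* target power from the EEPROM is in 0.5 dB units */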
+ chan->max_power = min_t(int, chan->max_reg_power,
+ target_power / 2);
+ chan->orig_mpwr = target_power / 2;
+ }
+}
+
+static void mt7915_init_txpower(struct mt7915_dev *dev)
+{
+ mt7915_init_txpower_band(dev, &dev->mphy.sband_2g.sband);
+ mt7915_init_txpower_band(dev, &dev->mphy.sband_5g.sband);
+
+ mt7915_eeprom_init_sku(dev);
+}
+
+static void mt7915_init_work(struct work_struct *work)
+{
+ struct mt7915_dev *dev = container_of(work, struct mt7915_dev,
+ init_work);
+
+ mt7915_mcu_set_eeprom(dev);
+ mt7915_mac_init(dev);
+ mt7915_init_txpower(dev);
+ mt7915_txbf_init(dev);
+}
+
+static int mt7915_init_hardware(struct mt7915_dev *dev)
+{
+ int ret, idx;
+
+ mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
+
+ INIT_WORK(&dev->init_work, mt7915_init_work);
+ spin_lock_init(&dev->token_lock);
+ idr_init(&dev->token);
+
+ ret = mt7915_dma_init(dev);
+ if (ret)
+ return ret;
+
+ set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
+
+ /*
+ * force firmware operation mode into normal state,
+ * which should be set before the firmware download stage.
+ */
+ mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE);
+
+ ret = mt7915_mcu_init(dev);
+ if (ret)
+ return ret;
+
+ ret = mt7915_eeprom_init(dev);
+ if (ret < 0)
+ return ret;
+
+ /* Beacon and mgmt frames should occupy wcid 0 */
+ idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA - 1);
+ if (idx)
+ return -ENOSPC;
+
+ dev->mt76.global_wcid.idx = idx;
+ dev->mt76.global_wcid.hw_key_idx = -1;
+ dev->mt76.global_wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid);
+
+ return 0;
+}
+
+#define CCK_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE, \
+ .hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx), \
+ .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + (_idx)), \
+}
+
+#define OFDM_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
+ .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
+}
+
+static struct ieee80211_rate mt7915_rates[] = {
+ CCK_RATE(0, 10),
+ CCK_RATE(1, 20),
+ CCK_RATE(2, 55),
+ CCK_RATE(3, 110),
+ OFDM_RATE(11, 60),
+ OFDM_RATE(15, 90),
+ OFDM_RATE(10, 120),
+ OFDM_RATE(14, 180),
+ OFDM_RATE(9, 240),
+ OFDM_RATE(13, 360),
+ OFDM_RATE(8, 480),
+ OFDM_RATE(12, 540),
+};
+
+static const struct ieee80211_iface_limit if_limits[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_ADHOC)
+ }, {
+ .max = MT7915_MAX_INTERFACES,
+ .types = BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_STATION)
+ }
+};
+
+static const struct ieee80211_iface_combination if_comb[] = {
+ {
+ .limits = if_limits,
+ .n_limits = ARRAY_SIZE(if_limits),
+ .max_interfaces = 4,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_160) |
+ BIT(NL80211_CHAN_WIDTH_80P80),
+ }
+};
+
+static void
+mt7915_regd_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt76_phy *mphy = hw->priv;
+ struct mt7915_phy *phy = mphy->priv;
+ struct cfg80211_chan_def *chandef = &mphy->chandef;
+
+ dev->mt76.region = request->dfs_region;
+
+ if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR))
+ return;
+
+ mt7915_dfs_init_radar_detector(phy);
+}
+
+static void
+mt7915_init_wiphy(struct ieee80211_hw *hw)
+{
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct wiphy *wiphy = hw->wiphy;
+
+ hw->queues = 4;
+ hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+ hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+
+ phy->slottime = 9;
+
+ hw->sta_data_size = sizeof(struct mt7915_sta);
+ hw->vif_data_size = sizeof(struct mt7915_vif);
+
+ wiphy->iface_combinations = if_comb;
+ wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+ wiphy->reg_notifier = mt7915_regd_notifier;
+ wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
+
+ ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+
+ hw->max_tx_fragments = 4;
+}
+
+void mt7915_set_stream_vht_txbf_caps(struct mt7915_phy *phy)
+{
+ int nss = hweight8(phy->chainmask);
+ u32 *cap = &phy->mt76->sband_5g.sband.vht_cap.cap;
+
+ *cap |= IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
+ (3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT);
+
+ *cap &= ~(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK |
+ IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE);
+
+ if (nss < 2)
+ return;
+
+ *cap |= IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE |
+ FIELD_PREP(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK,
+ nss - 1);
+}
+
+static void
+mt7915_set_stream_he_txbf_caps(struct ieee80211_sta_he_cap *he_cap,
+ int vif, int nss)
+{
+ struct ieee80211_he_cap_elem *elem = &he_cap->he_cap_elem;
+ struct ieee80211_he_mcs_nss_supp *mcs = &he_cap->he_mcs_nss_supp;
+ u8 c;
+
+#ifdef CONFIG_MAC80211_MESH
+ if (vif == NL80211_IFTYPE_MESH_POINT)
+ return;
+#endif
+
+ elem->phy_cap_info[3] &= ~IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER;
+ elem->phy_cap_info[4] &= ~IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
+
+ c = IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK |
+ IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK;
+ elem->phy_cap_info[5] &= ~c;
+
+ c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB |
+ IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB;
+ elem->phy_cap_info[6] &= ~c;
+
+ elem->phy_cap_info[7] &= ~IEEE80211_HE_PHY_CAP7_MAX_NC_MASK;
+
+ c = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
+ IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
+ IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO;
+ elem->phy_cap_info[2] |= c;
+
+ c = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
+ IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 |
+ IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4;
+ elem->phy_cap_info[4] |= c;
+
+ /* do not support NG16 because spec D4.0 changed the subcarrier idx */
+ c = IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU |
+ IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU;
+
+ if (vif == NL80211_IFTYPE_STATION)
+ c |= IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO;
+
+ elem->phy_cap_info[6] |= c;
+
+ if (nss < 2)
+ return;
+
+ /* the maximum cap is 4 x 3, (Nr, Nc) = (3, 2) */
+ elem->phy_cap_info[7] |= min_t(int, nss - 1, 2) << 3;
+
+ if (vif != NL80211_IFTYPE_AP)
+ return;
+
+ elem->phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER;
+ elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
+
+ /* num_snd_dim */
+ c = (nss - 1) | (max_t(int, mcs->tx_mcs_160, 1) << 3);
+ elem->phy_cap_info[5] |= c;
+
+ c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB |
+ IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB;
+ elem->phy_cap_info[6] |= c;
+}
+
+static void
+mt7915_gen_ppe_thresh(u8 *he_ppet)
+{
+ int ru, nss, max_nss = 1, max_ru = 3;
+ u8 bit = 7, ru_bit_mask = 0x7;
+ u8 ppet16_ppet8_ru3_ru0[] = {0x1c, 0xc7, 0x71};
+
+ he_ppet[0] = max_nss & IEEE80211_PPE_THRES_NSS_MASK;
+ he_ppet[0] |= (ru_bit_mask <<
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS) &
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK;
+
+ for (nss = 0; nss <= max_nss; nss++) {
+ for (ru = 0; ru < max_ru; ru++) {
+ u8 val;
+ int i;
+
+ if (!(ru_bit_mask & BIT(ru)))
+ continue;
+
+ val = (ppet16_ppet8_ru3_ru0[nss] >> (ru * 6)) &
+ 0x3f;
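+ /* swap the upper and lower 3-bit halves of the 6-bit value */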
+ val = ((val >> 3) & 0x7) | ((val & 0x7) << 3);
+ for (i = 5; i >= 0; i--) {
+ he_ppet[bit / 8] |=
+ ((val >> i) & 0x1) << ((bit % 8));
+ bit++;
+ }
+ }
+ }
+}
+
+static int
+mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
+ struct ieee80211_sband_iftype_data *data)
+{
+ int i, idx = 0;
+ int nss = hweight8(phy->chainmask);
+ u16 mcs_map = 0;
+
+ for (i = 0; i < 8; i++) {
+ if (i < nss)
+ mcs_map |= (IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2));
+ else
+ mcs_map |= (IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2));
+ }
+
+ for (i = 0; i < NUM_NL80211_IFTYPES; i++) {
+ struct ieee80211_sta_he_cap *he_cap = &data[idx].he_cap;
+ struct ieee80211_he_cap_elem *he_cap_elem =
+ &he_cap->he_cap_elem;
+ struct ieee80211_he_mcs_nss_supp *he_mcs =
+ &he_cap->he_mcs_nss_supp;
+
+ switch (i) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_AP:
+#ifdef CONFIG_MAC80211_MESH
+ case NL80211_IFTYPE_MESH_POINT:
+#endif
+ break;
+ default:
+ continue;
+ }
+
+ data[idx].types_mask = BIT(i);
+ he_cap->has_he = true;
+
+ he_cap_elem->mac_cap_info[0] =
+ IEEE80211_HE_MAC_CAP0_HTC_HE;
+ he_cap_elem->mac_cap_info[3] =
+ IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_RESERVED;
+ he_cap_elem->mac_cap_info[4] =
+ IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU;
+
+ if (band == NL80211_BAND_2GHZ)
+ he_cap_elem->phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
+ else if (band == NL80211_BAND_5GHZ)
+ he_cap_elem->phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G;
+
+ he_cap_elem->phy_cap_info[1] =
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD;
+ he_cap_elem->phy_cap_info[2] =
+ IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ;
+
+ switch (i) {
+ case NL80211_IFTYPE_AP:
+ he_cap_elem->mac_cap_info[0] |=
+ IEEE80211_HE_MAC_CAP0_TWT_RES;
+ he_cap_elem->mac_cap_info[2] |=
+ IEEE80211_HE_MAC_CAP2_BSR;
+ he_cap_elem->mac_cap_info[4] |=
+ IEEE80211_HE_MAC_CAP4_BQR;
+ he_cap_elem->mac_cap_info[5] |=
+ IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX;
+ he_cap_elem->phy_cap_info[3] |=
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK;
+ he_cap_elem->phy_cap_info[6] |=
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT;
+ break;
+ case NL80211_IFTYPE_STATION:
+ he_cap_elem->mac_cap_info[0] |=
+ IEEE80211_HE_MAC_CAP0_TWT_REQ;
+ he_cap_elem->mac_cap_info[1] |=
+ IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US;
+
+ if (band == NL80211_BAND_2GHZ)
+ he_cap_elem->phy_cap_info[0] |=
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G;
+ else if (band == NL80211_BAND_5GHZ)
+ he_cap_elem->phy_cap_info[0] |=
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G;
+
+ he_cap_elem->phy_cap_info[1] |=
+ IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
+ IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US;
+ he_cap_elem->phy_cap_info[3] |=
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK;
+ he_cap_elem->phy_cap_info[6] |=
+ IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB |
+ IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE |
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT;
+ he_cap_elem->phy_cap_info[7] |=
+ IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR |
+ IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI;
+ he_cap_elem->phy_cap_info[8] |=
+ IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
+ IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
+ IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU |
+ IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_484;
+ he_cap_elem->phy_cap_info[9] |=
+ IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM |
+ IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK |
+ IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU |
+ IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU |
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB;
+ break;
+ }
+
+ he_mcs->rx_mcs_80 = cpu_to_le16(mcs_map);
+ he_mcs->tx_mcs_80 = cpu_to_le16(mcs_map);
+ he_mcs->rx_mcs_160 = cpu_to_le16(mcs_map);
+ he_mcs->tx_mcs_160 = cpu_to_le16(mcs_map);
+ he_mcs->rx_mcs_80p80 = cpu_to_le16(mcs_map);
+ he_mcs->tx_mcs_80p80 = cpu_to_le16(mcs_map);
+
+ mt7915_set_stream_he_txbf_caps(he_cap, i, nss);
+
+ memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres));
+ if (he_cap_elem->phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+ mt7915_gen_ppe_thresh(he_cap->ppe_thres);
+ } else {
+ he_cap_elem->phy_cap_info[9] |=
+ IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US;
+ }
+ idx++;
+ }
+
+ return idx;
+}
+
+void mt7915_set_stream_he_caps(struct mt7915_phy *phy)
+{
+ struct ieee80211_sband_iftype_data *data;
+ struct ieee80211_supported_band *band;
+ int n;
+
+ if (phy->mt76->cap.has_2ghz) {
+ data = phy->iftype[NL80211_BAND_2GHZ];
+ n = mt7915_init_he_caps(phy, NL80211_BAND_2GHZ, data);
+
+ band = &phy->mt76->sband_2g.sband;
+ band->iftype_data = data;
+ band->n_iftype_data = n;
+ }
+
+ if (phy->mt76->cap.has_5ghz) {
+ data = phy->iftype[NL80211_BAND_5GHZ];
+ n = mt7915_init_he_caps(phy, NL80211_BAND_5GHZ, data);
+
+ band = &phy->mt76->sband_5g.sband;
+ band->iftype_data = data;
+ band->n_iftype_data = n;
+ }
+}
+
+static void
+mt7915_cap_dbdc_enable(struct mt7915_dev *dev)
+{
+ dev->mphy.sband_5g.sband.vht_cap.cap &=
+ ~(IEEE80211_VHT_CAP_SHORT_GI_160 |
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ);
+
+ if (dev->chainmask == 0xf)
+ dev->mphy.antenna_mask = dev->chainmask >> 2;
+ else
+ dev->mphy.antenna_mask = dev->chainmask >> 1;
+
+ dev->phy.chainmask = dev->mphy.antenna_mask;
+ dev->mphy.hw->wiphy->available_antennas_rx = dev->phy.chainmask;
+ dev->mphy.hw->wiphy->available_antennas_tx = dev->phy.chainmask;
+
+ mt76_set_stream_caps(&dev->mphy, true);
+ mt7915_set_stream_vht_txbf_caps(&dev->phy);
+ mt7915_set_stream_he_caps(&dev->phy);
+}
+
+static void
+mt7915_cap_dbdc_disable(struct mt7915_dev *dev)
+{
+ dev->mphy.sband_5g.sband.vht_cap.cap |=
+ IEEE80211_VHT_CAP_SHORT_GI_160 |
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
+
+ dev->mphy.antenna_mask = dev->chainmask;
+ dev->phy.chainmask = dev->chainmask;
+ dev->mphy.hw->wiphy->available_antennas_rx = dev->chainmask;
+ dev->mphy.hw->wiphy->available_antennas_tx = dev->chainmask;
+
+ mt76_set_stream_caps(&dev->mphy, true);
+ mt7915_set_stream_vht_txbf_caps(&dev->phy);
+ mt7915_set_stream_he_caps(&dev->phy);
+}
+
+int mt7915_register_ext_phy(struct mt7915_dev *dev)
+{
+ struct mt7915_phy *phy = mt7915_ext_phy(dev);
+ struct mt76_phy *mphy;
+ int ret;
+ bool bound;
+
+ /* TODO: enable DBDC */
+ bound = mt7915_l1_rr(dev, MT_HW_BOUND) & BIT(5);
+ if (!bound)
+ return -EINVAL;
+
+ if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
+ return -EINVAL;
+
+ if (phy)
+ return 0;
+
+ mt7915_cap_dbdc_enable(dev);
+ mphy = mt76_alloc_phy(&dev->mt76, sizeof(*phy), &mt7915_ops);
+ if (!mphy)
+ return -ENOMEM;
+
+ phy = mphy->priv;
+ phy->dev = dev;
+ phy->mt76 = mphy;
+ phy->chainmask = dev->chainmask & ~dev->phy.chainmask;
+ mphy->antenna_mask = BIT(hweight8(phy->chainmask)) - 1;
+ mt7915_init_wiphy(mphy->hw);
+
+ INIT_LIST_HEAD(&phy->stats_list);
+ INIT_DELAYED_WORK(&phy->mac_work, mt7915_mac_work);
+
+ /*
+ * Make the secondary PHY MAC address local without overlapping with
+ * the usual MAC address allocation scheme on multiple virtual interfaces
+ */
+ mphy->hw->wiphy->perm_addr[0] |= 2;
+ mphy->hw->wiphy->perm_addr[0] ^= BIT(7);
+
+ /* The second interface does not get any packets unless it has a vif */
+ ieee80211_hw_set(mphy->hw, WANT_MONITOR_VIF);
+
+ ret = mt76_register_phy(mphy);
+ if (ret)
+ ieee80211_free_hw(mphy->hw);
+
+ return ret;
+}
+
+void mt7915_unregister_ext_phy(struct mt7915_dev *dev)
+{
+ struct mt7915_phy *phy = mt7915_ext_phy(dev);
+ struct mt76_phy *mphy = dev->mt76.phy2;
+
+ if (!phy)
+ return;
+
+ mt7915_cap_dbdc_disable(dev);
+ mt76_unregister_phy(mphy);
+ ieee80211_free_hw(mphy->hw);
+}
+
+int mt7915_register_device(struct mt7915_dev *dev)
+{
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ int ret;
+
+ dev->phy.dev = dev;
+ dev->phy.mt76 = &dev->mt76.phy;
+ dev->mt76.phy.priv = &dev->phy;
+ INIT_LIST_HEAD(&dev->phy.stats_list);
+ INIT_WORK(&dev->rc_work, mt7915_mac_sta_rc_work);
+ INIT_DELAYED_WORK(&dev->phy.mac_work, mt7915_mac_work);
+ INIT_LIST_HEAD(&dev->sta_rc_list);
+ INIT_LIST_HEAD(&dev->sta_poll_list);
+ spin_lock_init(&dev->sta_poll_lock);
+
+ init_waitqueue_head(&dev->reset_wait);
+ INIT_WORK(&dev->reset_work, mt7915_mac_reset_work);
+
+ ret = mt7915_init_hardware(dev);
+ if (ret)
+ return ret;
+
+ mt7915_init_wiphy(hw);
+ dev->mphy.sband_2g.sband.ht_cap.cap |=
+ IEEE80211_HT_CAP_LDPC_CODING |
+ IEEE80211_HT_CAP_MAX_AMSDU;
+ dev->mphy.sband_5g.sband.ht_cap.cap |=
+ IEEE80211_HT_CAP_LDPC_CODING |
+ IEEE80211_HT_CAP_MAX_AMSDU;
+ dev->mphy.sband_5g.sband.vht_cap.cap |=
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+ mt7915_cap_dbdc_disable(dev);
+ dev->phy.dfs_state = -1;
+
+ ret = mt76_register_device(&dev->mt76, true, mt7915_rates,
+ ARRAY_SIZE(mt7915_rates));
+ if (ret)
+ return ret;
+
+ ieee80211_queue_work(mt76_hw(dev), &dev->init_work);
+
+ return mt7915_init_debugfs(dev);
+}
+
+void mt7915_unregister_device(struct mt7915_dev *dev)
+{
+ struct mt76_txwi_cache *txwi;
+ int id;
+
+ mt7915_unregister_ext_phy(dev);
+ mt76_unregister_device(&dev->mt76);
+ mt7915_mcu_exit(dev);
+ mt7915_dma_cleanup(dev);
+
+ spin_lock_bh(&dev->token_lock);
+ idr_for_each_entry(&dev->token, txwi, id) {
+ mt7915_txp_skb_unmap(&dev->mt76, txwi);
+ if (txwi->skb) {
+ struct ieee80211_hw *hw;
+
+ hw = mt76_tx_status_get_hw(&dev->mt76, txwi->skb);
+ ieee80211_free_txskb(hw, txwi->skb);
+ }
+ mt76_put_txwi(&dev->mt76, txwi);
+ }
+ spin_unlock_bh(&dev->token_lock);
+ idr_destroy(&dev->token);
+
+ mt76_free_device(&dev->mt76);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
new file mode 100644
index 000000000..1e14d7782
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -0,0 +1,1526 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#include <linux/etherdevice.h>
+#include <linux/timekeeping.h>
+#include "mt7915.h"
+#include "../dma.h"
+#include "mac.h"
+
+#define to_rssi(field, rxv) ((FIELD_GET(field, rxv) - 220) / 2)
+
+#define HE_BITS(f) cpu_to_le16(IEEE80211_RADIOTAP_HE_##f)
+#define HE_PREP(f, m, v) le16_encode_bits(le32_get_bits(v, MT_CRXV_HE_##m),\
+ IEEE80211_RADIOTAP_HE_##f)
+
+static const struct mt7915_dfs_radar_spec etsi_radar_specs = {
+ .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
+ .radar_pattern = {
+ [5] = { 1, 0, 6, 32, 28, 0, 990, 5010, 17, 1, 1 },
+ [6] = { 1, 0, 9, 32, 28, 0, 615, 5010, 27, 1, 1 },
+ [7] = { 1, 0, 15, 32, 28, 0, 240, 445, 27, 1, 1 },
+ [8] = { 1, 0, 12, 32, 28, 0, 240, 510, 42, 1, 1 },
+ [9] = { 1, 1, 0, 0, 0, 0, 2490, 3343, 14, 0, 0, 12, 32, 28, { }, 126 },
+ [10] = { 1, 1, 0, 0, 0, 0, 2490, 3343, 14, 0, 0, 15, 32, 24, { }, 126 },
+ [11] = { 1, 1, 0, 0, 0, 0, 823, 2510, 14, 0, 0, 18, 32, 28, { }, 54 },
+ [12] = { 1, 1, 0, 0, 0, 0, 823, 2510, 14, 0, 0, 27, 32, 24, { }, 54 },
+ },
+};
+
+static const struct mt7915_dfs_radar_spec fcc_radar_specs = {
+ .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
+ .radar_pattern = {
+ [0] = { 1, 0, 8, 32, 28, 0, 508, 3076, 13, 1, 1 },
+ [1] = { 1, 0, 12, 32, 28, 0, 140, 240, 17, 1, 1 },
+ [2] = { 1, 0, 8, 32, 28, 0, 190, 510, 22, 1, 1 },
+ [3] = { 1, 0, 6, 32, 28, 0, 190, 510, 32, 1, 1 },
+ [4] = { 1, 0, 9, 255, 28, 0, 323, 343, 13, 1, 32 },
+ },
+};
+
+static const struct mt7915_dfs_radar_spec jp_radar_specs = {
+ .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
+ .radar_pattern = {
+ [0] = { 1, 0, 8, 32, 28, 0, 508, 3076, 13, 1, 1 },
+ [1] = { 1, 0, 12, 32, 28, 0, 140, 240, 17, 1, 1 },
+ [2] = { 1, 0, 8, 32, 28, 0, 190, 510, 22, 1, 1 },
+ [3] = { 1, 0, 6, 32, 28, 0, 190, 510, 32, 1, 1 },
+ [4] = { 1, 0, 9, 255, 28, 0, 323, 343, 13, 1, 32 },
+ [13] = { 1, 0, 7, 32, 28, 0, 3836, 3856, 14, 1, 1 },
+ [14] = { 1, 0, 6, 32, 28, 0, 615, 5010, 110, 1, 1 },
+ [15] = { 1, 1, 0, 0, 0, 0, 15, 5010, 110, 0, 0, 12, 32, 28 },
+ },
+};
+
+static struct mt76_wcid *mt7915_rx_get_wcid(struct mt7915_dev *dev,
+ u16 idx, bool unicast)
+{
+ struct mt7915_sta *sta;
+ struct mt76_wcid *wcid;
+
+ if (idx >= ARRAY_SIZE(dev->mt76.wcid))
+ return NULL;
+
+ wcid = rcu_dereference(dev->mt76.wcid[idx]);
+ if (unicast || !wcid)
+ return wcid;
+
+ if (!wcid->sta)
+ return NULL;
+
+ sta = container_of(wcid, struct mt7915_sta, wcid);
+ if (!sta->vif)
+ return NULL;
+
+ return &sta->vif->sta.wcid;
+}
+
+void mt7915_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
+{
+}
+
+bool mt7915_mac_wtbl_update(struct mt7915_dev *dev, int idx, u32 mask)
+{
+ mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
+ FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);
+
+ return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
+ 0, 5000);
+}
+
+static u32 mt7915_mac_wtbl_lmac_addr(struct mt7915_dev *dev, u16 wcid)
+{
+ mt76_wr(dev, MT_WTBLON_TOP_WDUCR,
+ FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7)));
+
+ return MT_WTBL_LMAC_OFFS(wcid, 0);
+}
+
+/* TODO: use txfree airtime info to avoid runtime WTBL accesses in the long run */
+static void mt7915_mac_sta_poll(struct mt7915_dev *dev)
+{
+ static const u8 ac_to_tid[] = {
+ [IEEE80211_AC_BE] = 0,
+ [IEEE80211_AC_BK] = 1,
+ [IEEE80211_AC_VI] = 4,
+ [IEEE80211_AC_VO] = 6
+ };
+ struct ieee80211_sta *sta;
+ struct mt7915_sta *msta;
+ u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
+ LIST_HEAD(sta_poll_list);
+ int i;
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ list_splice_init(&dev->sta_poll_list, &sta_poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+
+ rcu_read_lock();
+
+ while (true) {
+ bool clear = false;
+ u32 addr;
+ u16 idx;
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (list_empty(&sta_poll_list)) {
+ spin_unlock_bh(&dev->sta_poll_lock);
+ break;
+ }
+ msta = list_first_entry(&sta_poll_list,
+ struct mt7915_sta, poll_list);
+ list_del_init(&msta->poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+
+ idx = msta->wcid.idx;
+ addr = mt7915_mac_wtbl_lmac_addr(dev, idx) + 20 * 4;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ u32 tx_last = msta->airtime_ac[i];
+ u32 rx_last = msta->airtime_ac[i + 4];
+
+ msta->airtime_ac[i] = mt76_rr(dev, addr);
+ msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
+
+ tx_time[i] = msta->airtime_ac[i] - tx_last;
+ rx_time[i] = msta->airtime_ac[i + 4] - rx_last;
+
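+ /* request a WTBL counter clear once either counter passes bit 30, before it can wrap */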
+ if ((tx_last | rx_last) & BIT(30))
+ clear = true;
+
+ addr += 8;
+ }
+
+ if (clear) {
+ mt7915_mac_wtbl_update(dev, idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+ memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
+ }
+
+ if (!msta->wcid.sta)
+ continue;
+
+ sta = container_of((void *)msta, struct ieee80211_sta,
+ drv_priv);
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ u8 q = mt7915_lmac_mapping(dev, i);
+ u32 tx_cur = tx_time[q];
+ u32 rx_cur = rx_time[q];
+ u8 tid = ac_to_tid[i];
+
+ if (!tx_cur && !rx_cur)
+ continue;
+
+ ieee80211_sta_register_airtime(sta, tid, tx_cur,
+ rx_cur);
+ }
+ }
+
+ rcu_read_unlock();
+}
+
+static void
+mt7915_mac_decode_he_radiotap_ru(struct mt76_rx_status *status,
+ struct ieee80211_radiotap_he *he,
+ __le32 *rxv)
+{
+ u32 ru_h, ru_l;
+ u8 ru, offs = 0;
+
+ ru_l = FIELD_GET(MT_PRXV_HE_RU_ALLOC_L, le32_to_cpu(rxv[0]));
+ ru_h = FIELD_GET(MT_PRXV_HE_RU_ALLOC_H, le32_to_cpu(rxv[1]));
+ ru = (u8)(ru_l | ru_h << 4);
+
+ status->bw = RATE_INFO_BW_HE_RU;
+
+ switch (ru) {
+ case 0 ... 36:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+ offs = ru;
+ break;
+ case 37 ... 52:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
+ offs = ru - 37;
+ break;
+ case 53 ... 60:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ offs = ru - 53;
+ break;
+ case 61 ... 64:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
+ offs = ru - 61;
+ break;
+ case 65 ... 66:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
+ offs = ru - 65;
+ break;
+ case 67:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
+ break;
+ case 68:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
+ break;
+ }
+
+ he->data1 |= HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
+ he->data2 |= HE_BITS(DATA2_RU_OFFSET_KNOWN) |
+ le16_encode_bits(offs,
+ IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
+}
+
+static void
+mt7915_mac_decode_he_radiotap(struct sk_buff *skb,
+ struct mt76_rx_status *status,
+ __le32 *rxv, u32 phy)
+{
+ /* TODO: struct ieee80211_radiotap_he_mu */
+ static const struct ieee80211_radiotap_he known = {
+ .data1 = HE_BITS(DATA1_DATA_MCS_KNOWN) |
+ HE_BITS(DATA1_DATA_DCM_KNOWN) |
+ HE_BITS(DATA1_STBC_KNOWN) |
+ HE_BITS(DATA1_CODING_KNOWN) |
+ HE_BITS(DATA1_LDPC_XSYMSEG_KNOWN) |
+ HE_BITS(DATA1_DOPPLER_KNOWN) |
+ HE_BITS(DATA1_BSS_COLOR_KNOWN),
+ .data2 = HE_BITS(DATA2_GI_KNOWN) |
+ HE_BITS(DATA2_TXBF_KNOWN) |
+ HE_BITS(DATA2_PE_DISAMBIG_KNOWN) |
+ HE_BITS(DATA2_TXOP_KNOWN),
+ };
+ struct ieee80211_radiotap_he *he = NULL;
+ u32 ltf_size = le32_get_bits(rxv[2], MT_CRXV_HE_LTF_SIZE) + 1;
+
+ he = skb_push(skb, sizeof(known));
+ memcpy(he, &known, sizeof(known));
+
+ he->data3 = HE_PREP(DATA3_BSS_COLOR, BSS_COLOR, rxv[14]) |
+ HE_PREP(DATA3_LDPC_XSYMSEG, LDPC_EXT_SYM, rxv[2]);
+ he->data5 = HE_PREP(DATA5_PE_DISAMBIG, PE_DISAMBIG, rxv[2]) |
+ le16_encode_bits(ltf_size,
+ IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
+ he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, rxv[14]) |
+ HE_PREP(DATA6_DOPPLER, DOPPLER, rxv[14]);
+
+ switch (phy) {
+ case MT_PHY_TYPE_HE_SU:
+ he->data1 |= HE_BITS(DATA1_FORMAT_SU) |
+ HE_BITS(DATA1_UL_DL_KNOWN) |
+ HE_BITS(DATA1_BEAM_CHANGE_KNOWN) |
+ HE_BITS(DATA1_SPTL_REUSE_KNOWN);
+
+ he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[14]) |
+ HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
+ he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);
+ break;
+ case MT_PHY_TYPE_HE_EXT_SU:
+ he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) |
+ HE_BITS(DATA1_UL_DL_KNOWN);
+
+ he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
+ break;
+ case MT_PHY_TYPE_HE_MU:
+ he->data1 |= HE_BITS(DATA1_FORMAT_MU) |
+ HE_BITS(DATA1_UL_DL_KNOWN) |
+ HE_BITS(DATA1_SPTL_REUSE_KNOWN);
+
+ he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
+ he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);
+
+ mt7915_mac_decode_he_radiotap_ru(status, he, rxv);
+ break;
+ case MT_PHY_TYPE_HE_TB:
+ he->data1 |= HE_BITS(DATA1_FORMAT_TRIG) |
+ HE_BITS(DATA1_SPTL_REUSE_KNOWN) |
+ HE_BITS(DATA1_SPTL_REUSE2_KNOWN) |
+ HE_BITS(DATA1_SPTL_REUSE3_KNOWN) |
+ HE_BITS(DATA1_SPTL_REUSE4_KNOWN);
+
+ he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE1, SR_MASK, rxv[11]) |
+ HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[11]) |
+ HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, rxv[11]) |
+ HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, rxv[11]);
+
+ mt7915_mac_decode_he_radiotap_ru(status, he, rxv);
+ break;
+ default:
+ break;
+ }
+}
+
+int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt7915_phy *phy = &dev->phy;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_hdr *hdr;
+ __le32 *rxd = (__le32 *)skb->data;
+ __le32 *rxv = NULL;
+ u32 mode = 0;
+ u32 rxd1 = le32_to_cpu(rxd[1]);
+ u32 rxd2 = le32_to_cpu(rxd[2]);
+ u32 rxd3 = le32_to_cpu(rxd[3]);
+ bool unicast, insert_ccmp_hdr = false;
+ u8 remove_pad;
+ int i, idx;
+
+ memset(status, 0, sizeof(*status));
+
+ if (rxd1 & MT_RXD1_NORMAL_BAND_IDX) {
+ mphy = dev->mt76.phy2;
+ if (!mphy)
+ return -EINVAL;
+
+ phy = mphy->priv;
+ status->ext_phy = true;
+ }
+
+ if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
+ return -EINVAL;
+
+ unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
+ idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
+ status->wcid = mt7915_rx_get_wcid(dev, idx, unicast);
+
+ if (status->wcid) {
+ struct mt7915_sta *msta;
+
+ msta = container_of(status->wcid, struct mt7915_sta, wcid);
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (list_empty(&msta->poll_list))
+ list_add_tail(&msta->poll_list, &dev->sta_poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+ }
+
+ status->freq = mphy->chandef.chan->center_freq;
+ status->band = mphy->chandef.chan->band;
+ if (status->band == NL80211_BAND_5GHZ)
+ sband = &mphy->sband_5g.sband;
+ else
+ sband = &mphy->sband_2g.sband;
+
+ if (!sband->channels)
+ return -EINVAL;
+
+ if (rxd1 & MT_RXD1_NORMAL_FCS_ERR)
+ status->flag |= RX_FLAG_FAILED_FCS_CRC;
+
+ if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
+ status->flag |= RX_FLAG_MMIC_ERROR;
+
+ if (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1) != 0 &&
+ !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
+ status->flag |= RX_FLAG_DECRYPTED;
+ status->flag |= RX_FLAG_IV_STRIPPED;
+ status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
+ }
+
+ if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
+ status->flag |= RX_FLAG_AMPDU_DETAILS;
+
+ /* all subframes of an A-MPDU have the same timestamp */
+ if (phy->rx_ampdu_ts != rxd[14]) {
+ if (!++phy->ampdu_ref)
+ phy->ampdu_ref++;
+ }
+ phy->rx_ampdu_ts = rxd[14];
+
+ status->ampdu_ref = phy->ampdu_ref;
+ }
+
+ remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);
+
+ if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
+ return -EINVAL;
+
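+ /* skip the 6-dword fixed part of the RXD, then parse the optional groups that follow */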
+ rxd += 6;
+ if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
+ rxd += 4;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+ }
+
+ if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
+ u8 *data = (u8 *)rxd;
+
+ if (status->flag & RX_FLAG_DECRYPTED) {
+ status->iv[0] = data[5];
+ status->iv[1] = data[4];
+ status->iv[2] = data[3];
+ status->iv[3] = data[2];
+ status->iv[4] = data[1];
+ status->iv[5] = data[0];
+
+ insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
+ }
+ rxd += 4;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+ }
+
+ if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
+ rxd += 2;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+ }
+
+ /* RXD Group 3 - P-RXV */
+ if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
+ u32 v0, v1, v2;
+
+ rxv = rxd;
+ rxd += 2;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+
+ v0 = le32_to_cpu(rxv[0]);
+ v1 = le32_to_cpu(rxv[1]);
+ v2 = le32_to_cpu(rxv[2]);
+
+ if (v0 & MT_PRXV_HT_AD_CODE)
+ status->enc_flags |= RX_ENC_FLAG_LDPC;
+
+ status->chains = mphy->antenna_mask;
+ status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v1);
+ status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1);
+ status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v1);
+ status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v1);
+ status->signal = status->chain_signal[0];
+
+ for (i = 1; i < hweight8(mphy->antenna_mask); i++) {
+ if (!(status->chains & BIT(i)))
+ continue;
+
+ status->signal = max(status->signal,
+ status->chain_signal[i]);
+ }
+
+ /* RXD Group 5 - C-RXV */
+ if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
+ u8 stbc = FIELD_GET(MT_CRXV_HT_STBC, v2);
+ u8 gi = FIELD_GET(MT_CRXV_HT_SHORT_GI, v2);
+ bool cck = false;
+
+ rxd += 18;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+
+ idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
+ mode = FIELD_GET(MT_CRXV_TX_MODE, v2);
+
+ switch (mode) {
+ case MT_PHY_TYPE_CCK:
+ cck = true;
+ fallthrough;
+ case MT_PHY_TYPE_OFDM:
+ i = mt76_get_rate(&dev->mt76, sband, i, cck);
+ break;
+ case MT_PHY_TYPE_HT_GF:
+ case MT_PHY_TYPE_HT:
+ status->encoding = RX_ENC_HT;
+ if (i > 31)
+ return -EINVAL;
+ break;
+ case MT_PHY_TYPE_VHT:
+ status->nss =
+ FIELD_GET(MT_PRXV_NSTS, v0) + 1;
+ status->encoding = RX_ENC_VHT;
+ if (i > 9)
+ return -EINVAL;
+ break;
+ case MT_PHY_TYPE_HE_MU:
+ status->flag |= RX_FLAG_RADIOTAP_HE_MU;
+ fallthrough;
+ case MT_PHY_TYPE_HE_SU:
+ case MT_PHY_TYPE_HE_EXT_SU:
+ case MT_PHY_TYPE_HE_TB:
+ status->nss =
+ FIELD_GET(MT_PRXV_NSTS, v0) + 1;
+ status->encoding = RX_ENC_HE;
+ status->flag |= RX_FLAG_RADIOTAP_HE;
+ i &= GENMASK(3, 0);
+
+ if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
+ status->he_gi = gi;
+
+ status->he_dcm = !!(idx & MT_PRXV_TX_DCM);
+ break;
+ default:
+ return -EINVAL;
+ }
+ status->rate_idx = i;
+
+ switch (FIELD_GET(MT_CRXV_FRAME_MODE, v2)) {
+ case IEEE80211_STA_RX_BW_20:
+ break;
+ case IEEE80211_STA_RX_BW_40:
+ if (mode & MT_PHY_TYPE_HE_EXT_SU &&
+ (idx & MT_PRXV_TX_ER_SU_106T)) {
+ status->bw = RATE_INFO_BW_HE_RU;
+ status->he_ru =
+ NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ } else {
+ status->bw = RATE_INFO_BW_40;
+ }
+ break;
+ case IEEE80211_STA_RX_BW_80:
+ status->bw = RATE_INFO_BW_80;
+ break;
+ case IEEE80211_STA_RX_BW_160:
+ status->bw = RATE_INFO_BW_160;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
+ if (mode < MT_PHY_TYPE_HE_SU && gi)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ }
+ }
+
+ skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);
+
+ if (insert_ccmp_hdr) {
+ u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
+
+ mt76_insert_ccmp_hdr(skb, key_id);
+ }
+
+ if (rxv && status->flag & RX_FLAG_RADIOTAP_HE)
+ mt7915_mac_decode_he_radiotap(skb, status, rxv, mode);
+
+ hdr = mt76_skb_get_hdr(skb);
+ if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
+ return 0;
+
+ status->aggr = unicast &&
+ !ieee80211_is_qos_nullfunc(hdr->frame_control);
+ status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+ status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+
+ return 0;
+}
+
+void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_key_conf *key, bool beacon)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
+ bool multicast = is_multicast_ether_addr(hdr->addr1);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt76_phy *mphy = &dev->mphy;
+ bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY;
+ u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
+ __le16 fc = hdr->frame_control;
+ u16 tx_count = 15, seqno = 0;
+ u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+ u32 val;
+
+ if (vif) {
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+
+ omac_idx = mvif->omac_idx;
+ wmm_idx = mvif->wmm_idx;
+ }
+
+ if (ext_phy && dev->mt76.phy2)
+ mphy = dev->mt76.phy2;
+
+ fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
+ fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;
+
+ txwi[4] = 0;
+ txwi[5] = 0;
+ txwi[6] = 0;
+
+ if (beacon) {
+ p_fmt = MT_TX_TYPE_FW;
+ q_idx = MT_LMAC_BCN0;
+ } else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) {
+ p_fmt = MT_TX_TYPE_CT;
+ q_idx = MT_LMAC_ALTX0;
+ } else {
+ p_fmt = MT_TX_TYPE_CT;
+ q_idx = wmm_idx * MT7915_MAX_WMM_SETS +
+ mt7915_lmac_mapping(dev, skb_get_queue_mapping(skb));
+ }
+
+ if (ieee80211_is_action(fc) &&
+ mgmt->u.action.category == WLAN_CATEGORY_BACK &&
+ mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) {
+ u16 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
+
+ txwi[5] |= cpu_to_le32(MT_TXD5_ADD_BA);
+ tid = (capab >> 2) & IEEE80211_QOS_CTL_TID_MASK;
+ } else if (ieee80211_is_back_req(hdr->frame_control)) {
+ struct ieee80211_bar *bar = (struct ieee80211_bar *)hdr;
+ u16 control = le16_to_cpu(bar->control);
+
+ tid = FIELD_GET(IEEE80211_BAR_CTRL_TID_INFO_MASK, control);
+ }
+
+ val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
+ FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
+ FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
+ txwi[0] = cpu_to_le32(val);
+
+ val = MT_TXD1_LONG_FORMAT |
+ FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
+ FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
+ FIELD_PREP(MT_TXD1_HDR_INFO,
+ ieee80211_get_hdrlen_from_skb(skb) / 2) |
+ FIELD_PREP(MT_TXD1_TID, tid) |
+ FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);
+
+ if (ext_phy && q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0)
+ val |= MT_TXD1_TGID;
+
+ txwi[1] = cpu_to_le32(val);
+
+ val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
+ FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) |
+ FIELD_PREP(MT_TXD2_MULTICAST, multicast);
+ if (key) {
+ if (multicast && ieee80211_is_robust_mgmt_frame(skb) &&
+ key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
+ val |= MT_TXD2_BIP;
+ txwi[3] = 0;
+ } else {
+ txwi[3] = cpu_to_le32(MT_TXD3_PROTECT_FRAME);
+ }
+ } else {
+ txwi[3] = 0;
+ }
+ txwi[2] = cpu_to_le32(val);
+
+ if (!ieee80211_is_data(fc) || multicast) {
+ u16 rate;
+
+ /* hardware won't add HTC for mgmt/ctrl frame */
+ txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE | MT_TXD2_HTC_VLD);
+
+ if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
+ rate = MT7915_5G_RATE_DEFAULT;
+ else
+ rate = MT7915_2G_RATE_DEFAULT;
+
+ val = MT_TXD6_FIXED_BW |
+ FIELD_PREP(MT_TXD6_TX_RATE, rate);
+ txwi[6] |= cpu_to_le32(val);
+ txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
+ }
+
+ if (!ieee80211_is_beacon(fc))
+ txwi[3] |= cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
+ else
+ tx_count = 0x1f;
+
+ if (info->flags & IEEE80211_TX_CTL_NO_ACK)
+ txwi[3] |= cpu_to_le32(MT_TXD3_NO_ACK);
+
+ val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
+ FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
+ if (wcid->amsdu)
+ val |= MT_TXD7_HW_AMSDU;
+ txwi[7] = cpu_to_le32(val);
+
+ val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
+ if (info->flags & IEEE80211_TX_CTL_INJECTED) {
+ seqno = le16_to_cpu(hdr->seq_ctrl);
+
+ if (ieee80211_is_back_req(hdr->frame_control)) {
+ struct ieee80211_bar *bar;
+
+ bar = (struct ieee80211_bar *)skb->data;
+ seqno = le16_to_cpu(bar->start_seq_num);
+ }
+
+ val |= MT_TXD3_SN_VALID |
+ FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
+ }
+ txwi[3] |= cpu_to_le32(val);
+}
+
+int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
+ struct ieee80211_key_conf *key = info->control.hw_key;
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt76_tx_cb *cb = mt76_tx_skb_cb(tx_info->skb);
+ struct mt76_txwi_cache *t;
+ struct mt7915_txp *txp;
+ int id, i, nbuf = tx_info->nbuf - 1;
+ u8 *txwi = (u8 *)txwi_ptr;
+
+ if (!wcid)
+ wcid = &dev->mt76.global_wcid;
+
+ cb->wcid = wcid->idx;
+
+ mt7915_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
+ false);
+
+ txp = (struct mt7915_txp *)(txwi + MT_TXD_SIZE);
+ for (i = 0; i < nbuf; i++) {
+ txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
+ txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
+ }
+ txp->nbuf = nbuf;
+
+ /* pass partial skb header to fw */
+ tx_info->buf[1].len = MT_CT_PARSE_LEN;
+ tx_info->buf[1].skip_unmap = true;
+ tx_info->nbuf = MT_CT_DMA_BUF_NUM;
+
+ txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD);
+
+ if (!key)
+ txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);
+
+ if (ieee80211_is_mgmt(hdr->frame_control))
+ txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);
+
+ if (vif) {
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+
+ txp->bss_idx = mvif->idx;
+ }
+
+ t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
+ t->skb = tx_info->skb;
+
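+ /* allocate a token so the tx-free path can look up this txwi on completion */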
+ spin_lock_bh(&dev->token_lock);
+ id = idr_alloc(&dev->token, t, 0, MT7915_TOKEN_SIZE, GFP_ATOMIC);
+ spin_unlock_bh(&dev->token_lock);
+ if (id < 0)
+ return id;
+
+ txp->token = cpu_to_le16(id);
+ txp->rept_wds_wcid = 0xff;
+ tx_info->skb = DMA_DUMMY_DATA;
+
+ return 0;
+}
+
+static void
+mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
+{
+ struct mt7915_sta *msta;
+ u16 fc, tid;
+ u32 val;
+
+ if (!sta || !sta->ht_cap.ht_supported)
+ return;
+
+ tid = FIELD_GET(MT_TXD1_TID, le32_to_cpu(txwi[1]));
+ if (tid >= 6) /* skip VO queue */
+ return;
+
+ val = le32_to_cpu(txwi[2]);
+ fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 |
+ FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4;
+ if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
+ return;
+
+ msta = (struct mt7915_sta *)sta->drv_priv;
+ if (!test_and_set_bit(tid, &msta->ampdu_state))
+ ieee80211_start_tx_ba_session(sta, tid, 0);
+}
+
+static inline void
+mt7915_tx_status(struct ieee80211_sta *sta, struct ieee80211_hw *hw,
+ struct ieee80211_tx_info *info, struct sk_buff *skb)
+{
+ struct ieee80211_tx_status status = {
+ .sta = sta,
+ .info = info,
+ };
+
+ if (skb)
+ status.skb = skb;
+
+ if (sta) {
+ struct mt7915_sta *msta;
+
+ msta = (struct mt7915_sta *)sta->drv_priv;
+ status.rate = &msta->stats.tx_rate;
+ }
+
+ /* use status_ext to report HE rate */
+ ieee80211_tx_status_ext(hw, &status);
+}
+
+static void
+mt7915_tx_complete_status(struct mt76_dev *mdev, struct sk_buff *skb,
+ struct ieee80211_sta *sta, u8 stat)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hw *hw;
+
+ hw = mt76_tx_status_get_hw(mdev, skb);
+
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
+ info->flags |= IEEE80211_TX_STAT_AMPDU;
+
+ if (stat)
+ ieee80211_tx_info_clear_status(info);
+
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ info->status.tx_time = 0;
+
+ if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
+ mt7915_tx_status(sta, hw, info, skb);
+ return;
+ }
+
+ if (sta || !(info->flags & IEEE80211_TX_CTL_NO_ACK))
+ mt7915_tx_status(sta, hw, info, NULL);
+
+ ieee80211_free_txskb(hw, skb);
+}
+
+void mt7915_txp_skb_unmap(struct mt76_dev *dev,
+ struct mt76_txwi_cache *t)
+{
+ struct mt7915_txp *txp;
+ int i;
+
+ txp = mt7915_txwi_to_txp(dev, t);
+ for (i = 0; i < txp->nbuf; i++)
+ dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
+ le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
+}
+
+void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
+{
+ struct mt7915_tx_free *free = (struct mt7915_tx_free *)skb->data;
+ struct mt76_dev *mdev = &dev->mt76;
+ struct mt76_txwi_cache *txwi;
+ struct ieee80211_sta *sta = NULL;
+ u8 i, count;
+
+ /* clean DMA queues and unmap buffers first */
+ mt76_queue_tx_cleanup(dev, MT_TXQ_PSD, false);
+ mt76_queue_tx_cleanup(dev, MT_TXQ_BE, false);
+
+ /*
+ * TODO: MT_TX_FREE_LATENCY is the msdu time from when the TXD is queued into
+ * PLE to when the ack is received or the frame is dropped by hw (air + hw
+ * queue time). We should use it instead of accessing the WTBL for Tx airtime.
+ */
+ count = FIELD_GET(MT_TX_FREE_MSDU_CNT, le16_to_cpu(free->ctrl));
+ for (i = 0; i < count; i++) {
+ u32 msdu, info = le32_to_cpu(free->info[i]);
+ u8 stat;
+
+ /*
+ * 1'b1: new wcid pair.
+ * 1'b0: msdu_id with the same 'wcid pair' as above.
+ */
+ if (info & MT_TX_FREE_PAIR) {
+ struct mt7915_sta *msta;
+ struct mt7915_phy *phy;
+ struct mt76_wcid *wcid;
+ u16 idx;
+
+ count++;
+ idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
+ wcid = rcu_dereference(dev->mt76.wcid[idx]);
+ sta = wcid_to_sta(wcid);
+ if (!sta)
+ continue;
+
+ msta = container_of(wcid, struct mt7915_sta, wcid);
+ phy = msta->vif->phy;
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (list_empty(&msta->stats_list))
+ list_add_tail(&msta->stats_list, &phy->stats_list);
+ if (list_empty(&msta->poll_list))
+ list_add_tail(&msta->poll_list, &dev->sta_poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+ }
+
+ msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
+ stat = FIELD_GET(MT_TX_FREE_STATUS, info);
+
+ spin_lock_bh(&dev->token_lock);
+ txwi = idr_remove(&dev->token, msdu);
+ spin_unlock_bh(&dev->token_lock);
+
+ if (!txwi)
+ continue;
+
+ mt7915_txp_skb_unmap(mdev, txwi);
+ if (txwi->skb) {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txwi->skb);
+ void *txwi_ptr = mt76_get_txwi_ptr(mdev, txwi);
+
+ if (likely(txwi->skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ mt7915_tx_check_aggr(sta, txwi_ptr);
+
+ if (sta && !info->tx_time_est) {
+ struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
+ int pending;
+
+ pending = atomic_dec_return(&wcid->non_aql_packets);
+ if (pending < 0)
+ atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
+ }
+
+ mt7915_tx_complete_status(mdev, txwi->skb, sta, stat);
+ txwi->skb = NULL;
+ }
+
+ mt76_put_txwi(mdev, txwi);
+ }
+ dev_kfree_skb(skb);
+
+ mt7915_mac_sta_poll(dev);
+ mt76_worker_schedule(&dev->mt76.tx_worker);
+}
+
+void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
+{
+ struct mt7915_dev *dev;
+
+ if (!e->txwi) {
+ dev_kfree_skb_any(e->skb);
+ return;
+ }
+
+ dev = container_of(mdev, struct mt7915_dev, mt76);
+
+ /* error path */
+ if (e->skb == DMA_DUMMY_DATA) {
+ struct mt76_txwi_cache *t;
+ struct mt7915_txp *txp;
+
+ txp = mt7915_txwi_to_txp(mdev, e->txwi);
+
+ spin_lock_bh(&dev->token_lock);
+ t = idr_remove(&dev->token, le16_to_cpu(txp->token));
+ spin_unlock_bh(&dev->token_lock);
+ e->skb = t ? t->skb : NULL;
+ }
+
+ if (e->skb) {
+ struct mt76_tx_cb *cb = mt76_tx_skb_cb(e->skb);
+ struct mt76_wcid *wcid;
+
+ wcid = rcu_dereference(dev->mt76.wcid[cb->wcid]);
+
+ mt7915_tx_complete_status(mdev, e->skb, wcid_to_sta(wcid), 0);
+ }
+}
+
+void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy)
+{
+ struct mt7915_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+ u32 reg = MT_WF_PHY_RX_CTRL1(ext_phy);
+
+ mt7915_l2_clear(dev, reg, MT_WF_PHY_RX_CTRL1_STSCNT_EN);
+ mt7915_l2_set(dev, reg, BIT(11) | BIT(9));
+}
+
+void mt7915_mac_reset_counters(struct mt7915_phy *phy)
+{
+ struct mt7915_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
+ mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i));
+ }
+
+ if (ext_phy) {
+ dev->mt76.phy2->survey_time = ktime_get_boottime();
+ i = ARRAY_SIZE(dev->mt76.aggr_stats) / 2;
+ } else {
+ dev->mt76.phy.survey_time = ktime_get_boottime();
+ i = 0;
+ }
+ memset(&dev->mt76.aggr_stats[i], 0, sizeof(dev->mt76.aggr_stats) / 2);
+
+ /* reset airtime counters */
+ mt76_rr(dev, MT_MIB_SDR9(ext_phy));
+ mt76_rr(dev, MT_MIB_SDR36(ext_phy));
+ mt76_rr(dev, MT_MIB_SDR37(ext_phy));
+
+ mt76_set(dev, MT_WF_RMAC_MIB_TIME0(ext_phy),
+ MT_WF_RMAC_MIB_RXTIME_CLR);
+ mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(ext_phy),
+ MT_WF_RMAC_MIB_RXTIME_CLR);
+}
+
+void mt7915_mac_set_timing(struct mt7915_phy *phy)
+{
+ s16 coverage_class = phy->coverage_class;
+ struct mt7915_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+ u32 val, reg_offset;
+ u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
+ FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
+ u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
+ FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
+ int sifs, offset;
+ bool is_5ghz = phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ;
+
+ if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
+ return;
+
+ if (is_5ghz)
+ sifs = 16;
+ else
+ sifs = 10;
+
+ if (ext_phy) {
+ coverage_class = max_t(s16, dev->phy.coverage_class,
+ coverage_class);
+ } else {
+ struct mt7915_phy *phy_ext = mt7915_ext_phy(dev);
+
+ if (phy_ext)
+ coverage_class = max_t(s16, phy_ext->coverage_class,
+ coverage_class);
+ }
+ mt76_set(dev, MT_ARB_SCR(ext_phy),
+ MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
+ udelay(1);
+
+ offset = 3 * coverage_class;
+ reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
+ FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
+
+ mt76_wr(dev, MT_TMAC_CDTR(ext_phy), cck + reg_offset);
+ mt76_wr(dev, MT_TMAC_ODTR(ext_phy), ofdm + reg_offset);
+ mt76_wr(dev, MT_TMAC_ICR0(ext_phy),
+ FIELD_PREP(MT_IFS_EIFS, 360) |
+ FIELD_PREP(MT_IFS_RIFS, 2) |
+ FIELD_PREP(MT_IFS_SIFS, sifs) |
+ FIELD_PREP(MT_IFS_SLOT, phy->slottime));
+
+ if (phy->slottime < 20 || is_5ghz)
+ val = MT7915_CFEND_RATE_DEFAULT;
+ else
+ val = MT7915_CFEND_RATE_11B;
+
+ mt76_rmw_field(dev, MT_AGG_ACR0(ext_phy), MT_AGG_ACR_CFEND_RATE, val);
+ mt76_clear(dev, MT_ARB_SCR(ext_phy),
+ MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
+}
+
+/*
+ * TODO: mib counters are read-clear and many HE functionalities need such
+ * info, so the firmware prepares a task to read the fields out to a shared
+ * structure. Users should switch to the event format to avoid race conditions.
+ */
+static void
+mt7915_phy_update_channel(struct mt76_phy *mphy, int idx)
+{
+ struct mt7915_dev *dev = container_of(mphy->dev, struct mt7915_dev, mt76);
+ struct mt76_channel_state *state;
+ u64 busy_time, tx_time, rx_time, obss_time;
+
+ busy_time = mt76_get_field(dev, MT_MIB_SDR9(idx),
+ MT_MIB_SDR9_BUSY_MASK);
+ tx_time = mt76_get_field(dev, MT_MIB_SDR36(idx),
+ MT_MIB_SDR36_TXTIME_MASK);
+ rx_time = mt76_get_field(dev, MT_MIB_SDR37(idx),
+ MT_MIB_SDR37_RXTIME_MASK);
+ obss_time = mt76_get_field(dev, MT_WF_RMAC_MIB_AIRTIME14(idx),
+ MT_MIB_OBSSTIME_MASK);
+
+ /* TODO: state->noise */
+ state = mphy->chan_state;
+ state->cc_busy += busy_time;
+ state->cc_tx += tx_time;
+ state->cc_rx += rx_time + obss_time;
+ state->cc_bss_rx += rx_time;
+}
+
+void mt7915_update_channel(struct mt76_dev *mdev)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+
+ mt7915_phy_update_channel(&mdev->phy, 0);
+ if (mdev->phy2)
+ mt7915_phy_update_channel(mdev->phy2, 1);
+
+ /* reset obss airtime */
+ mt76_set(dev, MT_WF_RMAC_MIB_TIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);
+ if (mdev->phy2)
+ mt76_set(dev, MT_WF_RMAC_MIB_TIME0(1),
+ MT_WF_RMAC_MIB_RXTIME_CLR);
+}
+
+static bool
+mt7915_wait_reset_state(struct mt7915_dev *dev, u32 state)
+{
+ bool ret;
+
+ ret = wait_event_timeout(dev->reset_wait,
+ (READ_ONCE(dev->reset_state) & state),
+ MT7915_RESET_TIMEOUT);
+
+ WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
+ return ret;
+}
+
+static void
+mt7915_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct ieee80211_hw *hw = priv;
+
+ mt7915_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon);
+}
+
+static void
+mt7915_update_beacons(struct mt7915_dev *dev)
+{
+ ieee80211_iterate_active_interfaces(dev->mt76.hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7915_update_vif_beacon, dev->mt76.hw);
+
+ if (!dev->mt76.phy2)
+ return;
+
+ ieee80211_iterate_active_interfaces(dev->mt76.phy2->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7915_update_vif_beacon, dev->mt76.phy2->hw);
+}
+
+static void
+mt7915_dma_reset(struct mt7915_dev *dev)
+{
+ int i;
+
+ mt76_clear(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+ mt76_clear(dev, MT_WFDMA1_GLO_CFG,
+ MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN);
+ usleep_range(1000, 2000);
+
+ for (i = 0; i < __MT_TXQ_MAX; i++)
+ mt76_queue_tx_cleanup(dev, i, true);
+
+ mt76_for_each_q_rx(&dev->mt76, i) {
+ mt76_queue_rx_reset(dev, i);
+ }
+
+ /* re-init prefetch settings after reset */
+ mt7915_dma_prefetch(dev);
+
+ mt76_set(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+ mt76_set(dev, MT_WFDMA1_GLO_CFG,
+ MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN);
+}
+
+/* system error recovery */
+void mt7915_mac_reset_work(struct work_struct *work)
+{
+ struct mt7915_phy *phy2;
+ struct mt76_phy *ext_phy;
+ struct mt7915_dev *dev;
+
+ dev = container_of(work, struct mt7915_dev, reset_work);
+ ext_phy = dev->mt76.phy2;
+ phy2 = ext_phy ? ext_phy->priv : NULL;
+
+ if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_DMA))
+ return;
+
+ ieee80211_stop_queues(mt76_hw(dev));
+ if (ext_phy)
+ ieee80211_stop_queues(ext_phy->hw);
+
+ set_bit(MT76_RESET, &dev->mphy.state);
+ set_bit(MT76_MCU_RESET, &dev->mphy.state);
+ wake_up(&dev->mt76.mcu.wait);
+ cancel_delayed_work_sync(&dev->phy.mac_work);
+ if (phy2)
+ cancel_delayed_work_sync(&phy2->mac_work);
+
+ /* lock/unlock all queues to ensure that no tx is pending */
+ mt76_txq_schedule_all(&dev->mphy);
+ if (ext_phy)
+ mt76_txq_schedule_all(ext_phy);
+
+ mt76_worker_disable(&dev->mt76.tx_worker);
+ napi_disable(&dev->mt76.napi[0]);
+ napi_disable(&dev->mt76.napi[1]);
+ napi_disable(&dev->mt76.napi[2]);
+ napi_disable(&dev->mt76.tx_napi);
+
+ mutex_lock(&dev->mt76.mutex);
+
+ mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);
+
+ if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
+ mt7915_dma_reset(dev);
+
+ mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
+ mt7915_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
+ }
+
+ clear_bit(MT76_MCU_RESET, &dev->mphy.state);
+ clear_bit(MT76_RESET, &dev->mphy.state);
+
+ mt76_worker_enable(&dev->mt76.tx_worker);
+ napi_enable(&dev->mt76.tx_napi);
+ napi_schedule(&dev->mt76.tx_napi);
+
+ napi_enable(&dev->mt76.napi[0]);
+ napi_schedule(&dev->mt76.napi[0]);
+
+ napi_enable(&dev->mt76.napi[1]);
+ napi_schedule(&dev->mt76.napi[1]);
+
+ napi_enable(&dev->mt76.napi[2]);
+ napi_schedule(&dev->mt76.napi[2]);
+
+ ieee80211_wake_queues(mt76_hw(dev));
+ if (ext_phy)
+ ieee80211_wake_queues(ext_phy->hw);
+
+ mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
+ mt7915_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ mt7915_update_beacons(dev);
+
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->phy.mac_work,
+ MT7915_WATCHDOG_TIME);
+ if (phy2)
+ ieee80211_queue_delayed_work(ext_phy->hw, &phy2->mac_work,
+ MT7915_WATCHDOG_TIME);
+}
+
+static void
+mt7915_mac_update_mib_stats(struct mt7915_phy *phy)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct mib_stats *mib = &phy->mib;
+ bool ext_phy = phy != &dev->phy;
+ int i, aggr0, aggr1;
+
+ mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
+ MT_MIB_SDR3_FCS_ERR_MASK);
+
+ aggr0 = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
+ for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
+ u32 val;
+
+ val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i));
+ mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
+ mib->ack_fail_cnt +=
+ FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
+
+ val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i));
+ mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
+ mib->rts_retries_cnt +=
+ FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
+
+ val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
+ dev->mt76.aggr_stats[aggr0++] += val & 0xffff;
+ dev->mt76.aggr_stats[aggr0++] += val >> 16;
+
+ val = mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i));
+ dev->mt76.aggr_stats[aggr1++] += val & 0xffff;
+ dev->mt76.aggr_stats[aggr1++] += val >> 16;
+ }
+}
+
+static void
+mt7915_mac_sta_stats_work(struct mt7915_phy *phy)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct mt7915_sta *msta;
+ LIST_HEAD(list);
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ list_splice_init(&phy->stats_list, &list);
+
+ while (!list_empty(&list)) {
+ msta = list_first_entry(&list, struct mt7915_sta, stats_list);
+ list_del_init(&msta->stats_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+
+ /* use MT_TX_FREE_RATE to report Tx rate for further devices */
+ mt7915_mcu_get_rate_info(dev, RATE_CTRL_RU_INFO, msta->wcid.idx);
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ }
+
+ spin_unlock_bh(&dev->sta_poll_lock);
+}
+
+void mt7915_mac_sta_rc_work(struct work_struct *work)
+{
+ struct mt7915_dev *dev = container_of(work, struct mt7915_dev, rc_work);
+ struct ieee80211_sta *sta;
+ struct ieee80211_vif *vif;
+ struct mt7915_sta *msta;
+ u32 changed;
+ LIST_HEAD(list);
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ list_splice_init(&dev->sta_rc_list, &list);
+
+ while (!list_empty(&list)) {
+ msta = list_first_entry(&list, struct mt7915_sta, rc_list);
+ list_del_init(&msta->rc_list);
+ changed = msta->stats.changed;
+ msta->stats.changed = 0;
+ spin_unlock_bh(&dev->sta_poll_lock);
+
+ sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
+ vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
+
+ if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
+ IEEE80211_RC_NSS_CHANGED |
+ IEEE80211_RC_BW_CHANGED))
+ mt7915_mcu_add_rate_ctrl(dev, vif, sta);
+
+ if (changed & IEEE80211_RC_SMPS_CHANGED)
+ mt7915_mcu_add_smps(dev, vif, sta);
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ }
+
+ spin_unlock_bh(&dev->sta_poll_lock);
+}
+
+void mt7915_mac_work(struct work_struct *work)
+{
+ struct mt7915_phy *phy;
+ struct mt76_dev *mdev;
+
+ phy = (struct mt7915_phy *)container_of(work, struct mt7915_phy,
+ mac_work.work);
+ mdev = &phy->dev->mt76;
+
+ mutex_lock(&mdev->mutex);
+
+ mt76_update_survey(mdev);
+ if (++phy->mac_work_count == 5) {
+ phy->mac_work_count = 0;
+
+ mt7915_mac_update_mib_stats(phy);
+ }
+
+ if (++phy->sta_work_count == 10) {
+ phy->sta_work_count = 0;
+ mt7915_mac_sta_stats_work(phy);
+ }
+
+ mutex_unlock(&mdev->mutex);
+
+ ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mac_work,
+ MT7915_WATCHDOG_TIME);
+}
+
+static void mt7915_dfs_stop_radar_detector(struct mt7915_phy *phy)
+{
+ struct mt7915_dev *dev = phy->dev;
+
+ if (phy->rdd_state & BIT(0))
+ mt7915_mcu_rdd_cmd(dev, RDD_STOP, 0, MT_RX_SEL0, 0);
+ if (phy->rdd_state & BIT(1))
+ mt7915_mcu_rdd_cmd(dev, RDD_STOP, 1, MT_RX_SEL0, 0);
+}
+
+static int mt7915_dfs_start_rdd(struct mt7915_dev *dev, int chain)
+{
+ int err;
+
+ err = mt7915_mcu_rdd_cmd(dev, RDD_START, chain, MT_RX_SEL0, 0);
+ if (err < 0)
+ return err;
+
+ return mt7915_mcu_rdd_cmd(dev, RDD_DET_MODE, chain, MT_RX_SEL0, 1);
+}
+
+static int mt7915_dfs_start_radar_detector(struct mt7915_phy *phy)
+{
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+ struct mt7915_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+ int err;
+
+ /* start CAC */
+ err = mt7915_mcu_rdd_cmd(dev, RDD_CAC_START, ext_phy, MT_RX_SEL0, 0);
+ if (err < 0)
+ return err;
+
+ err = mt7915_dfs_start_rdd(dev, ext_phy);
+ if (err < 0)
+ return err;
+
+ phy->rdd_state |= BIT(ext_phy);
+
+ if (chandef->width == NL80211_CHAN_WIDTH_160 ||
+ chandef->width == NL80211_CHAN_WIDTH_80P80) {
+ err = mt7915_dfs_start_rdd(dev, 1);
+ if (err < 0)
+ return err;
+
+ phy->rdd_state |= BIT(1);
+ }
+
+ return 0;
+}
+
+static int
+mt7915_dfs_init_radar_specs(struct mt7915_phy *phy)
+{
+ const struct mt7915_dfs_radar_spec *radar_specs;
+ struct mt7915_dev *dev = phy->dev;
+ int err, i;
+
+ switch (dev->mt76.region) {
+ case NL80211_DFS_FCC:
+ radar_specs = &fcc_radar_specs;
+ err = mt7915_mcu_set_fcc5_lpn(dev, 8);
+ if (err < 0)
+ return err;
+ break;
+ case NL80211_DFS_ETSI:
+ radar_specs = &etsi_radar_specs;
+ break;
+ case NL80211_DFS_JP:
+ radar_specs = &jp_radar_specs;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
+ err = mt7915_mcu_set_radar_th(dev, i,
+ &radar_specs->radar_pattern[i]);
+ if (err < 0)
+ return err;
+ }
+
+ return mt7915_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
+}
+
+int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy)
+{
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+ struct mt7915_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+ int err;
+
+ if (dev->mt76.region == NL80211_DFS_UNSET) {
+ phy->dfs_state = -1;
+ if (phy->rdd_state)
+ goto stop;
+
+ return 0;
+ }
+
+ if (test_bit(MT76_SCANNING, &phy->mt76->state))
+ return 0;
+
+ if (phy->dfs_state == chandef->chan->dfs_state)
+ return 0;
+
+ err = mt7915_dfs_init_radar_specs(phy);
+ if (err < 0) {
+ phy->dfs_state = -1;
+ goto stop;
+ }
+
+ phy->dfs_state = chandef->chan->dfs_state;
+
+ if (chandef->chan->flags & IEEE80211_CHAN_RADAR) {
+ if (chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
+ return mt7915_dfs_start_radar_detector(phy);
+
+ return mt7915_mcu_rdd_cmd(dev, RDD_CAC_END, ext_phy,
+ MT_RX_SEL0, 0);
+ }
+
+stop:
+ err = mt7915_mcu_rdd_cmd(dev, RDD_NORMAL_START, ext_phy,
+ MT_RX_SEL0, 0);
+ if (err < 0)
+ return err;
+
+ mt7915_dfs_stop_radar_detector(phy);
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.h b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h
new file mode 100644
index 000000000..c8bb5ea96
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h
@@ -0,0 +1,329 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#ifndef __MT7915_MAC_H
+#define __MT7915_MAC_H
+
+#define MT_CT_PARSE_LEN 72
+#define MT_CT_DMA_BUF_NUM 2
+
+#define MT_RXD0_LENGTH GENMASK(15, 0)
+#define MT_RXD0_PKT_TYPE GENMASK(31, 27)
+
+#define MT_RXD0_NORMAL_ETH_TYPE_OFS GENMASK(22, 16)
+#define MT_RXD0_NORMAL_IP_SUM BIT(23)
+#define MT_RXD0_NORMAL_UDP_TCP_SUM BIT(24)
+
+enum rx_pkt_type {
+ PKT_TYPE_TXS,
+ PKT_TYPE_TXRXV,
+ PKT_TYPE_NORMAL,
+ PKT_TYPE_RX_DUP_RFB,
+ PKT_TYPE_RX_TMR,
+ PKT_TYPE_RETRIEVE,
+ PKT_TYPE_TXRX_NOTIFY,
+ PKT_TYPE_RX_EVENT,
+};
+
+/* RXD DW1 */
+#define MT_RXD1_NORMAL_WLAN_IDX GENMASK(9, 0)
+#define MT_RXD1_NORMAL_GROUP_1 BIT(11)
+#define MT_RXD1_NORMAL_GROUP_2 BIT(12)
+#define MT_RXD1_NORMAL_GROUP_3 BIT(13)
+#define MT_RXD1_NORMAL_GROUP_4 BIT(14)
+#define MT_RXD1_NORMAL_GROUP_5 BIT(15)
+#define MT_RXD1_NORMAL_SEC_MODE GENMASK(20, 16)
+#define MT_RXD1_NORMAL_KEY_ID GENMASK(22, 21)
+#define MT_RXD1_NORMAL_CM BIT(23)
+#define MT_RXD1_NORMAL_CLM BIT(24)
+#define MT_RXD1_NORMAL_ICV_ERR BIT(25)
+#define MT_RXD1_NORMAL_TKIP_MIC_ERR BIT(26)
+#define MT_RXD1_NORMAL_FCS_ERR BIT(27)
+#define MT_RXD1_NORMAL_BAND_IDX BIT(28)
+#define MT_RXD1_NORMAL_SPP_EN BIT(29)
+#define MT_RXD1_NORMAL_ADD_OM BIT(30)
+#define MT_RXD1_NORMAL_SEC_DONE BIT(31)
+
+/* RXD DW2 */
+#define MT_RXD2_NORMAL_BSSID GENMASK(5, 0)
+#define MT_RXD2_NORMAL_CO_ANT BIT(6)
+#define MT_RXD2_NORMAL_BF_CQI BIT(7)
+#define MT_RXD2_NORMAL_MAC_HDR_LEN GENMASK(12, 8)
+#define MT_RXD2_NORMAL_HDR_TRANS BIT(13)
+#define MT_RXD2_NORMAL_HDR_OFFSET GENMASK(15, 14)
+#define MT_RXD2_NORMAL_TID GENMASK(19, 16)
+#define MT_RXD2_NORMAL_MU_BAR BIT(21)
+#define MT_RXD2_NORMAL_SW_BIT BIT(22)
+#define MT_RXD2_NORMAL_AMSDU_ERR BIT(23)
+#define MT_RXD2_NORMAL_MAX_LEN_ERROR BIT(24)
+#define MT_RXD2_NORMAL_HDR_TRANS_ERROR BIT(25)
+#define MT_RXD2_NORMAL_INT_FRAME BIT(26)
+#define MT_RXD2_NORMAL_FRAG BIT(27)
+#define MT_RXD2_NORMAL_NULL_FRAME BIT(28)
+#define MT_RXD2_NORMAL_NDATA BIT(29)
+#define MT_RXD2_NORMAL_NON_AMPDU BIT(30)
+#define MT_RXD2_NORMAL_BF_REPORT BIT(31)
+
+/* RXD DW3 */
+#define MT_RXD3_NORMAL_RXV_SEQ GENMASK(7, 0)
+#define MT_RXD3_NORMAL_CH_FREQ GENMASK(15, 8)
+#define MT_RXD3_NORMAL_ADDR_TYPE GENMASK(17, 16)
+#define MT_RXD3_NORMAL_U2M BIT(0)
+#define MT_RXD3_NORMAL_HTC_VLD BIT(0)
+#define MT_RXD3_NORMAL_TSF_COMPARE_LOSS BIT(19)
+#define MT_RXD3_NORMAL_BEACON_MC BIT(20)
+#define MT_RXD3_NORMAL_BEACON_UC BIT(21)
+#define MT_RXD3_NORMAL_AMSDU BIT(22)
+#define MT_RXD3_NORMAL_MESH BIT(23)
+#define MT_RXD3_NORMAL_MHCP BIT(24)
+#define MT_RXD3_NORMAL_NO_INFO_WB BIT(25)
+#define MT_RXD3_NORMAL_DISABLE_RX_HDR_TRANS BIT(26)
+#define MT_RXD3_NORMAL_POWER_SAVE_STAT BIT(27)
+#define MT_RXD3_NORMAL_MORE BIT(28)
+#define MT_RXD3_NORMAL_UNWANT BIT(29)
+#define MT_RXD3_NORMAL_RX_DROP BIT(30)
+#define MT_RXD3_NORMAL_VLAN2ETH BIT(31)
+
+/* RXD DW4 */
+#define MT_RXD4_NORMAL_PAYLOAD_FORMAT GENMASK(1, 0)
+#define MT_RXD4_NORMAL_PATTERN_DROP BIT(9)
+#define MT_RXD4_NORMAL_CLS BIT(10)
+#define MT_RXD4_NORMAL_OFLD GENMASK(12, 11)
+#define MT_RXD4_NORMAL_MAGIC_PKT BIT(13)
+#define MT_RXD4_NORMAL_WOL GENMASK(18, 14)
+#define MT_RXD4_NORMAL_CLS_BITMAP GENMASK(28, 19)
+#define MT_RXD3_NORMAL_PF_MODE BIT(29)
+#define MT_RXD3_NORMAL_PF_STS GENMASK(31, 30)
+
+/* P-RXV */
+#define MT_PRXV_TX_RATE GENMASK(6, 0)
+#define MT_PRXV_TX_DCM BIT(4)
+#define MT_PRXV_TX_ER_SU_106T BIT(5)
+#define MT_PRXV_NSTS GENMASK(9, 7)
+#define MT_PRXV_HT_AD_CODE BIT(11)
+#define MT_PRXV_HE_RU_ALLOC_L GENMASK(31, 28)
+#define MT_PRXV_HE_RU_ALLOC_H GENMASK(3, 0)
+#define MT_PRXV_RCPI3 GENMASK(31, 24)
+#define MT_PRXV_RCPI2 GENMASK(23, 16)
+#define MT_PRXV_RCPI1 GENMASK(15, 8)
+#define MT_PRXV_RCPI0 GENMASK(7, 0)
+
+/* C-RXV */
+#define MT_CRXV_HT_STBC GENMASK(1, 0)
+#define MT_CRXV_TX_MODE GENMASK(7, 4)
+#define MT_CRXV_FRAME_MODE GENMASK(10, 8)
+#define MT_CRXV_HT_SHORT_GI GENMASK(14, 13)
+#define MT_CRXV_HE_LTF_SIZE GENMASK(18, 17)
+#define MT_CRXV_HE_LDPC_EXT_SYM BIT(20)
+#define MT_CRXV_HE_PE_DISAMBIG BIT(23)
+#define MT_CRXV_HE_UPLINK BIT(31)
+
+#define MT_CRXV_HE_SR_MASK GENMASK(11, 8)
+#define MT_CRXV_HE_SR1_MASK GENMASK(16, 12)
+#define MT_CRXV_HE_SR2_MASK GENMASK(20, 17)
+#define MT_CRXV_HE_SR3_MASK GENMASK(24, 21)
+
+#define MT_CRXV_HE_BSS_COLOR GENMASK(5, 0)
+#define MT_CRXV_HE_TXOP_DUR GENMASK(12, 6)
+#define MT_CRXV_HE_BEAM_CHNG BIT(13)
+#define MT_CRXV_HE_DOPPLER BIT(16)
+
+enum tx_header_format {
+ MT_HDR_FORMAT_802_3,
+ MT_HDR_FORMAT_CMD,
+ MT_HDR_FORMAT_802_11,
+ MT_HDR_FORMAT_802_11_EXT,
+};
+
+enum tx_pkt_type {
+ MT_TX_TYPE_CT,
+ MT_TX_TYPE_SF,
+ MT_TX_TYPE_CMD,
+ MT_TX_TYPE_FW,
+};
+
+enum tx_port_idx {
+ MT_TX_PORT_IDX_LMAC,
+ MT_TX_PORT_IDX_MCU
+};
+
+enum tx_mcu_port_q_idx {
+ MT_TX_MCU_PORT_RX_Q0 = 0x20,
+ MT_TX_MCU_PORT_RX_Q1,
+ MT_TX_MCU_PORT_RX_Q2,
+ MT_TX_MCU_PORT_RX_Q3,
+ MT_TX_MCU_PORT_RX_FWDL = 0x3e
+};
+
+#define MT_CT_INFO_APPLY_TXD BIT(0)
+#define MT_CT_INFO_COPY_HOST_TXD_ALL BIT(1)
+#define MT_CT_INFO_MGMT_FRAME BIT(2)
+#define MT_CT_INFO_NONE_CIPHER_FRAME BIT(3)
+#define MT_CT_INFO_HSR2_TX BIT(4)
+
+#define MT_TXD_SIZE (8 * 4)
+
+#define MT_TXD0_Q_IDX GENMASK(31, 25)
+#define MT_TXD0_PKT_FMT GENMASK(24, 23)
+#define MT_TXD0_ETH_TYPE_OFFSET GENMASK(22, 16)
+#define MT_TXD0_TX_BYTES GENMASK(15, 0)
+
+#define MT_TXD1_LONG_FORMAT BIT(31)
+#define MT_TXD1_TGID BIT(30)
+#define MT_TXD1_OWN_MAC GENMASK(29, 24)
+#define MT_TXD1_AMSDU BIT(23)
+#define MT_TXD1_TID GENMASK(22, 20)
+#define MT_TXD1_HDR_PAD GENMASK(19, 18)
+#define MT_TXD1_HDR_FORMAT GENMASK(17, 16)
+#define MT_TXD1_HDR_INFO GENMASK(15, 11)
+#define MT_TXD1_VTA BIT(10)
+#define MT_TXD1_WLAN_IDX GENMASK(9, 0)
+
+#define MT_TXD2_FIX_RATE BIT(31)
+#define MT_TXD2_FIXED_RATE BIT(30)
+#define MT_TXD2_POWER_OFFSET GENMASK(29, 24)
+#define MT_TXD2_MAX_TX_TIME GENMASK(23, 16)
+#define MT_TXD2_FRAG GENMASK(15, 14)
+#define MT_TXD2_HTC_VLD BIT(13)
+#define MT_TXD2_DURATION BIT(12)
+#define MT_TXD2_BIP BIT(11)
+#define MT_TXD2_MULTICAST BIT(10)
+#define MT_TXD2_RTS BIT(9)
+#define MT_TXD2_SOUNDING BIT(8)
+#define MT_TXD2_NDPA BIT(7)
+#define MT_TXD2_NDP BIT(6)
+#define MT_TXD2_FRAME_TYPE GENMASK(5, 4)
+#define MT_TXD2_SUB_TYPE GENMASK(3, 0)
+
+#define MT_TXD3_SN_VALID BIT(31)
+#define MT_TXD3_PN_VALID BIT(30)
+#define MT_TXD3_SW_POWER_MGMT BIT(29)
+#define MT_TXD3_BA_DISABLE BIT(28)
+#define MT_TXD3_SEQ GENMASK(27, 16)
+#define MT_TXD3_REM_TX_COUNT GENMASK(15, 11)
+#define MT_TXD3_TX_COUNT GENMASK(10, 6)
+#define MT_TXD3_TIMING_MEASURE BIT(5)
+#define MT_TXD3_DAS BIT(4)
+#define MT_TXD3_EEOSP BIT(3)
+#define MT_TXD3_EMRD BIT(2)
+#define MT_TXD3_PROTECT_FRAME BIT(1)
+#define MT_TXD3_NO_ACK BIT(0)
+
+#define MT_TXD4_PN_LOW GENMASK(31, 0)
+
+#define MT_TXD5_PN_HIGH GENMASK(31, 16)
+#define MT_TXD5_MD BIT(15)
+#define MT_TXD5_ADD_BA BIT(14)
+#define MT_TXD5_TX_STATUS_HOST BIT(10)
+#define MT_TXD5_TX_STATUS_MCU BIT(9)
+#define MT_TXD5_TX_STATUS_FMT BIT(8)
+#define MT_TXD5_PID GENMASK(7, 0)
+
+#define MT_TXD6_TX_IBF BIT(31)
+#define MT_TXD6_TX_EBF BIT(30)
+#define MT_TXD6_TX_RATE GENMASK(29, 16)
+#define MT_TXD6_SGI GENMASK(15, 14)
+#define MT_TXD6_HELTF GENMASK(13, 12)
+#define MT_TXD6_LDPC BIT(11)
+#define MT_TXD6_SPE_ID_IDX BIT(10)
+#define MT_TXD6_ANT_ID GENMASK(7, 4)
+#define MT_TXD6_DYN_BW BIT(3)
+#define MT_TXD6_FIXED_BW BIT(2)
+#define MT_TXD6_BW GENMASK(2, 0)
+
+#define MT_TXD7_TXD_LEN GENMASK(31, 30)
+#define MT_TXD7_UDP_TCP_SUM BIT(29)
+#define MT_TXD7_IP_SUM BIT(28)
+
+#define MT_TXD7_TYPE GENMASK(21, 20)
+#define MT_TXD7_SUB_TYPE GENMASK(19, 16)
+
+#define MT_TXD7_PSE_FID GENMASK(27, 16)
+#define MT_TXD7_SPE_IDX GENMASK(15, 11)
+#define MT_TXD7_HW_AMSDU BIT(10)
+#define MT_TXD7_TX_TIME GENMASK(9, 0)
+
+#define MT_TX_RATE_STBC BIT(13)
+#define MT_TX_RATE_NSS GENMASK(12, 10)
+#define MT_TX_RATE_MODE GENMASK(9, 6)
+#define MT_TX_RATE_IDX GENMASK(5, 0)
+
+#define MT_TXP_MAX_BUF_NUM 6
+
+struct mt7915_txp {
+ __le16 flags;
+ __le16 token;
+ u8 bss_idx;
+ u8 rept_wds_wcid;
+ u8 rsv;
+ u8 nbuf;
+ __le32 buf[MT_TXP_MAX_BUF_NUM];
+ __le16 len[MT_TXP_MAX_BUF_NUM];
+} __packed __aligned(4);
+
+struct mt7915_tx_free {
+ __le16 rx_byte_cnt;
+ __le16 ctrl;
+ u8 txd_cnt;
+ u8 rsv[3];
+ __le32 info[];
+} __packed __aligned(4);
+
+#define MT_TX_FREE_MSDU_CNT GENMASK(9, 0)
+#define MT_TX_FREE_WLAN_ID GENMASK(23, 14)
+#define MT_TX_FREE_LATENCY GENMASK(12, 0)
+/* 0: success, others: dropped */
+#define MT_TX_FREE_STATUS GENMASK(14, 13)
+#define MT_TX_FREE_MSDU_ID GENMASK(30, 16)
+#define MT_TX_FREE_PAIR BIT(31)
+/* this field will be supported in a later hardware revision */
+#define MT_TX_FREE_RATE GENMASK(13, 0)
+
+struct mt7915_dfs_pulse {
+ u32 max_width; /* us */
+ int max_pwr; /* dbm */
+ int min_pwr; /* dbm */
+ u32 min_stgr_pri; /* us */
+ u32 max_stgr_pri; /* us */
+ u32 min_cr_pri; /* us */
+ u32 max_cr_pri; /* us */
+};
+
+struct mt7915_dfs_pattern {
+ u8 enb;
+ u8 stgr;
+ u8 min_crpn;
+ u8 max_crpn;
+ u8 min_crpr;
+ u8 min_pw;
+ u32 min_pri;
+ u32 max_pri;
+ u8 max_pw;
+ u8 min_crbn;
+ u8 max_crbn;
+ u8 min_stgpn;
+ u8 max_stgpn;
+ u8 min_stgpr;
+ u8 rsv[2];
+ u32 min_stgpr_diff;
+} __packed;
+
+struct mt7915_dfs_radar_spec {
+ struct mt7915_dfs_pulse pulse_th;
+ struct mt7915_dfs_pattern radar_pattern[16];
+};
+
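+/* struct mt7915_txp immediately follows the hardware TXD within the
+ * txwi cache entry
+ */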
+static inline struct mt7915_txp *
+mt7915_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+{
+ u8 *txwi;
+
+ if (!t)
+ return NULL;
+
+ txwi = mt76_get_txwi_ptr(dev, t);
+
+ return (struct mt7915_txp *)(txwi + MT_TXD_SIZE);
+}
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
new file mode 100644
index 000000000..e78d3efa3
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
@@ -0,0 +1,845 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include "mt7915.h"
+#include "mcu.h"
+
+static bool mt7915_dev_running(struct mt7915_dev *dev)
+{
+ struct mt7915_phy *phy;
+
+ if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
+ return true;
+
+ phy = mt7915_ext_phy(dev);
+
+ return phy && test_bit(MT76_STATE_RUNNING, &phy->mt76->state);
+}
+
+static int mt7915_start(struct ieee80211_hw *hw)
+{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ bool running;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ running = mt7915_dev_running(dev);
+
+ if (!running) {
+ mt7915_mcu_set_pm(dev, 0, 0);
+ mt7915_mcu_set_mac(dev, 0, true, false);
+ mt7915_mcu_set_scs(dev, 0, true);
+ }
+
+ if (phy != &dev->phy) {
+ mt7915_mcu_set_pm(dev, 1, 0);
+ mt7915_mcu_set_mac(dev, 1, true, false);
+ mt7915_mcu_set_scs(dev, 1, true);
+ }
+
+ mt7915_mcu_set_sku_en(phy, true);
+ mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD_SET_RX_PATH);
+
+ set_bit(MT76_STATE_RUNNING, &phy->mt76->state);
+
+ ieee80211_queue_delayed_work(hw, &phy->mac_work,
+ MT7915_WATCHDOG_TIME);
+
+ if (!running)
+ mt7915_mac_reset_counters(phy);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ return 0;
+}
+
+static void mt7915_stop(struct ieee80211_hw *hw)
+{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+
+ cancel_delayed_work_sync(&phy->mac_work);
+
+ mutex_lock(&dev->mt76.mutex);
+
+ clear_bit(MT76_STATE_RUNNING, &phy->mt76->state);
+
+ if (phy != &dev->phy) {
+ mt7915_mcu_set_pm(dev, 1, 1);
+ mt7915_mcu_set_mac(dev, 1, false, false);
+ }
+
+ if (!mt7915_dev_running(dev)) {
+ mt7915_mcu_set_pm(dev, 0, 1);
+ mt7915_mcu_set_mac(dev, 0, false, false);
+ }
+
+ mutex_unlock(&dev->mt76.mutex);
+}
+
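+/* pick a free own-MAC (hw bssid) slot for the interface type, or -1 if
+ * none is available
+ */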
+static int get_omac_idx(enum nl80211_iftype type, u32 mask)
+{
+ int i;
+
+ switch (type) {
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_AP:
+ /* ap uses hw bssid 0 and ext bssid */
+ if (~mask & BIT(HW_BSSID_0))
+ return HW_BSSID_0;
+
+ for (i = EXT_BSSID_1; i < EXT_BSSID_END; i++)
+ if (~mask & BIT(i))
+ return i;
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_STATION:
+ /* station uses hw bssid other than 0 */
+ for (i = HW_BSSID_1; i < HW_BSSID_MAX; i++)
+ if (~mask & BIT(i))
+ return i;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ return -1;
+}
+
+static int mt7915_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt76_txq *mtxq;
+ bool ext_phy = phy != &dev->phy;
+ int idx, ret = 0;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ mvif->idx = ffs(~phy->mt76->vif_mask) - 1;
+ if (mvif->idx >= MT7915_MAX_INTERFACES) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ idx = get_omac_idx(vif->type, phy->omac_mask);
+ if (idx < 0) {
+ ret = -ENOSPC;
+ goto out;
+ }
+ mvif->omac_idx = idx;
+ mvif->phy = phy;
+ mvif->band_idx = ext_phy;
+
+ if (ext_phy)
+ mvif->wmm_idx = ext_phy * (MT7915_MAX_WMM_SETS / 2) +
+ mvif->idx % (MT7915_MAX_WMM_SETS / 2);
+ else
+ mvif->wmm_idx = mvif->idx % MT7915_MAX_WMM_SETS;
+
+ ret = mt7915_mcu_add_dev_info(dev, vif, true);
+ if (ret)
+ goto out;
+
+ phy->mt76->vif_mask |= BIT(mvif->idx);
+ phy->omac_mask |= BIT(mvif->omac_idx);
+
+ idx = MT7915_WTBL_RESERVED - mvif->idx;
+
+ INIT_LIST_HEAD(&mvif->sta.rc_list);
+ INIT_LIST_HEAD(&mvif->sta.stats_list);
+ INIT_LIST_HEAD(&mvif->sta.poll_list);
+ mvif->sta.wcid.idx = idx;
+ mvif->sta.wcid.ext_phy = mvif->band_idx;
+ mvif->sta.wcid.hw_key_idx = -1;
+ mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ mt7915_mac_wtbl_update(dev, idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+
+ rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
+ if (vif->txq) {
+ mtxq = (struct mt76_txq *)vif->txq->drv_priv;
+ mtxq->wcid = &mvif->sta.wcid;
+ }
+
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
+}
+
+static void mt7915_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_sta *msta = &mvif->sta;
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ int idx = msta->wcid.idx;
+
+ /* TODO: disable beacon for the bss */
+
+ mt7915_mcu_add_dev_info(dev, vif, false);
+
+ rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
+
+ mutex_lock(&dev->mt76.mutex);
+ phy->mt76->vif_mask &= ~BIT(mvif->idx);
+ phy->omac_mask &= ~BIT(mvif->omac_idx);
+ mutex_unlock(&dev->mt76.mutex);
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (!list_empty(&msta->poll_list))
+ list_del_init(&msta->poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+}
+
+static void mt7915_init_dfs_state(struct mt7915_phy *phy)
+{
+ struct mt76_phy *mphy = phy->mt76;
+ struct ieee80211_hw *hw = mphy->hw;
+ struct cfg80211_chan_def *chandef = &hw->conf.chandef;
+
+ if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ return;
+
+ if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR))
+ return;
+
+ if (mphy->chandef.chan->center_freq == chandef->chan->center_freq &&
+ mphy->chandef.width == chandef->width)
+ return;
+
+ phy->dfs_state = -1;
+}
+
+static int mt7915_set_channel(struct mt7915_phy *phy)
+{
+ struct mt7915_dev *dev = phy->dev;
+ int ret;
+
+ cancel_delayed_work_sync(&phy->mac_work);
+
+ mutex_lock(&dev->mt76.mutex);
+ set_bit(MT76_RESET, &phy->mt76->state);
+
+ mt7915_init_dfs_state(phy);
+ mt76_set_channel(phy->mt76);
+
+ ret = mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD_CHANNEL_SWITCH);
+ if (ret)
+ goto out;
+
+ mt7915_mac_set_timing(phy);
+ ret = mt7915_dfs_init_radar_detector(phy);
+ mt7915_mac_cca_stats_reset(phy);
+
+ mt7915_mac_reset_counters(phy);
+ phy->noise = 0;
+
+out:
+ clear_bit(MT76_RESET, &phy->mt76->state);
+ mutex_unlock(&dev->mt76.mutex);
+
+ mt76_txq_schedule_all(phy->mt76);
+ ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mac_work,
+ MT7915_WATCHDOG_TIME);
+
+ return ret;
+}
+
+static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_sta *msta = sta ? (struct mt7915_sta *)sta->drv_priv :
+ &mvif->sta;
+ struct mt76_wcid *wcid = &msta->wcid;
+ int idx = key->keyidx;
+
+ /* The hardware does not support per-STA RX GTK; fall back
+ * to software mode for these.
+ */
+ if ((vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) &&
+ (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
+ key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return -EOPNOTSUPP;
+
+ /* fall back to sw encryption for unsupported ciphers */
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ case WLAN_CIPHER_SUITE_SMS4:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (cmd == SET_KEY) {
+ key->hw_key_idx = wcid->idx;
+ wcid->hw_key_idx = idx;
+ } else if (idx == wcid->hw_key_idx) {
+ wcid->hw_key_idx = -1;
+ }
+ mt76_wcid_key_setup(&dev->mt76, wcid,
+ cmd == SET_KEY ? key : NULL);
+
+ return mt7915_mcu_add_key(dev, vif, msta, key, cmd);
+}
+
+static int mt7915_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ bool band = phy != &dev->phy;
+ int ret;
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ ieee80211_stop_queues(hw);
+ ret = mt7915_set_channel(phy);
+ if (ret)
+ return ret;
+ ieee80211_wake_queues(hw);
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
+ ret = mt7915_mcu_set_sku(phy);
+ if (ret)
+ return ret;
+ }
+
+ mutex_lock(&dev->mt76.mutex);
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
+ phy->rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
+ else
+ phy->rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC;
+
+ mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter);
+ }
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ return 0;
+}
+
+static int
+mt7915_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+
+ /* no need to update right away, we'll get BSS_CHANGED_QOS */
+ queue = mt7915_lmac_mapping(dev, queue);
+ mvif->queue_params[queue] = *params;
+
+ return 0;
+}
+
+static void mt7915_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ bool band = phy != &dev->phy;
+
+ u32 ctl_flags = MT_WF_RFCR1_DROP_ACK |
+ MT_WF_RFCR1_DROP_BF_POLL |
+ MT_WF_RFCR1_DROP_BA |
+ MT_WF_RFCR1_DROP_CFEND |
+ MT_WF_RFCR1_DROP_CFACK;
+ u32 flags = 0;
+
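+/* for each FIF_* flag: record whether mac80211 requested the frames and
+ * set the corresponding RFCR drop bits only when it did not
+ */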
+#define MT76_FILTER(_flag, _hw) do { \
+ flags |= *total_flags & FIF_##_flag; \
+ phy->rxfilter &= ~(_hw); \
+ phy->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
+ } while (0)
+
+ phy->rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
+ MT_WF_RFCR_DROP_OTHER_BEACON |
+ MT_WF_RFCR_DROP_FRAME_REPORT |
+ MT_WF_RFCR_DROP_PROBEREQ |
+ MT_WF_RFCR_DROP_MCAST_FILTERED |
+ MT_WF_RFCR_DROP_MCAST |
+ MT_WF_RFCR_DROP_BCAST |
+ MT_WF_RFCR_DROP_DUPLICATE |
+ MT_WF_RFCR_DROP_A2_BSSID |
+ MT_WF_RFCR_DROP_UNWANTED_CTL |
+ MT_WF_RFCR_DROP_STBC_MULTI);
+
+ MT76_FILTER(OTHER_BSS, MT_WF_RFCR_DROP_OTHER_TIM |
+ MT_WF_RFCR_DROP_A3_MAC |
+ MT_WF_RFCR_DROP_A3_BSSID);
+
+ MT76_FILTER(FCSFAIL, MT_WF_RFCR_DROP_FCSFAIL);
+
+ MT76_FILTER(CONTROL, MT_WF_RFCR_DROP_CTS |
+ MT_WF_RFCR_DROP_RTS |
+ MT_WF_RFCR_DROP_CTL_RSV |
+ MT_WF_RFCR_DROP_NDPA);
+
+ *total_flags = flags;
+ mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter);
+
+ if (*total_flags & FIF_CONTROL)
+ mt76_clear(dev, MT_WF_RFCR1(band), ctl_flags);
+ else
+ mt76_set(dev, MT_WF_RFCR1(band), ctl_flags);
+}
+
+static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u32 changed)
+{
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+
+ mutex_lock(&dev->mt76.mutex);
+
+ /*
+ * station mode uses the BSSID to map the wlan entry to a peer;
+ * the peer then references bss_info_rfch to set its bandwidth cap.
+ */
+ if (changed & BSS_CHANGED_BSSID &&
+ vif->type == NL80211_IFTYPE_STATION) {
+ bool join = !is_zero_ether_addr(info->bssid);
+
+ mt7915_mcu_add_bss_info(phy, vif, join);
+ mt7915_mcu_add_sta(dev, vif, NULL, join);
+ }
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ mt7915_mcu_add_bss_info(phy, vif, info->assoc);
+ mt7915_mcu_add_obss_spr(dev, vif, info->he_obss_pd.enable);
+ }
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ int slottime = info->use_short_slot ? 9 : 20;
+
+ if (slottime != phy->slottime) {
+ phy->slottime = slottime;
+ mt7915_mac_set_timing(phy);
+ }
+ }
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED) {
+ mt7915_mcu_add_bss_info(phy, vif, info->enable_beacon);
+ mt7915_mcu_add_sta(dev, vif, NULL, info->enable_beacon);
+ }
+
+ /* ensure that txcmd_mode is enabled after bss_info */
+ if (changed & (BSS_CHANGED_QOS | BSS_CHANGED_BEACON_ENABLED))
+ mt7915_mcu_set_tx(dev, vif);
+
+ if (changed & BSS_CHANGED_HE_OBSS_PD)
+ mt7915_mcu_add_obss_spr(dev, vif, info->he_obss_pd.enable);
+
+ if (changed & (BSS_CHANGED_BEACON |
+ BSS_CHANGED_BEACON_ENABLED))
+ mt7915_mcu_add_beacon(hw, vif, info->enable_beacon);
+
+ mutex_unlock(&dev->mt76.mutex);
+}
+
+static void
+mt7915_channel_switch_beacon(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *chandef)
+{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+
+ mutex_lock(&dev->mt76.mutex);
+ mt7915_mcu_add_beacon(hw, vif, true);
+ mutex_unlock(&dev->mt76.mutex);
+}
+
+int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ int ret, idx;
+
+ idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA - 1);
+ if (idx < 0)
+ return -ENOSPC;
+
+ INIT_LIST_HEAD(&msta->rc_list);
+ INIT_LIST_HEAD(&msta->stats_list);
+ INIT_LIST_HEAD(&msta->poll_list);
+ msta->vif = mvif;
+ msta->wcid.sta = 1;
+ msta->wcid.idx = idx;
+ msta->wcid.ext_phy = mvif->band_idx;
+ msta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ msta->stats.jiffies = jiffies;
+
+ mt7915_mac_wtbl_update(dev, idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+
+ ret = mt7915_mcu_add_sta(dev, vif, sta, true);
+ if (ret)
+ return ret;
+
+ return mt7915_mcu_add_sta_adv(dev, vif, sta, true);
+}
+
+void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+
+ mt7915_mcu_add_sta_adv(dev, vif, sta, false);
+ mt7915_mcu_add_sta(dev, vif, sta, false);
+
+ mt7915_mac_wtbl_update(dev, msta->wcid.idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (!list_empty(&msta->poll_list))
+ list_del_init(&msta->poll_list);
+ if (!list_empty(&msta->stats_list))
+ list_del_init(&msta->stats_list);
+ if (!list_empty(&msta->rc_list))
+ list_del_init(&msta->rc_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+}
+
+static void mt7915_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt76_phy *mphy = hw->priv;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+
+ if (control->sta) {
+ struct mt7915_sta *sta;
+
+ sta = (struct mt7915_sta *)control->sta->drv_priv;
+ wcid = &sta->wcid;
+ }
+
+ if (vif && !control->sta) {
+ struct mt7915_vif *mvif;
+
+ mvif = (struct mt7915_vif *)vif->drv_priv;
+ wcid = &mvif->sta.wcid;
+ }
+
+ mt76_tx(mphy, control->sta, wcid, skb);
+}
+
+static int mt7915_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
+{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+
+ mutex_lock(&dev->mt76.mutex);
+ mt7915_mcu_set_rts_thresh(phy, val);
+ mutex_unlock(&dev->mt76.mutex);
+
+ return 0;
+}
+
+static int
+mt7915_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct ieee80211_sta *sta = params->sta;
+ struct ieee80211_txq *txq = sta->txq[params->tid];
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ u16 tid = params->tid;
+ u16 ssn = params->ssn;
+ struct mt76_txq *mtxq;
+ int ret = 0;
+
+ if (!txq)
+ return -EINVAL;
+
+ mtxq = (struct mt76_txq *)txq->drv_priv;
+
+ mutex_lock(&dev->mt76.mutex);
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn,
+ params->buf_size);
+ mt7915_mcu_add_rx_ba(dev, params, true);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
+ mt7915_mcu_add_rx_ba(dev, params, false);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ mtxq->aggr = true;
+ mtxq->send_bar = false;
+ mt7915_mcu_add_tx_ba(dev, params, true);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ mtxq->aggr = false;
+ clear_bit(tid, &msta->ampdu_state);
+ mt7915_mcu_add_tx_ba(dev, params, false);
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ set_bit(tid, &msta->ampdu_state);
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
+ break;
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ mtxq->aggr = false;
+ clear_bit(tid, &msta->ampdu_state);
+ mt7915_mcu_add_tx_ba(dev, params, false);
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ }
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
+}
+
+static int
+mt7915_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NOTEXIST,
+ IEEE80211_STA_NONE);
+}
+
+static int
+mt7915_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NONE,
+ IEEE80211_STA_NOTEXIST);
+}
+
+static int
+mt7915_get_stats(struct ieee80211_hw *hw,
+ struct ieee80211_low_level_stats *stats)
+{
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mib_stats *mib = &phy->mib;
+
+ mutex_lock(&dev->mt76.mutex);
+ stats->dot11RTSSuccessCount = mib->rts_cnt;
+ stats->dot11RTSFailureCount = mib->rts_retries_cnt;
+ stats->dot11FCSErrorCount = mib->fcs_err_cnt;
+ stats->dot11ACKFailureCount = mib->ack_fail_cnt;
+
+ memset(mib, 0, sizeof(*mib));
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ return 0;
+}
+
+static u64
+mt7915_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ bool band = phy != &dev->phy;
+ union {
+ u64 t64;
+ u32 t32[2];
+ } tsf;
+ u16 n;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ n = mvif->omac_idx > HW_BSSID_MAX ? HW_BSSID_0 : mvif->omac_idx;
+ /* TSF software read */
+ mt76_set(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE);
+ tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0(band));
+ tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1(band));
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ return tsf.t64;
+}
+
+static void
+mt7915_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u64 timestamp)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ bool band = phy != &dev->phy;
+ union {
+ u64 t64;
+ u32 t32[2];
+ } tsf = { .t64 = timestamp, };
+ u16 n;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ n = mvif->omac_idx > HW_BSSID_MAX ? HW_BSSID_0 : mvif->omac_idx;
+ mt76_wr(dev, MT_LPON_UTTR0(band), tsf.t32[0]);
+ mt76_wr(dev, MT_LPON_UTTR1(band), tsf.t32[1]);
+ /* TSF software overwrite */
+ mt76_set(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_WRITE);
+
+ mutex_unlock(&dev->mt76.mutex);
+}
+
+static void
+mt7915_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
+{
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt7915_dev *dev = phy->dev;
+
+ mutex_lock(&dev->mt76.mutex);
+ phy->coverage_class = max_t(s16, coverage_class, 0);
+ mt7915_mac_set_timing(phy);
+ mutex_unlock(&dev->mt76.mutex);
+}
+
+static int
+mt7915_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ int max_nss = hweight8(hw->wiphy->available_antennas_tx);
+ bool ext_phy = phy != &dev->phy;
+
+ if (!tx_ant || tx_ant != rx_ant || ffs(tx_ant) > max_nss)
+ return -EINVAL;
+
+ if ((BIT(hweight8(tx_ant)) - 1) != tx_ant)
+ tx_ant = BIT(ffs(tx_ant) - 1) - 1;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ phy->mt76->antenna_mask = tx_ant;
+
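+ /* the ext phy chains occupy the upper bits of the chainmask */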
+ if (ext_phy) {
+ if (dev->chainmask == 0xf)
+ tx_ant <<= 2;
+ else
+ tx_ant <<= 1;
+ }
+ phy->chainmask = tx_ant;
+
+ mt76_set_stream_caps(phy->mt76, true);
+ mt7915_set_stream_vht_txbf_caps(phy);
+ mt7915_set_stream_he_caps(phy);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ return 0;
+}
+
+static void mt7915_sta_statistics(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo)
+{
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct mt7915_sta_stats *stats = &msta->stats;
+
+ if (!stats->tx_rate.legacy && !stats->tx_rate.flags)
+ return;
+
+ if (stats->tx_rate.legacy) {
+ sinfo->txrate.legacy = stats->tx_rate.legacy;
+ } else {
+ sinfo->txrate.mcs = stats->tx_rate.mcs;
+ sinfo->txrate.nss = stats->tx_rate.nss;
+ sinfo->txrate.bw = stats->tx_rate.bw;
+ sinfo->txrate.he_gi = stats->tx_rate.he_gi;
+ sinfo->txrate.he_dcm = stats->tx_rate.he_dcm;
+ sinfo->txrate.he_ru_alloc = stats->tx_rate.he_ru_alloc;
+ }
+ sinfo->txrate.flags = stats->tx_rate.flags;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+}
+
+static void
+mt7915_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u32 changed)
+{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ msta->stats.changed |= changed;
+ if (list_empty(&msta->rc_list))
+ list_add_tail(&msta->rc_list, &dev->sta_rc_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+
+ ieee80211_queue_work(hw, &dev->rc_work);
+}
+
+const struct ieee80211_ops mt7915_ops = {
+ .tx = mt7915_tx,
+ .start = mt7915_start,
+ .stop = mt7915_stop,
+ .add_interface = mt7915_add_interface,
+ .remove_interface = mt7915_remove_interface,
+ .config = mt7915_config,
+ .conf_tx = mt7915_conf_tx,
+ .configure_filter = mt7915_configure_filter,
+ .bss_info_changed = mt7915_bss_info_changed,
+ .sta_add = mt7915_sta_add,
+ .sta_remove = mt7915_sta_remove,
+ .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
+ .sta_rc_update = mt7915_sta_rc_update,
+ .set_key = mt7915_set_key,
+ .ampdu_action = mt7915_ampdu_action,
+ .set_rts_threshold = mt7915_set_rts_threshold,
+ .wake_tx_queue = mt76_wake_tx_queue,
+ .sw_scan_start = mt76_sw_scan,
+ .sw_scan_complete = mt76_sw_scan_complete,
+ .release_buffered_frames = mt76_release_buffered_frames,
+ .get_txpower = mt76_get_txpower,
+ .channel_switch_beacon = mt7915_channel_switch_beacon,
+ .get_stats = mt7915_get_stats,
+ .get_tsf = mt7915_get_tsf,
+ .set_tsf = mt7915_set_tsf,
+ .get_survey = mt76_get_survey,
+ .get_antenna = mt76_get_antenna,
+ .set_antenna = mt7915_set_antenna,
+ .set_coverage_class = mt7915_set_coverage_class,
+ .sta_statistics = mt7915_sta_statistics,
+#ifdef CONFIG_MAC80211_DEBUGFS
+ .sta_add_debugfs = mt7915_sta_add_debugfs,
+#endif
+};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
new file mode 100644
index 000000000..41054ee43
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -0,0 +1,3409 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#include <linux/firmware.h>
+#include <linux/fs.h>
+#include "mt7915.h"
+#include "mcu.h"
+#include "mac.h"
+#include "eeprom.h"
+
+struct mt7915_patch_hdr {
+ char build_date[16];
+ char platform[4];
+ __be32 hw_sw_ver;
+ __be32 patch_ver;
+ __be16 checksum;
+ u16 reserved;
+ struct {
+ __be32 patch_ver;
+ __be32 subsys;
+ __be32 feature;
+ __be32 n_region;
+ __be32 crc;
+ u32 reserved[11];
+ } desc;
+} __packed;
+
+struct mt7915_patch_sec {
+ __be32 type;
+ __be32 offs;
+ __be32 size;
+ union {
+ __be32 spec[13];
+ struct {
+ __be32 addr;
+ __be32 len;
+ __be32 sec_key_idx;
+ __be32 align_len;
+ u32 reserved[9];
+ } info;
+ };
+} __packed;
+
+struct mt7915_fw_trailer {
+ u8 chip_id;
+ u8 eco_code;
+ u8 n_region;
+ u8 format_ver;
+ u8 format_flag;
+ u8 reserved[2];
+ char fw_ver[10];
+ char build_date[15];
+ u32 crc;
+} __packed;
+
+struct mt7915_fw_region {
+ __le32 decomp_crc;
+ __le32 decomp_len;
+ __le32 decomp_blk_sz;
+ u8 reserved[4];
+ __le32 addr;
+ __le32 len;
+ u8 feature_set;
+ u8 reserved1[15];
+} __packed;
+
+#define MCU_PATCH_ADDRESS 0x200000
+
+#define MT_STA_BFER BIT(0)
+#define MT_STA_BFEE BIT(1)
+
+#define FW_FEATURE_SET_ENCRYPT BIT(0)
+#define FW_FEATURE_SET_KEY_IDX GENMASK(2, 1)
+#define FW_FEATURE_OVERRIDE_ADDR BIT(5)
+
+#define DL_MODE_ENCRYPT BIT(0)
+#define DL_MODE_KEY_IDX GENMASK(2, 1)
+#define DL_MODE_RESET_SEC_IV BIT(3)
+#define DL_MODE_WORKING_PDA_CR4 BIT(4)
+#define DL_MODE_NEED_RSP BIT(31)
+
+#define FW_START_OVERRIDE BIT(0)
+#define FW_START_WORKING_PDA_CR4 BIT(2)
+
+#define PATCH_SEC_TYPE_MASK GENMASK(15, 0)
+#define PATCH_SEC_TYPE_INFO 0x2
+
+#define to_wcid_lo(id) FIELD_GET(GENMASK(7, 0), (u16)id)
+#define to_wcid_hi(id) FIELD_GET(GENMASK(9, 8), (u16)id)
+
+#define HE_PHY(p, c) u8_get_bits(c, IEEE80211_HE_PHY_##p)
+#define HE_MAC(m, c) u8_get_bits(c, IEEE80211_HE_MAC_##m)
+
+static enum mt7915_cipher_type
+mt7915_mcu_get_cipher(int cipher)
+{
+ switch (cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return MT_CIPHER_WEP40;
+ case WLAN_CIPHER_SUITE_WEP104:
+ return MT_CIPHER_WEP104;
+ case WLAN_CIPHER_SUITE_TKIP:
+ return MT_CIPHER_TKIP;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ return MT_CIPHER_BIP_CMAC_128;
+ case WLAN_CIPHER_SUITE_CCMP:
+ return MT_CIPHER_AES_CCMP;
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ return MT_CIPHER_CCMP_256;
+ case WLAN_CIPHER_SUITE_GCMP:
+ return MT_CIPHER_GCMP;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ return MT_CIPHER_GCMP_256;
+ case WLAN_CIPHER_SUITE_SMS4:
+ return MT_CIPHER_WAPI;
+ default:
+ return MT_CIPHER_NONE;
+ }
+}
+
+static u8 mt7915_mcu_chan_bw(struct cfg80211_chan_def *chandef)
+{
+ static const u8 width_to_bw[] = {
+ [NL80211_CHAN_WIDTH_40] = CMD_CBW_40MHZ,
+ [NL80211_CHAN_WIDTH_80] = CMD_CBW_80MHZ,
+ [NL80211_CHAN_WIDTH_80P80] = CMD_CBW_8080MHZ,
+ [NL80211_CHAN_WIDTH_160] = CMD_CBW_160MHZ,
+ [NL80211_CHAN_WIDTH_5] = CMD_CBW_5MHZ,
+ [NL80211_CHAN_WIDTH_10] = CMD_CBW_10MHZ,
+ [NL80211_CHAN_WIDTH_20] = CMD_CBW_20MHZ,
+ [NL80211_CHAN_WIDTH_20_NOHT] = CMD_CBW_20MHZ,
+ };
+
+ if (chandef->width >= ARRAY_SIZE(width_to_bw))
+ return 0;
+
+ return width_to_bw[chandef->width];
+}
+
+static const struct ieee80211_sta_he_cap *
+mt7915_get_he_phy_cap(struct mt7915_phy *phy, struct ieee80211_vif *vif)
+{
+ struct ieee80211_supported_band *sband;
+ enum nl80211_band band;
+
+ band = phy->mt76->chandef.chan->band;
+ sband = phy->mt76->hw->wiphy->bands[band];
+
+ return ieee80211_get_he_iftype_cap(sband, vif->type);
+}
+
+static u8
+mt7915_get_phy_mode(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ enum nl80211_band band, struct ieee80211_sta *sta)
+{
+ struct ieee80211_sta_ht_cap *ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap;
+ const struct ieee80211_sta_he_cap *he_cap;
+ u8 mode = 0;
+
+ if (sta) {
+ ht_cap = &sta->ht_cap;
+ vht_cap = &sta->vht_cap;
+ he_cap = &sta->he_cap;
+ } else {
+ struct ieee80211_supported_band *sband;
+ struct mt7915_phy *phy;
+ struct mt7915_vif *mvif;
+
+ mvif = (struct mt7915_vif *)vif->drv_priv;
+ phy = mvif->band_idx ? mt7915_ext_phy(dev) : &dev->phy;
+ sband = phy->mt76->hw->wiphy->bands[band];
+
+ ht_cap = &sband->ht_cap;
+ vht_cap = &sband->vht_cap;
+ he_cap = ieee80211_get_he_iftype_cap(sband, vif->type);
+ }
+
+ if (band == NL80211_BAND_2GHZ) {
+ mode |= PHY_MODE_B | PHY_MODE_G;
+
+ if (ht_cap->ht_supported)
+ mode |= PHY_MODE_GN;
+
+ if (he_cap && he_cap->has_he)
+ mode |= PHY_MODE_AX_24G;
+ } else if (band == NL80211_BAND_5GHZ) {
+ mode |= PHY_MODE_A;
+
+ if (ht_cap->ht_supported)
+ mode |= PHY_MODE_AN;
+
+ if (vht_cap->vht_supported)
+ mode |= PHY_MODE_AC;
+
+ if (he_cap && he_cap->has_he)
+ mode |= PHY_MODE_AX_5G;
+ }
+
+ return mode;
+}
+
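+/* return the highest stream index (nss - 1) advertised in a VHT/HE MCS map */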
+static u8
+mt7915_mcu_get_sta_nss(u16 mcs_map)
+{
+ u8 nss;
+
+ for (nss = 8; nss > 0; nss--) {
+ u8 nss_mcs = (mcs_map >> (2 * (nss - 1))) & 3;
+
+ if (nss_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED)
+ break;
+ }
+
+ return nss - 1;
+}
+
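+/* prepend the MCU TXD to the command and select the target queue:
+ * FWDL for firmware scatter, MCU_WA once the WA firmware is running,
+ * otherwise the normal MCU queue
+ */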
+static int __mt7915_mcu_msg_send(struct mt7915_dev *dev, struct sk_buff *skb,
+ int cmd, int *wait_seq)
+{
+ struct mt7915_mcu_txd *mcu_txd;
+ u8 seq, pkt_fmt, qidx;
+ enum mt76_txq_id txq;
+ __le32 *txd;
+ u32 val;
+
+ seq = ++dev->mt76.mcu.msg_seq & 0xf;
+ if (!seq)
+ seq = ++dev->mt76.mcu.msg_seq & 0xf;
+
+ if (cmd == -MCU_CMD_FW_SCATTER) {
+ txq = MT_TXQ_FWDL;
+ goto exit;
+ }
+
+ mcu_txd = (struct mt7915_mcu_txd *)skb_push(skb, sizeof(*mcu_txd));
+
+ if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state)) {
+ txq = MT_TXQ_MCU_WA;
+ qidx = MT_TX_MCU_PORT_RX_Q0;
+ pkt_fmt = MT_TX_TYPE_CMD;
+ } else {
+ txq = MT_TXQ_MCU;
+ qidx = MT_TX_MCU_PORT_RX_Q0;
+ pkt_fmt = MT_TX_TYPE_CMD;
+ }
+
+ txd = mcu_txd->txd;
+
+ val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len) |
+ FIELD_PREP(MT_TXD0_PKT_FMT, pkt_fmt) |
+ FIELD_PREP(MT_TXD0_Q_IDX, qidx);
+ txd[0] = cpu_to_le32(val);
+
+ val = MT_TXD1_LONG_FORMAT |
+ FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_CMD);
+ txd[1] = cpu_to_le32(val);
+
+ mcu_txd->len = cpu_to_le16(skb->len - sizeof(mcu_txd->txd));
+ mcu_txd->pq_id = cpu_to_le16(MCU_PQ_ID(MT_TX_PORT_IDX_MCU, qidx));
+ mcu_txd->pkt_type = MCU_PKT_ID;
+ mcu_txd->seq = seq;
+
+ if (cmd < 0) {
+ mcu_txd->set_query = MCU_Q_NA;
+ mcu_txd->cid = -cmd;
+ } else {
+ mcu_txd->cid = MCU_CMD_EXT_CID;
+ mcu_txd->ext_cid = cmd;
+ mcu_txd->ext_cid_ack = 1;
+
+ /* do not use Q_SET for efuse */
+ if (cmd == MCU_EXT_CMD_EFUSE_ACCESS)
+ mcu_txd->set_query = MCU_Q_QUERY;
+ else
+ mcu_txd->set_query = MCU_Q_SET;
+ }
+
+ mcu_txd->s2d_index = MCU_S2D_H2N;
+ WARN_ON(cmd == MCU_EXT_CMD_EFUSE_ACCESS &&
+ mcu_txd->set_query != MCU_Q_QUERY);
+
+exit:
+ if (wait_seq)
+ *wait_seq = seq;
+
+ return mt76_tx_queue_skb_raw(dev, txq, skb, 0);
+}
+
+static int
+mt7915_mcu_parse_eeprom(struct mt7915_dev *dev, struct sk_buff *skb)
+{
+ struct mt7915_mcu_eeprom_info *res;
+ u8 *buf;
+
+ if (!skb)
+ return -EINVAL;
+
+ skb_pull(skb, sizeof(struct mt7915_mcu_rxd));
+
+ res = (struct mt7915_mcu_eeprom_info *)skb->data;
+ buf = dev->mt76.eeprom.data + le32_to_cpu(res->addr);
+ memcpy(buf, res->data, 16);
+
+ return 0;
+}
+
+static int
+mt7915_mcu_parse_response(struct mt7915_dev *dev, int cmd,
+ struct sk_buff *skb, int seq)
+{
+ struct mt7915_mcu_rxd *rxd = (struct mt7915_mcu_rxd *)skb->data;
+ int ret = 0;
+
+ if (seq != rxd->seq) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ switch (cmd) {
+ case -MCU_CMD_PATCH_SEM_CONTROL:
+ skb_pull(skb, sizeof(*rxd) - 4);
+ ret = *skb->data;
+ break;
+ case MCU_EXT_CMD_THERMAL_CTRL:
+ skb_pull(skb, sizeof(*rxd) + 4);
+ ret = le32_to_cpu(*(__le32 *)skb->data);
+ break;
+ case MCU_EXT_CMD_EFUSE_ACCESS:
+ ret = mt7915_mcu_parse_eeprom(dev, skb);
+ break;
+ default:
+ break;
+ }
+out:
+ dev_kfree_skb(skb);
+
+ return ret;
+}
+
+static int
+mt7915_mcu_wait_response(struct mt7915_dev *dev, int cmd, int seq)
+{
+ unsigned long expires = jiffies + 20 * HZ;
+ struct sk_buff *skb;
+ int ret = 0;
+
+ while (true) {
+ skb = mt76_mcu_get_response(&dev->mt76, expires);
+ if (!skb) {
+ dev_err(dev->mt76.dev, "Message %d (seq %d) timeout\n",
+ cmd, seq);
+ return -ETIMEDOUT;
+ }
+
+ ret = mt7915_mcu_parse_response(dev, cmd, skb, seq);
+ if (ret != -EAGAIN)
+ break;
+ }
+
+ return ret;
+}
+
+static int
+mt7915_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
+ int cmd, bool wait_resp)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ int ret, seq;
+
+ mutex_lock(&mdev->mcu.mutex);
+
+ ret = __mt7915_mcu_msg_send(dev, skb, cmd, &seq);
+ if (ret)
+ goto out;
+
+ if (wait_resp)
+ ret = mt7915_mcu_wait_response(dev, cmd, seq);
+
+out:
+ mutex_unlock(&mdev->mcu.mutex);
+
+ return ret;
+}
+
+static int
+mt7915_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
+ int len, bool wait_resp)
+{
+ struct sk_buff *skb;
+
+ skb = mt76_mcu_msg_alloc(mdev, data, len);
+ if (!skb)
+ return -ENOMEM;
+
+ return __mt76_mcu_skb_send_msg(mdev, skb, cmd, wait_resp);
+}
+
+static void
+mt7915_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ if (vif->csa_active)
+ ieee80211_csa_finish(vif);
+}
+
+static void
+mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb)
+{
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt7915_mcu_rdd_report *r;
+
+ r = (struct mt7915_mcu_rdd_report *)skb->data;
+
+ if (r->idx && dev->mt76.phy2)
+ mphy = dev->mt76.phy2;
+
+ ieee80211_radar_detected(mphy->hw);
+ dev->hw_pattern++;
+}
+
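+/* translate a firmware RA rate word into a struct rate_info (mode, mcs,
+ * nss, guard interval and bandwidth/RU allocation)
+ */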
+static void
+mt7915_mcu_tx_rate_cal(struct mt76_phy *mphy, struct mt7915_mcu_ra_info *ra,
+ struct rate_info *rate, u16 r)
+{
+ struct ieee80211_supported_band *sband;
+ u16 ru_idx = le16_to_cpu(ra->ru_idx);
+ u16 flags = 0;
+
+ rate->mcs = FIELD_GET(MT_RA_RATE_MCS, r);
+ rate->nss = FIELD_GET(MT_RA_RATE_NSS, r) + 1;
+
+ switch (FIELD_GET(MT_RA_RATE_TX_MODE, r)) {
+ case MT_PHY_TYPE_CCK:
+ case MT_PHY_TYPE_OFDM:
+ if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
+ sband = &mphy->sband_5g.sband;
+ else
+ sband = &mphy->sband_2g.sband;
+
+ rate->legacy = sband->bitrates[rate->mcs].bitrate;
+ break;
+ case MT_PHY_TYPE_HT:
+ case MT_PHY_TYPE_HT_GF:
+ rate->mcs += (rate->nss - 1) * 8;
+ flags |= RATE_INFO_FLAGS_MCS;
+
+ if (ra->gi)
+ flags |= RATE_INFO_FLAGS_SHORT_GI;
+ break;
+ case MT_PHY_TYPE_VHT:
+ flags |= RATE_INFO_FLAGS_VHT_MCS;
+
+ if (ra->gi)
+ flags |= RATE_INFO_FLAGS_SHORT_GI;
+ break;
+ case MT_PHY_TYPE_HE_SU:
+ case MT_PHY_TYPE_HE_EXT_SU:
+ case MT_PHY_TYPE_HE_TB:
+ case MT_PHY_TYPE_HE_MU:
+ rate->he_gi = ra->gi;
+ rate->he_dcm = FIELD_GET(MT_RA_RATE_DCM_EN, r);
+
+ flags |= RATE_INFO_FLAGS_HE_MCS;
+ break;
+ default:
+ break;
+ }
+ rate->flags = flags;
+
+ if (ru_idx) {
+ switch (ru_idx) {
+ case 1 ... 2:
+ rate->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_996;
+ break;
+ case 3 ... 6:
+ rate->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_484;
+ break;
+ case 7 ... 14:
+ rate->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_242;
+ break;
+ default:
+ rate->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ break;
+ }
+ rate->bw = RATE_INFO_BW_HE_RU;
+ } else {
+ u8 bw = mt7915_mcu_chan_bw(&mphy->chandef) -
+ FIELD_GET(MT_RA_RATE_BW, r);
+
+ switch (bw) {
+ case IEEE80211_STA_RX_BW_160:
+ rate->bw = RATE_INFO_BW_160;
+ break;
+ case IEEE80211_STA_RX_BW_80:
+ rate->bw = RATE_INFO_BW_80;
+ break;
+ case IEEE80211_STA_RX_BW_40:
+ rate->bw = RATE_INFO_BW_40;
+ break;
+ default:
+ rate->bw = RATE_INFO_BW_20;
+ break;
+ }
+ }
+}
+
+static void
+mt7915_mcu_tx_rate_report(struct mt7915_dev *dev, struct sk_buff *skb)
+{
+ struct mt7915_mcu_ra_info *ra = (struct mt7915_mcu_ra_info *)skb->data;
+ struct rate_info rate = {}, prob_rate = {};
+ u16 probe = le16_to_cpu(ra->prob_up_rate);
+ u16 attempts = le16_to_cpu(ra->attempts);
+ u16 curr = le16_to_cpu(ra->curr_rate);
+ u16 wcidx = le16_to_cpu(ra->wlan_idx);
+ struct mt76_phy *mphy = &dev->mphy;
+ struct mt7915_sta_stats *stats;
+ struct mt7915_sta *msta;
+ struct mt76_wcid *wcid;
+
+ if (wcidx >= MT76_N_WCIDS)
+ return;
+
+ wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
+ if (!wcid)
+ return;
+
+ msta = container_of(wcid, struct mt7915_sta, wcid);
+ stats = &msta->stats;
+
+ if (msta->wcid.ext_phy && dev->mt76.phy2)
+ mphy = dev->mt76.phy2;
+
+ /* current rate */
+ mt7915_mcu_tx_rate_cal(mphy, ra, &rate, curr);
+ stats->tx_rate = rate;
+
+ /* probing rate */
+ mt7915_mcu_tx_rate_cal(mphy, ra, &prob_rate, probe);
+ stats->prob_rate = prob_rate;
+
+ if (attempts) {
+ u16 success = le16_to_cpu(ra->success);
+
+ stats->per = 1000 * (attempts - success) / attempts;
+ }
+}
+
+static void
+mt7915_mcu_rx_log_message(struct mt7915_dev *dev, struct sk_buff *skb)
+{
+ struct mt7915_mcu_rxd *rxd = (struct mt7915_mcu_rxd *)skb->data;
+ const char *data = (char *)&rxd[1];
+ const char *type;
+
+ switch (rxd->s2d_index) {
+ case 0:
+ type = "WM";
+ break;
+ case 2:
+ type = "WA";
+ break;
+ default:
+ type = "unknown";
+ break;
+ }
+
+ wiphy_info(mt76_hw(dev)->wiphy, "%s: %s", type, data);
+}
+
+static void
+mt7915_mcu_rx_ext_event(struct mt7915_dev *dev, struct sk_buff *skb)
+{
+ struct mt7915_mcu_rxd *rxd = (struct mt7915_mcu_rxd *)skb->data;
+
+ switch (rxd->ext_eid) {
+ case MCU_EXT_EVENT_RDD_REPORT:
+ mt7915_mcu_rx_radar_detected(dev, skb);
+ break;
+ case MCU_EXT_EVENT_CSA_NOTIFY:
+ ieee80211_iterate_active_interfaces_atomic(dev->mt76.hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7915_mcu_csa_finish, dev);
+ break;
+ case MCU_EXT_EVENT_RATE_REPORT:
+ mt7915_mcu_tx_rate_report(dev, skb);
+ break;
+ case MCU_EXT_EVENT_FW_LOG_2_HOST:
+ mt7915_mcu_rx_log_message(dev, skb);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+mt7915_mcu_rx_unsolicited_event(struct mt7915_dev *dev, struct sk_buff *skb)
+{
+ struct mt7915_mcu_rxd *rxd = (struct mt7915_mcu_rxd *)skb->data;
+
+ switch (rxd->eid) {
+ case MCU_EVENT_EXT:
+ mt7915_mcu_rx_ext_event(dev, skb);
+ break;
+ default:
+ break;
+ }
+ dev_kfree_skb(skb);
+}
+
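+/* dispatch an MCU RX message: unsolicited events are handled here,
+ * everything else is queued as a command response
+ */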
+void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb)
+{
+ struct mt7915_mcu_rxd *rxd = (struct mt7915_mcu_rxd *)skb->data;
+
+ if (rxd->ext_eid == MCU_EXT_EVENT_THERMAL_PROTECT ||
+ rxd->ext_eid == MCU_EXT_EVENT_FW_LOG_2_HOST ||
+ rxd->ext_eid == MCU_EXT_EVENT_ASSERT_DUMP ||
+ rxd->ext_eid == MCU_EXT_EVENT_PS_SYNC ||
+ rxd->ext_eid == MCU_EXT_EVENT_RATE_REPORT ||
+ !rxd->seq)
+ mt7915_mcu_rx_unsolicited_event(dev, skb);
+ else
+ mt76_mcu_rx_event(&dev->mt76, skb);
+}
+
+static struct sk_buff *
+mt7915_mcu_alloc_sta_req(struct mt7915_dev *dev, struct mt7915_vif *mvif,
+ struct mt7915_sta *msta, int len)
+{
+ struct sta_req_hdr hdr = {
+ .bss_idx = mvif->idx,
+ .wlan_idx_lo = msta ? to_wcid_lo(msta->wcid.idx) : 0,
+ .wlan_idx_hi = msta ? to_wcid_hi(msta->wcid.idx) : 0,
+ .muar_idx = msta && msta->wcid.sta ? mvif->omac_idx : 0xe,
+ .is_tlv_append = 1,
+ };
+ struct sk_buff *skb;
+
+ skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ skb_put_data(skb, &hdr, sizeof(hdr));
+
+ return skb;
+}
+
+static struct wtbl_req_hdr *
+mt7915_mcu_alloc_wtbl_req(struct mt7915_dev *dev, struct mt7915_sta *msta,
+ int cmd, void *sta_wtbl, struct sk_buff **skb)
+{
+ struct tlv *sta_hdr = sta_wtbl;
+ struct wtbl_req_hdr hdr = {
+ .wlan_idx_lo = to_wcid_lo(msta->wcid.idx),
+ .wlan_idx_hi = to_wcid_hi(msta->wcid.idx),
+ .operation = cmd,
+ };
+ struct sk_buff *nskb = *skb;
+
+ if (!nskb) {
+ nskb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
+ MT7915_WTBL_UPDATE_BA_SIZE);
+ if (!nskb)
+ return ERR_PTR(-ENOMEM);
+
+ *skb = nskb;
+ }
+
+ if (sta_hdr)
+ le16_add_cpu(&sta_hdr->len, sizeof(hdr));
+
+ return skb_put_data(nskb, &hdr, sizeof(hdr));
+}
+
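+/* append a TLV to the request and update the enclosing TLV counters and
+ * length fields
+ */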
+static struct tlv *
+mt7915_mcu_add_nested_tlv(struct sk_buff *skb, int tag, int len,
+ void *sta_ntlv, void *sta_wtbl)
+{
+ struct sta_ntlv_hdr *ntlv_hdr = sta_ntlv;
+ struct tlv *sta_hdr = sta_wtbl;
+ struct tlv *ptlv, tlv = {
+ .tag = cpu_to_le16(tag),
+ .len = cpu_to_le16(len),
+ };
+ u16 ntlv;
+
+ ptlv = skb_put(skb, len);
+ memcpy(ptlv, &tlv, sizeof(tlv));
+
+ ntlv = le16_to_cpu(ntlv_hdr->tlv_num);
+ ntlv_hdr->tlv_num = cpu_to_le16(ntlv + 1);
+
+ if (sta_hdr) {
+ u16 size = le16_to_cpu(sta_hdr->len);
+
+ sta_hdr->len = cpu_to_le16(size + len);
+ }
+
+ return ptlv;
+}
+
+static struct tlv *
+mt7915_mcu_add_tlv(struct sk_buff *skb, int tag, int len)
+{
+ return mt7915_mcu_add_nested_tlv(skb, tag, len, skb->data, NULL);
+}
+
+static struct tlv *
+mt7915_mcu_add_nested_subtlv(struct sk_buff *skb, int sub_tag, int sub_len,
+ __le16 *sub_ntlv, __le16 *len)
+{
+ struct tlv *ptlv, tlv = {
+ .tag = cpu_to_le16(sub_tag),
+ .len = cpu_to_le16(sub_len),
+ };
+
+ ptlv = skb_put(skb, sub_len);
+ memcpy(ptlv, &tlv, sizeof(tlv));
+
+ le16_add_cpu(sub_ntlv, 1);
+ le16_add_cpu(len, sub_len);
+
+ return ptlv;
+}
+
+/** bss info **/
+static int
+mt7915_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
+ struct mt7915_phy *phy, bool enable)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+ enum nl80211_band band = chandef->chan->band;
+ struct bss_info_basic *bss;
+ u16 wlan_idx = mvif->sta.wcid.idx;
+ u32 type = NETWORK_INFRA;
+ struct tlv *tlv;
+
+ tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_BASIC, sizeof(*bss));
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_AP:
+ break;
+ case NL80211_IFTYPE_STATION:
+ /* TODO: enable BSS_INFO_UAPSD & BSS_INFO_PM */
+ if (enable) {
+ struct ieee80211_sta *sta;
+ struct mt7915_sta *msta;
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, vif->bss_conf.bssid);
+ if (!sta) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+ msta = (struct mt7915_sta *)sta->drv_priv;
+ wlan_idx = msta->wcid.idx;
+ rcu_read_unlock();
+ }
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ type = NETWORK_IBSS;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ bss = (struct bss_info_basic *)tlv;
+ memcpy(bss->bssid, vif->bss_conf.bssid, ETH_ALEN);
+ bss->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
+ bss->network_type = cpu_to_le32(type);
+ bss->dtim_period = vif->bss_conf.dtim_period;
+ bss->bmc_wcid_lo = to_wcid_lo(wlan_idx);
+ bss->bmc_wcid_hi = to_wcid_hi(wlan_idx);
+ bss->phy_mode = mt7915_get_phy_mode(phy->dev, vif, band, NULL);
+ bss->wmm_idx = mvif->wmm_idx;
+ bss->active = enable;
+
+ return 0;
+}
+
+static void
+mt7915_mcu_bss_omac_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct bss_info_omac *omac;
+ struct tlv *tlv;
+ u32 type = 0;
+ u8 idx;
+
+ tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_OMAC, sizeof(*omac));
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_AP:
+ type = CONNECTION_INFRA_AP;
+ break;
+ case NL80211_IFTYPE_STATION:
+ type = CONNECTION_INFRA_STA;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ type = CONNECTION_IBSS_ADHOC;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ omac = (struct bss_info_omac *)tlv;
+ idx = mvif->omac_idx > EXT_BSSID_START ? HW_BSSID_0 : mvif->omac_idx;
+ omac->conn_type = cpu_to_le32(type);
+ omac->omac_idx = mvif->omac_idx;
+ omac->band_idx = mvif->band_idx;
+ omac->hw_bss_idx = idx;
+}
+
+struct mt7915_he_obss_narrow_bw_ru_data {
+ bool tolerated;
+};
+
+static void mt7915_check_he_obss_narrow_bw_ru_iter(struct wiphy *wiphy,
+ struct cfg80211_bss *bss,
+ void *_data)
+{
+ struct mt7915_he_obss_narrow_bw_ru_data *data = _data;
+ const struct element *elem;
+
+ elem = ieee80211_bss_get_elem(bss, WLAN_EID_EXT_CAPABILITY);
+
+ if (!elem || elem->datalen <= 10 ||
+ !(elem->data[10] &
+ WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT))
+ data->tolerated = false;
+}
+
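+/* scan known BSSes on the current (radar) channel and report whether any
+ * of them cannot tolerate narrow bandwidth RU transmissions
+ */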
+static bool mt7915_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt7915_he_obss_narrow_bw_ru_data iter_data = {
+ .tolerated = true,
+ };
+
+ if (!(vif->bss_conf.chandef.chan->flags & IEEE80211_CHAN_RADAR))
+ return false;
+
+ cfg80211_bss_iter(hw->wiphy, &vif->bss_conf.chandef,
+ mt7915_check_he_obss_narrow_bw_ru_iter,
+ &iter_data);
+
+ /*
+ * Return true if there is at least one AP on the radar channel
+ * that cannot tolerate 26-tone RU UL OFDMA transmissions using
+ * HE TB PPDU.
+ */
+ return !iter_data.tolerated;
+}
+
+static void
+mt7915_mcu_bss_rfch_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
+ struct mt7915_phy *phy)
+{
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+ struct bss_info_rf_ch *ch;
+ struct tlv *tlv;
+ int freq1 = chandef->center_freq1;
+
+ tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_RF_CH, sizeof(*ch));
+
+ ch = (struct bss_info_rf_ch *)tlv;
+ ch->pri_ch = chandef->chan->hw_value;
+ ch->center_ch0 = ieee80211_frequency_to_channel(freq1);
+ ch->bw = mt7915_mcu_chan_bw(chandef);
+
+ if (chandef->width == NL80211_CHAN_WIDTH_80P80) {
+ int freq2 = chandef->center_freq2;
+
+ ch->center_ch1 = ieee80211_frequency_to_channel(freq2);
+ }
+
+ if (vif->bss_conf.he_support && vif->type == NL80211_IFTYPE_STATION) {
+ struct mt7915_dev *dev = phy->dev;
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ bool ext_phy = phy != &dev->phy;
+
+ if (ext_phy && dev->mt76.phy2)
+ mphy = dev->mt76.phy2;
+
+ ch->he_ru26_block =
+ mt7915_check_he_obss_narrow_bw_ru(mphy->hw, vif);
+ ch->he_all_disable = false;
+ } else {
+ ch->he_all_disable = true;
+ }
+}
+
+static void
+mt7915_mcu_bss_ra_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
+ struct mt7915_phy *phy)
+{
+ struct bss_info_ra *ra;
+ struct tlv *tlv;
+ int max_nss = hweight8(phy->chainmask);
+
+ tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_RA, sizeof(*ra));
+
+ ra = (struct bss_info_ra *)tlv;
+ ra->op_mode = vif->type == NL80211_IFTYPE_AP;
+ ra->adhoc_en = vif->type == NL80211_IFTYPE_ADHOC;
+ ra->short_preamble = true;
+ ra->tx_streams = max_nss;
+ ra->rx_streams = max_nss;
+ ra->algo = 4;
+ ra->train_up_rule = 2;
+ ra->train_up_high_thres = 110;
+ ra->train_up_rule_rssi = -70;
+ ra->low_traffic_thres = 2;
+ ra->phy_cap = cpu_to_le32(0xfdf);
+ ra->interval = cpu_to_le32(500);
+ ra->fast_interval = cpu_to_le32(100);
+}
+
+static void
+mt7915_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
+ struct mt7915_phy *phy)
+{
+#define DEFAULT_HE_PE_DURATION 4
+#define DEFAULT_HE_DURATION_RTS_THRES 1023
+ const struct ieee80211_sta_he_cap *cap;
+ struct bss_info_he *he;
+ struct tlv *tlv;
+
+ cap = mt7915_get_he_phy_cap(phy, vif);
+
+ tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_HE_BASIC, sizeof(*he));
+
+ he = (struct bss_info_he *)tlv;
+ he->he_pe_duration = vif->bss_conf.htc_trig_based_pkt_ext;
+ if (!he->he_pe_duration)
+ he->he_pe_duration = DEFAULT_HE_PE_DURATION;
+
+ he->he_rts_thres = cpu_to_le16(vif->bss_conf.frame_time_rts_th);
+ if (!he->he_rts_thres)
+ he->he_rts_thres = cpu_to_le16(DEFAULT_HE_DURATION_RTS_THRES);
+
+ he->max_nss_mcs[CMD_HE_MCS_BW80] = cap->he_mcs_nss_supp.tx_mcs_80;
+ he->max_nss_mcs[CMD_HE_MCS_BW160] = cap->he_mcs_nss_supp.tx_mcs_160;
+ he->max_nss_mcs[CMD_HE_MCS_BW8080] = cap->he_mcs_nss_supp.tx_mcs_80p80;
+}
+
+static void
+mt7915_mcu_bss_hw_amsdu_tlv(struct sk_buff *skb)
+{
+#define TXD_CMP_MAP1 GENMASK(15, 0)
+#define TXD_CMP_MAP2 (GENMASK(31, 0) & ~BIT(23))
+ struct bss_info_hw_amsdu *amsdu;
+ struct tlv *tlv;
+
+ tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_HW_AMSDU, sizeof(*amsdu));
+
+ amsdu = (struct bss_info_hw_amsdu *)tlv;
+ amsdu->cmp_bitmap_0 = cpu_to_le32(TXD_CMP_MAP1);
+ amsdu->cmp_bitmap_1 = cpu_to_le32(TXD_CMP_MAP2);
+ amsdu->trig_thres = cpu_to_le16(2);
+ amsdu->enable = true;
+}
+
+static void
+mt7915_mcu_bss_ext_tlv(struct sk_buff *skb, struct mt7915_vif *mvif)
+{
+/* SIFS 20us + 512 byte beacon transmitted by 1Mbps (3906us) */
+#define BCN_TX_ESTIMATE_TIME (4096 + 20)
+ struct bss_info_ext_bss *ext;
+ int ext_bss_idx, tsf_offset;
+ struct tlv *tlv;
+
+ ext_bss_idx = mvif->omac_idx - EXT_BSSID_START;
+ if (ext_bss_idx < 0)
+ return;
+
+ tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_EXT_BSS, sizeof(*ext));
+
+ ext = (struct bss_info_ext_bss *)tlv;
+ tsf_offset = ext_bss_idx * BCN_TX_ESTIMATE_TIME;
+ ext->mbss_tsf_offset = cpu_to_le32(tsf_offset);
+}
+
+static void
+mt7915_mcu_bss_bmc_tlv(struct sk_buff *skb, struct mt7915_phy *phy)
+{
+ struct bss_info_bmc_rate *bmc;
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+ enum nl80211_band band = chandef->chan->band;
+ struct tlv *tlv;
+
+ tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_BMC_RATE, sizeof(*bmc));
+
+ bmc = (struct bss_info_bmc_rate *)tlv;
+ if (band == NL80211_BAND_2GHZ) {
+ bmc->short_preamble = true;
+ } else {
+ bmc->bc_trans = cpu_to_le16(0x2000);
+ bmc->mc_trans = cpu_to_le16(0x2080);
+ }
+}
+
+static void
+mt7915_mcu_bss_sync_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
+{
+ struct bss_info_sync_mode *sync;
+ struct tlv *tlv;
+
+ tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_SYNC_MODE, sizeof(*sync));
+
+ sync = (struct bss_info_sync_mode *)tlv;
+ sync->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
+ sync->dtim_period = vif->bss_conf.dtim_period;
+ sync->enable = true;
+}
+
+int mt7915_mcu_add_bss_info(struct mt7915_phy *phy,
+ struct ieee80211_vif *vif, int enable)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct sk_buff *skb;
+
+ skb = mt7915_mcu_alloc_sta_req(phy->dev, mvif, NULL,
+ MT7915_BSS_UPDATE_MAX_SIZE);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ /* bss_omac must be first */
+ if (enable)
+ mt7915_mcu_bss_omac_tlv(skb, vif);
+
+ mt7915_mcu_bss_basic_tlv(skb, vif, phy, enable);
+
+ if (enable) {
+ mt7915_mcu_bss_rfch_tlv(skb, vif, phy);
+ mt7915_mcu_bss_bmc_tlv(skb, phy);
+ mt7915_mcu_bss_ra_tlv(skb, vif, phy);
+ mt7915_mcu_bss_hw_amsdu_tlv(skb);
+
+ if (vif->bss_conf.he_support)
+ mt7915_mcu_bss_he_tlv(skb, vif, phy);
+
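+ /* extended BSSIDs get a TSF offset, the rest a sync-mode tlv */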
+ if (mvif->omac_idx > HW_BSSID_MAX)
+ mt7915_mcu_bss_ext_tlv(skb, mvif);
+ else
+ mt7915_mcu_bss_sync_tlv(skb, vif);
+ }
+
+ return __mt76_mcu_skb_send_msg(&phy->dev->mt76, skb,
+ MCU_EXT_CMD_BSS_INFO_UPDATE, true);
+}
+
+/* starec & wtbl */
+static int
+mt7915_mcu_sta_key_tlv(struct sk_buff *skb, struct ieee80211_key_conf *key,
+ enum set_key_cmd cmd)
+{
+ struct sta_rec_sec *sec;
+ struct tlv *tlv;
+ u32 len = sizeof(*sec);
+
+ tlv = mt7915_mcu_add_tlv(skb, STA_REC_KEY_V2, sizeof(*sec));
+
+ sec = (struct sta_rec_sec *)tlv;
+ sec->add = cmd;
+
+ if (cmd == SET_KEY) {
+ struct sec_key *sec_key;
+ u8 cipher;
+
+ cipher = mt7915_mcu_get_cipher(key->cipher);
+ if (cipher == MT_CIPHER_NONE)
+ return -EOPNOTSUPP;
+
+ sec_key = &sec->key[0];
+ sec_key->cipher_len = sizeof(*sec_key);
+ sec_key->key_id = key->keyidx;
+
+ if (cipher == MT_CIPHER_BIP_CMAC_128) {
+ sec_key->cipher_id = MT_CIPHER_AES_CCMP;
+ sec_key->key_len = 16;
+ memcpy(sec_key->key, key->key, 16);
+
+ sec_key = &sec->key[1];
+ sec_key->cipher_id = MT_CIPHER_BIP_CMAC_128;
+ sec_key->cipher_len = sizeof(*sec_key);
+ sec_key->key_len = 16;
+ memcpy(sec_key->key, key->key + 16, 16);
+
+ sec->n_cipher = 2;
+ } else {
+ sec_key->cipher_id = cipher;
+ sec_key->key_len = key->keylen;
+ memcpy(sec_key->key, key->key, key->keylen);
+
+ if (cipher == MT_CIPHER_TKIP) {
+ /* Rx/Tx MIC keys are swapped */
+ memcpy(sec_key->key + 16, key->key + 24, 8);
+ memcpy(sec_key->key + 24, key->key + 16, 8);
+ }
+
+ len -= sizeof(*sec_key);
+ sec->n_cipher = 1;
+ }
+ } else {
+ len -= sizeof(sec->key);
+ sec->n_cipher = 0;
+ }
+ sec->len = cpu_to_le16(len);
+
+ return 0;
+}
+
+int mt7915_mcu_add_key(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct mt7915_sta *msta, struct ieee80211_key_conf *key,
+ enum set_key_cmd cmd)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct sk_buff *skb;
+ int len = sizeof(struct sta_req_hdr) + sizeof(struct sta_rec_sec);
+ int ret;
+
+ skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ ret = mt7915_mcu_sta_key_tlv(skb, key, cmd);
+ if (ret) {
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_STA_REC_UPDATE, true);
+}
+
+static void
+mt7915_mcu_sta_ba_tlv(struct sk_buff *skb,
+ struct ieee80211_ampdu_params *params,
+ bool enable, bool tx)
+{
+ struct sta_rec_ba *ba;
+ struct tlv *tlv;
+
+ tlv = mt7915_mcu_add_tlv(skb, STA_REC_BA, sizeof(*ba));
+
+ ba = (struct sta_rec_ba *)tlv;
+ ba->ba_type = tx ? MT_BA_TYPE_ORIGINATOR : MT_BA_TYPE_RECIPIENT;
+ ba->winsize = cpu_to_le16(params->buf_size);
+ ba->ssn = cpu_to_le16(params->ssn);
+ ba->ba_en = enable << params->tid;
+ ba->amsdu = params->amsdu;
+ ba->tid = params->tid;
+}
+
+static void
+mt7915_mcu_wtbl_ba_tlv(struct sk_buff *skb,
+ struct ieee80211_ampdu_params *params,
+ bool enable, bool tx, void *sta_wtbl,
+ void *wtbl_tlv)
+{
+ struct wtbl_ba *ba;
+ struct tlv *tlv;
+
+ tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_BA, sizeof(*ba),
+ wtbl_tlv, sta_wtbl);
+
+ ba = (struct wtbl_ba *)tlv;
+ ba->tid = params->tid;
+
+ if (tx) {
+ ba->ba_type = MT_BA_TYPE_ORIGINATOR;
+ ba->sn = enable ? cpu_to_le16(params->ssn) : 0;
+ ba->ba_en = enable;
+ } else {
+ memcpy(ba->peer_addr, params->sta->addr, ETH_ALEN);
+ ba->ba_type = MT_BA_TYPE_RECIPIENT;
+ ba->rst_ba_tid = params->tid;
+ ba->rst_ba_sel = RST_BA_MAC_TID_MATCH;
+ ba->rst_ba_sb = 1;
+ }
+
+ if (enable && tx)
+ ba->ba_winsize = cpu_to_le16(params->buf_size);
+}
+
+static int
+mt7915_mcu_sta_ba(struct mt7915_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable, bool tx)
+{
+ struct mt7915_sta *msta = (struct mt7915_sta *)params->sta->drv_priv;
+ struct mt7915_vif *mvif = msta->vif;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct tlv *sta_wtbl;
+ struct sk_buff *skb;
+ int ret;
+
+ if (enable && tx && !params->amsdu)
+ msta->wcid.amsdu = false;
+
+ skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
+ MT7915_STA_UPDATE_MAX_SIZE);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ sta_wtbl = mt7915_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
+
+ wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl,
+ &skb);
+ mt7915_mcu_wtbl_ba_tlv(skb, params, enable, tx, sta_wtbl, wtbl_hdr);
+
+ ret = __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_STA_REC_UPDATE, true);
+ if (ret)
+ return ret;
+
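+ /* starec ba goes out in a second STA_REC_UPDATE message */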
+ skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
+ MT7915_STA_UPDATE_MAX_SIZE);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ mt7915_mcu_sta_ba_tlv(skb, params, enable, tx);
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_STA_REC_UPDATE, true);
+}
+
+int mt7915_mcu_add_tx_ba(struct mt7915_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable)
+{
+ return mt7915_mcu_sta_ba(dev, params, enable, true);
+}
+
+int mt7915_mcu_add_rx_ba(struct mt7915_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable)
+{
+ return mt7915_mcu_sta_ba(dev, params, enable, false);
+}
+
+static void
+mt7915_mcu_wtbl_generic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, void *sta_wtbl,
+ void *wtbl_tlv)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct wtbl_generic *generic;
+ struct wtbl_rx *rx;
+ struct tlv *tlv;
+
+ tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_GENERIC, sizeof(*generic),
+ wtbl_tlv, sta_wtbl);
+
+ generic = (struct wtbl_generic *)tlv;
+
+ if (sta) {
+ if (vif->type == NL80211_IFTYPE_STATION)
+ generic->partial_aid = cpu_to_le16(vif->bss_conf.aid);
+ else
+ generic->partial_aid = cpu_to_le16(sta->aid);
+ memcpy(generic->peer_addr, sta->addr, ETH_ALEN);
+ generic->muar_idx = mvif->omac_idx;
+ generic->qos = sta->wme;
+ } else {
+ /* use BSSID in station mode */
+ if (vif->type == NL80211_IFTYPE_STATION)
+ memcpy(generic->peer_addr, vif->bss_conf.bssid,
+ ETH_ALEN);
+ else
+ eth_broadcast_addr(generic->peer_addr);
+
+ generic->muar_idx = 0xe;
+ }
+
+ tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_RX, sizeof(*rx),
+ wtbl_tlv, sta_wtbl);
+
+ rx = (struct wtbl_rx *)tlv;
+ rx->rca1 = sta ? vif->type != NL80211_IFTYPE_AP : 1;
+ rx->rca2 = 1;
+ rx->rv = 1;
+}
+
+static void
+mt7915_mcu_sta_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable)
+{
+#define EXTRA_INFO_VER BIT(0)
+#define EXTRA_INFO_NEW BIT(1)
+ struct sta_rec_basic *basic;
+ struct tlv *tlv;
+
+ tlv = mt7915_mcu_add_tlv(skb, STA_REC_BASIC, sizeof(*basic));
+
+ basic = (struct sta_rec_basic *)tlv;
+ basic->extra_info = cpu_to_le16(EXTRA_INFO_VER);
+
+ if (enable) {
+ basic->extra_info |= cpu_to_le16(EXTRA_INFO_NEW);
+ basic->conn_state = CONN_STATE_PORT_SECURE;
+ } else {
+ basic->conn_state = CONN_STATE_DISCONNECT;
+ }
+
+ if (!sta) {
+ basic->conn_type = cpu_to_le32(CONNECTION_INFRA_BC);
+ eth_broadcast_addr(basic->peer_addr);
+ return;
+ }
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_AP:
+ basic->conn_type = cpu_to_le32(CONNECTION_INFRA_STA);
+ basic->aid = cpu_to_le16(sta->aid);
+ break;
+ case NL80211_IFTYPE_STATION:
+ basic->conn_type = cpu_to_le32(CONNECTION_INFRA_AP);
+ basic->aid = cpu_to_le16(vif->bss_conf.aid);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ basic->conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC);
+ basic->aid = cpu_to_le16(sta->aid);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ memcpy(basic->peer_addr, sta->addr, ETH_ALEN);
+ basic->qos = sta->wme;
+}
+
+static void
+mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+{
+ struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
+ struct ieee80211_he_cap_elem *elem = &he_cap->he_cap_elem;
+ struct sta_rec_he *he;
+ struct tlv *tlv;
+ u32 cap = 0;
+
+ tlv = mt7915_mcu_add_tlv(skb, STA_REC_HE, sizeof(*he));
+
+ he = (struct sta_rec_he *)tlv;
+
+ if (elem->mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_HTC_HE)
+ cap |= STA_REC_HE_CAP_HTC;
+
+ if (elem->mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR)
+ cap |= STA_REC_HE_CAP_BSR;
+
+ if (elem->mac_cap_info[3] & IEEE80211_HE_MAC_CAP3_OMI_CONTROL)
+ cap |= STA_REC_HE_CAP_OM;
+
+ if (elem->mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU)
+ cap |= STA_REC_HE_CAP_AMSDU_IN_AMPDU;
+
+ if (elem->mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR)
+ cap |= STA_REC_HE_CAP_BQR;
+
+ if (elem->phy_cap_info[0] &
+ (IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G))
+ cap |= STA_REC_HE_CAP_BW20_RU242_SUPPORT;
+
+ if (elem->phy_cap_info[1] &
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)
+ cap |= STA_REC_HE_CAP_LDPC;
+
+ if (elem->phy_cap_info[1] &
+ IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US)
+ cap |= STA_REC_HE_CAP_SU_PPDU_1LTF_8US_GI;
+
+ if (elem->phy_cap_info[2] &
+ IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US)
+ cap |= STA_REC_HE_CAP_NDP_4LTF_3DOT2MS_GI;
+
+ if (elem->phy_cap_info[2] &
+ IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ)
+ cap |= STA_REC_HE_CAP_LE_EQ_80M_TX_STBC;
+
+ if (elem->phy_cap_info[2] &
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
+ cap |= STA_REC_HE_CAP_LE_EQ_80M_RX_STBC;
+
+ if (elem->phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE)
+ cap |= STA_REC_HE_CAP_PARTIAL_BW_EXT_RANGE;
+
+ if (elem->phy_cap_info[7] &
+ IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI)
+ cap |= STA_REC_HE_CAP_SU_MU_PPDU_4LTF_8US_GI;
+
+ if (elem->phy_cap_info[7] &
+ IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ)
+ cap |= STA_REC_HE_CAP_GT_80M_TX_STBC;
+
+ if (elem->phy_cap_info[7] &
+ IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ)
+ cap |= STA_REC_HE_CAP_GT_80M_RX_STBC;
+
+ if (elem->phy_cap_info[8] &
+ IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI)
+ cap |= STA_REC_HE_CAP_ER_SU_PPDU_4LTF_8US_GI;
+
+ if (elem->phy_cap_info[8] &
+ IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI)
+ cap |= STA_REC_HE_CAP_ER_SU_PPDU_1LTF_8US_GI;
+
+ if (elem->phy_cap_info[9] &
+ IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK)
+ cap |= STA_REC_HE_CAP_TRIG_CQI_FK;
+
+ if (elem->phy_cap_info[9] &
+ IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU)
+ cap |= STA_REC_HE_CAP_TX_1024QAM_UNDER_RU242;
+
+ if (elem->phy_cap_info[9] &
+ IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU)
+ cap |= STA_REC_HE_CAP_RX_1024QAM_UNDER_RU242;
+
+ he->he_cap = cpu_to_le32(cap);
+
+ switch (sta->bandwidth) {
+ case IEEE80211_STA_RX_BW_160:
+ if (elem->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
+ he->max_nss_mcs[CMD_HE_MCS_BW8080] =
+ he_cap->he_mcs_nss_supp.rx_mcs_80p80;
+
+ he->max_nss_mcs[CMD_HE_MCS_BW160] =
+ he_cap->he_mcs_nss_supp.rx_mcs_160;
+ fallthrough;
+ default:
+ he->max_nss_mcs[CMD_HE_MCS_BW80] =
+ he_cap->he_mcs_nss_supp.rx_mcs_80;
+ break;
+ }
+
+ he->t_frame_dur =
+ HE_MAC(CAP1_TF_MAC_PAD_DUR_MASK, elem->mac_cap_info[1]);
+ he->max_ampdu_exp =
+ HE_MAC(CAP3_MAX_AMPDU_LEN_EXP_MASK, elem->mac_cap_info[3]);
+
+ he->bw_set =
+ HE_PHY(CAP0_CHANNEL_WIDTH_SET_MASK, elem->phy_cap_info[0]);
+ he->device_class =
+ HE_PHY(CAP1_DEVICE_CLASS_A, elem->phy_cap_info[1]);
+ he->punc_pream_rx =
+ HE_PHY(CAP1_PREAMBLE_PUNC_RX_MASK, elem->phy_cap_info[1]);
+
+ he->dcm_tx_mode =
+ HE_PHY(CAP3_DCM_MAX_CONST_TX_MASK, elem->phy_cap_info[3]);
+ he->dcm_tx_max_nss =
+ HE_PHY(CAP3_DCM_MAX_TX_NSS_2, elem->phy_cap_info[3]);
+ he->dcm_rx_mode =
+ HE_PHY(CAP3_DCM_MAX_CONST_RX_MASK, elem->phy_cap_info[3]);
+ he->dcm_rx_max_nss =
+ HE_PHY(CAP3_DCM_MAX_RX_NSS_2, elem->phy_cap_info[3]);
+ he->dcm_rx_max_nss =
+ HE_PHY(CAP8_DCM_MAX_RU_MASK, elem->phy_cap_info[8]);
+
+ he->pkt_ext = 2;
+}
+
+static void
+mt7915_mcu_sta_uapsd_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
+ struct ieee80211_vif *vif)
+{
+ struct sta_rec_uapsd *uapsd;
+ struct tlv *tlv;
+
+ if (vif->type != NL80211_IFTYPE_AP || !sta->wme)
+ return;
+
+ tlv = mt7915_mcu_add_tlv(skb, STA_REC_APPS, sizeof(*uapsd));
+ uapsd = (struct sta_rec_uapsd *)tlv;
+
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) {
+ uapsd->dac_map |= BIT(3);
+ uapsd->tac_map |= BIT(3);
+ }
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) {
+ uapsd->dac_map |= BIT(2);
+ uapsd->tac_map |= BIT(2);
+ }
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) {
+ uapsd->dac_map |= BIT(1);
+ uapsd->tac_map |= BIT(1);
+ }
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) {
+ uapsd->dac_map |= BIT(0);
+ uapsd->tac_map |= BIT(0);
+ }
+ uapsd->max_sp = sta->max_sp;
+}
+
+static void
+mt7915_mcu_sta_muru_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+{
+ struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
+ struct ieee80211_he_cap_elem *elem = &he_cap->he_cap_elem;
+ struct sta_rec_muru *muru;
+ struct tlv *tlv;
+
+ tlv = mt7915_mcu_add_tlv(skb, STA_REC_MURU, sizeof(*muru));
+
+ muru = (struct sta_rec_muru *)tlv;
+ muru->cfg.ofdma_dl_en = true;
+ muru->cfg.ofdma_ul_en = true;
+ muru->cfg.mimo_dl_en = true;
+ muru->cfg.mimo_ul_en = true;
+
+ muru->ofdma_dl.punc_pream_rx =
+ HE_PHY(CAP1_PREAMBLE_PUNC_RX_MASK, elem->phy_cap_info[1]);
+ muru->ofdma_dl.he_20m_in_40m_2g =
+ HE_PHY(CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G, elem->phy_cap_info[8]);
+ muru->ofdma_dl.he_20m_in_160m =
+ HE_PHY(CAP8_20MHZ_IN_160MHZ_HE_PPDU, elem->phy_cap_info[8]);
+ muru->ofdma_dl.he_80m_in_160m =
+ HE_PHY(CAP8_80MHZ_IN_160MHZ_HE_PPDU, elem->phy_cap_info[8]);
+ muru->ofdma_dl.lt16_sigb = 0;
+ muru->ofdma_dl.rx_su_comp_sigb = 0;
+ muru->ofdma_dl.rx_su_non_comp_sigb = 0;
+
+ muru->ofdma_ul.t_frame_dur =
+ HE_MAC(CAP1_TF_MAC_PAD_DUR_MASK, elem->mac_cap_info[1]);
+ muru->ofdma_ul.mu_cascading =
+ HE_MAC(CAP2_MU_CASCADING, elem->mac_cap_info[2]);
+ muru->ofdma_ul.uo_ra =
+ HE_MAC(CAP3_OFDMA_RA, elem->mac_cap_info[3]);
+ muru->ofdma_ul.he_2x996_tone = 0;
+ muru->ofdma_ul.rx_t_frame_11ac = 0;
+
+ muru->mimo_dl.vht_mu_bfee =
+ !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
+ muru->mimo_dl.partial_bw_dl_mimo =
+ HE_PHY(CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO, elem->phy_cap_info[6]);
+
+ muru->mimo_ul.full_ul_mimo =
+ HE_PHY(CAP2_UL_MU_FULL_MU_MIMO, elem->phy_cap_info[2]);
+ muru->mimo_ul.partial_ul_mimo =
+ HE_PHY(CAP2_UL_MU_PARTIAL_MU_MIMO, elem->phy_cap_info[2]);
+}
+
+static int
+mt7915_mcu_add_mu(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct sk_buff *skb;
+ int len = sizeof(struct sta_req_hdr) + sizeof(struct sta_rec_muru);
+
+ if (!sta->vht_cap.vht_supported && !sta->he_cap.has_he)
+ return 0;
+
+ skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ /* starec muru */
+ mt7915_mcu_sta_muru_tlv(skb, sta);
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_STA_REC_UPDATE, true);
+}
+
+static void
+mt7915_mcu_sta_amsdu_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+{
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct sta_rec_amsdu *amsdu;
+ struct tlv *tlv;
+
+ if (!sta->max_amsdu_len)
+ return;
+
+ tlv = mt7915_mcu_add_tlv(skb, STA_REC_HW_AMSDU, sizeof(*amsdu));
+ amsdu = (struct sta_rec_amsdu *)tlv;
+ amsdu->max_amsdu_num = 8;
+ amsdu->amsdu_en = true;
+ amsdu->max_mpdu_size = sta->max_amsdu_len >=
+ IEEE80211_MAX_MPDU_LEN_VHT_7991;
+ msta->wcid.amsdu = true;
+}
+
+static bool
+mt7915_hw_amsdu_supported(struct ieee80211_vif *vif)
+{
+ switch (vif->type) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_STATION:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void
+mt7915_mcu_sta_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
+ struct ieee80211_sta *sta, struct ieee80211_vif *vif)
+{
+ struct tlv *tlv;
+
+ /* starec ht */
+ if (sta->ht_cap.ht_supported) {
+ struct sta_rec_ht *ht;
+
+ tlv = mt7915_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht));
+ ht = (struct sta_rec_ht *)tlv;
+ ht->ht_cap = cpu_to_le16(sta->ht_cap.cap);
+
+ if (mt7915_hw_amsdu_supported(vif))
+ mt7915_mcu_sta_amsdu_tlv(skb, sta);
+ }
+
+ /* starec vht */
+ if (sta->vht_cap.vht_supported) {
+ struct sta_rec_vht *vht;
+
+ tlv = mt7915_mcu_add_tlv(skb, STA_REC_VHT, sizeof(*vht));
+ vht = (struct sta_rec_vht *)tlv;
+ vht->vht_cap = cpu_to_le32(sta->vht_cap.cap);
+ vht->vht_rx_mcs_map = sta->vht_cap.vht_mcs.rx_mcs_map;
+ vht->vht_tx_mcs_map = sta->vht_cap.vht_mcs.tx_mcs_map;
+ }
+
+ /* starec he */
+ if (sta->he_cap.has_he)
+ mt7915_mcu_sta_he_tlv(skb, sta);
+
+ /* starec uapsd */
+ mt7915_mcu_sta_uapsd_tlv(skb, sta, vif);
+}
+
+static void
+mt7915_mcu_wtbl_smps_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
+ void *sta_wtbl, void *wtbl_tlv)
+{
+ struct wtbl_smps *smps;
+ struct tlv *tlv;
+
+ tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_SMPS, sizeof(*smps),
+ wtbl_tlv, sta_wtbl);
+ smps = (struct wtbl_smps *)tlv;
+
+ if (sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
+ smps->smps = true;
+}
+
+static void
+mt7915_mcu_wtbl_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
+ void *sta_wtbl, void *wtbl_tlv)
+{
+ struct wtbl_ht *ht = NULL;
+ struct tlv *tlv;
+
+ /* wtbl ht */
+ if (sta->ht_cap.ht_supported) {
+ tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_HT, sizeof(*ht),
+ wtbl_tlv, sta_wtbl);
+ ht = (struct wtbl_ht *)tlv;
+ ht->ldpc = sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING;
+ ht->af = sta->ht_cap.ampdu_factor;
+ ht->mm = sta->ht_cap.ampdu_density;
+ ht->ht = true;
+ }
+
+ /* wtbl vht */
+ if (sta->vht_cap.vht_supported) {
+ struct wtbl_vht *vht;
+ u8 af;
+
+ tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_VHT, sizeof(*vht),
+ wtbl_tlv, sta_wtbl);
+ vht = (struct wtbl_vht *)tlv;
+ vht->ldpc = sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC;
+ vht->vht = true;
+
+ af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK,
+ sta->vht_cap.cap);
+ if (ht)
+ ht->af = max_t(u8, ht->af, af);
+ }
+
+ mt7915_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_tlv);
+}
+
+int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct tlv *sta_wtbl;
+ struct sk_buff *skb;
+
+ skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
+ MT7915_STA_UPDATE_MAX_SIZE);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ sta_wtbl = mt7915_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
+
+ wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl,
+ &skb);
+ mt7915_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_hdr);
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_STA_REC_UPDATE, true);
+}
+
+static void
+mt7915_mcu_sta_sounding_rate(struct sta_rec_bf *bf)
+{
+ bf->sounding_phy = MT_PHY_TYPE_OFDM;
+ bf->ndp_rate = 0; /* mcs0 */
+ bf->ndpa_rate = MT7915_CFEND_RATE_DEFAULT; /* ofdm 24m */
+ bf->rept_poll_rate = MT7915_CFEND_RATE_DEFAULT; /* ofdm 24m */
+}
+
+static void
+mt7915_mcu_sta_bfer_ht(struct ieee80211_sta *sta, struct sta_rec_bf *bf)
+{
+ struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs;
+ u8 n = 0;
+
+ bf->tx_mode = MT_PHY_TYPE_HT;
+ bf->bf_cap |= MT_IBF;
+
+ if (mcs->tx_params & IEEE80211_HT_MCS_TX_RX_DIFF &&
+ (mcs->tx_params & IEEE80211_HT_MCS_TX_DEFINED))
+ n = FIELD_GET(IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK,
+ mcs->tx_params);
+ else if (mcs->rx_mask[3])
+ n = 3;
+ else if (mcs->rx_mask[2])
+ n = 2;
+ else if (mcs->rx_mask[1])
+ n = 1;
+
+ bf->nc = min_t(u8, bf->nr, n);
+ bf->ibf_ncol = bf->nc;
+
+ if (sta->bandwidth <= IEEE80211_STA_RX_BW_40 && !bf->nc)
+ bf->ibf_timeout = 0x48;
+}
+
+static void
+mt7915_mcu_sta_bfer_vht(struct ieee80211_sta *sta, struct mt7915_phy *phy,
+ struct sta_rec_bf *bf)
+{
+ struct ieee80211_sta_vht_cap *pc = &sta->vht_cap;
+ struct ieee80211_sta_vht_cap *vc = &phy->mt76->sband_5g.sband.vht_cap;
+ u8 bfee_nr, bfer_nr, n, tx_ant = hweight8(phy->chainmask) - 1;
+ u16 mcs_map;
+
+ bf->tx_mode = MT_PHY_TYPE_VHT;
+ bf->bf_cap |= MT_EBF;
+
+ mt7915_mcu_sta_sounding_rate(bf);
+
+ bfee_nr = FIELD_GET(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK,
+ pc->cap);
+ bfer_nr = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK,
+ vc->cap);
+ mcs_map = le16_to_cpu(pc->vht_mcs.rx_mcs_map);
+
+ n = min_t(u8, bfer_nr, bfee_nr);
+ bf->nr = min_t(u8, n, tx_ant);
+ n = mt7915_mcu_get_sta_nss(mcs_map);
+
+ bf->nc = min_t(u8, n, bf->nr);
+ bf->ibf_ncol = bf->nc;
+
+ /* force nr from 4 to 2 */
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
+ bf->nr = 1;
+}
+
+static void
+mt7915_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
+ struct mt7915_phy *phy, struct sta_rec_bf *bf)
+{
+ struct ieee80211_sta_he_cap *pc = &sta->he_cap;
+ struct ieee80211_he_cap_elem *pe = &pc->he_cap_elem;
+ const struct ieee80211_he_cap_elem *ve;
+ const struct ieee80211_sta_he_cap *vc;
+ u8 bfee_nr, bfer_nr, nss_mcs;
+ u16 mcs_map;
+
+ vc = mt7915_get_he_phy_cap(phy, vif);
+ ve = &vc->he_cap_elem;
+
+ bf->tx_mode = MT_PHY_TYPE_HE_SU;
+ bf->bf_cap |= MT_EBF;
+
+ mt7915_mcu_sta_sounding_rate(bf);
+
+ bf->trigger_su = HE_PHY(CAP6_TRIG_SU_BEAMFORMER_FB,
+ pe->phy_cap_info[6]);
+ bf->trigger_mu = HE_PHY(CAP6_TRIG_MU_BEAMFORMER_FB,
+ pe->phy_cap_info[6]);
+ bfer_nr = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
+ ve->phy_cap_info[5]);
+ bfee_nr = HE_PHY(CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_MASK,
+ pe->phy_cap_info[4]);
+
+ mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.tx_mcs_80);
+ nss_mcs = mt7915_mcu_get_sta_nss(mcs_map);
+
+ bf->nr = min_t(u8, bfer_nr, bfee_nr);
+ bf->nc = min_t(u8, nss_mcs, bf->nr);
+ bf->ibf_ncol = bf->nc;
+
+ if (sta->bandwidth != IEEE80211_STA_RX_BW_160)
+ return;
+
+ /* handle the 160 MHz and 80+80 MHz cases */
+ if (pe->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G) {
+ mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.rx_mcs_160);
+ nss_mcs = mt7915_mcu_get_sta_nss(mcs_map);
+
+ bf->nc_bw160 = nss_mcs;
+ }
+
+ if (pe->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) {
+ mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.rx_mcs_80p80);
+ nss_mcs = mt7915_mcu_get_sta_nss(mcs_map);
+
+ if (bf->nc_bw160)
+ bf->nc_bw160 = min_t(u8, bf->nc_bw160, nss_mcs);
+ else
+ bf->nc_bw160 = nss_mcs;
+ }
+
+ bfer_nr = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK,
+ ve->phy_cap_info[5]);
+ bfee_nr = HE_PHY(CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_MASK,
+ pe->phy_cap_info[4]);
+
+ bf->nr_bw160 = min_t(int, bfer_nr, bfee_nr);
+}
+
+static void
+mt7915_mcu_sta_bfer_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
+ struct ieee80211_vif *vif, struct mt7915_phy *phy,
+ bool enable)
+{
+ struct sta_rec_bf *bf;
+ struct tlv *tlv;
+ int tx_ant = hweight8(phy->chainmask) - 1;
+ const u8 matrix[4][4] = {
+ {0, 0, 0, 0}, /* 1x1, 1x2, 1x3, 1x4 */
+ {1, 1, 0, 0}, /* 2x1, 2x2, 2x3, 2x4 */
+ {2, 4, 4, 0}, /* 3x1, 3x2, 3x3, 3x4 */
+ {3, 5, 6, 0} /* 4x1, 4x2, 4x3, 4x4 */
+ };
+
+#define MT_BFER_FREE cpu_to_le16(GENMASK(15, 0))
+
+ tlv = mt7915_mcu_add_tlv(skb, STA_REC_BF, sizeof(*bf));
+ bf = (struct sta_rec_bf *)tlv;
+
+ if (!enable) {
+ bf->pfmu = MT_BFER_FREE;
+ return;
+ }
+
+ bf->bw = sta->bandwidth;
+ bf->ibf_dbw = sta->bandwidth;
+ bf->ibf_nrow = tx_ant;
+ bf->ibf_timeout = 0x18;
+
+ if (sta->he_cap.has_he)
+ mt7915_mcu_sta_bfer_he(sta, vif, phy, bf);
+ else if (sta->vht_cap.vht_supported)
+ mt7915_mcu_sta_bfer_vht(sta, phy, bf);
+ else if (sta->ht_cap.ht_supported)
+ mt7915_mcu_sta_bfer_ht(sta, bf);
+
+ if (bf->bf_cap & MT_EBF && bf->nr != tx_ant)
+ bf->mem_20m = matrix[tx_ant][bf->nc];
+ else
+ bf->mem_20m = matrix[bf->nr][bf->nc];
+
+ switch (sta->bandwidth) {
+ case IEEE80211_STA_RX_BW_160:
+ case IEEE80211_STA_RX_BW_80:
+ bf->mem_total = bf->mem_20m * 2;
+ break;
+ case IEEE80211_STA_RX_BW_40:
+ bf->mem_total = bf->mem_20m;
+ break;
+ case IEEE80211_STA_RX_BW_20:
+ default:
+ break;
+ }
+}
+
+static void
+mt7915_mcu_sta_bfee_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
+ struct mt7915_phy *phy)
+{
+ struct sta_rec_bfee *bfee;
+ struct tlv *tlv;
+ int tx_ant = hweight8(phy->chainmask) - 1;
+ u8 nr = 0;
+
+ tlv = mt7915_mcu_add_tlv(skb, STA_REC_BFEE, sizeof(*bfee));
+ bfee = (struct sta_rec_bfee *)tlv;
+
+ if (sta->he_cap.has_he) {
+ struct ieee80211_he_cap_elem *pe = &sta->he_cap.he_cap_elem;
+
+ nr = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
+ pe->phy_cap_info[5]);
+ } else if (sta->vht_cap.vht_supported) {
+ struct ieee80211_sta_vht_cap *pc = &sta->vht_cap;
+
+ nr = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK,
+ pc->cap);
+ }
+
+ /* reply with identity matrix to avoid 2x2 BF negative gain */
+ if (nr == 1 && tx_ant == 2)
+ bfee->fb_identity_matrix = true;
+}
+
+static u8
+mt7915_mcu_sta_txbf_type(struct mt7915_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ u8 type = 0;
+
+ if (vif->type != NL80211_IFTYPE_STATION &&
+ vif->type != NL80211_IFTYPE_AP)
+ return 0;
+
+ if (sta->he_cap.has_he) {
+ struct ieee80211_he_cap_elem *pe;
+ const struct ieee80211_he_cap_elem *ve;
+ const struct ieee80211_sta_he_cap *vc;
+
+ pe = &sta->he_cap.he_cap_elem;
+ vc = mt7915_get_he_phy_cap(phy, vif);
+ ve = &vc->he_cap_elem;
+
+ if ((HE_PHY(CAP3_SU_BEAMFORMER, pe->phy_cap_info[3]) ||
+ HE_PHY(CAP4_MU_BEAMFORMER, pe->phy_cap_info[4])) &&
+ HE_PHY(CAP4_SU_BEAMFORMEE, ve->phy_cap_info[4]))
+ type |= MT_STA_BFEE;
+
+ if ((HE_PHY(CAP3_SU_BEAMFORMER, ve->phy_cap_info[3]) ||
+ HE_PHY(CAP4_MU_BEAMFORMER, ve->phy_cap_info[4])) &&
+ HE_PHY(CAP4_SU_BEAMFORMEE, pe->phy_cap_info[4]))
+ type |= MT_STA_BFER;
+ } else if (sta->vht_cap.vht_supported) {
+ struct ieee80211_sta_vht_cap *pc;
+ struct ieee80211_sta_vht_cap *vc;
+ u32 cr, ce;
+
+ pc = &sta->vht_cap;
+ vc = &phy->mt76->sband_5g.sband.vht_cap;
+ cr = IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE;
+ ce = IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
+
+ if ((pc->cap & cr) && (vc->cap & ce))
+ type |= MT_STA_BFEE;
+
+ if ((vc->cap & cr) && (pc->cap & ce))
+ type |= MT_STA_BFER;
+ } else if (sta->ht_cap.ht_supported) {
+ /* TODO: iBF */
+ }
+
+ return type;
+}
+
+static int
+mt7915_mcu_add_txbf(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct mt7915_phy *phy;
+ struct sk_buff *skb;
+ int r, len;
+ u8 type;
+
+ phy = mvif->band_idx ? mt7915_ext_phy(dev) : &dev->phy;
+
+ type = mt7915_mcu_sta_txbf_type(phy, vif, sta);
+
+ /* must keep each tag independent */
+
+ /* starec bf */
+ if (type & MT_STA_BFER) {
+ len = sizeof(struct sta_req_hdr) + sizeof(struct sta_rec_bf);
+
+ skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ mt7915_mcu_sta_bfer_tlv(skb, sta, vif, phy, enable);
+
+ r = __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_STA_REC_UPDATE, true);
+ if (r)
+ return r;
+ }
+
+ /* starec bfee */
+ if (type & MT_STA_BFEE) {
+ len = sizeof(struct sta_req_hdr) + sizeof(struct sta_rec_bfee);
+
+ skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ mt7915_mcu_sta_bfee_tlv(skb, sta, phy);
+
+ r = __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_STA_REC_UPDATE, true);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+static void
+mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct cfg80211_chan_def *chandef = &dev->mphy.chandef;
+ struct sta_rec_ra *ra;
+ struct tlv *tlv;
+ enum nl80211_band band = chandef->chan->band;
+ u32 supp_rate = sta->supp_rates[band];
+ int n_rates = hweight32(supp_rate);
+ u32 cap = sta->wme ? STA_CAP_WMM : 0;
+ u8 i, nss = sta->rx_nss, mcs = 0;
+
+ tlv = mt7915_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra));
+
+ ra = (struct sta_rec_ra *)tlv;
+ ra->valid = true;
+ ra->auto_rate = true;
+ ra->phy_mode = mt7915_get_phy_mode(dev, vif, band, sta);
+ ra->channel = chandef->chan->hw_value;
+ ra->bw = sta->bandwidth;
+ ra->rate_len = n_rates;
+ ra->phy.bw = sta->bandwidth;
+
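+ /* legacy supported rates: CCK (2 GHz only) and OFDM */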
+ if (n_rates) {
+ if (band == NL80211_BAND_2GHZ) {
+ ra->supp_mode = MODE_CCK;
+ ra->supp_cck_rate = supp_rate & GENMASK(3, 0);
+ ra->phy.type = MT_PHY_TYPE_CCK;
+
+ if (n_rates > 4) {
+ ra->supp_mode |= MODE_OFDM;
+ ra->supp_ofdm_rate = supp_rate >> 4;
+ ra->phy.type = MT_PHY_TYPE_OFDM;
+ }
+ } else {
+ ra->supp_mode = MODE_OFDM;
+ ra->supp_ofdm_rate = supp_rate;
+ ra->phy.type = MT_PHY_TYPE_OFDM;
+ }
+ }
+
+ if (sta->ht_cap.ht_supported) {
+ for (i = 0; i < nss; i++)
+ ra->ht_mcs[i] = sta->ht_cap.mcs.rx_mask[i];
+
+ ra->supp_ht_mcs = *(__le32 *)ra->ht_mcs;
+ ra->supp_mode |= MODE_HT;
+ mcs = hweight32(le32_to_cpu(ra->supp_ht_mcs)) - 1;
+ ra->af = sta->ht_cap.ampdu_factor;
+ ra->ht_gf = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD);
+
+ cap |= STA_CAP_HT;
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
+ cap |= STA_CAP_SGI_20;
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
+ cap |= STA_CAP_SGI_40;
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_TX_STBC)
+ cap |= STA_CAP_TX_STBC;
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
+ cap |= STA_CAP_RX_STBC;
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
+ cap |= STA_CAP_LDPC;
+ }
+
+ if (sta->vht_cap.vht_supported) {
+ u16 mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.rx_mcs_map);
+ u16 vht_mcs;
+ u8 af, mcs_prev;
+
+ af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK,
+ sta->vht_cap.cap);
+ ra->af = max_t(u8, ra->af, af);
+
+ cap |= STA_CAP_VHT;
+ if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
+ cap |= STA_CAP_VHT_SGI_80;
+ if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160)
+ cap |= STA_CAP_VHT_SGI_160;
+ if (sta->vht_cap.cap & IEEE80211_VHT_CAP_TXSTBC)
+ cap |= STA_CAP_VHT_TX_STBC;
+ if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_1)
+ cap |= STA_CAP_VHT_RX_STBC;
+ if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)
+ cap |= STA_CAP_VHT_LDPC;
+
+ ra->supp_mode |= MODE_VHT;
+ for (mcs = 0, i = 0; i < nss; i++, mcs_map >>= 2) {
+ switch (mcs_map & 0x3) {
+ case IEEE80211_VHT_MCS_SUPPORT_0_9:
+ vht_mcs = GENMASK(9, 0);
+ break;
+ case IEEE80211_VHT_MCS_SUPPORT_0_8:
+ vht_mcs = GENMASK(8, 0);
+ break;
+ case IEEE80211_VHT_MCS_SUPPORT_0_7:
+ vht_mcs = GENMASK(7, 0);
+ break;
+ default:
+ vht_mcs = 0;
+ }
+
+ ra->supp_vht_mcs[i] = cpu_to_le16(vht_mcs);
+
+ mcs_prev = hweight16(vht_mcs) - 1;
+ if (mcs_prev > mcs)
+ mcs = mcs_prev;
+
+ /* only support 2ss on 160MHz */
+ if (i > 1 && (ra->bw == CMD_CBW_160MHZ ||
+ ra->bw == CMD_CBW_8080MHZ))
+ break;
+ }
+ }
+
+ if (sta->he_cap.has_he) {
+ ra->supp_mode |= MODE_HE;
+ cap |= STA_CAP_HE;
+ }
+
+ ra->sta_status = cpu_to_le32(cap);
+
+ switch (BIT(fls(ra->supp_mode) - 1)) {
+ case MODE_VHT:
+ ra->phy.type = MT_PHY_TYPE_VHT;
+ ra->phy.mcs = mcs;
+ ra->phy.nss = nss;
+ ra->phy.stbc = !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_TXSTBC);
+ ra->phy.ldpc = !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
+ ra->phy.sgi =
+ !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80);
+ break;
+ case MODE_HT:
+ ra->phy.type = MT_PHY_TYPE_HT;
+ ra->phy.mcs = mcs;
+ ra->phy.ldpc = sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING;
+ ra->phy.stbc = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_TX_STBC);
+ ra->phy.sgi = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
+ break;
+ default:
+ break;
+ }
+}
+
+int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct sk_buff *skb;
+ int len = sizeof(struct sta_req_hdr) + sizeof(struct sta_rec_ra);
+
+ skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ mt7915_mcu_sta_rate_ctrl_tlv(skb, dev, vif, sta);
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_STA_REC_UPDATE, true);
+}
+
+static int
+mt7915_mcu_add_group(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+#define MT_STA_BSS_GROUP 1
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct {
+ __le32 action;
+ u8 wlan_idx_lo;
+ u8 status;
+ u8 wlan_idx_hi;
+ u8 rsv0[5];
+ __le32 val;
+ u8 rsv1[8];
+ } __packed req = {
+ .action = cpu_to_le32(MT_STA_BSS_GROUP),
+ .wlan_idx_lo = to_wcid_lo(msta->wcid.idx),
+ .wlan_idx_hi = to_wcid_hi(msta->wcid.idx),
+ .val = cpu_to_le32(mvif->idx),
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_DRR_CTRL,
+ &req, sizeof(req), true);
+}
+
+int mt7915_mcu_add_sta_adv(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable)
+{
+ int ret;
+
+ if (!sta)
+ return 0;
+
+ /* must keep the order */
+ ret = mt7915_mcu_add_group(dev, vif, sta);
+ if (ret)
+ return ret;
+
+ ret = mt7915_mcu_add_txbf(dev, vif, sta, enable);
+ if (ret)
+ return ret;
+
+ ret = mt7915_mcu_add_mu(dev, vif, sta);
+ if (ret)
+ return ret;
+
+ if (enable)
+ return mt7915_mcu_add_rate_ctrl(dev, vif, sta);
+
+ return 0;
+}
+
+int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct mt7915_sta *msta;
+ struct tlv *sta_wtbl;
+ struct sk_buff *skb;
+
+ msta = sta ? (struct mt7915_sta *)sta->drv_priv : &mvif->sta;
+
+ skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
+ MT7915_STA_UPDATE_MAX_SIZE);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ mt7915_mcu_sta_basic_tlv(skb, vif, sta, enable);
+ if (enable && sta)
+ mt7915_mcu_sta_tlv(dev, skb, sta, vif);
+
+ sta_wtbl = mt7915_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
+
+ wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_RESET_AND_SET,
+ sta_wtbl, &skb);
+ if (enable) {
+ mt7915_mcu_wtbl_generic_tlv(skb, vif, sta, sta_wtbl, wtbl_hdr);
+ if (sta)
+ mt7915_mcu_wtbl_ht_tlv(skb, sta, sta_wtbl, wtbl_hdr);
+ }
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_STA_REC_UPDATE, true);
+}
+
+int mt7915_mcu_set_fixed_rate(struct mt7915_dev *dev,
+ struct ieee80211_sta *sta, u32 rate)
+{
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct mt7915_vif *mvif = msta->vif;
+ struct sta_rec_ra_fixed *ra;
+ struct sk_buff *skb;
+ struct tlv *tlv;
+ int len = sizeof(struct sta_req_hdr) + sizeof(*ra);
+
+ skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ tlv = mt7915_mcu_add_tlv(skb, STA_REC_RA_UPDATE, sizeof(*ra));
+ ra = (struct sta_rec_ra_fixed *)tlv;
+
+ if (!rate) {
+ ra->field = cpu_to_le32(RATE_PARAM_AUTO);
+ goto out;
+ }
+
+ ra->field = cpu_to_le32(RATE_PARAM_FIXED);
+
+ ra->phy.type = FIELD_GET(RATE_CFG_PHY_TYPE, rate);
+ ra->phy.bw = FIELD_GET(RATE_CFG_BW, rate);
+ ra->phy.nss = FIELD_GET(RATE_CFG_NSS, rate);
+ ra->phy.mcs = FIELD_GET(RATE_CFG_MCS, rate);
+ ra->phy.stbc = FIELD_GET(RATE_CFG_STBC, rate);
+
+ if (ra->phy.bw)
+ ra->phy.ldpc = 7;
+ else
+ ra->phy.ldpc = FIELD_GET(RATE_CFG_LDPC, rate) * 7;
+
+ /* HT/VHT - SGI: 1, LGI: 0; HE - SGI: 0, MGI: 1, LGI: 2 */
+ if (ra->phy.type > MT_PHY_TYPE_VHT)
+ ra->phy.sgi = ra->phy.mcs * 85;
+ else
+ ra->phy.sgi = ra->phy.mcs * 15;
+
+out:
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_STA_REC_UPDATE, true);
+}
+
+int mt7915_mcu_add_dev_info(struct mt7915_dev *dev,
+ struct ieee80211_vif *vif, bool enable)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct {
+ struct req_hdr {
+ u8 omac_idx;
+ u8 dbdc_idx;
+ __le16 tlv_num;
+ u8 is_tlv_append;
+ u8 rsv[3];
+ } __packed hdr;
+ struct req_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 active;
+ u8 dbdc_idx;
+ u8 omac_addr[ETH_ALEN];
+ } __packed tlv;
+ } data = {
+ .hdr = {
+ .omac_idx = mvif->omac_idx,
+ .dbdc_idx = mvif->band_idx,
+ .tlv_num = cpu_to_le16(1),
+ .is_tlv_append = 1,
+ },
+ .tlv = {
+ .tag = cpu_to_le16(DEV_INFO_ACTIVE),
+ .len = cpu_to_le16(sizeof(struct req_tlv)),
+ .active = enable,
+ .dbdc_idx = mvif->band_idx,
+ },
+ };
+
+ memcpy(data.tlv.omac_addr, vif->addr, ETH_ALEN);
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_DEV_INFO_UPDATE,
+ &data, sizeof(data), true);
+}
+
+static void
+mt7915_mcu_beacon_csa(struct sk_buff *rskb, struct sk_buff *skb,
+ struct bss_info_bcn *bcn,
+ struct ieee80211_mutable_offsets *offs)
+{
+ if (offs->cntdwn_counter_offs[0]) {
+ struct tlv *tlv;
+ struct bss_info_bcn_csa *csa;
+
+ tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_CSA,
+ sizeof(*csa), &bcn->sub_ntlv,
+ &bcn->len);
+ csa = (struct bss_info_bcn_csa *)tlv;
+ csa->cnt = skb->data[offs->cntdwn_counter_offs[0]];
+ }
+}
+
+static void
+mt7915_mcu_beacon_cont(struct mt7915_dev *dev, struct sk_buff *rskb,
+ struct sk_buff *skb, struct bss_info_bcn *bcn,
+ struct ieee80211_mutable_offsets *offs)
+{
+ struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+ struct bss_info_bcn_cont *cont;
+ struct tlv *tlv;
+ u8 *buf;
+ int len = sizeof(*cont) + MT_TXD_SIZE + skb->len;
+
+ tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_CONTENT,
+ len, &bcn->sub_ntlv, &bcn->len);
+
+ cont = (struct bss_info_bcn_cont *)tlv;
+ cont->pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
+ cont->tim_ofs = cpu_to_le16(offs->tim_offset);
+
+ if (offs->cntdwn_counter_offs[0])
+ cont->csa_ofs = cpu_to_le16(offs->cntdwn_counter_offs[0] - 4);
+
+ buf = (u8 *)tlv + sizeof(*cont);
+ mt7915_mac_write_txwi(dev, (__le32 *)buf, skb, wcid, NULL,
+ true);
+ memcpy(buf + MT_TXD_SIZE, skb->data, skb->len);
+}
+
+int mt7915_mcu_add_beacon(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int en)
+{
+#define MAX_BEACON_SIZE 512
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct ieee80211_mutable_offsets offs;
+ struct ieee80211_tx_info *info;
+ struct sk_buff *skb, *rskb;
+ struct tlv *tlv;
+ struct bss_info_bcn *bcn;
+ int len = MT7915_BEACON_UPDATE_SIZE + MAX_BEACON_SIZE;
+
+ skb = ieee80211_beacon_get_template(hw, vif, &offs);
+ if (!skb)
+ return -EINVAL;
+
+ if (skb->len > MAX_BEACON_SIZE - MT_TXD_SIZE) {
+ dev_err(dev->mt76.dev, "Bcn size limit exceeded\n");
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ rskb = mt7915_mcu_alloc_sta_req(dev, mvif, NULL, len);
+ if (IS_ERR(rskb)) {
+ dev_kfree_skb(skb);
+ return PTR_ERR(rskb);
+ }
+
+ tlv = mt7915_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn));
+ bcn = (struct bss_info_bcn *)tlv;
+ bcn->enable = en;
+
+ if (mvif->band_idx) {
+ info = IEEE80211_SKB_CB(skb);
+ info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
+ }
+
+ /* TODO: subtag - bss color count & 11v MBSSID */
+ mt7915_mcu_beacon_csa(rskb, skb, bcn, &offs);
+ mt7915_mcu_beacon_cont(dev, rskb, skb, bcn, &offs);
+ dev_kfree_skb(skb);
+
+ return __mt76_mcu_skb_send_msg(&phy->dev->mt76, rskb,
+ MCU_EXT_CMD_BSS_INFO_UPDATE, true);
+}
+
+static int mt7915_mcu_send_firmware(struct mt7915_dev *dev, const void *data,
+ int len)
+{
+ int ret = 0, cur_len;
+
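+ /* push the image in chunks that fit within a single MCU TXD */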
+ while (len > 0) {
+ cur_len = min_t(int, 4096 - sizeof(struct mt7915_mcu_txd),
+ len);
+
+ ret = __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_FW_SCATTER,
+ data, cur_len, false);
+ if (ret)
+ break;
+
+ data += cur_len;
+ len -= cur_len;
+ mt76_queue_tx_cleanup(dev, MT_TXQ_FWDL, false);
+ }
+
+ return ret;
+}
+
+static int mt7915_mcu_start_firmware(struct mt7915_dev *dev, u32 addr,
+ u32 option)
+{
+ struct {
+ __le32 option;
+ __le32 addr;
+ } req = {
+ .option = cpu_to_le32(option),
+ .addr = cpu_to_le32(addr),
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_FW_START_REQ,
+ &req, sizeof(req), true);
+}
+
+static int mt7915_mcu_restart(struct mt76_dev *dev)
+{
+ struct {
+ u8 power_mode;
+ u8 rsv[3];
+ } req = {
+ .power_mode = 1,
+ };
+
+ return __mt76_mcu_send_msg(dev, -MCU_CMD_NIC_POWER_CTRL, &req,
+ sizeof(req), false);
+}
+
+static int mt7915_mcu_patch_sem_ctrl(struct mt7915_dev *dev, bool get)
+{
+ struct {
+ __le32 op;
+ } req = {
+ .op = cpu_to_le32(get ? PATCH_SEM_GET : PATCH_SEM_RELEASE),
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_PATCH_SEM_CONTROL,
+ &req, sizeof(req), true);
+}
+
+static int mt7915_mcu_start_patch(struct mt7915_dev *dev)
+{
+ struct {
+ u8 check_crc;
+ u8 reserved[3];
+ } req = {
+ .check_crc = 0,
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_PATCH_FINISH_REQ,
+ &req, sizeof(req), true);
+}
+
+static int mt7915_driver_own(struct mt7915_dev *dev)
+{
+ u32 reg = mt7915_reg_map_l1(dev, MT_TOP_LPCR_HOST_BAND0);
+
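+ /* claim driver ownership and wait for the firmware to drop FW_OWN */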
+ mt76_wr(dev, reg, MT_TOP_LPCR_HOST_DRV_OWN);
+ if (!mt76_poll_msec(dev, reg, MT_TOP_LPCR_HOST_FW_OWN,
+ 0, 500)) {
+ dev_err(dev->mt76.dev, "Timeout for driver own\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int mt7915_mcu_init_download(struct mt7915_dev *dev, u32 addr,
+ u32 len, u32 mode)
+{
+ struct {
+ __le32 addr;
+ __le32 len;
+ __le32 mode;
+ } req = {
+ .addr = cpu_to_le32(addr),
+ .len = cpu_to_le32(len),
+ .mode = cpu_to_le32(mode),
+ };
+ int attr;
+
+ if (req.addr == cpu_to_le32(MCU_PATCH_ADDRESS))
+ attr = -MCU_CMD_PATCH_START_REQ;
+ else
+ attr = -MCU_CMD_TARGET_ADDRESS_LEN_REQ;
+
+ return __mt76_mcu_send_msg(&dev->mt76, attr, &req, sizeof(req), true);
+}
+
+static int mt7915_load_patch(struct mt7915_dev *dev)
+{
+ const struct mt7915_patch_hdr *hdr;
+ const struct firmware *fw = NULL;
+ int i, ret, sem;
+
+ sem = mt7915_mcu_patch_sem_ctrl(dev, 1);
+ switch (sem) {
+ case PATCH_IS_DL:
+ return 0;
+ case PATCH_NOT_DL_SEM_SUCCESS:
+ break;
+ default:
+ dev_err(dev->mt76.dev, "Failed to get patch semaphore\n");
+ return -EAGAIN;
+ }
+
+ ret = request_firmware(&fw, MT7915_ROM_PATCH, dev->mt76.dev);
+ if (ret)
+ goto out;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+ dev_err(dev->mt76.dev, "Invalid firmware\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ hdr = (const struct mt7915_patch_hdr *)(fw->data);
+
+ dev_info(dev->mt76.dev, "HW/SW Version: 0x%x, Build Time: %.16s\n",
+ be32_to_cpu(hdr->hw_sw_ver), hdr->build_date);
+
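+ /* download each patch region described right after the header */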
+ for (i = 0; i < be32_to_cpu(hdr->desc.n_region); i++) {
+ struct mt7915_patch_sec *sec;
+ const u8 *dl;
+ u32 len, addr;
+
+ sec = (struct mt7915_patch_sec *)(fw->data + sizeof(*hdr) +
+ i * sizeof(*sec));
+ if ((be32_to_cpu(sec->type) & PATCH_SEC_TYPE_MASK) !=
+ PATCH_SEC_TYPE_INFO) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ addr = be32_to_cpu(sec->info.addr);
+ len = be32_to_cpu(sec->info.len);
+ dl = fw->data + be32_to_cpu(sec->offs);
+
+ ret = mt7915_mcu_init_download(dev, addr, len,
+ DL_MODE_NEED_RSP);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Download request failed\n");
+ goto out;
+ }
+
+ ret = mt7915_mcu_send_firmware(dev, dl, len);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Failed to send patch\n");
+ goto out;
+ }
+ }
+
+ ret = mt7915_mcu_start_patch(dev);
+ if (ret)
+ dev_err(dev->mt76.dev, "Failed to start patch\n");
+
+out:
+ sem = mt7915_mcu_patch_sem_ctrl(dev, 0);
+ switch (sem) {
+ case PATCH_REL_SEM_SUCCESS:
+ break;
+ default:
+ ret = -EAGAIN;
+ dev_err(dev->mt76.dev, "Failed to release patch semaphore\n");
+ break;
+ }
+ release_firmware(fw);
+
+ return ret;
+}
+
+static u32 mt7915_mcu_gen_dl_mode(u8 feature_set, bool is_wa)
+{
+ u32 ret = 0;
+
+ ret |= (feature_set & FW_FEATURE_SET_ENCRYPT) ?
+ (DL_MODE_ENCRYPT | DL_MODE_RESET_SEC_IV) : 0;
+ ret |= FIELD_PREP(DL_MODE_KEY_IDX,
+ FIELD_GET(FW_FEATURE_SET_KEY_IDX, feature_set));
+ ret |= DL_MODE_NEED_RSP;
+ ret |= is_wa ? DL_MODE_WORKING_PDA_CR4 : 0;
+
+ return ret;
+}
+
+static int
+mt7915_mcu_send_ram_firmware(struct mt7915_dev *dev,
+ const struct mt7915_fw_trailer *hdr,
+ const u8 *data, bool is_wa)
+{
+ int i, offset = 0;
+ u32 override = 0, option = 0;
+
+ for (i = 0; i < hdr->n_region; i++) {
+ const struct mt7915_fw_region *region;
+ int err;
+ u32 len, addr, mode;
+
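+ /* region descriptors are stored immediately before the trailer */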
+ region = (const struct mt7915_fw_region *)((const u8 *)hdr -
+ (hdr->n_region - i) * sizeof(*region));
+ mode = mt7915_mcu_gen_dl_mode(region->feature_set, is_wa);
+ len = le32_to_cpu(region->len);
+ addr = le32_to_cpu(region->addr);
+
+ if (region->feature_set & FW_FEATURE_OVERRIDE_ADDR)
+ override = addr;
+
+ err = mt7915_mcu_init_download(dev, addr, len, mode);
+ if (err) {
+ dev_err(dev->mt76.dev, "Download request failed\n");
+ return err;
+ }
+
+ err = mt7915_mcu_send_firmware(dev, data + offset, len);
+ if (err) {
+ dev_err(dev->mt76.dev, "Failed to send firmware.\n");
+ return err;
+ }
+
+ offset += len;
+ }
+
+ if (override)
+ option |= FW_START_OVERRIDE;
+
+ if (is_wa)
+ option |= FW_START_WORKING_PDA_CR4;
+
+ return mt7915_mcu_start_firmware(dev, override, option);
+}
+
+static int mt7915_load_ram(struct mt7915_dev *dev)
+{
+ const struct mt7915_fw_trailer *hdr;
+ const struct firmware *fw;
+ int ret;
+
+ ret = request_firmware(&fw, MT7915_FIRMWARE_WM, dev->mt76.dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+ dev_err(dev->mt76.dev, "Invalid firmware\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ hdr = (const struct mt7915_fw_trailer *)(fw->data + fw->size -
+ sizeof(*hdr));
+
+ dev_info(dev->mt76.dev, "WM Firmware Version: %.10s, Build Time: %.15s\n",
+ hdr->fw_ver, hdr->build_date);
+
+ ret = mt7915_mcu_send_ram_firmware(dev, hdr, fw->data, false);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Failed to start WM firmware\n");
+ goto out;
+ }
+
+ release_firmware(fw);
+
+ ret = request_firmware(&fw, MT7915_FIRMWARE_WA, dev->mt76.dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+ dev_err(dev->mt76.dev, "Invalid firmware\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ hdr = (const struct mt7915_fw_trailer *)(fw->data + fw->size -
+ sizeof(*hdr));
+
+ dev_info(dev->mt76.dev, "WA Firmware Version: %.10s, Build Time: %.15s\n",
+ hdr->fw_ver, hdr->build_date);
+
+ ret = mt7915_mcu_send_ram_firmware(dev, hdr, fw->data, true);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Failed to start WA firmware\n");
+ goto out;
+ }
+
+ snprintf(dev->mt76.hw->wiphy->fw_version,
+ sizeof(dev->mt76.hw->wiphy->fw_version),
+ "%.10s-%.15s", hdr->fw_ver, hdr->build_date);
+
+out:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static int mt7915_load_firmware(struct mt7915_dev *dev)
+{
+ int ret;
+ u32 val, reg = mt7915_reg_map_l1(dev, MT_TOP_MISC);
+
+ val = FIELD_PREP(MT_TOP_MISC_FW_STATE, FW_STATE_FW_DOWNLOAD);
+
+ if (!mt76_poll_msec(dev, reg, MT_TOP_MISC_FW_STATE, val, 1000)) {
+ /* restart firmware once */
+ __mt76_mcu_restart(&dev->mt76);
+ if (!mt76_poll_msec(dev, reg, MT_TOP_MISC_FW_STATE,
+ val, 1000)) {
+ dev_err(dev->mt76.dev,
+ "Firmware is not ready for download\n");
+ return -EIO;
+ }
+ }
+
+ ret = mt7915_load_patch(dev);
+ if (ret)
+ return ret;
+
+ ret = mt7915_load_ram(dev);
+ if (ret)
+ return ret;
+
+ if (!mt76_poll_msec(dev, reg, MT_TOP_MISC_FW_STATE,
+ FIELD_PREP(MT_TOP_MISC_FW_STATE,
+ FW_STATE_WACPU_RDY), 1000)) {
+ dev_err(dev->mt76.dev, "Timeout for initializing firmware\n");
+ return -EIO;
+ }
+
+ mt76_queue_tx_cleanup(dev, MT_TXQ_FWDL, false);
+
+ dev_dbg(dev->mt76.dev, "Firmware init done\n");
+
+ return 0;
+}
+
+int mt7915_mcu_fw_log_2_host(struct mt7915_dev *dev, u8 ctrl)
+{
+ struct {
+ u8 ctrl_val;
+ u8 pad[3];
+ } data = {
+ .ctrl_val = ctrl
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_FW_LOG_2_HOST,
+ &data, sizeof(data), true);
+}
+
+int mt7915_mcu_fw_dbg_ctrl(struct mt7915_dev *dev, u32 module, u8 level)
+{
+ struct {
+ u8 ver;
+ u8 pad;
+ __le16 len;
+ u8 level;
+ u8 rsv[3];
+ __le32 module_idx;
+ } data = {
+ .module_idx = cpu_to_le32(module),
+ .level = level,
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_FW_DBG_CTRL,
+ &data, sizeof(data), false);
+}
+
+int mt7915_mcu_init(struct mt7915_dev *dev)
+{
+ static const struct mt76_mcu_ops mt7915_mcu_ops = {
+ .headroom = sizeof(struct mt7915_mcu_txd),
+ .mcu_skb_send_msg = mt7915_mcu_send_message,
+ .mcu_send_msg = mt7915_mcu_msg_send,
+ .mcu_restart = mt7915_mcu_restart,
+ };
+ int ret;
+
+ dev->mt76.mcu_ops = &mt7915_mcu_ops;
+
+ ret = mt7915_driver_own(dev);
+ if (ret)
+ return ret;
+
+ ret = mt7915_load_firmware(dev);
+ if (ret)
+ return ret;
+
+ set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
+ mt7915_mcu_fw_log_2_host(dev, 0);
+
+ return 0;
+}
+
+void mt7915_mcu_exit(struct mt7915_dev *dev)
+{
+ u32 reg = mt7915_reg_map_l1(dev, MT_TOP_MISC);
+
+ __mt76_mcu_restart(&dev->mt76);
+ if (!mt76_poll_msec(dev, reg, MT_TOP_MISC_FW_STATE,
+ FIELD_PREP(MT_TOP_MISC_FW_STATE,
+ FW_STATE_FW_DOWNLOAD), 1000)) {
+ dev_err(dev->mt76.dev, "Failed to exit mcu\n");
+ return;
+ }
+
+ reg = mt7915_reg_map_l1(dev, MT_TOP_LPCR_HOST_BAND0);
+ mt76_wr(dev, reg, MT_TOP_LPCR_HOST_FW_OWN);
+ skb_queue_purge(&dev->mt76.mcu.res_q);
+}
+
+int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band,
+ bool enable, bool hdr_trans)
+{
+ struct {
+ u8 operation;
+ u8 enable;
+ u8 check_bssid;
+ u8 insert_vlan;
+ u8 remove_vlan;
+ u8 tid;
+ u8 mode;
+ u8 rsv;
+ } __packed req_trans = {
+ .enable = hdr_trans,
+ };
+ struct {
+ u8 enable;
+ u8 band;
+ u8 rsv[2];
+ } __packed req_mac = {
+ .enable = enable,
+ .band = band,
+ };
+ int ret;
+
+ ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_RX_HDR_TRANS,
+ &req_trans, sizeof(req_trans), false);
+ if (ret)
+ return ret;
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_MAC_INIT_CTRL,
+ &req_mac, sizeof(req_mac), true);
+}
+
+int mt7915_mcu_set_scs(struct mt7915_dev *dev, u8 band, bool enable)
+{
+ struct {
+ __le32 cmd;
+ u8 band;
+ u8 enable;
+ } __packed req = {
+ .cmd = cpu_to_le32(SCS_ENABLE),
+ .band = band,
+ .enable = enable + 1,
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SCS_CTRL, &req,
+ sizeof(req), false);
+}
+
+int mt7915_mcu_set_rts_thresh(struct mt7915_phy *phy, u32 val)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct {
+ u8 prot_idx;
+ u8 band;
+ u8 rsv[2];
+ __le32 len_thresh;
+ __le32 pkt_thresh;
+ } __packed req = {
+ .prot_idx = 1,
+ .band = phy != &dev->phy,
+ .len_thresh = cpu_to_le32(val),
+ .pkt_thresh = cpu_to_le32(0x2),
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_PROTECT_CTRL,
+ &req, sizeof(req), true);
+}
+
+int mt7915_mcu_set_tx(struct mt7915_dev *dev, struct ieee80211_vif *vif)
+{
+#define WMM_AIFS_SET BIT(0)
+#define WMM_CW_MIN_SET BIT(1)
+#define WMM_CW_MAX_SET BIT(2)
+#define WMM_TXOP_SET BIT(3)
+#define WMM_PARAM_SET GENMASK(3, 0)
+#define TX_CMD_MODE 1
+ struct edca {
+ u8 queue;
+ u8 set;
+ u8 aifs;
+ u8 cw_min;
+ __le16 cw_max;
+ __le16 txop;
+ };
+ struct mt7915_mcu_tx {
+ u8 total;
+ u8 action;
+ u8 valid;
+ u8 mode;
+
+ struct edca edca[IEEE80211_NUM_ACS];
+ } __packed req = {
+ .valid = true,
+ .mode = TX_CMD_MODE,
+ .total = IEEE80211_NUM_ACS,
+ };
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ int ac;
+
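+ /* cw_min/cw_max are passed to the firmware as exponents (fls of the CW) */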
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac];
+ struct edca *e = &req.edca[ac];
+
+ e->set = WMM_PARAM_SET;
+ e->queue = ac + mvif->wmm_idx * MT7915_MAX_WMM_SETS;
+ e->aifs = q->aifs;
+ e->txop = cpu_to_le16(q->txop);
+
+ if (q->cw_min)
+ e->cw_min = fls(q->cw_min);
+ else
+ e->cw_min = 5;
+
+ if (q->cw_max)
+ e->cw_max = cpu_to_le16(fls(q->cw_max));
+ else
+ e->cw_max = cpu_to_le16(10);
+ }
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EDCA_UPDATE,
+ &req, sizeof(req), true);
+}
+
+int mt7915_mcu_set_pm(struct mt7915_dev *dev, int band, int enter)
+{
+#define ENTER_PM_STATE 1
+#define EXIT_PM_STATE 2
+ struct {
+ u8 pm_number;
+ u8 pm_state;
+ u8 bssid[ETH_ALEN];
+ u8 dtim_period;
+ u8 wlan_idx_lo;
+ __le16 bcn_interval;
+ __le32 aid;
+ __le32 rx_filter;
+ u8 band_idx;
+ u8 wlan_idx_hi;
+ u8 rsv[2];
+ __le32 feature;
+ u8 omac_idx;
+ u8 wmm_idx;
+ u8 bcn_loss_cnt;
+ u8 bcn_sp_duration;
+ } __packed req = {
+ .pm_number = 5,
+ .pm_state = (enter) ? ENTER_PM_STATE : EXIT_PM_STATE,
+ .band_idx = band,
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_PM_STATE_CTRL,
+ &req, sizeof(req), true);
+}
+
+int mt7915_mcu_rdd_cmd(struct mt7915_dev *dev,
+ enum mt7915_rdd_cmd cmd, u8 index,
+ u8 rx_sel, u8 val)
+{
+ struct {
+ u8 ctrl;
+ u8 rdd_idx;
+ u8 rdd_rx_sel;
+ u8 val;
+ u8 rsv[4];
+ } __packed req = {
+ .ctrl = cmd,
+ .rdd_idx = index,
+ .rdd_rx_sel = rx_sel,
+ .val = val,
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_CTRL,
+ &req, sizeof(req), true);
+}
+
+int mt7915_mcu_set_fcc5_lpn(struct mt7915_dev *dev, int val)
+{
+ struct {
+ __le32 tag;
+ __le16 min_lpn;
+ u8 rsv[2];
+ } __packed req = {
+ .tag = cpu_to_le32(0x1),
+ .min_lpn = cpu_to_le16(val),
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH,
+ &req, sizeof(req), true);
+}
+
+int mt7915_mcu_set_pulse_th(struct mt7915_dev *dev,
+ const struct mt7915_dfs_pulse *pulse)
+{
+ struct {
+ __le32 tag;
+
+ __le32 max_width; /* us */
+ __le32 max_pwr; /* dbm */
+ __le32 min_pwr; /* dbm */
+ __le32 min_stgr_pri; /* us */
+ __le32 max_stgr_pri; /* us */
+ __le32 min_cr_pri; /* us */
+ __le32 max_cr_pri; /* us */
+ } __packed req = {
+ .tag = cpu_to_le32(0x3),
+
+#define __req_field(field) .field = cpu_to_le32(pulse->field)
+ __req_field(max_width),
+ __req_field(max_pwr),
+ __req_field(min_pwr),
+ __req_field(min_stgr_pri),
+ __req_field(max_stgr_pri),
+ __req_field(min_cr_pri),
+ __req_field(max_cr_pri),
+#undef __req_field
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH,
+ &req, sizeof(req), true);
+}
+
+int mt7915_mcu_set_radar_th(struct mt7915_dev *dev, int index,
+ const struct mt7915_dfs_pattern *pattern)
+{
+ struct {
+ __le32 tag;
+ __le16 radar_type;
+
+ u8 enb;
+ u8 stgr;
+ u8 min_crpn;
+ u8 max_crpn;
+ u8 min_crpr;
+ u8 min_pw;
+ u32 min_pri;
+ u32 max_pri;
+ u8 max_pw;
+ u8 min_crbn;
+ u8 max_crbn;
+ u8 min_stgpn;
+ u8 max_stgpn;
+ u8 min_stgpr;
+ u8 rsv[2];
+ u32 min_stgpr_diff;
+ } __packed req = {
+ .tag = cpu_to_le32(0x2),
+ .radar_type = cpu_to_le16(index),
+
+#define __req_field_u8(field) .field = pattern->field
+#define __req_field_u32(field) .field = cpu_to_le32(pattern->field)
+ __req_field_u8(enb),
+ __req_field_u8(stgr),
+ __req_field_u8(min_crpn),
+ __req_field_u8(max_crpn),
+ __req_field_u8(min_crpr),
+ __req_field_u8(min_pw),
+ __req_field_u32(min_pri),
+ __req_field_u32(max_pri),
+ __req_field_u8(max_pw),
+ __req_field_u8(min_crbn),
+ __req_field_u8(max_crbn),
+ __req_field_u8(min_stgpn),
+ __req_field_u8(max_stgpn),
+ __req_field_u8(min_stgpr),
+ __req_field_u32(min_stgpr_diff),
+#undef __req_field_u8
+#undef __req_field_u32
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH,
+ &req, sizeof(req), true);
+}
+
+int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+ int freq1 = chandef->center_freq1;
+ struct {
+ u8 control_ch;
+ u8 center_ch;
+ u8 bw;
+ u8 tx_streams_num;
+ u8 rx_streams; /* mask or num */
+ u8 switch_reason;
+ u8 band_idx;
+ u8 center_ch2; /* for 80+80 only */
+ __le16 cac_case;
+ u8 channel_band;
+ u8 rsv0;
+ __le32 outband_freq;
+ u8 txpower_drop;
+ u8 ap_bw;
+ u8 ap_center_ch;
+ u8 rsv1[57];
+ } __packed req = {
+ .control_ch = chandef->chan->hw_value,
+ .center_ch = ieee80211_frequency_to_channel(freq1),
+ .bw = mt7915_mcu_chan_bw(chandef),
+ .tx_streams_num = hweight8(phy->mt76->antenna_mask),
+ .rx_streams = phy->chainmask,
+ .band_idx = phy != &dev->phy,
+ .channel_band = chandef->chan->band,
+ };
+
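+	/*
+	 * Pick the firmware channel switch reason: off-channel (scan)
+	 * operation skips DPD calibration, channels that still need CAC
+	 * use the DFS reason, everything else is a normal switch.
+	 */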
+ if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
+ else if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
+ chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
+ req.switch_reason = CH_SWITCH_DFS;
+ else
+ req.switch_reason = CH_SWITCH_NORMAL;
+
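+	/* CHANNEL_SWITCH expects a stream count, the other commands take the chain mask */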
+ if (cmd == MCU_EXT_CMD_CHANNEL_SWITCH)
+ req.rx_streams = hweight8(req.rx_streams);
+
+ if (chandef->width == NL80211_CHAN_WIDTH_80P80) {
+ int freq2 = chandef->center_freq2;
+
+ req.center_ch2 = ieee80211_frequency_to_channel(freq2);
+ }
+
+ return __mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), true);
+}
+
+int mt7915_mcu_set_eeprom(struct mt7915_dev *dev)
+{
+ struct req_hdr {
+ u8 buffer_mode;
+ u8 format;
+ __le16 len;
+ } __packed req = {
+ .buffer_mode = EE_MODE_EFUSE,
+ .format = EE_FORMAT_WHOLE,
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EFUSE_BUFFER_MODE,
+ &req, sizeof(req), true);
+}
+
+int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset)
+{
+ struct mt7915_mcu_eeprom_info req = {
+ .addr = cpu_to_le32(round_down(offset, 16)),
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EFUSE_ACCESS, &req,
+ sizeof(req), true);
+}
+
+int mt7915_mcu_get_temperature(struct mt7915_dev *dev, int index)
+{
+ struct {
+ u8 ctrl_id;
+ u8 action;
+ u8 band;
+ u8 rsv[5];
+ } req = {
+ .ctrl_id = THERMAL_SENSOR_TEMP_QUERY,
+ .action = index,
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_THERMAL_CTRL, &req,
+ sizeof(req), true);
+}
+
+int mt7915_mcu_get_rate_info(struct mt7915_dev *dev, u32 cmd, u16 wlan_idx)
+{
+ struct {
+ __le32 cmd;
+ __le16 wlan_idx;
+ __le16 ru_idx;
+ __le16 direction;
+ __le16 dump_group;
+ } req = {
+ .cmd = cpu_to_le32(cmd),
+ .wlan_idx = cpu_to_le16(wlan_idx),
+ .dump_group = cpu_to_le16(1),
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_RATE_CTRL, &req,
+ sizeof(req), false);
+}
+
+int mt7915_mcu_set_sku(struct mt7915_phy *phy)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct mt76_phy *mphy = phy->mt76;
+ struct ieee80211_hw *hw = mphy->hw;
+ struct mt7915_sku_val {
+ u8 format_id;
+ u8 limit_type;
+ u8 dbdc_idx;
+ s8 val[MT7915_SKU_RATE_NUM];
+ } __packed req = {
+ .format_id = 4,
+ .dbdc_idx = phy != &dev->phy,
+ };
+ int i;
+ s8 *delta;
+
+ delta = dev->rate_power[mphy->chandef.chan->band];
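+	/* power_level is in dBm; the rate table presumably uses 0.5 dB steps, hence the factor of two */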
+ mphy->txpower_cur = hw->conf.power_level * 2 +
+ delta[MT7915_SKU_MAX_DELTA_IDX];
+
+ for (i = 0; i < MT7915_SKU_RATE_NUM; i++)
+ req.val[i] = hw->conf.power_level * 2 + delta[i];
+
+ return __mt76_mcu_send_msg(&dev->mt76,
+ MCU_EXT_CMD_TX_POWER_FEATURE_CTRL,
+ &req, sizeof(req), true);
+}
+
+int mt7915_mcu_set_sku_en(struct mt7915_phy *phy, bool enable)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct mt7915_sku {
+ u8 format_id;
+ u8 sku_enable;
+ u8 dbdc_idx;
+ u8 rsv;
+ } __packed req = {
+ .format_id = 0,
+ .dbdc_idx = phy != &dev->phy,
+ .sku_enable = enable,
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76,
+ MCU_EXT_CMD_TX_POWER_FEATURE_CTRL,
+ &req, sizeof(req), true);
+}
+
+int mt7915_mcu_set_ser(struct mt7915_dev *dev, u8 action, u8 set, u8 band)
+{
+ struct {
+ u8 action;
+ u8 set;
+ u8 band;
+ u8 rsv;
+ } req = {
+ .action = action,
+ .set = set,
+ .band = band,
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_SER_TRIGGER,
+ &req, sizeof(req), false);
+}
+
+int mt7915_mcu_set_txbf_type(struct mt7915_dev *dev)
+{
+#define MT_BF_TYPE_UPDATE 20
+ struct {
+ u8 action;
+ bool ebf;
+ bool ibf;
+ u8 rsv;
+ } __packed req = {
+ .action = MT_BF_TYPE_UPDATE,
+ .ebf = true,
+ .ibf = false,
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_TXBF_ACTION,
+ &req, sizeof(req), true);
+}
+
+int mt7915_mcu_set_txbf_sounding(struct mt7915_dev *dev)
+{
+#define MT_BF_PROCESSING 4
+ struct {
+ u8 action;
+ u8 snd_mode;
+ u8 sta_num;
+ u8 rsv;
+ u8 wlan_idx[4];
+ __le32 snd_period; /* ms */
+ } __packed req = {
+ .action = true,
+ .snd_mode = MT_BF_PROCESSING,
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_TXBF_ACTION,
+ &req, sizeof(req), true);
+}
+
+int mt7915_mcu_add_obss_spr(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ bool enable)
+{
+#define MT_SPR_ENABLE 1
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct {
+ u8 action;
+ u8 arg_num;
+ u8 band_idx;
+ u8 status;
+ u8 drop_tx_idx;
+ u8 sta_idx; /* 256 sta */
+ u8 rsv[2];
+ __le32 val;
+ } __packed req = {
+ .action = MT_SPR_ENABLE,
+ .arg_num = 1,
+ .band_idx = mvif->band_idx,
+ .val = cpu_to_le32(enable),
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_SPR,
+ &req, sizeof(req), true);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
new file mode 100644
index 000000000..c656d6638
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
@@ -0,0 +1,1067 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#ifndef __MT7915_MCU_H
+#define __MT7915_MCU_H
+
+struct mt7915_mcu_txd {
+ __le32 txd[8];
+
+ __le16 len;
+ __le16 pq_id;
+
+ u8 cid;
+ u8 pkt_type;
+ u8 set_query; /* FW don't care */
+ u8 seq;
+
+ u8 uc_d2b0_rev;
+ u8 ext_cid;
+ u8 s2d_index;
+ u8 ext_cid_ack;
+
+ u32 reserved[5];
+} __packed __aligned(4);
+
+/* event table */
+enum {
+ MCU_EVENT_TARGET_ADDRESS_LEN = 0x01,
+ MCU_EVENT_FW_START = 0x01,
+ MCU_EVENT_GENERIC = 0x01,
+ MCU_EVENT_ACCESS_REG = 0x02,
+ MCU_EVENT_MT_PATCH_SEM = 0x04,
+ MCU_EVENT_CH_PRIVILEGE = 0x18,
+ MCU_EVENT_EXT = 0xed,
+ MCU_EVENT_RESTART_DL = 0xef,
+};
+
+/* ext event table */
+enum {
+ MCU_EXT_EVENT_PS_SYNC = 0x5,
+ MCU_EXT_EVENT_FW_LOG_2_HOST = 0x13,
+ MCU_EXT_EVENT_THERMAL_PROTECT = 0x22,
+ MCU_EXT_EVENT_ASSERT_DUMP = 0x23,
+ MCU_EXT_EVENT_RDD_REPORT = 0x3a,
+ MCU_EXT_EVENT_CSA_NOTIFY = 0x4f,
+ MCU_EXT_EVENT_RATE_REPORT = 0x87,
+};
+
+struct mt7915_mcu_rxd {
+ __le32 rxd[6];
+
+ __le16 len;
+ __le16 pkt_type_id;
+
+ u8 eid;
+ u8 seq;
+ __le16 __rsv;
+
+ u8 ext_eid;
+ u8 __rsv1[2];
+ u8 s2d_index;
+};
+
+struct mt7915_mcu_rdd_report {
+ struct mt7915_mcu_rxd rxd;
+
+ u8 idx;
+ u8 long_detected;
+ u8 constant_prf_detected;
+ u8 staggered_prf_detected;
+ u8 radar_type_idx;
+ u8 periodic_pulse_num;
+ u8 long_pulse_num;
+ u8 hw_pulse_num;
+
+ u8 out_lpn;
+ u8 out_spn;
+ u8 out_crpn;
+ u8 out_crpw;
+ u8 out_crbn;
+ u8 out_stgpn;
+ u8 out_stgpw;
+
+ u8 rsv;
+
+ __le32 out_pri_const;
+ __le32 out_pri_stg[3];
+
+ struct {
+ __le32 start;
+ __le16 pulse_width;
+ __le16 pulse_power;
+ u8 mdrdy_flag;
+ u8 rsv[3];
+ } long_pulse[32];
+
+ struct {
+ __le32 start;
+ __le16 pulse_width;
+ __le16 pulse_power;
+ u8 mdrdy_flag;
+ u8 rsv[3];
+ } periodic_pulse[32];
+
+ struct {
+ __le32 start;
+ __le16 pulse_width;
+ __le16 pulse_power;
+ u8 sc_pass;
+ u8 sw_reset;
+ u8 mdrdy_flag;
+ u8 tx_active;
+ } hw_pulse[32];
+} __packed;
+
+struct mt7915_mcu_eeprom_info {
+ __le32 addr;
+ __le32 valid;
+ u8 data[16];
+} __packed;
+
+struct mt7915_mcu_ra_info {
+ struct mt7915_mcu_rxd rxd;
+
+ __le32 event_id;
+ __le16 wlan_idx;
+ __le16 ru_idx;
+ __le16 direction;
+ __le16 dump_group;
+
+ __le32 suggest_rate;
+ __le32 min_rate; /* for dynamic sounding */
+ __le32 max_rate; /* for dynamic sounding */
+ __le32 init_rate_down_rate;
+
+ __le16 curr_rate;
+ __le16 init_rate_down_total;
+ __le16 init_rate_down_succ;
+ __le16 success;
+ __le16 attempts;
+
+ __le16 prev_rate;
+ __le16 prob_up_rate;
+ u8 no_rate_up_cnt;
+ u8 ppdu_cnt;
+ u8 gi;
+
+ u8 try_up_fail;
+ u8 try_up_total;
+ u8 suggest_wf;
+ u8 try_up_check;
+ u8 prob_up_period;
+ u8 prob_down_pending;
+} __packed;
+
+#define MT_RA_RATE_NSS GENMASK(8, 6)
+#define MT_RA_RATE_MCS GENMASK(3, 0)
+#define MT_RA_RATE_TX_MODE GENMASK(12, 9)
+#define MT_RA_RATE_DCM_EN BIT(4)
+#define MT_RA_RATE_BW GENMASK(14, 13)
+
+#define MCU_PQ_ID(p, q) (((p) << 15) | ((q) << 10))
+#define MCU_PKT_ID 0xa0
+
+enum {
+ MCU_Q_QUERY,
+ MCU_Q_SET,
+ MCU_Q_RESERVED,
+ MCU_Q_NA
+};
+
+enum {
+ MCU_S2D_H2N,
+ MCU_S2D_C2N,
+ MCU_S2D_H2C,
+ MCU_S2D_H2CN
+};
+
+enum {
+ MCU_CMD_TARGET_ADDRESS_LEN_REQ = 0x01,
+ MCU_CMD_FW_START_REQ = 0x02,
+ MCU_CMD_INIT_ACCESS_REG = 0x3,
+ MCU_CMD_NIC_POWER_CTRL = 0x4,
+ MCU_CMD_PATCH_START_REQ = 0x05,
+ MCU_CMD_PATCH_FINISH_REQ = 0x07,
+ MCU_CMD_PATCH_SEM_CONTROL = 0x10,
+ MCU_CMD_EXT_CID = 0xED,
+ MCU_CMD_FW_SCATTER = 0xEE,
+ MCU_CMD_RESTART_DL_REQ = 0xEF,
+};
+
+enum {
+ MCU_EXT_CMD_EFUSE_ACCESS = 0x01,
+ MCU_EXT_CMD_PM_STATE_CTRL = 0x07,
+ MCU_EXT_CMD_CHANNEL_SWITCH = 0x08,
+ MCU_EXT_CMD_FW_LOG_2_HOST = 0x13,
+ MCU_EXT_CMD_TXBF_ACTION = 0x1e,
+ MCU_EXT_CMD_EFUSE_BUFFER_MODE = 0x21,
+ MCU_EXT_CMD_STA_REC_UPDATE = 0x25,
+ MCU_EXT_CMD_BSS_INFO_UPDATE = 0x26,
+ MCU_EXT_CMD_EDCA_UPDATE = 0x27,
+ MCU_EXT_CMD_DEV_INFO_UPDATE = 0x2A,
+ MCU_EXT_CMD_THERMAL_CTRL = 0x2c,
+ MCU_EXT_CMD_SET_DRR_CTRL = 0x36,
+ MCU_EXT_CMD_SET_RDD_CTRL = 0x3a,
+ MCU_EXT_CMD_PROTECT_CTRL = 0x3e,
+ MCU_EXT_CMD_MAC_INIT_CTRL = 0x46,
+ MCU_EXT_CMD_RX_HDR_TRANS = 0x47,
+ MCU_EXT_CMD_SET_RX_PATH = 0x4e,
+ MCU_EXT_CMD_TX_POWER_FEATURE_CTRL = 0x58,
+ MCU_EXT_CMD_SET_SER_TRIGGER = 0x81,
+ MCU_EXT_CMD_SCS_CTRL = 0x82,
+ MCU_EXT_CMD_RATE_CTRL = 0x87,
+ MCU_EXT_CMD_FW_DBG_CTRL = 0x95,
+ MCU_EXT_CMD_SET_RDD_TH = 0x9d,
+ MCU_EXT_CMD_SET_SPR = 0xa8,
+};
+
+enum {
+ PATCH_SEM_RELEASE,
+ PATCH_SEM_GET
+};
+
+enum {
+ PATCH_NOT_DL_SEM_FAIL,
+ PATCH_IS_DL,
+ PATCH_NOT_DL_SEM_SUCCESS,
+ PATCH_REL_SEM_SUCCESS
+};
+
+enum {
+ FW_STATE_INITIAL,
+ FW_STATE_FW_DOWNLOAD,
+ FW_STATE_NORMAL_OPERATION,
+ FW_STATE_NORMAL_TRX,
+ FW_STATE_WACPU_RDY = 7
+};
+
+enum {
+ EE_MODE_EFUSE,
+ EE_MODE_BUFFER,
+};
+
+enum {
+ EE_FORMAT_BIN,
+ EE_FORMAT_WHOLE,
+ EE_FORMAT_MULTIPLE,
+};
+
+#define STA_TYPE_STA BIT(0)
+#define STA_TYPE_AP BIT(1)
+#define STA_TYPE_ADHOC BIT(2)
+#define STA_TYPE_WDS BIT(4)
+#define STA_TYPE_BC BIT(5)
+
+#define NETWORK_INFRA BIT(16)
+#define NETWORK_P2P BIT(17)
+#define NETWORK_IBSS BIT(18)
+#define NETWORK_WDS BIT(21)
+
+#define CONNECTION_INFRA_STA (STA_TYPE_STA | NETWORK_INFRA)
+#define CONNECTION_INFRA_AP (STA_TYPE_AP | NETWORK_INFRA)
+#define CONNECTION_P2P_GC (STA_TYPE_STA | NETWORK_P2P)
+#define CONNECTION_P2P_GO (STA_TYPE_AP | NETWORK_P2P)
+#define CONNECTION_IBSS_ADHOC (STA_TYPE_ADHOC | NETWORK_IBSS)
+#define CONNECTION_WDS (STA_TYPE_WDS | NETWORK_WDS)
+#define CONNECTION_INFRA_BC (STA_TYPE_BC | NETWORK_INFRA)
+
+#define CONN_STATE_DISCONNECT 0
+#define CONN_STATE_CONNECT 1
+#define CONN_STATE_PORT_SECURE 2
+
+enum {
+ DEV_INFO_ACTIVE,
+ DEV_INFO_MAX_NUM
+};
+
+enum {
+ SCS_SEND_DATA,
+ SCS_SET_MANUAL_PD_TH,
+ SCS_CONFIG,
+ SCS_ENABLE,
+ SCS_SHOW_INFO,
+ SCS_GET_GLO_ADDR,
+ SCS_GET_GLO_ADDR_EVENT,
+};
+
+enum {
+ CMD_CBW_20MHZ = IEEE80211_STA_RX_BW_20,
+ CMD_CBW_40MHZ = IEEE80211_STA_RX_BW_40,
+ CMD_CBW_80MHZ = IEEE80211_STA_RX_BW_80,
+ CMD_CBW_160MHZ = IEEE80211_STA_RX_BW_160,
+ CMD_CBW_10MHZ,
+ CMD_CBW_5MHZ,
+ CMD_CBW_8080MHZ,
+
+ CMD_HE_MCS_BW80 = 0,
+ CMD_HE_MCS_BW160,
+ CMD_HE_MCS_BW8080,
+ CMD_HE_MCS_BW_NUM
+};
+
+struct tlv {
+ __le16 tag;
+ __le16 len;
+} __packed;
+
+struct bss_info_omac {
+ __le16 tag;
+ __le16 len;
+ u8 hw_bss_idx;
+ u8 omac_idx;
+ u8 band_idx;
+ u8 rsv0;
+ __le32 conn_type;
+ u32 rsv1;
+} __packed;
+
+struct bss_info_basic {
+ __le16 tag;
+ __le16 len;
+ __le32 network_type;
+ u8 active;
+ u8 rsv0;
+ __le16 bcn_interval;
+ u8 bssid[ETH_ALEN];
+ u8 wmm_idx;
+ u8 dtim_period;
+ u8 bmc_wcid_lo;
+ u8 cipher;
+ u8 phy_mode;
+ u8 max_bssid; /* max BSSID. range: 1 ~ 8, 0: MBSSID disabled */
+ u8 non_tx_bssid;/* non-transmitted BSSID, 0: transmitted BSSID */
+ u8 bmc_wcid_hi; /* high Byte and version */
+ u8 rsv[2];
+} __packed;
+
+struct bss_info_rf_ch {
+ __le16 tag;
+ __le16 len;
+ u8 pri_ch;
+ u8 center_ch0;
+ u8 center_ch1;
+ u8 bw;
+ u8 he_ru26_block; /* 1: don't send HETB in RU26, 0: allow */
+ u8 he_all_disable; /* 1: disallow all HETB, 0: allow */
+ u8 rsv[2];
+} __packed;
+
+struct bss_info_ext_bss {
+ __le16 tag;
+ __le16 len;
+ __le32 mbss_tsf_offset; /* in unit of us */
+ u8 rsv[8];
+} __packed;
+
+struct bss_info_sync_mode {
+ __le16 tag;
+ __le16 len;
+ __le16 bcn_interval;
+ u8 enable;
+ u8 dtim_period;
+ u8 rsv[8];
+} __packed;
+
+struct bss_info_bmc_rate {
+ __le16 tag;
+ __le16 len;
+ __le16 bc_trans;
+ __le16 mc_trans;
+ u8 short_preamble;
+ u8 rsv[7];
+} __packed;
+
+struct bss_info_ra {
+ __le16 tag;
+ __le16 len;
+ u8 op_mode;
+ u8 adhoc_en;
+ u8 short_preamble;
+ u8 tx_streams;
+ u8 rx_streams;
+ u8 algo;
+ u8 force_sgi;
+ u8 force_gf;
+ u8 ht_mode;
+ u8 has_20_sta; /* Check if any sta support GF. */
+ u8 bss_width_trigger_events;
+ u8 vht_nss_cap;
+	u8 vht_bw_signal;	/* not used */
+	u8 vht_force_sgi;	/* not used */
+ u8 se_off;
+ u8 antenna_idx;
+ u8 train_up_rule;
+ u8 rsv[3];
+ unsigned short train_up_high_thres;
+ short train_up_rule_rssi;
+ unsigned short low_traffic_thres;
+ __le16 max_phyrate;
+ __le32 phy_cap;
+ __le32 interval;
+ __le32 fast_interval;
+} __packed;
+
+struct bss_info_hw_amsdu {
+ __le16 tag;
+ __le16 len;
+ __le32 cmp_bitmap_0;
+ __le32 cmp_bitmap_1;
+ __le16 trig_thres;
+ u8 enable;
+ u8 rsv;
+} __packed;
+
+struct bss_info_he {
+ __le16 tag;
+ __le16 len;
+ u8 he_pe_duration;
+ u8 vht_op_info_present;
+ __le16 he_rts_thres;
+ __le16 max_nss_mcs[CMD_HE_MCS_BW_NUM];
+ u8 rsv[6];
+} __packed;
+
+struct bss_info_bcn {
+ __le16 tag;
+ __le16 len;
+ u8 ver;
+ u8 enable;
+ __le16 sub_ntlv;
+} __packed __aligned(4);
+
+struct bss_info_bcn_csa {
+ __le16 tag;
+ __le16 len;
+ u8 cnt;
+ u8 rsv[3];
+} __packed __aligned(4);
+
+struct bss_info_bcn_bcc {
+ __le16 tag;
+ __le16 len;
+ u8 cnt;
+ u8 rsv[3];
+} __packed __aligned(4);
+
+struct bss_info_bcn_mbss {
+#define MAX_BEACON_NUM 32
+ __le16 tag;
+ __le16 len;
+ __le32 bitmap;
+ __le16 offset[MAX_BEACON_NUM];
+ u8 rsv[8];
+} __packed __aligned(4);
+
+struct bss_info_bcn_cont {
+ __le16 tag;
+ __le16 len;
+ __le16 tim_ofs;
+ __le16 csa_ofs;
+ __le16 bcc_ofs;
+ __le16 pkt_len;
+} __packed __aligned(4);
+
+enum {
+ BSS_INFO_BCN_CSA,
+ BSS_INFO_BCN_BCC,
+ BSS_INFO_BCN_MBSSID,
+ BSS_INFO_BCN_CONTENT,
+ BSS_INFO_BCN_MAX
+};
+
+enum {
+ BSS_INFO_OMAC,
+ BSS_INFO_BASIC,
+ BSS_INFO_RF_CH, /* optional, for BT/LTE coex */
+ BSS_INFO_PM, /* sta only */
+ BSS_INFO_UAPSD, /* sta only */
+ BSS_INFO_ROAM_DETECT, /* obsoleted */
+ BSS_INFO_LQ_RM, /* obsoleted */
+ BSS_INFO_EXT_BSS,
+ BSS_INFO_BMC_RATE, /* for bmc rate control in CR4 */
+ BSS_INFO_SYNC_MODE,
+ BSS_INFO_RA,
+ BSS_INFO_HW_AMSDU,
+ BSS_INFO_BSS_COLOR,
+ BSS_INFO_HE_BASIC,
+ BSS_INFO_PROTECT_INFO,
+ BSS_INFO_OFFLOAD,
+ BSS_INFO_11V_MBSSID,
+ BSS_INFO_MAX_NUM
+};
+
+enum {
+ WTBL_RESET_AND_SET = 1,
+ WTBL_SET,
+ WTBL_QUERY,
+ WTBL_RESET_ALL
+};
+
+struct wtbl_req_hdr {
+ u8 wlan_idx_lo;
+ u8 operation;
+ __le16 tlv_num;
+ u8 wlan_idx_hi;
+ u8 rsv[3];
+} __packed;
+
+struct wtbl_generic {
+ __le16 tag;
+ __le16 len;
+ u8 peer_addr[ETH_ALEN];
+ u8 muar_idx;
+ u8 skip_tx;
+ u8 cf_ack;
+ u8 qos;
+ u8 mesh;
+ u8 adm;
+ __le16 partial_aid;
+ u8 baf_en;
+ u8 aad_om;
+} __packed;
+
+struct wtbl_rx {
+ __le16 tag;
+ __le16 len;
+ u8 rcid;
+ u8 rca1;
+ u8 rca2;
+ u8 rv;
+ u8 rsv[4];
+} __packed;
+
+struct wtbl_ht {
+ __le16 tag;
+ __le16 len;
+ u8 ht;
+ u8 ldpc;
+ u8 af;
+ u8 mm;
+ u8 rsv[4];
+} __packed;
+
+struct wtbl_vht {
+ __le16 tag;
+ __le16 len;
+ u8 ldpc;
+ u8 dyn_bw;
+ u8 vht;
+ u8 txop_ps;
+ u8 rsv[4];
+} __packed;
+
+enum {
+ MT_BA_TYPE_INVALID,
+ MT_BA_TYPE_ORIGINATOR,
+ MT_BA_TYPE_RECIPIENT
+};
+
+enum {
+ RST_BA_MAC_TID_MATCH,
+ RST_BA_MAC_MATCH,
+ RST_BA_NO_MATCH
+};
+
+struct wtbl_ba {
+ __le16 tag;
+ __le16 len;
+ /* common */
+ u8 tid;
+ u8 ba_type;
+ u8 rsv0[2];
+ /* originator only */
+ __le16 sn;
+ u8 ba_en;
+ u8 ba_winsize_idx;
+ __le16 ba_winsize;
+ /* recipient only */
+ u8 peer_addr[ETH_ALEN];
+ u8 rst_ba_tid;
+ u8 rst_ba_sel;
+ u8 rst_ba_sb;
+ u8 band_idx;
+ u8 rsv1[4];
+} __packed;
+
+struct wtbl_smps {
+ __le16 tag;
+ __le16 len;
+ u8 smps;
+ u8 rsv[3];
+} __packed;
+
+enum {
+ WTBL_GENERIC,
+ WTBL_RX,
+ WTBL_HT,
+ WTBL_VHT,
+ WTBL_PEER_PS, /* not used */
+ WTBL_TX_PS,
+ WTBL_HDR_TRANS,
+ WTBL_SEC_KEY,
+ WTBL_BA,
+ WTBL_RDG, /* obsoleted */
+ WTBL_PROTECT, /* not used */
+ WTBL_CLEAR, /* not used */
+ WTBL_BF,
+ WTBL_SMPS,
+ WTBL_RAW_DATA, /* debug only */
+ WTBL_PN,
+ WTBL_SPE,
+ WTBL_MAX_NUM
+};
+
+struct sta_ntlv_hdr {
+ u8 rsv[2];
+ __le16 tlv_num;
+} __packed;
+
+struct sta_req_hdr {
+ u8 bss_idx;
+ u8 wlan_idx_lo;
+ __le16 tlv_num;
+ u8 is_tlv_append;
+ u8 muar_idx;
+ u8 wlan_idx_hi;
+ u8 rsv;
+} __packed;
+
+struct sta_rec_basic {
+ __le16 tag;
+ __le16 len;
+ __le32 conn_type;
+ u8 conn_state;
+ u8 qos;
+ __le16 aid;
+ u8 peer_addr[ETH_ALEN];
+ __le16 extra_info;
+} __packed;
+
+struct sta_rec_ht {
+ __le16 tag;
+ __le16 len;
+ __le16 ht_cap;
+ u16 rsv;
+} __packed;
+
+struct sta_rec_vht {
+ __le16 tag;
+ __le16 len;
+ __le32 vht_cap;
+ __le16 vht_rx_mcs_map;
+ __le16 vht_tx_mcs_map;
+ u8 rts_bw_sig;
+ u8 rsv[3];
+} __packed;
+
+struct sta_rec_uapsd {
+ __le16 tag;
+ __le16 len;
+ u8 dac_map;
+ u8 tac_map;
+ u8 max_sp;
+ u8 rsv0;
+ __le16 listen_interval;
+ u8 rsv1[2];
+} __packed;
+
+struct sta_rec_muru {
+ __le16 tag;
+ __le16 len;
+
+ struct {
+ bool ofdma_dl_en;
+ bool ofdma_ul_en;
+ bool mimo_dl_en;
+ bool mimo_ul_en;
+ u8 rsv[4];
+ } cfg;
+
+ struct {
+ u8 punc_pream_rx;
+ bool he_20m_in_40m_2g;
+ bool he_20m_in_160m;
+ bool he_80m_in_160m;
+ bool lt16_sigb;
+ bool rx_su_comp_sigb;
+ bool rx_su_non_comp_sigb;
+ u8 rsv;
+ } ofdma_dl;
+
+ struct {
+ u8 t_frame_dur;
+ u8 mu_cascading;
+ u8 uo_ra;
+ u8 he_2x996_tone;
+ u8 rx_t_frame_11ac;
+ u8 rsv[3];
+ } ofdma_ul;
+
+ struct {
+ bool vht_mu_bfee;
+ bool partial_bw_dl_mimo;
+ u8 rsv[2];
+ } mimo_dl;
+
+ struct {
+ bool full_ul_mimo;
+ bool partial_ul_mimo;
+ u8 rsv[2];
+ } mimo_ul;
+} __packed;
+
+struct sta_rec_he {
+ __le16 tag;
+ __le16 len;
+
+ __le32 he_cap;
+
+ u8 t_frame_dur;
+ u8 max_ampdu_exp;
+ u8 bw_set;
+ u8 device_class;
+ u8 dcm_tx_mode;
+ u8 dcm_tx_max_nss;
+ u8 dcm_rx_mode;
+ u8 dcm_rx_max_nss;
+ u8 dcm_max_ru;
+ u8 punc_pream_rx;
+ u8 pkt_ext;
+ u8 rsv1;
+
+ __le16 max_nss_mcs[CMD_HE_MCS_BW_NUM];
+
+ u8 rsv2[2];
+} __packed;
+
+struct sta_rec_ba {
+ __le16 tag;
+ __le16 len;
+ u8 tid;
+ u8 ba_type;
+ u8 amsdu;
+ u8 ba_en;
+ __le16 ssn;
+ __le16 winsize;
+} __packed;
+
+struct sta_rec_amsdu {
+ __le16 tag;
+ __le16 len;
+ u8 max_amsdu_num;
+ u8 max_mpdu_size;
+ u8 amsdu_en;
+ u8 rsv;
+} __packed;
+
+struct sec_key {
+ u8 cipher_id;
+ u8 cipher_len;
+ u8 key_id;
+ u8 key_len;
+ u8 key[32];
+} __packed;
+
+struct sta_rec_sec {
+ __le16 tag;
+ __le16 len;
+ u8 add;
+ u8 n_cipher;
+ u8 rsv[2];
+
+ struct sec_key key[2];
+} __packed;
+
+struct ra_phy {
+ u8 type;
+ u8 flag;
+ u8 stbc;
+ u8 sgi;
+ u8 bw;
+ u8 ldpc;
+ u8 mcs;
+ u8 nss;
+ u8 he_ltf;
+};
+
+struct sta_rec_ra {
+ __le16 tag;
+ __le16 len;
+
+ u8 valid;
+ u8 auto_rate;
+ u8 phy_mode;
+ u8 channel;
+ u8 bw;
+ u8 disable_cck;
+ u8 ht_mcs32;
+ u8 ht_gf;
+ u8 ht_mcs[4];
+ u8 mmps_mode;
+ u8 gband_256;
+ u8 af;
+ u8 auth_wapi_mode;
+ u8 rate_len;
+
+ u8 supp_mode;
+ u8 supp_cck_rate;
+ u8 supp_ofdm_rate;
+ __le32 supp_ht_mcs;
+ __le16 supp_vht_mcs[4];
+
+ u8 op_mode;
+ u8 op_vht_chan_width;
+ u8 op_vht_rx_nss;
+ u8 op_vht_rx_nss_type;
+
+ __le32 sta_status;
+
+ struct ra_phy phy;
+} __packed;
+
+struct sta_rec_ra_fixed {
+ __le16 tag;
+ __le16 len;
+
+ __le32 field;
+ u8 op_mode;
+ u8 op_vht_chan_width;
+ u8 op_vht_rx_nss;
+ u8 op_vht_rx_nss_type;
+
+ struct ra_phy phy;
+
+ u8 spe_en;
+ u8 short_preamble;
+ u8 is_5g;
+ u8 mmps_mode;
+} __packed;
+
+#define RATE_PARAM_FIXED 3
+#define RATE_PARAM_AUTO 20
+#define RATE_CFG_MCS GENMASK(3, 0)
+#define RATE_CFG_NSS GENMASK(7, 4)
+#define RATE_CFG_GI GENMASK(11, 8)
+#define RATE_CFG_BW GENMASK(15, 12)
+#define RATE_CFG_STBC GENMASK(19, 16)
+#define RATE_CFG_LDPC GENMASK(23, 20)
+#define RATE_CFG_PHY_TYPE GENMASK(27, 24)
+
+struct sta_rec_bf {
+ __le16 tag;
+ __le16 len;
+
+ __le16 pfmu; /* 0xffff: no access right for PFMU */
+ bool su_mu; /* 0: SU, 1: MU */
+ u8 bf_cap; /* 0: iBF, 1: eBF */
+ u8 sounding_phy; /* 0: legacy, 1: OFDM, 2: HT, 4: VHT */
+ u8 ndpa_rate;
+ u8 ndp_rate;
+ u8 rept_poll_rate;
+ u8 tx_mode; /* 0: legacy, 1: OFDM, 2: HT, 4: VHT ... */
+ u8 nc;
+ u8 nr;
+ u8 bw; /* 0: 20M, 1: 40M, 2: 80M, 3: 160M */
+
+ u8 mem_total;
+ u8 mem_20m;
+ struct {
+ u8 row;
+ u8 col: 6, row_msb: 2;
+ } mem[4];
+
+ __le16 smart_ant;
+ u8 se_idx;
+ u8 auto_sounding; /* b7: low traffic indicator
+ * b6: Stop sounding for this entry
+ * b5 ~ b0: postpone sounding
+ */
+ u8 ibf_timeout;
+ u8 ibf_dbw;
+ u8 ibf_ncol;
+ u8 ibf_nrow;
+ u8 nr_bw160;
+ u8 nc_bw160;
+ u8 ru_start_idx;
+ u8 ru_end_idx;
+
+ bool trigger_su;
+ bool trigger_mu;
+ bool ng16_su;
+ bool ng16_mu;
+ bool codebook42_su;
+ bool codebook75_mu;
+
+ u8 he_ltf;
+ u8 rsv[2];
+} __packed;
+
+struct sta_rec_bfee {
+ __le16 tag;
+ __le16 len;
+ bool fb_identity_matrix; /* 1: feedback identity matrix */
+ bool ignore_feedback; /* 1: ignore */
+ u8 rsv[2];
+} __packed;
+
+enum {
+ STA_REC_BASIC,
+ STA_REC_RA,
+ STA_REC_RA_CMM_INFO,
+ STA_REC_RA_UPDATE,
+ STA_REC_BF,
+ STA_REC_AMSDU,
+ STA_REC_BA,
+ STA_REC_RED, /* not used */
+ STA_REC_TX_PROC, /* for hdr trans and CSO in CR4 */
+ STA_REC_HT,
+ STA_REC_VHT,
+ STA_REC_APPS,
+ STA_REC_KEY,
+ STA_REC_WTBL,
+ STA_REC_HE,
+ STA_REC_HW_AMSDU,
+ STA_REC_WTBL_AADOM,
+ STA_REC_KEY_V2,
+ STA_REC_MURU,
+ STA_REC_MUEDCA,
+ STA_REC_BFEE,
+ STA_REC_MAX_NUM
+};
+
+enum mt7915_cipher_type {
+ MT_CIPHER_NONE,
+ MT_CIPHER_WEP40,
+ MT_CIPHER_WEP104,
+ MT_CIPHER_WEP128,
+ MT_CIPHER_TKIP,
+ MT_CIPHER_AES_CCMP,
+ MT_CIPHER_CCMP_256,
+ MT_CIPHER_GCMP,
+ MT_CIPHER_GCMP_256,
+ MT_CIPHER_WAPI,
+ MT_CIPHER_BIP_CMAC_128,
+};
+
+enum {
+ CH_SWITCH_NORMAL = 0,
+ CH_SWITCH_SCAN = 3,
+ CH_SWITCH_MCC = 4,
+ CH_SWITCH_DFS = 5,
+ CH_SWITCH_BACKGROUND_SCAN_START = 6,
+ CH_SWITCH_BACKGROUND_SCAN_RUNNING = 7,
+ CH_SWITCH_BACKGROUND_SCAN_STOP = 8,
+ CH_SWITCH_SCAN_BYPASS_DPD = 9
+};
+
+enum {
+ THERMAL_SENSOR_TEMP_QUERY,
+ THERMAL_SENSOR_MANUAL_CTRL,
+ THERMAL_SENSOR_INFO_QUERY,
+ THERMAL_SENSOR_TASK_CTRL,
+};
+
+enum {
+ MT_EBF = BIT(0), /* explicit beamforming */
+ MT_IBF = BIT(1) /* implicit beamforming */
+};
+
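+/* worst-case sizes of the TLV-based MCU messages, used when sizing request buffers */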
+#define MT7915_WTBL_UPDATE_MAX_SIZE (sizeof(struct wtbl_req_hdr) + \
+ sizeof(struct wtbl_generic) + \
+ sizeof(struct wtbl_rx) + \
+ sizeof(struct wtbl_ht) + \
+ sizeof(struct wtbl_vht) + \
+ sizeof(struct wtbl_ba) + \
+ sizeof(struct wtbl_smps))
+
+#define MT7915_STA_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \
+ sizeof(struct sta_rec_basic) + \
+ sizeof(struct sta_rec_ht) + \
+ sizeof(struct sta_rec_he) + \
+ sizeof(struct sta_rec_ba) + \
+ sizeof(struct sta_rec_vht) + \
+ sizeof(struct sta_rec_uapsd) + \
+ sizeof(struct sta_rec_amsdu) + \
+ sizeof(struct tlv) + \
+ MT7915_WTBL_UPDATE_MAX_SIZE)
+
+#define MT7915_WTBL_UPDATE_BA_SIZE (sizeof(struct wtbl_req_hdr) + \
+ sizeof(struct wtbl_ba))
+
+#define MT7915_BSS_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \
+ sizeof(struct bss_info_omac) + \
+ sizeof(struct bss_info_basic) +\
+ sizeof(struct bss_info_rf_ch) +\
+ sizeof(struct bss_info_ra) + \
+ sizeof(struct bss_info_hw_amsdu) +\
+ sizeof(struct bss_info_he) + \
+ sizeof(struct bss_info_bmc_rate) +\
+ sizeof(struct bss_info_ext_bss) +\
+ sizeof(struct bss_info_sync_mode))
+
+#define MT7915_BEACON_UPDATE_SIZE (sizeof(struct sta_req_hdr) + \
+ sizeof(struct bss_info_bcn_csa) + \
+ sizeof(struct bss_info_bcn_bcc) + \
+ sizeof(struct bss_info_bcn_mbss) + \
+ sizeof(struct bss_info_bcn_cont))
+
+#define PHY_MODE_A BIT(0)
+#define PHY_MODE_B BIT(1)
+#define PHY_MODE_G BIT(2)
+#define PHY_MODE_GN BIT(3)
+#define PHY_MODE_AN BIT(4)
+#define PHY_MODE_AC BIT(5)
+	u8 set_query; /* FW doesn't care */
+#define PHY_MODE_AX_5G BIT(7)
+#define PHY_MODE_AX_6G BIT(8)
+
+#define MODE_CCK BIT(0)
+#define MODE_OFDM BIT(1)
+#define MODE_HT BIT(2)
+#define MODE_VHT BIT(3)
+#define MODE_HE BIT(4)
+
+#define STA_CAP_WMM BIT(0)
+#define STA_CAP_SGI_20 BIT(4)
+#define STA_CAP_SGI_40 BIT(5)
+#define STA_CAP_TX_STBC BIT(6)
+#define STA_CAP_RX_STBC BIT(7)
+#define STA_CAP_VHT_SGI_80 BIT(16)
+#define STA_CAP_VHT_SGI_160 BIT(17)
+#define STA_CAP_VHT_TX_STBC BIT(18)
+#define STA_CAP_VHT_RX_STBC BIT(19)
+#define STA_CAP_VHT_LDPC BIT(23)
+#define STA_CAP_LDPC BIT(24)
+#define STA_CAP_HT BIT(26)
+#define STA_CAP_VHT BIT(27)
+#define STA_CAP_HE BIT(28)
+
+/* HE MAC */
+#define STA_REC_HE_CAP_HTC BIT(0)
+#define STA_REC_HE_CAP_BQR BIT(1)
+#define STA_REC_HE_CAP_BSR BIT(2)
+#define STA_REC_HE_CAP_OM BIT(3)
+#define STA_REC_HE_CAP_AMSDU_IN_AMPDU BIT(4)
+/* HE PHY */
+#define STA_REC_HE_CAP_DUAL_BAND BIT(5)
+#define STA_REC_HE_CAP_LDPC BIT(6)
+#define STA_REC_HE_CAP_TRIG_CQI_FK BIT(7)
+#define STA_REC_HE_CAP_PARTIAL_BW_EXT_RANGE BIT(8)
+/* STBC */
+#define STA_REC_HE_CAP_LE_EQ_80M_TX_STBC BIT(9)
+#define STA_REC_HE_CAP_LE_EQ_80M_RX_STBC BIT(10)
+#define STA_REC_HE_CAP_GT_80M_TX_STBC BIT(11)
+#define STA_REC_HE_CAP_GT_80M_RX_STBC BIT(12)
+/* GI */
+#define STA_REC_HE_CAP_SU_PPDU_1LTF_8US_GI BIT(13)
+#define STA_REC_HE_CAP_SU_MU_PPDU_4LTF_8US_GI BIT(14)
+#define STA_REC_HE_CAP_ER_SU_PPDU_1LTF_8US_GI BIT(15)
+#define STA_REC_HE_CAP_ER_SU_PPDU_4LTF_8US_GI BIT(16)
+#define STA_REC_HE_CAP_NDP_4LTF_3DOT2MS_GI BIT(17)
+/* 242 TONE */
+#define STA_REC_HE_CAP_BW20_RU242_SUPPORT BIT(18)
+#define STA_REC_HE_CAP_TX_1024QAM_UNDER_RU242 BIT(19)
+#define STA_REC_HE_CAP_RX_1024QAM_UNDER_RU242 BIT(20)
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
new file mode 100644
index 000000000..c84110e34
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
@@ -0,0 +1,466 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#ifndef __MT7915_H
+#define __MT7915_H
+
+#include <linux/interrupt.h>
+#include <linux/ktime.h>
+#include "../mt76.h"
+#include "regs.h"
+
+#define MT7915_MAX_INTERFACES 4
+#define MT7915_MAX_WMM_SETS 4
+#define MT7915_WTBL_SIZE 288
+#define MT7915_WTBL_RESERVED (MT7915_WTBL_SIZE - 1)
+#define MT7915_WTBL_STA (MT7915_WTBL_RESERVED - \
+ MT7915_MAX_INTERFACES)
+
+#define MT7915_WATCHDOG_TIME (HZ / 10)
+#define MT7915_RESET_TIMEOUT (30 * HZ)
+
+#define MT7915_TX_RING_SIZE 2048
+#define MT7915_TX_MCU_RING_SIZE 256
+#define MT7915_TX_FWDL_RING_SIZE 128
+
+#define MT7915_RX_RING_SIZE 1536
+#define MT7915_RX_MCU_RING_SIZE 512
+
+#define MT7915_FIRMWARE_WA "mediatek/mt7915_wa.bin"
+#define MT7915_FIRMWARE_WM "mediatek/mt7915_wm.bin"
+#define MT7915_ROM_PATCH "mediatek/mt7915_rom_patch.bin"
+
+#define MT7915_EEPROM_SIZE 3584
+#define MT7915_TOKEN_SIZE 8192
+
+#define MT7915_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */
+#define MT7915_CFEND_RATE_11B 0x03 /* 11B LP, 11M */
+#define MT7915_5G_RATE_DEFAULT 0x4b /* OFDM 6M */
+#define MT7915_2G_RATE_DEFAULT 0x0 /* CCK 1M */
+
+#define MT7915_SKU_RATE_NUM 161
+#define MT7915_SKU_MAX_DELTA_IDX MT7915_SKU_RATE_NUM
+#define MT7915_SKU_TABLE_SIZE (MT7915_SKU_RATE_NUM + 1)
+
+struct mt7915_vif;
+struct mt7915_sta;
+struct mt7915_dfs_pulse;
+struct mt7915_dfs_pattern;
+
+enum mt7915_txq_id {
+ MT7915_TXQ_FWDL = 16,
+ MT7915_TXQ_MCU_WM,
+ MT7915_TXQ_BAND0,
+ MT7915_TXQ_BAND1,
+ MT7915_TXQ_MCU_WA,
+};
+
+enum mt7915_rxq_id {
+ MT7915_RXQ_BAND0 = 0,
+ MT7915_RXQ_BAND1,
+ MT7915_RXQ_MCU_WM = 0,
+ MT7915_RXQ_MCU_WA,
+};
+
+struct mt7915_sta_stats {
+ struct rate_info prob_rate;
+ struct rate_info tx_rate;
+
+ unsigned long per;
+ unsigned long changed;
+ unsigned long jiffies;
+};
+
+struct mt7915_sta {
+ struct mt76_wcid wcid; /* must be first */
+
+ struct mt7915_vif *vif;
+
+ struct list_head stats_list;
+ struct list_head poll_list;
+ struct list_head rc_list;
+ u32 airtime_ac[8];
+
+ struct mt7915_sta_stats stats;
+
+ unsigned long ampdu_state;
+};
+
+struct mt7915_vif {
+ u16 idx;
+ u8 omac_idx;
+ u8 band_idx;
+ u8 wmm_idx;
+
+ struct mt7915_sta sta;
+ struct mt7915_phy *phy;
+
+ struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
+};
+
+struct mib_stats {
+ u32 ack_fail_cnt;
+ u32 fcs_err_cnt;
+ u32 rts_cnt;
+ u32 rts_retries_cnt;
+ u32 ba_miss_cnt;
+};
+
+struct mt7915_phy {
+ struct mt76_phy *mt76;
+ struct mt7915_dev *dev;
+
+ struct ieee80211_sband_iftype_data iftype[2][NUM_NL80211_IFTYPES];
+
+ u32 rxfilter;
+ u32 omac_mask;
+
+ u16 noise;
+ u16 chainmask;
+
+ s16 coverage_class;
+ u8 slottime;
+
+ u8 rdd_state;
+ int dfs_state;
+
+ __le32 rx_ampdu_ts;
+ u32 ampdu_ref;
+
+ struct mib_stats mib;
+ struct list_head stats_list;
+
+ struct delayed_work mac_work;
+ u8 mac_work_count;
+ u8 sta_work_count;
+};
+
+struct mt7915_dev {
+ union { /* must be first */
+ struct mt76_dev mt76;
+ struct mt76_phy mphy;
+ };
+
+ const struct mt76_bus_ops *bus_ops;
+ struct mt7915_phy phy;
+
+ u16 chainmask;
+
+ struct work_struct init_work;
+ struct work_struct rc_work;
+ struct work_struct reset_work;
+ wait_queue_head_t reset_wait;
+ u32 reset_state;
+
+ struct list_head sta_rc_list;
+ struct list_head sta_poll_list;
+ spinlock_t sta_poll_lock;
+
+ u32 hw_pattern;
+
+ spinlock_t token_lock;
+ struct idr token;
+
+ s8 **rate_power; /* TODO: use mt76_rate_power */
+
+ bool fw_debug;
+};
+
+enum {
+ HW_BSSID_0 = 0x0,
+ HW_BSSID_1,
+ HW_BSSID_2,
+ HW_BSSID_3,
+ HW_BSSID_MAX,
+ EXT_BSSID_START = 0x10,
+ EXT_BSSID_1,
+ EXT_BSSID_2,
+ EXT_BSSID_3,
+ EXT_BSSID_4,
+ EXT_BSSID_5,
+ EXT_BSSID_6,
+ EXT_BSSID_7,
+ EXT_BSSID_8,
+ EXT_BSSID_9,
+ EXT_BSSID_10,
+ EXT_BSSID_11,
+ EXT_BSSID_12,
+ EXT_BSSID_13,
+ EXT_BSSID_14,
+ EXT_BSSID_15,
+ EXT_BSSID_END
+};
+
+enum {
+ MT_LMAC_AC00,
+ MT_LMAC_AC01,
+ MT_LMAC_AC02,
+ MT_LMAC_AC03,
+ MT_LMAC_ALTX0 = 0x10,
+ MT_LMAC_BMC0,
+ MT_LMAC_BCN0,
+};
+
+enum {
+ MT_RX_SEL0,
+ MT_RX_SEL1,
+};
+
+enum mt7915_rdd_cmd {
+ RDD_STOP,
+ RDD_START,
+ RDD_DET_MODE,
+ RDD_RADAR_EMULATE,
+ RDD_START_TXQ = 20,
+ RDD_CAC_START = 50,
+ RDD_CAC_END,
+ RDD_NORMAL_START,
+ RDD_DISABLE_DFS_CAL,
+ RDD_PULSE_DBG,
+ RDD_READ_PULSE,
+ RDD_RESUME_BF,
+ RDD_IRQ_OFF,
+};
+
+enum {
+ RATE_CTRL_RU_INFO,
+ RATE_CTRL_FIXED_RATE_INFO,
+ RATE_CTRL_DUMP_INFO,
+ RATE_CTRL_MU_INFO,
+};
+
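+/* ieee80211_hw->priv holds the mt76_phy; its ->priv points back to the mt7915_phy */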
+static inline struct mt7915_phy *
+mt7915_hw_phy(struct ieee80211_hw *hw)
+{
+ struct mt76_phy *phy = hw->priv;
+
+ return phy->priv;
+}
+
+static inline struct mt7915_dev *
+mt7915_hw_dev(struct ieee80211_hw *hw)
+{
+ struct mt76_phy *phy = hw->priv;
+
+ return container_of(phy->dev, struct mt7915_dev, mt76);
+}
+
+static inline struct mt7915_phy *
+mt7915_ext_phy(struct mt7915_dev *dev)
+{
+ struct mt76_phy *phy = dev->mt76.phy2;
+
+ if (!phy)
+ return NULL;
+
+ return phy->priv;
+}
+
+static inline u8 mt7915_lmac_mapping(struct mt7915_dev *dev, u8 ac)
+{
+ /* LMAC uses the reverse order of mac80211 AC indexes */
+ return 3 - ac;
+}
+
+extern const struct ieee80211_ops mt7915_ops;
+extern struct pci_driver mt7915_pci_driver;
+
+u32 mt7915_reg_map(struct mt7915_dev *dev, u32 addr);
+
+int mt7915_register_device(struct mt7915_dev *dev);
+void mt7915_unregister_device(struct mt7915_dev *dev);
+int mt7915_register_ext_phy(struct mt7915_dev *dev);
+void mt7915_unregister_ext_phy(struct mt7915_dev *dev);
+int mt7915_eeprom_init(struct mt7915_dev *dev);
+u32 mt7915_eeprom_read(struct mt7915_dev *dev, u32 offset);
+int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
+ struct ieee80211_channel *chan,
+ u8 chain_idx);
+void mt7915_eeprom_init_sku(struct mt7915_dev *dev);
+int mt7915_dma_init(struct mt7915_dev *dev);
+void mt7915_dma_prefetch(struct mt7915_dev *dev);
+void mt7915_dma_cleanup(struct mt7915_dev *dev);
+int mt7915_mcu_init(struct mt7915_dev *dev);
+int mt7915_mcu_add_dev_info(struct mt7915_dev *dev,
+ struct ieee80211_vif *vif, bool enable);
+int mt7915_mcu_add_bss_info(struct mt7915_phy *phy,
+ struct ieee80211_vif *vif, int enable);
+int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable);
+int mt7915_mcu_add_sta_adv(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable);
+int mt7915_mcu_add_tx_ba(struct mt7915_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool add);
+int mt7915_mcu_add_rx_ba(struct mt7915_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool add);
+int mt7915_mcu_add_key(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct mt7915_sta *msta, struct ieee80211_key_conf *key,
+ enum set_key_cmd cmd);
+int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ int enable);
+int mt7915_mcu_add_obss_spr(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ bool enable);
+int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd);
+int mt7915_mcu_set_tx(struct mt7915_dev *dev, struct ieee80211_vif *vif);
+int mt7915_mcu_set_fixed_rate(struct mt7915_dev *dev,
+ struct ieee80211_sta *sta, u32 rate);
+int mt7915_mcu_set_eeprom(struct mt7915_dev *dev);
+int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset);
+int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band, bool enable,
+ bool hdr_trans);
+int mt7915_mcu_set_scs(struct mt7915_dev *dev, u8 band, bool enable);
+int mt7915_mcu_set_ser(struct mt7915_dev *dev, u8 action, u8 set, u8 band);
+int mt7915_mcu_set_rts_thresh(struct mt7915_phy *phy, u32 val);
+int mt7915_mcu_set_pm(struct mt7915_dev *dev, int band, int enter);
+int mt7915_mcu_set_sku_en(struct mt7915_phy *phy, bool enable);
+int mt7915_mcu_set_sku(struct mt7915_phy *phy);
+int mt7915_mcu_set_txbf_type(struct mt7915_dev *dev);
+int mt7915_mcu_set_txbf_sounding(struct mt7915_dev *dev);
+int mt7915_mcu_set_fcc5_lpn(struct mt7915_dev *dev, int val);
+int mt7915_mcu_set_pulse_th(struct mt7915_dev *dev,
+ const struct mt7915_dfs_pulse *pulse);
+int mt7915_mcu_set_radar_th(struct mt7915_dev *dev, int index,
+ const struct mt7915_dfs_pattern *pattern);
+int mt7915_mcu_get_rate_info(struct mt7915_dev *dev, u32 cmd, u16 wlan_idx);
+int mt7915_mcu_get_temperature(struct mt7915_dev *dev, int index);
+int mt7915_mcu_rdd_cmd(struct mt7915_dev *dev, enum mt7915_rdd_cmd cmd,
+ u8 index, u8 rx_sel, u8 val);
+int mt7915_mcu_fw_log_2_host(struct mt7915_dev *dev, u8 ctrl);
+int mt7915_mcu_fw_dbg_ctrl(struct mt7915_dev *dev, u32 module, u8 level);
+void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb);
+void mt7915_mcu_exit(struct mt7915_dev *dev);
+
+static inline bool is_mt7915(struct mt76_dev *dev)
+{
+ return mt76_chip(dev) == 0x7915;
+}
+
+static inline void mt7915_irq_enable(struct mt7915_dev *dev, u32 mask)
+{
+ mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, 0, mask);
+}
+
+static inline void mt7915_irq_disable(struct mt7915_dev *dev, u32 mask)
+{
+ mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
+}
+
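+/*
+ * Registers outside the directly mapped range go through the HIF L1/L2
+ * remap windows: the target base is programmed into MT_HIF_REMAP_L1/L2
+ * and the access is then made via the fixed MT_HIF_REMAP_BASE_L1/L2
+ * aperture.
+ */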
+static inline u32
+mt7915_reg_map_l1(struct mt7915_dev *dev, u32 addr)
+{
+ u32 offset = FIELD_GET(MT_HIF_REMAP_L1_OFFSET, addr);
+ u32 base = FIELD_GET(MT_HIF_REMAP_L1_BASE, addr);
+
+ mt76_rmw_field(dev, MT_HIF_REMAP_L1, MT_HIF_REMAP_L1_MASK, base);
+ /* use read to push write */
+ mt76_rr(dev, MT_HIF_REMAP_L1);
+
+ return MT_HIF_REMAP_BASE_L1 + offset;
+}
+
+static inline u32
+mt7915_l1_rr(struct mt7915_dev *dev, u32 addr)
+{
+ return mt76_rr(dev, mt7915_reg_map_l1(dev, addr));
+}
+
+static inline void
+mt7915_l1_wr(struct mt7915_dev *dev, u32 addr, u32 val)
+{
+ mt76_wr(dev, mt7915_reg_map_l1(dev, addr), val);
+}
+
+static inline u32
+mt7915_l1_rmw(struct mt7915_dev *dev, u32 addr, u32 mask, u32 val)
+{
+ val |= mt7915_l1_rr(dev, addr) & ~mask;
+ mt7915_l1_wr(dev, addr, val);
+
+ return val;
+}
+
+#define mt7915_l1_set(dev, addr, val) mt7915_l1_rmw(dev, addr, 0, val)
+#define mt7915_l1_clear(dev, addr, val) mt7915_l1_rmw(dev, addr, val, 0)
+
+static inline u32
+mt7915_reg_map_l2(struct mt7915_dev *dev, u32 addr)
+{
+ u32 offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET, addr);
+ u32 base = FIELD_GET(MT_HIF_REMAP_L2_BASE, addr);
+
+ mt76_rmw_field(dev, MT_HIF_REMAP_L2, MT_HIF_REMAP_L2_MASK, base);
+ /* use read to push write */
+ mt76_rr(dev, MT_HIF_REMAP_L2);
+
+ return MT_HIF_REMAP_BASE_L2 + offset;
+}
+
+static inline u32
+mt7915_l2_rr(struct mt7915_dev *dev, u32 addr)
+{
+ return mt76_rr(dev, mt7915_reg_map_l2(dev, addr));
+}
+
+static inline void
+mt7915_l2_wr(struct mt7915_dev *dev, u32 addr, u32 val)
+{
+ mt76_wr(dev, mt7915_reg_map_l2(dev, addr), val);
+}
+
+static inline u32
+mt7915_l2_rmw(struct mt7915_dev *dev, u32 addr, u32 mask, u32 val)
+{
+ val |= mt7915_l2_rr(dev, addr) & ~mask;
+ mt7915_l2_wr(dev, addr, val);
+
+ return val;
+}
+
+#define mt7915_l2_set(dev, addr, val) mt7915_l2_rmw(dev, addr, 0, val)
+#define mt7915_l2_clear(dev, addr, val) mt7915_l2_rmw(dev, addr, val, 0)
+
+bool mt7915_mac_wtbl_update(struct mt7915_dev *dev, int idx, u32 mask);
+void mt7915_mac_reset_counters(struct mt7915_phy *phy);
+void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy);
+void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_key_conf *key, bool beacon);
+void mt7915_mac_set_timing(struct mt7915_phy *phy);
+int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb);
+void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb);
+int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void mt7915_mac_work(struct work_struct *work);
+void mt7915_mac_reset_work(struct work_struct *work);
+void mt7915_mac_sta_rc_work(struct work_struct *work);
+int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info);
+void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
+void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ struct sk_buff *skb);
+void mt7915_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
+void mt7915_stats_work(struct work_struct *work);
+void mt7915_txp_skb_unmap(struct mt76_dev *dev,
+ struct mt76_txwi_cache *txwi);
+int mt76_dfs_start_rdd(struct mt7915_dev *dev, bool force);
+int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy);
+void mt7915_set_stream_he_caps(struct mt7915_phy *phy);
+void mt7915_set_stream_vht_txbf_caps(struct mt7915_phy *phy);
+void mt7915_update_channel(struct mt76_dev *mdev);
+int mt7915_init_debugfs(struct mt7915_dev *dev);
+#ifdef CONFIG_MAC80211_DEBUGFS
+void mt7915_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct dentry *dir);
+#endif
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
new file mode 100644
index 000000000..3ac5bbb94
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc.
+ *
+ * Author: Ryder Lee <ryder.lee@mediatek.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "mt7915.h"
+#include "mac.h"
+#include "../trace.h"
+
+static const struct pci_device_id mt7915_pci_device_table[] = {
+ { PCI_DEVICE(0x14c3, 0x7915) },
+ { },
+};
+
+static void
+mt7915_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+
+ mt7915_irq_enable(dev, MT_INT_RX_DONE(q));
+}
+
+/* TODO: support 2/4/6/8 MSI-X vectors */
+static irqreturn_t mt7915_irq_handler(int irq, void *dev_instance)
+{
+ struct mt7915_dev *dev = dev_instance;
+ u32 intr, mask;
+
+ intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+ intr &= dev->mt76.mmio.irqmask;
+ mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
+
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
+ return IRQ_NONE;
+
+ trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
+
+ mask = intr & MT_INT_RX_DONE_ALL;
+ if (intr & MT_INT_TX_DONE_MCU)
+ mask |= MT_INT_TX_DONE_MCU;
+
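+	/* mask the sources handed off to NAPI; the RX ones are re-enabled from mt7915_rx_poll_complete() */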
+ mt7915_irq_disable(dev, mask);
+
+ if (intr & MT_INT_TX_DONE_MCU)
+ napi_schedule(&dev->mt76.tx_napi);
+
+ if (intr & MT_INT_RX_DONE_DATA)
+ napi_schedule(&dev->mt76.napi[0]);
+
+ if (intr & MT_INT_RX_DONE_WM)
+ napi_schedule(&dev->mt76.napi[1]);
+
+ if (intr & MT_INT_RX_DONE_WA)
+ napi_schedule(&dev->mt76.napi[2]);
+
+ if (intr & MT_INT_MCU_CMD) {
+ u32 val = mt76_rr(dev, MT_MCU_CMD);
+
+ mt76_wr(dev, MT_MCU_CMD, val);
+ if (val & MT_MCU_CMD_ERROR_MASK) {
+ dev->reset_state = val;
+ ieee80211_queue_work(mt76_hw(dev), &dev->reset_work);
+ wake_up(&dev->reset_wait);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int
+mt7915_alloc_device(struct pci_dev *pdev, struct mt7915_dev *dev)
+{
+#define NUM_BANDS 2
+ int i;
+ s8 **sku;
+
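+	/* one MT7915_SKU_TABLE_SIZE tx power delta table per band, devm-managed */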
+ sku = devm_kzalloc(&pdev->dev, NUM_BANDS * sizeof(*sku), GFP_KERNEL);
+ if (!sku)
+ return -ENOMEM;
+
+ for (i = 0; i < NUM_BANDS; i++) {
+ sku[i] = devm_kzalloc(&pdev->dev, MT7915_SKU_TABLE_SIZE *
+ sizeof(**sku), GFP_KERNEL);
+ if (!sku[i])
+ return -ENOMEM;
+ }
+ dev->rate_power = sku;
+
+ return 0;
+}
+
+static int mt7915_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ static const struct mt76_driver_ops drv_ops = {
+ /* txwi_size = txd size + txp size */
+ .txwi_size = MT_TXD_SIZE + sizeof(struct mt7915_txp),
+ .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ |
+ MT_DRV_AMSDU_OFFLOAD,
+ .survey_flags = SURVEY_INFO_TIME_TX |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_BSS_RX,
+ .tx_prepare_skb = mt7915_tx_prepare_skb,
+ .tx_complete_skb = mt7915_tx_complete_skb,
+ .rx_skb = mt7915_queue_rx_skb,
+ .rx_poll_complete = mt7915_rx_poll_complete,
+ .sta_ps = mt7915_sta_ps,
+ .sta_add = mt7915_mac_sta_add,
+ .sta_remove = mt7915_mac_sta_remove,
+ .update_survey = mt7915_update_channel,
+ };
+ struct mt7915_dev *dev;
+ struct mt76_dev *mdev;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7915_ops,
+ &drv_ops);
+ if (!mdev)
+ return -ENOMEM;
+
+ dev = container_of(mdev, struct mt7915_dev, mt76);
+ ret = mt7915_alloc_device(pdev, dev);
+ if (ret)
+ goto error;
+
+ mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);
+ mdev->rev = (mt7915_l1_rr(dev, MT_HW_CHIPID) << 16) |
+ (mt7915_l1_rr(dev, MT_HW_REV) & 0xff);
+ dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+
+	/* master switch of PCIe interrupt enable */
+ mt7915_l1_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
+
+ ret = devm_request_irq(mdev->dev, pdev->irq, mt7915_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+ if (ret)
+ goto error;
+
+ ret = mt7915_register_device(dev);
+ if (ret)
+ goto error;
+
+ return 0;
+error:
+ mt76_free_device(&dev->mt76);
+
+ return ret;
+}
+
+static void mt7915_pci_remove(struct pci_dev *pdev)
+{
+ struct mt76_dev *mdev = pci_get_drvdata(pdev);
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+
+ mt7915_unregister_device(dev);
+}
+
+struct pci_driver mt7915_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mt7915_pci_device_table,
+ .probe = mt7915_pci_probe,
+ .remove = mt7915_pci_remove,
+};
+
+module_pci_driver(mt7915_pci_driver);
+
+MODULE_DEVICE_TABLE(pci, mt7915_pci_device_table);
+MODULE_FIRMWARE(MT7915_FIRMWARE_WA);
+MODULE_FIRMWARE(MT7915_FIRMWARE_WM);
+MODULE_FIRMWARE(MT7915_ROM_PATCH);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
new file mode 100644
index 000000000..64327153b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
@@ -0,0 +1,395 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#ifndef __MT7915_REGS_H
+#define __MT7915_REGS_H
+
+/* MCU WFDMA1 */
+#define MT_MCU_WFDMA1_BASE 0x3000
+#define MT_MCU_WFDMA1(ofs) (MT_MCU_WFDMA1_BASE + (ofs))
+
+#define MT_MCU_INT_EVENT MT_MCU_WFDMA1(0x108)
+#define MT_MCU_INT_EVENT_DMA_STOPPED BIT(0)
+#define MT_MCU_INT_EVENT_DMA_INIT BIT(1)
+#define MT_MCU_INT_EVENT_SER_TRIGGER BIT(2)
+#define MT_MCU_INT_EVENT_RESET_DONE BIT(3)
+
+#define MT_PLE_BASE 0x8000
+#define MT_PLE(ofs) (MT_PLE_BASE + (ofs))
+
+#define MT_PLE_FL_Q0_CTRL MT_PLE(0x1b0)
+#define MT_PLE_FL_Q1_CTRL MT_PLE(0x1b4)
+#define MT_PLE_FL_Q2_CTRL MT_PLE(0x1b8)
+#define MT_PLE_FL_Q3_CTRL MT_PLE(0x1bc)
+
+#define MT_PLE_AC_QEMPTY(ac, n) MT_PLE(0x300 + 0x10 * (ac) + \
+ ((n) << 2))
+#define MT_PLE_AMSDU_PACK_MSDU_CNT(n) MT_PLE(0x10e0 + ((n) << 2))
+
+#define MT_MDP_BASE 0xf000
+#define MT_MDP(ofs) (MT_MDP_BASE + (ofs))
+
+#define MT_MDP_DCR0 MT_MDP(0x000)
+#define MT_MDP_DCR0_DAMSDU_EN BIT(15)
+
+#define MT_MDP_DCR1 MT_MDP(0x004)
+#define MT_MDP_DCR1_MAX_RX_LEN GENMASK(15, 3)
+
+#define MT_MDP_BNRCFR0(_band) MT_MDP(0x070 + ((_band) << 8))
+#define MT_MDP_RCFR0_MCU_RX_MGMT GENMASK(5, 4)
+#define MT_MDP_RCFR0_MCU_RX_CTL_NON_BAR GENMASK(7, 6)
+#define MT_MDP_RCFR0_MCU_RX_CTL_BAR GENMASK(9, 8)
+
+#define MT_MDP_BNRCFR1(_band) MT_MDP(0x074 + ((_band) << 8))
+#define MT_MDP_RCFR1_MCU_RX_BYPASS GENMASK(23, 22)
+#define MT_MDP_RCFR1_RX_DROPPED_UCAST GENMASK(28, 27)
+#define MT_MDP_RCFR1_RX_DROPPED_MCAST GENMASK(30, 29)
+#define MT_MDP_TO_HIF 0
+#define MT_MDP_TO_WM 1
+
+/* TMAC: band 0(0x21000), band 1(0xa1000) */
+#define MT_WF_TMAC_BASE(_band) ((_band) ? 0xa1000 : 0x21000)
+#define MT_WF_TMAC(_band, ofs) (MT_WF_TMAC_BASE(_band) + (ofs))
+
+#define MT_TMAC_CDTR(_band) MT_WF_TMAC(_band, 0x090)
+#define MT_TMAC_ODTR(_band) MT_WF_TMAC(_band, 0x094)
+#define MT_TIMEOUT_VAL_PLCP GENMASK(15, 0)
+#define MT_TIMEOUT_VAL_CCA GENMASK(31, 16)
+
+#define MT_TMAC_ICR0(_band) MT_WF_TMAC(_band, 0x0a4)
+#define MT_IFS_EIFS GENMASK(8, 0)
+#define MT_IFS_RIFS GENMASK(14, 10)
+#define MT_IFS_SIFS GENMASK(22, 16)
+#define MT_IFS_SLOT GENMASK(30, 24)
+
+#define MT_TMAC_CTCR0(_band) MT_WF_TMAC(_band, 0x0f4)
+#define MT_TMAC_CTCR0_INS_DDLMT_REFTIME GENMASK(5, 0)
+#define MT_TMAC_CTCR0_INS_DDLMT_EN BIT(17)
+#define MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN BIT(18)
+
+/* DMA Band 0 */
+#define MT_WF_DMA_BASE 0x21e00
+#define MT_WF_DMA(ofs) (MT_WF_DMA_BASE + (ofs))
+
+#define MT_DMA_DCR0 MT_WF_DMA(0x000)
+#define MT_DMA_DCR0_MAX_RX_LEN GENMASK(15, 3)
+#define MT_DMA_DCR0_RXD_G5_EN BIT(23)
+
+/* ETBF: band 0(0x24000), band 1(0xa4000) */
+#define MT_WF_ETBF_BASE(_band) ((_band) ? 0xa4000 : 0x24000)
+#define MT_WF_ETBF(_band, ofs) (MT_WF_ETBF_BASE(_band) + (ofs))
+
+#define MT_ETBF_TX_NDP_BFRP(_band) MT_WF_ETBF(_band, 0x040)
+#define MT_ETBF_TX_FB_CPL GENMASK(31, 16)
+#define MT_ETBF_TX_FB_TRI GENMASK(15, 0)
+
+#define MT_ETBF_TX_APP_CNT(_band) MT_WF_ETBF(_band, 0x0f0)
+#define MT_ETBF_TX_IBF_CNT GENMASK(31, 16)
+#define MT_ETBF_TX_EBF_CNT GENMASK(15, 0)
+
+#define MT_ETBF_RX_FB_CNT(_band) MT_WF_ETBF(_band, 0x0f8)
+#define MT_ETBF_RX_FB_ALL GENMASK(31, 24)
+#define MT_ETBF_RX_FB_HE GENMASK(23, 16)
+#define MT_ETBF_RX_FB_VHT GENMASK(15, 8)
+#define MT_ETBF_RX_FB_HT GENMASK(7, 0)
+
+/* LPON: band 0(0x24200), band 1(0xa4200) */
+#define MT_WF_LPON_BASE(_band) ((_band) ? 0xa4200 : 0x24200)
+#define MT_WF_LPON(_band, ofs) (MT_WF_LPON_BASE(_band) + (ofs))
+
+#define MT_LPON_UTTR0(_band) MT_WF_LPON(_band, 0x080)
+#define MT_LPON_UTTR1(_band) MT_WF_LPON(_band, 0x084)
+
+#define MT_LPON_TCR(_band, n) MT_WF_LPON(_band, 0x0a8 + (n) * 4)
+#define MT_LPON_TCR_SW_MODE GENMASK(1, 0)
+#define MT_LPON_TCR_SW_WRITE BIT(0)
+
+/* MIB: band 0(0x24800), band 1(0xa4800) */
+#define MT_WF_MIB_BASE(_band) ((_band) ? 0xa4800 : 0x24800)
+#define MT_WF_MIB(_band, ofs) (MT_WF_MIB_BASE(_band) + (ofs))
+
+#define MT_MIB_SDR3(_band) MT_WF_MIB(_band, 0x014)
+#define MT_MIB_SDR3_FCS_ERR_MASK GENMASK(15, 0)
+
+#define MT_MIB_SDR9(_band) MT_WF_MIB(_band, 0x02c)
+#define MT_MIB_SDR9_BUSY_MASK GENMASK(23, 0)
+
+#define MT_MIB_SDR16(_band) MT_WF_MIB(_band, 0x048)
+#define MT_MIB_SDR16_BUSY_MASK GENMASK(23, 0)
+
+#define MT_MIB_SDR34(_band) MT_WF_MIB(_band, 0x090)
+#define MT_MIB_MU_BF_TX_CNT GENMASK(15, 0)
+
+#define MT_MIB_SDR36(_band) MT_WF_MIB(_band, 0x098)
+#define MT_MIB_SDR36_TXTIME_MASK GENMASK(23, 0)
+#define MT_MIB_SDR37(_band) MT_WF_MIB(_band, 0x09c)
+#define MT_MIB_SDR37_RXTIME_MASK GENMASK(23, 0)
+
+#define MT_MIB_DR8(_band) MT_WF_MIB(_band, 0x0c0)
+#define MT_MIB_DR9(_band) MT_WF_MIB(_band, 0x0c4)
+#define MT_MIB_DR11(_band) MT_WF_MIB(_band, 0x0cc)
+
+#define MT_MIB_MB_SDR0(_band, n) MT_WF_MIB(_band, 0x100 + ((n) << 4))
+#define MT_MIB_RTS_RETRIES_COUNT_MASK GENMASK(31, 16)
+#define MT_MIB_RTS_COUNT_MASK GENMASK(15, 0)
+
+#define MT_MIB_MB_SDR1(_band, n) MT_WF_MIB(_band, 0x104 + ((n) << 4))
+#define MT_MIB_BA_MISS_COUNT_MASK GENMASK(15, 0)
+#define MT_MIB_ACK_FAIL_COUNT_MASK GENMASK(31, 16)
+
+#define MT_MIB_MB_SDR2(_band, n) MT_WF_MIB(_band, 0x108 + ((n) << 4))
+#define MT_MIB_FRAME_RETRIES_COUNT_MASK GENMASK(15, 0)
+
+#define MT_TX_AGG_CNT(_band, n) MT_WF_MIB(_band, 0x0a8 + ((n) << 2))
+#define MT_TX_AGG_CNT2(_band, n) MT_WF_MIB(_band, 0x164 + ((n) << 2))
+#define MT_MIB_ARNG(_band, n) MT_WF_MIB(_band, 0x4b8 + ((n) << 2))
+#define MT_MIB_ARNCR_RANGE(val, n) (((val) >> ((n) << 3)) & GENMASK(7, 0))
+
+#define MT_WTBLON_TOP_BASE 0x34000
+#define MT_WTBLON_TOP(ofs) (MT_WTBLON_TOP_BASE + (ofs))
+#define MT_WTBLON_TOP_WDUCR MT_WTBLON_TOP(0x0)
+#define MT_WTBLON_TOP_WDUCR_GROUP GENMASK(2, 0)
+
+#define MT_WTBL_UPDATE MT_WTBLON_TOP(0x030)
+#define MT_WTBL_UPDATE_WLAN_IDX GENMASK(9, 0)
+#define MT_WTBL_UPDATE_ADM_COUNT_CLEAR BIT(12)
+#define MT_WTBL_UPDATE_BUSY BIT(31)
+
+#define MT_WTBL_BASE 0x38000
+#define MT_WTBL_LMAC_ID GENMASK(14, 8)
+#define MT_WTBL_LMAC_DW GENMASK(7, 2)
+#define MT_WTBL_LMAC_OFFS(_id, _dw) (MT_WTBL_BASE | \
+ FIELD_PREP(MT_WTBL_LMAC_ID, _id) | \
+ FIELD_PREP(MT_WTBL_LMAC_DW, _dw))
+
+/* AGG: band 0(0x20800), band 1(0xa0800) */
+#define MT_WF_AGG_BASE(_band) ((_band) ? 0xa0800 : 0x20800)
+#define MT_WF_AGG(_band, ofs) (MT_WF_AGG_BASE(_band) + (ofs))
+
+#define MT_AGG_ACR0(_band) MT_WF_AGG(_band, 0x084)
+#define MT_AGG_ACR_CFEND_RATE GENMASK(13, 0)
+#define MT_AGG_ACR_BAR_RATE GENMASK(29, 16)
+
+/* ARB: band 0(0x20c00), band 1(0xa0c00) */
+#define MT_WF_ARB_BASE(_band) ((_band) ? 0xa0c00 : 0x20c00)
+#define MT_WF_ARB(_band, ofs) (MT_WF_ARB_BASE(_band) + (ofs))
+
+#define MT_ARB_SCR(_band) MT_WF_ARB(_band, 0x080)
+#define MT_ARB_SCR_TX_DISABLE BIT(8)
+#define MT_ARB_SCR_RX_DISABLE BIT(9)
+
+/* RMAC: band 0(0x21400), band 1(0xa1400) */
+#define MT_WF_RMAC_BASE(_band) ((_band) ? 0xa1400 : 0x21400)
+#define MT_WF_RMAC(_band, ofs) (MT_WF_RMAC_BASE(_band) + (ofs))
+
+#define MT_WF_RFCR(_band) MT_WF_RMAC(_band, 0x000)
+#define MT_WF_RFCR_DROP_STBC_MULTI BIT(0)
+#define MT_WF_RFCR_DROP_FCSFAIL BIT(1)
+#define MT_WF_RFCR_DROP_VERSION BIT(3)
+#define MT_WF_RFCR_DROP_PROBEREQ BIT(4)
+#define MT_WF_RFCR_DROP_MCAST BIT(5)
+#define MT_WF_RFCR_DROP_BCAST BIT(6)
+#define MT_WF_RFCR_DROP_MCAST_FILTERED BIT(7)
+#define MT_WF_RFCR_DROP_A3_MAC BIT(8)
+#define MT_WF_RFCR_DROP_A3_BSSID BIT(9)
+#define MT_WF_RFCR_DROP_A2_BSSID BIT(10)
+#define MT_WF_RFCR_DROP_OTHER_BEACON BIT(11)
+#define MT_WF_RFCR_DROP_FRAME_REPORT BIT(12)
+#define MT_WF_RFCR_DROP_CTL_RSV BIT(13)
+#define MT_WF_RFCR_DROP_CTS BIT(14)
+#define MT_WF_RFCR_DROP_RTS BIT(15)
+#define MT_WF_RFCR_DROP_DUPLICATE BIT(16)
+#define MT_WF_RFCR_DROP_OTHER_BSS BIT(17)
+#define MT_WF_RFCR_DROP_OTHER_UC BIT(18)
+#define MT_WF_RFCR_DROP_OTHER_TIM BIT(19)
+#define MT_WF_RFCR_DROP_NDPA BIT(20)
+#define MT_WF_RFCR_DROP_UNWANTED_CTL BIT(21)
+
+#define MT_WF_RFCR1(_band) MT_WF_RMAC(_band, 0x004)
+#define MT_WF_RFCR1_DROP_ACK BIT(4)
+#define MT_WF_RFCR1_DROP_BF_POLL BIT(5)
+#define MT_WF_RFCR1_DROP_BA BIT(6)
+#define MT_WF_RFCR1_DROP_CFEND BIT(7)
+#define MT_WF_RFCR1_DROP_CFACK BIT(8)
+
+#define MT_WF_RMAC_MIB_TIME0(_band) MT_WF_RMAC(_band, 0x03c4)
+#define MT_WF_RMAC_MIB_RXTIME_CLR BIT(31)
+#define MT_WF_RMAC_MIB_RXTIME_EN BIT(30)
+
+#define MT_WF_RMAC_MIB_AIRTIME14(_band) MT_WF_RMAC(_band, 0x03b8)
+#define MT_MIB_OBSSTIME_MASK GENMASK(23, 0)
+#define MT_WF_RMAC_MIB_AIRTIME0(_band) MT_WF_RMAC(_band, 0x0380)
+
+/* WFDMA0 */
+#define MT_WFDMA0_BASE 0xd4000
+#define MT_WFDMA0(ofs) (MT_WFDMA0_BASE + (ofs))
+
+#define MT_WFDMA0_RST MT_WFDMA0(0x100)
+#define MT_WFDMA0_RST_LOGIC_RST BIT(4)
+#define MT_WFDMA0_RST_DMASHDL_ALL_RST BIT(5)
+
+#define MT_WFDMA0_BUSY_ENA MT_WFDMA0(0x13c)
+#define MT_WFDMA0_BUSY_ENA_TX_FIFO0 BIT(0)
+#define MT_WFDMA0_BUSY_ENA_TX_FIFO1 BIT(1)
+#define MT_WFDMA0_BUSY_ENA_RX_FIFO BIT(2)
+
+#define MT_WFDMA0_GLO_CFG MT_WFDMA0(0x208)
+#define MT_WFDMA0_GLO_CFG_TX_DMA_EN BIT(0)
+#define MT_WFDMA0_GLO_CFG_RX_DMA_EN BIT(2)
+
+#define MT_WFDMA0_RST_DTX_PTR MT_WFDMA0(0x20c)
+#define MT_WFDMA0_PRI_DLY_INT_CFG0 MT_WFDMA0(0x2f0)
+
+#define MT_RX_DATA_RING_BASE MT_WFDMA0(0x500)
+
+#define MT_WFDMA0_RX_RING0_EXT_CTRL MT_WFDMA0(0x680)
+#define MT_WFDMA0_RX_RING1_EXT_CTRL MT_WFDMA0(0x684)
+#define MT_WFDMA0_RX_RING2_EXT_CTRL MT_WFDMA0(0x688)
+
+/* WFDMA1 */
+#define MT_WFDMA1_BASE 0xd5000
+#define MT_WFDMA1(ofs) (MT_WFDMA1_BASE + (ofs))
+
+#define MT_WFDMA1_RST MT_WFDMA1(0x100)
+#define MT_WFDMA1_RST_LOGIC_RST BIT(4)
+#define MT_WFDMA1_RST_DMASHDL_ALL_RST BIT(5)
+
+#define MT_WFDMA1_BUSY_ENA MT_WFDMA1(0x13c)
+#define MT_WFDMA1_BUSY_ENA_TX_FIFO0 BIT(0)
+#define MT_WFDMA1_BUSY_ENA_TX_FIFO1 BIT(1)
+#define MT_WFDMA1_BUSY_ENA_RX_FIFO BIT(2)
+
+#define MT_MCU_CMD MT_WFDMA1(0x1f0)
+#define MT_MCU_CMD_STOP_DMA_FW_RELOAD BIT(1)
+#define MT_MCU_CMD_STOP_DMA BIT(2)
+#define MT_MCU_CMD_RESET_DONE BIT(3)
+#define MT_MCU_CMD_RECOVERY_DONE BIT(4)
+#define MT_MCU_CMD_NORMAL_STATE BIT(5)
+#define MT_MCU_CMD_ERROR_MASK GENMASK(5, 1)
+
+#define MT_WFDMA1_GLO_CFG MT_WFDMA1(0x208)
+#define MT_WFDMA1_GLO_CFG_TX_DMA_EN BIT(0)
+#define MT_WFDMA1_GLO_CFG_RX_DMA_EN BIT(2)
+#define MT_WFDMA1_GLO_CFG_OMIT_TX_INFO BIT(28)
+#define MT_WFDMA1_GLO_CFG_OMIT_RX_INFO BIT(27)
+
+#define MT_WFDMA1_RST_DTX_PTR MT_WFDMA1(0x20c)
+#define MT_WFDMA1_PRI_DLY_INT_CFG0 MT_WFDMA1(0x2f0)
+
+#define MT_TX_RING_BASE MT_WFDMA1(0x300)
+#define MT_RX_EVENT_RING_BASE MT_WFDMA1(0x500)
+
+#define MT_WFDMA1_TX_RING0_EXT_CTRL MT_WFDMA1(0x600)
+#define MT_WFDMA1_TX_RING1_EXT_CTRL MT_WFDMA1(0x604)
+#define MT_WFDMA1_TX_RING2_EXT_CTRL MT_WFDMA1(0x608)
+#define MT_WFDMA1_TX_RING3_EXT_CTRL MT_WFDMA1(0x60c)
+#define MT_WFDMA1_TX_RING4_EXT_CTRL MT_WFDMA1(0x610)
+#define MT_WFDMA1_TX_RING5_EXT_CTRL MT_WFDMA1(0x614)
+#define MT_WFDMA1_TX_RING6_EXT_CTRL MT_WFDMA1(0x618)
+#define MT_WFDMA1_TX_RING7_EXT_CTRL MT_WFDMA1(0x61c)
+
+#define MT_WFDMA1_TX_RING16_EXT_CTRL MT_WFDMA1(0x640)
+#define MT_WFDMA1_TX_RING17_EXT_CTRL MT_WFDMA1(0x644)
+#define MT_WFDMA1_TX_RING18_EXT_CTRL MT_WFDMA1(0x648)
+#define MT_WFDMA1_TX_RING19_EXT_CTRL MT_WFDMA1(0x64c)
+#define MT_WFDMA1_TX_RING20_EXT_CTRL MT_WFDMA1(0x650)
+#define MT_WFDMA1_TX_RING21_EXT_CTRL MT_WFDMA1(0x654)
+#define MT_WFDMA1_TX_RING22_EXT_CTRL MT_WFDMA1(0x658)
+#define MT_WFDMA1_TX_RING23_EXT_CTRL MT_WFDMA1(0x65c)
+
+#define MT_WFDMA1_RX_RING0_EXT_CTRL MT_WFDMA1(0x680)
+#define MT_WFDMA1_RX_RING1_EXT_CTRL MT_WFDMA1(0x684)
+#define MT_WFDMA1_RX_RING2_EXT_CTRL MT_WFDMA1(0x688)
+#define MT_WFDMA1_RX_RING3_EXT_CTRL MT_WFDMA1(0x68c)
+
+/* WFDMA CSR */
+#define MT_WFDMA_EXT_CSR_BASE 0xd7000
+#define MT_WFDMA_EXT_CSR(ofs) (MT_WFDMA_EXT_CSR_BASE + (ofs))
+
+#define MT_INT_SOURCE_CSR MT_WFDMA_EXT_CSR(0x10)
+#define MT_INT_MASK_CSR MT_WFDMA_EXT_CSR(0x14)
+#define MT_INT_RX_DONE_DATA BIT(16)
+#define MT_INT_RX_DONE_WM BIT(0)
+#define MT_INT_RX_DONE_WA BIT(1)
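+/* rx queue to interrupt bit: queue 0 (band0 data) is BIT(16), MCU WM/WA are BIT(0)/BIT(1) */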
+#define MT_INT_RX_DONE(_n) ((_n) ? BIT((_n) - 1) : BIT(16))
+#define MT_INT_RX_DONE_ALL (BIT(0) | BIT(1) | BIT(16))
+#define MT_INT_TX_DONE_MCU_WA BIT(15)
+#define MT_INT_TX_DONE_FWDL BIT(26)
+#define MT_INT_TX_DONE_MCU_WM BIT(27)
+#define MT_INT_TX_DONE_BAND0 BIT(30)
+#define MT_INT_TX_DONE_BAND1 BIT(31)
+#define MT_INT_MCU_CMD BIT(29)
+
+#define MT_INT_TX_DONE_MCU (MT_INT_TX_DONE_MCU_WA | \
+ MT_INT_TX_DONE_MCU_WM | \
+ MT_INT_TX_DONE_FWDL)
+
+#define MT_WFDMA_EXT_CSR_HIF_MISC MT_WFDMA_EXT_CSR(0x44)
+#define MT_WFDMA_EXT_CSR_HIF_MISC_BUSY BIT(0)
+
+/* WFDMA0 PCIE1 */
+#define MT_WFDMA0_PCIE1_BASE 0xd8000
+#define MT_WFDMA0_PCIE1(ofs) (MT_WFDMA0_PCIE1_BASE + (ofs))
+
+#define MT_WFDMA0_PCIE1_BUSY_ENA MT_WFDMA0_PCIE1(0x13c)
+#define MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 BIT(0)
+#define MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 BIT(1)
+#define MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO BIT(2)
+
+/* WFDMA1 PCIE1 */
+#define MT_WFDMA1_PCIE1_BASE 0xd9000
+#define MT_WFDMA1_PCIE1(ofs) (MT_WFDMA1_PCIE1_BASE + (ofs))
+
+#define MT_WFDMA1_PCIE1_BUSY_ENA MT_WFDMA1_PCIE1(0x13c)
+#define MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO0 BIT(0)
+#define MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO1 BIT(1)
+#define MT_WFDMA1_PCIE1_BUSY_ENA_RX_FIFO BIT(2)
+
+#define MT_INFRA_CFG_BASE 0xf1000
+#define MT_INFRA(ofs) (MT_INFRA_CFG_BASE + (ofs))
+
+#define MT_HIF_REMAP_L1 MT_INFRA(0x1ac)
+#define MT_HIF_REMAP_L1_MASK GENMASK(15, 0)
+#define MT_HIF_REMAP_L1_OFFSET GENMASK(15, 0)
+#define MT_HIF_REMAP_L1_BASE GENMASK(31, 16)
+#define MT_HIF_REMAP_BASE_L1 0xe0000
+
+#define MT_HIF_REMAP_L2 MT_INFRA(0x1b0)
+#define MT_HIF_REMAP_L2_MASK GENMASK(19, 0)
+#define MT_HIF_REMAP_L2_OFFSET GENMASK(11, 0)
+#define MT_HIF_REMAP_L2_BASE GENMASK(31, 12)
+#define MT_HIF_REMAP_BASE_L2 0x00000
+
+#define MT_SWDEF_BASE 0x41f200
+#define MT_SWDEF(ofs) (MT_SWDEF_BASE + (ofs))
+#define MT_SWDEF_MODE MT_SWDEF(0x3c)
+#define MT_SWDEF_NORMAL_MODE 0
+#define MT_SWDEF_ICAP_MODE 1
+#define MT_SWDEF_SPECTRUM_MODE 2
+
+#define MT_TOP_BASE 0x18060000
+#define MT_TOP(ofs) (MT_TOP_BASE + (ofs))
+
+#define MT_TOP_LPCR_HOST_BAND0 MT_TOP(0x10)
+#define MT_TOP_LPCR_HOST_FW_OWN BIT(0)
+#define MT_TOP_LPCR_HOST_DRV_OWN BIT(1)
+
+#define MT_TOP_MISC MT_TOP(0xf0)
+#define MT_TOP_MISC_FW_STATE GENMASK(2, 0)
+
+#define MT_HW_BOUND 0x70010020
+#define MT_HW_CHIPID 0x70010200
+#define MT_HW_REV 0x70010204
+
+#define MT_PCIE_MAC_BASE 0x74030000
+#define MT_PCIE_MAC(ofs) (MT_PCIE_MAC_BASE + (ofs))
+#define MT_PCIE_MAC_INT_ENABLE MT_PCIE_MAC(0x188)
+
+/* PHY: band 0(0x83080000), band 1(0x83090000) */
+#define MT_WF_PHY_BASE 0x83080000
+#define MT_WF_PHY(ofs) (MT_WF_PHY_BASE + (ofs))
+
+#define MT_WF_PHY_RX_CTRL1(_phy) MT_WF_PHY(0x2004 + ((_phy) << 16))
+#define MT_WF_PHY_RX_CTRL1_STSCNT_EN GENMASK(11, 9)
+
+#endif
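
The two HIF remap windows defined above (MT_HIF_REMAP_L1/L2) are how the host reaches registers that sit outside the directly mapped ranges: the upper part of the target address is programmed into the remap register and the remainder is added to the fixed window base. Below is a minimal sketch of that flow, assuming the generic mt76 bus accessors and linux/bitfield.h; the function name is invented for illustration and is not the driver's actual mapping helper.

#include <linux/bitfield.h>
#include "mt76.h"

/* Illustrative only: the field layout comes from the defines above,
 * the helper name and call flow are assumptions.
 */
static u32 example_reg_remap_l1(struct mt76_dev *dev, u32 addr)
{
	u32 offset = FIELD_GET(MT_HIF_REMAP_L1_OFFSET, addr);
	u32 base = FIELD_GET(MT_HIF_REMAP_L1_BASE, addr);

	/* select the 64 KiB region containing the target address */
	dev->bus->rmw(dev, MT_HIF_REMAP_L1, MT_HIF_REMAP_L1_MASK,
		      FIELD_PREP(MT_HIF_REMAP_L1_MASK, base));
	/* read back so the remap write lands before the window is used */
	dev->bus->rr(dev, MT_HIF_REMAP_L1);

	/* subsequent accesses go through the fixed remap window */
	return MT_HIF_REMAP_BASE_L1 + offset;
}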
diff --git a/drivers/net/wireless/mediatek/mt76/pci.c b/drivers/net/wireless/mediatek/mt76/pci.c
new file mode 100644
index 000000000..4c1c159fb
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/pci.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2019 Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include "mt76.h"
+#include <linux/pci.h>
+
+void mt76_pci_disable_aspm(struct pci_dev *pdev)
+{
+ struct pci_dev *parent = pdev->bus->self;
+ u16 aspm_conf, parent_aspm_conf = 0;
+
+ pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &aspm_conf);
+ aspm_conf &= PCI_EXP_LNKCTL_ASPMC;
+ if (parent) {
+ pcie_capability_read_word(parent, PCI_EXP_LNKCTL,
+ &parent_aspm_conf);
+ parent_aspm_conf &= PCI_EXP_LNKCTL_ASPMC;
+ }
+
+ if (!aspm_conf && (!parent || !parent_aspm_conf)) {
+ /* aspm already disabled */
+ return;
+ }
+
+ dev_info(&pdev->dev, "disabling ASPM %s %s\n",
+ (aspm_conf & PCI_EXP_LNKCTL_ASPM_L0S) ? "L0s" : "",
+ (aspm_conf & PCI_EXP_LNKCTL_ASPM_L1) ? "L1" : "");
+
+ if (IS_ENABLED(CONFIG_PCIEASPM)) {
+ int err;
+
+ err = pci_disable_link_state(pdev, aspm_conf);
+ if (!err)
+ return;
+ }
+
+ /* both device and parent should have the same ASPM setting.
+ * disable ASPM in downstream component first and then upstream.
+ */
+ pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_conf);
+ if (parent)
+ pcie_capability_clear_word(parent, PCI_EXP_LNKCTL,
+ aspm_conf);
+}
+EXPORT_SYMBOL_GPL(mt76_pci_disable_aspm);
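
mt76_pci_disable_aspm() is a one-shot helper meant for a chip driver's PCI probe path, before DMA is brought up, since ASPM link-state transitions can interfere with DMA on some platforms. A hypothetical caller, shown only to illustrate where it fits:

/* Sketch only: the probe function and surrounding setup are placeholders. */
static int example_mt76_pci_probe(struct pci_dev *pdev,
				  const struct pci_device_id *id)
{
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	pci_set_master(pdev);

	/* drop L0s/L1 on the device and its parent link before starting DMA */
	mt76_pci_disable_aspm(pdev);

	/* device allocation, firmware load and registration would follow */
	return 0;
}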
diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
new file mode 100644
index 000000000..439ea4158
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/sdio.c
@@ -0,0 +1,363 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc.
+ *
+ * This file is written based on mt76/usb.c.
+ *
+ * Author: Felix Fietkau <nbd@nbd.name>
+ * Lorenzo Bianconi <lorenzo@kernel.org>
+ * Sean Wang <sean.wang@mediatek.com>
+ */
+
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/sched.h>
+#include <linux/kthread.h>
+
+#include "mt76.h"
+
+static int
+mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
+{
+ struct mt76_queue *q = &dev->q_rx[qid];
+
+ spin_lock_init(&q->lock);
+ q->entry = devm_kcalloc(dev->dev,
+ MT_NUM_RX_ENTRIES, sizeof(*q->entry),
+ GFP_KERNEL);
+ if (!q->entry)
+ return -ENOMEM;
+
+ q->ndesc = MT_NUM_RX_ENTRIES;
+ q->head = q->tail = 0;
+ q->queued = 0;
+
+ return 0;
+}
+
+static int mt76s_alloc_tx(struct mt76_dev *dev)
+{
+ struct mt76_queue *q;
+ int i;
+
+ for (i = 0; i < MT_TXQ_MCU_WA; i++) {
+ q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
+ if (!q)
+ return -ENOMEM;
+
+ spin_lock_init(&q->lock);
+ q->hw_idx = i;
+ dev->q_tx[i] = q;
+
+ q->entry = devm_kcalloc(dev->dev,
+ MT_NUM_TX_ENTRIES, sizeof(*q->entry),
+ GFP_KERNEL);
+ if (!q->entry)
+ return -ENOMEM;
+
+ q->ndesc = MT_NUM_TX_ENTRIES;
+ }
+
+ return 0;
+}
+
+void mt76s_stop_txrx(struct mt76_dev *dev)
+{
+ struct mt76_sdio *sdio = &dev->sdio;
+
+ cancel_work_sync(&sdio->tx.xmit_work);
+ cancel_work_sync(&sdio->tx.status_work);
+ cancel_work_sync(&sdio->rx.recv_work);
+ cancel_work_sync(&sdio->rx.net_work);
+ cancel_work_sync(&sdio->stat_work);
+ clear_bit(MT76_READING_STATS, &dev->phy.state);
+
+ mt76_tx_status_check(dev, NULL, true);
+}
+EXPORT_SYMBOL_GPL(mt76s_stop_txrx);
+
+int mt76s_alloc_queues(struct mt76_dev *dev)
+{
+ int err;
+
+ err = mt76s_alloc_rx_queue(dev, MT_RXQ_MAIN);
+ if (err < 0)
+ return err;
+
+ return mt76s_alloc_tx(dev);
+}
+EXPORT_SYMBOL_GPL(mt76s_alloc_queues);
+
+static struct mt76_queue_entry *
+mt76s_get_next_rx_entry(struct mt76_queue *q)
+{
+ struct mt76_queue_entry *e = NULL;
+
+ spin_lock_bh(&q->lock);
+ if (q->queued > 0) {
+ e = &q->entry[q->tail];
+ q->tail = (q->tail + 1) % q->ndesc;
+ q->queued--;
+ }
+ spin_unlock_bh(&q->lock);
+
+ return e;
+}
+
+static int
+mt76s_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
+{
+ int qid = q - &dev->q_rx[MT_RXQ_MAIN];
+ int nframes = 0;
+
+ while (true) {
+ struct mt76_queue_entry *e;
+
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state))
+ break;
+
+ e = mt76s_get_next_rx_entry(q);
+ if (!e || !e->skb)
+ break;
+
+ dev->drv->rx_skb(dev, MT_RXQ_MAIN, e->skb);
+ e->skb = NULL;
+ nframes++;
+ }
+ if (qid == MT_RXQ_MAIN)
+ mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
+
+ return nframes;
+}
+
+static void mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
+{
+ struct mt76_queue *q = dev->q_tx[qid];
+ struct mt76_queue_entry entry;
+ bool wake;
+
+ while (q->queued > 0) {
+ if (!q->entry[q->tail].done)
+ break;
+
+ entry = q->entry[q->tail];
+ q->entry[q->tail].done = false;
+
+ if (qid == MT_TXQ_MCU) {
+ dev_kfree_skb(entry.skb);
+ entry.skb = NULL;
+ }
+
+ mt76_queue_tx_complete(dev, q, &entry);
+ }
+
+ wake = q->stopped && q->queued < q->ndesc - 8;
+ if (wake)
+ q->stopped = false;
+
+ if (!q->queued)
+ wake_up(&dev->tx_wait);
+
+ if (qid == MT_TXQ_MCU)
+ return;
+
+ mt76_txq_schedule(&dev->phy, qid);
+
+ if (wake)
+ ieee80211_wake_queue(dev->hw, qid);
+}
+
+static void mt76s_tx_status_data(struct work_struct *work)
+{
+ struct mt76_sdio *sdio;
+ struct mt76_dev *dev;
+ u8 update = 1;
+ u16 count = 0;
+
+ sdio = container_of(work, struct mt76_sdio, stat_work);
+ dev = container_of(sdio, struct mt76_dev, sdio);
+
+ while (true) {
+ if (test_bit(MT76_REMOVED, &dev->phy.state))
+ break;
+
+ if (!dev->drv->tx_status_data(dev, &update))
+ break;
+ count++;
+ }
+
+ if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
+ queue_work(dev->wq, &sdio->stat_work);
+ else
+ clear_bit(MT76_READING_STATS, &dev->phy.state);
+}
+
+static int
+mt76s_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta)
+{
+ struct mt76_queue *q = dev->q_tx[qid];
+ struct mt76_tx_info tx_info = {
+ .skb = skb,
+ };
+ int err, len = skb->len;
+ u16 idx = q->head;
+
+ if (q->queued == q->ndesc)
+ return -ENOSPC;
+
+ skb->prev = skb->next = NULL;
+ err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
+ if (err < 0)
+ return err;
+
+ q->entry[q->head].skb = tx_info.skb;
+ q->entry[q->head].buf_sz = len;
+
+ smp_wmb();
+
+ q->head = (q->head + 1) % q->ndesc;
+ q->queued++;
+
+ return idx;
+}
+
+static int
+mt76s_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
+ struct sk_buff *skb, u32 tx_info)
+{
+ struct mt76_queue *q = dev->q_tx[qid];
+ int ret = -ENOSPC, len = skb->len, pad;
+
+ if (q->queued == q->ndesc)
+ goto error;
+
+ pad = round_up(skb->len, 4) - skb->len;
+ ret = mt76_skb_adjust_pad(skb, pad);
+ if (ret)
+ goto error;
+
+ spin_lock_bh(&q->lock);
+
+ q->entry[q->head].buf_sz = len;
+ q->entry[q->head].skb = skb;
+ q->head = (q->head + 1) % q->ndesc;
+ q->queued++;
+
+ spin_unlock_bh(&q->lock);
+
+ return 0;
+
+error:
+ dev_kfree_skb(skb);
+
+ return ret;
+}
+
+static void mt76s_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
+{
+ struct mt76_sdio *sdio = &dev->sdio;
+
+ queue_work(sdio->txrx_wq, &sdio->tx.xmit_work);
+}
+
+static const struct mt76_queue_ops sdio_queue_ops = {
+ .tx_queue_skb = mt76s_tx_queue_skb,
+ .kick = mt76s_tx_kick,
+ .tx_queue_skb_raw = mt76s_tx_queue_skb_raw,
+};
+
+static void mt76s_tx_work(struct work_struct *work)
+{
+ struct mt76_sdio *sdio = container_of(work, struct mt76_sdio,
+ tx.status_work);
+ struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
+ int i;
+
+ for (i = 0; i < MT_TXQ_MCU_WA; i++)
+ mt76s_process_tx_queue(dev, i);
+
+ if (dev->drv->tx_status_data &&
+ !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
+ queue_work(dev->wq, &dev->sdio.stat_work);
+}
+
+static void mt76s_rx_work(struct work_struct *work)
+{
+ struct mt76_sdio *sdio = container_of(work, struct mt76_sdio,
+ rx.net_work);
+ struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
+ int i;
+
+ /* rx processing */
+ local_bh_disable();
+ rcu_read_lock();
+
+ mt76_for_each_q_rx(dev, i)
+ mt76s_process_rx_queue(dev, &dev->q_rx[i]);
+
+ rcu_read_unlock();
+ local_bh_enable();
+}
+
+void mt76s_deinit(struct mt76_dev *dev)
+{
+ struct mt76_sdio *sdio = &dev->sdio;
+ int i;
+
+ mt76s_stop_txrx(dev);
+ if (sdio->txrx_wq) {
+ destroy_workqueue(sdio->txrx_wq);
+ sdio->txrx_wq = NULL;
+ }
+
+ sdio_claim_host(sdio->func);
+ sdio_release_irq(sdio->func);
+ sdio_release_host(sdio->func);
+
+ mt76_for_each_q_rx(dev, i) {
+ struct mt76_queue *q = &dev->q_rx[i];
+ int j;
+
+ for (j = 0; j < q->ndesc; j++) {
+ struct mt76_queue_entry *e = &q->entry[j];
+
+ if (!e->skb)
+ continue;
+
+ dev_kfree_skb(e->skb);
+ e->skb = NULL;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(mt76s_deinit);
+
+int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
+ const struct mt76_bus_ops *bus_ops)
+{
+ struct mt76_sdio *sdio = &dev->sdio;
+
+ sdio->txrx_wq = alloc_workqueue("mt76s_txrx_wq",
+ WQ_UNBOUND | WQ_HIGHPRI,
+ WQ_UNBOUND_MAX_ACTIVE);
+ if (!sdio->txrx_wq)
+ return -ENOMEM;
+
+ INIT_WORK(&sdio->stat_work, mt76s_tx_status_data);
+ INIT_WORK(&sdio->tx.status_work, mt76s_tx_work);
+ INIT_WORK(&sdio->rx.net_work, mt76s_rx_work);
+
+ mutex_init(&sdio->sched.lock);
+ dev->queue_ops = &sdio_queue_ops;
+ dev->bus = bus_ops;
+ dev->sdio.func = func;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76s_init);
+
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/testmode.c b/drivers/net/wireless/mediatek/mt76/testmode.c
new file mode 100644
index 000000000..7ab99efb7
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/testmode.c
@@ -0,0 +1,503 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
+#include "mt76.h"
+
+static const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS] = {
+ [MT76_TM_ATTR_RESET] = { .type = NLA_FLAG },
+ [MT76_TM_ATTR_STATE] = { .type = NLA_U8 },
+ [MT76_TM_ATTR_TX_COUNT] = { .type = NLA_U32 },
+ [MT76_TM_ATTR_TX_LENGTH] = { .type = NLA_U32 },
+ [MT76_TM_ATTR_TX_RATE_MODE] = { .type = NLA_U8 },
+ [MT76_TM_ATTR_TX_RATE_NSS] = { .type = NLA_U8 },
+ [MT76_TM_ATTR_TX_RATE_IDX] = { .type = NLA_U8 },
+ [MT76_TM_ATTR_TX_RATE_SGI] = { .type = NLA_U8 },
+ [MT76_TM_ATTR_TX_RATE_LDPC] = { .type = NLA_U8 },
+ [MT76_TM_ATTR_TX_ANTENNA] = { .type = NLA_U8 },
+ [MT76_TM_ATTR_TX_POWER_CONTROL] = { .type = NLA_U8 },
+ [MT76_TM_ATTR_TX_POWER] = { .type = NLA_NESTED },
+ [MT76_TM_ATTR_FREQ_OFFSET] = { .type = NLA_U32 },
+};
+
+void mt76_testmode_tx_pending(struct mt76_dev *dev)
+{
+ struct mt76_testmode_data *td = &dev->test;
+ struct mt76_wcid *wcid = &dev->global_wcid;
+ struct sk_buff *skb = td->tx_skb;
+ struct mt76_queue *q;
+ int qid;
+
+ if (!skb || !td->tx_pending)
+ return;
+
+ qid = skb_get_queue_mapping(skb);
+ q = dev->q_tx[qid];
+
+ spin_lock_bh(&q->lock);
+
+ while (td->tx_pending > 0 && td->tx_queued - td->tx_done < 1000 &&
+ q->queued < q->ndesc / 2) {
+ int ret;
+
+ ret = dev->queue_ops->tx_queue_skb(dev, qid, skb_get(skb), wcid, NULL);
+ if (ret < 0)
+ break;
+
+ td->tx_pending--;
+ td->tx_queued++;
+ }
+
+ dev->queue_ops->kick(dev, q);
+
+ spin_unlock_bh(&q->lock);
+}
+
+
+static int
+mt76_testmode_tx_init(struct mt76_dev *dev)
+{
+ struct mt76_testmode_data *td = &dev->test;
+ struct ieee80211_tx_info *info;
+ struct ieee80211_hdr *hdr;
+ struct sk_buff *skb;
+ u16 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
+ IEEE80211_FCTL_FROMDS;
+ struct ieee80211_tx_rate *rate;
+ u8 max_nss = hweight8(dev->phy.antenna_mask);
+
+ if (td->tx_antenna_mask)
+ max_nss = min_t(u8, max_nss, hweight8(td->tx_antenna_mask));
+
+ skb = alloc_skb(td->tx_msdu_len, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ dev_kfree_skb(td->tx_skb);
+ td->tx_skb = skb;
+ hdr = __skb_put_zero(skb, td->tx_msdu_len);
+ hdr->frame_control = cpu_to_le16(fc);
+ memcpy(hdr->addr1, dev->macaddr, sizeof(dev->macaddr));
+ memcpy(hdr->addr2, dev->macaddr, sizeof(dev->macaddr));
+ memcpy(hdr->addr3, dev->macaddr, sizeof(dev->macaddr));
+
+ info = IEEE80211_SKB_CB(skb);
+ info->flags = IEEE80211_TX_CTL_INJECTED |
+ IEEE80211_TX_CTL_NO_ACK |
+ IEEE80211_TX_CTL_NO_PS_BUFFER;
+ rate = &info->control.rates[0];
+ rate->count = 1;
+ rate->idx = td->tx_rate_idx;
+
+ switch (td->tx_rate_mode) {
+ case MT76_TM_TX_MODE_CCK:
+ if (dev->phy.chandef.chan->band != NL80211_BAND_2GHZ)
+ return -EINVAL;
+
+ if (rate->idx > 4)
+ return -EINVAL;
+ break;
+ case MT76_TM_TX_MODE_OFDM:
+ if (dev->phy.chandef.chan->band != NL80211_BAND_2GHZ)
+ break;
+
+ if (rate->idx > 8)
+ return -EINVAL;
+
+ rate->idx += 4;
+ break;
+ case MT76_TM_TX_MODE_HT:
+ if (rate->idx > 8 * max_nss &&
+ !(rate->idx == 32 &&
+ dev->phy.chandef.width >= NL80211_CHAN_WIDTH_40))
+ return -EINVAL;
+
+ rate->flags |= IEEE80211_TX_RC_MCS;
+ break;
+ case MT76_TM_TX_MODE_VHT:
+ if (rate->idx > 9)
+ return -EINVAL;
+
+ if (td->tx_rate_nss > max_nss)
+ return -EINVAL;
+
+ ieee80211_rate_set_vht(rate, td->tx_rate_idx, td->tx_rate_nss);
+ rate->flags |= IEEE80211_TX_RC_VHT_MCS;
+ break;
+ default:
+ break;
+ }
+
+ if (td->tx_rate_sgi)
+ rate->flags |= IEEE80211_TX_RC_SHORT_GI;
+
+ if (td->tx_rate_ldpc)
+ info->flags |= IEEE80211_TX_CTL_LDPC;
+
+ if (td->tx_rate_mode >= MT76_TM_TX_MODE_HT) {
+ switch (dev->phy.chandef.width) {
+ case NL80211_CHAN_WIDTH_40:
+ rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ rate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ rate->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH;
+ break;
+ default:
+ break;
+ }
+ }
+
+ skb_set_queue_mapping(skb, IEEE80211_AC_BE);
+
+ return 0;
+}
+
+static void
+mt76_testmode_tx_start(struct mt76_dev *dev)
+{
+ struct mt76_testmode_data *td = &dev->test;
+
+ td->tx_queued = 0;
+ td->tx_done = 0;
+ td->tx_pending = td->tx_count;
+ mt76_worker_schedule(&dev->tx_worker);
+}
+
+static void
+mt76_testmode_tx_stop(struct mt76_dev *dev)
+{
+ struct mt76_testmode_data *td = &dev->test;
+
+ mt76_worker_disable(&dev->tx_worker);
+
+ td->tx_pending = 0;
+
+ mt76_worker_enable(&dev->tx_worker);
+
+ wait_event_timeout(dev->tx_wait, td->tx_done == td->tx_queued, 10 * HZ);
+
+ dev_kfree_skb(td->tx_skb);
+ td->tx_skb = NULL;
+}
+
+static inline void
+mt76_testmode_param_set(struct mt76_testmode_data *td, u16 idx)
+{
+ td->param_set[idx / 32] |= BIT(idx % 32);
+}
+
+static inline bool
+mt76_testmode_param_present(struct mt76_testmode_data *td, u16 idx)
+{
+ return td->param_set[idx / 32] & BIT(idx % 32);
+}
+
+static void
+mt76_testmode_init_defaults(struct mt76_dev *dev)
+{
+ struct mt76_testmode_data *td = &dev->test;
+
+ if (td->tx_msdu_len > 0)
+ return;
+
+ td->tx_msdu_len = 1024;
+ td->tx_count = 1;
+ td->tx_rate_mode = MT76_TM_TX_MODE_OFDM;
+ td->tx_rate_nss = 1;
+}
+
+static int
+__mt76_testmode_set_state(struct mt76_dev *dev, enum mt76_testmode_state state)
+{
+ enum mt76_testmode_state prev_state = dev->test.state;
+ int err;
+
+ if (prev_state == MT76_TM_STATE_TX_FRAMES)
+ mt76_testmode_tx_stop(dev);
+
+ if (state == MT76_TM_STATE_TX_FRAMES) {
+ err = mt76_testmode_tx_init(dev);
+ if (err)
+ return err;
+ }
+
+ err = dev->test_ops->set_state(dev, state);
+ if (err) {
+ if (state == MT76_TM_STATE_TX_FRAMES)
+ mt76_testmode_tx_stop(dev);
+
+ return err;
+ }
+
+ if (state == MT76_TM_STATE_TX_FRAMES)
+ mt76_testmode_tx_start(dev);
+ else if (state == MT76_TM_STATE_RX_FRAMES) {
+ memset(&dev->test.rx_stats, 0, sizeof(dev->test.rx_stats));
+ }
+
+ dev->test.state = state;
+
+ return 0;
+}
+
+int mt76_testmode_set_state(struct mt76_dev *dev, enum mt76_testmode_state state)
+{
+ struct mt76_testmode_data *td = &dev->test;
+ struct ieee80211_hw *hw = dev->phy.hw;
+
+ if (state == td->state && state == MT76_TM_STATE_OFF)
+ return 0;
+
+ if (state > MT76_TM_STATE_OFF &&
+ (!test_bit(MT76_STATE_RUNNING, &dev->phy.state) ||
+ !(hw->conf.flags & IEEE80211_CONF_MONITOR)))
+ return -ENOTCONN;
+
+ if (state != MT76_TM_STATE_IDLE &&
+ td->state != MT76_TM_STATE_IDLE) {
+ int ret;
+
+ ret = __mt76_testmode_set_state(dev, MT76_TM_STATE_IDLE);
+ if (ret)
+ return ret;
+ }
+
+ return __mt76_testmode_set_state(dev, state);
+
+}
+EXPORT_SYMBOL(mt76_testmode_set_state);
+
+static int
+mt76_tm_get_u8(struct nlattr *attr, u8 *dest, u8 min, u8 max)
+{
+ u8 val;
+
+ if (!attr)
+ return 0;
+
+ val = nla_get_u8(attr);
+ if (val < min || val > max)
+ return -EINVAL;
+
+ *dest = val;
+ return 0;
+}
+
+int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ void *data, int len)
+{
+ struct mt76_phy *phy = hw->priv;
+ struct mt76_dev *dev = phy->dev;
+ struct mt76_testmode_data *td = &dev->test;
+ struct nlattr *tb[NUM_MT76_TM_ATTRS];
+ u32 state;
+ int err;
+ int i;
+
+ if (!dev->test_ops)
+ return -EOPNOTSUPP;
+
+ err = nla_parse_deprecated(tb, MT76_TM_ATTR_MAX, data, len,
+ mt76_tm_policy, NULL);
+ if (err)
+ return err;
+
+ err = -EINVAL;
+
+ mutex_lock(&dev->mutex);
+
+ if (tb[MT76_TM_ATTR_RESET]) {
+ mt76_testmode_set_state(dev, MT76_TM_STATE_OFF);
+ memset(td, 0, sizeof(*td));
+ }
+
+ mt76_testmode_init_defaults(dev);
+
+ if (tb[MT76_TM_ATTR_TX_COUNT])
+ td->tx_count = nla_get_u32(tb[MT76_TM_ATTR_TX_COUNT]);
+
+ if (tb[MT76_TM_ATTR_TX_LENGTH]) {
+ u32 val = nla_get_u32(tb[MT76_TM_ATTR_TX_LENGTH]);
+
+ if (val > IEEE80211_MAX_FRAME_LEN ||
+ val < sizeof(struct ieee80211_hdr))
+ goto out;
+
+ td->tx_msdu_len = val;
+ }
+
+ if (tb[MT76_TM_ATTR_TX_RATE_IDX])
+ td->tx_rate_idx = nla_get_u8(tb[MT76_TM_ATTR_TX_RATE_IDX]);
+
+ if (mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_MODE], &td->tx_rate_mode,
+ 0, MT76_TM_TX_MODE_MAX) ||
+ mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_NSS], &td->tx_rate_nss,
+ 1, hweight8(phy->antenna_mask)) ||
+ mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_SGI], &td->tx_rate_sgi, 0, 1) ||
+ mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_LDPC], &td->tx_rate_ldpc, 0, 1) ||
+ mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_ANTENNA], &td->tx_antenna_mask, 1,
+ phy->antenna_mask) ||
+ mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_POWER_CONTROL],
+ &td->tx_power_control, 0, 1))
+ goto out;
+
+ if (tb[MT76_TM_ATTR_FREQ_OFFSET])
+ td->freq_offset = nla_get_u32(tb[MT76_TM_ATTR_FREQ_OFFSET]);
+
+ if (tb[MT76_TM_ATTR_STATE]) {
+ state = nla_get_u32(tb[MT76_TM_ATTR_STATE]);
+ if (state > MT76_TM_STATE_MAX)
+ goto out;
+ } else {
+ state = td->state;
+ }
+
+ if (tb[MT76_TM_ATTR_TX_POWER]) {
+ struct nlattr *cur;
+ int idx = 0;
+ int rem;
+
+ nla_for_each_nested(cur, tb[MT76_TM_ATTR_TX_POWER], rem) {
+ if (nla_len(cur) != 1 ||
+ idx >= ARRAY_SIZE(td->tx_power))
+ goto out;
+
+ td->tx_power[idx++] = nla_get_u8(cur);
+ }
+ }
+
+ if (dev->test_ops->set_params) {
+ err = dev->test_ops->set_params(dev, tb, state);
+ if (err)
+ goto out;
+ }
+
+ for (i = MT76_TM_ATTR_STATE; i < ARRAY_SIZE(tb); i++)
+ if (tb[i])
+ mt76_testmode_param_set(td, i);
+
+ err = 0;
+ if (tb[MT76_TM_ATTR_STATE])
+ err = mt76_testmode_set_state(dev, state);
+
+out:
+ mutex_unlock(&dev->mutex);
+
+ return err;
+}
+EXPORT_SYMBOL(mt76_testmode_cmd);
+
+static int
+mt76_testmode_dump_stats(struct mt76_dev *dev, struct sk_buff *msg)
+{
+ struct mt76_testmode_data *td = &dev->test;
+ u64 rx_packets = 0;
+ u64 rx_fcs_error = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(td->rx_stats.packets); i++) {
+ rx_packets += td->rx_stats.packets[i];
+ rx_fcs_error += td->rx_stats.fcs_error[i];
+ }
+
+ if (nla_put_u32(msg, MT76_TM_STATS_ATTR_TX_PENDING, td->tx_pending) ||
+ nla_put_u32(msg, MT76_TM_STATS_ATTR_TX_QUEUED, td->tx_queued) ||
+ nla_put_u32(msg, MT76_TM_STATS_ATTR_TX_DONE, td->tx_done) ||
+ nla_put_u64_64bit(msg, MT76_TM_STATS_ATTR_RX_PACKETS, rx_packets,
+ MT76_TM_STATS_ATTR_PAD) ||
+ nla_put_u64_64bit(msg, MT76_TM_STATS_ATTR_RX_FCS_ERROR, rx_fcs_error,
+ MT76_TM_STATS_ATTR_PAD))
+ return -EMSGSIZE;
+
+ if (dev->test_ops->dump_stats)
+ return dev->test_ops->dump_stats(dev, msg);
+
+ return 0;
+}
+
+int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
+ struct netlink_callback *cb, void *data, int len)
+{
+ struct mt76_phy *phy = hw->priv;
+ struct mt76_dev *dev = phy->dev;
+ struct mt76_testmode_data *td = &dev->test;
+ struct nlattr *tb[NUM_MT76_TM_ATTRS] = {};
+ int err = 0;
+ void *a;
+ int i;
+
+ if (!dev->test_ops)
+ return -EOPNOTSUPP;
+
+ if (cb->args[2]++ > 0)
+ return -ENOENT;
+
+ if (data) {
+ err = nla_parse_deprecated(tb, MT76_TM_ATTR_MAX, data, len,
+ mt76_tm_policy, NULL);
+ if (err)
+ return err;
+ }
+
+ mutex_lock(&dev->mutex);
+
+ if (tb[MT76_TM_ATTR_STATS]) {
+ err = -EINVAL;
+
+ a = nla_nest_start(msg, MT76_TM_ATTR_STATS);
+ if (a) {
+ err = mt76_testmode_dump_stats(dev, msg);
+ nla_nest_end(msg, a);
+ }
+
+ goto out;
+ }
+
+ mt76_testmode_init_defaults(dev);
+
+ err = -EMSGSIZE;
+ if (nla_put_u32(msg, MT76_TM_ATTR_STATE, td->state))
+ goto out;
+
+ if (td->mtd_name &&
+ (nla_put_string(msg, MT76_TM_ATTR_MTD_PART, td->mtd_name) ||
+ nla_put_u32(msg, MT76_TM_ATTR_MTD_OFFSET, td->mtd_offset)))
+ goto out;
+
+ if (nla_put_u32(msg, MT76_TM_ATTR_TX_COUNT, td->tx_count) ||
+ nla_put_u32(msg, MT76_TM_ATTR_TX_LENGTH, td->tx_msdu_len) ||
+ nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_MODE, td->tx_rate_mode) ||
+ nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_NSS, td->tx_rate_nss) ||
+ nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_IDX, td->tx_rate_idx) ||
+ nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_SGI, td->tx_rate_sgi) ||
+ nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_LDPC, td->tx_rate_ldpc) ||
+ (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_ANTENNA) &&
+ nla_put_u8(msg, MT76_TM_ATTR_TX_ANTENNA, td->tx_antenna_mask)) ||
+ (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_POWER_CONTROL) &&
+ nla_put_u8(msg, MT76_TM_ATTR_TX_POWER_CONTROL, td->tx_power_control)) ||
+ (mt76_testmode_param_present(td, MT76_TM_ATTR_FREQ_OFFSET) &&
+ nla_put_u8(msg, MT76_TM_ATTR_FREQ_OFFSET, td->freq_offset)))
+ goto out;
+
+ if (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_POWER)) {
+ a = nla_nest_start(msg, MT76_TM_ATTR_TX_POWER);
+ if (!a)
+ goto out;
+
+ for (i = 0; i < ARRAY_SIZE(td->tx_power); i++)
+ if (nla_put_u8(msg, i, td->tx_power[i]))
+ goto out;
+
+ nla_nest_end(msg, a);
+ }
+
+ err = 0;
+
+out:
+ mutex_unlock(&dev->mutex);
+
+ return err;
+}
+EXPORT_SYMBOL(mt76_testmode_dump);
diff --git a/drivers/net/wireless/mediatek/mt76/testmode.h b/drivers/net/wireless/mediatek/mt76/testmode.h
new file mode 100644
index 000000000..691fe5773
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/testmode.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2020 Felix Fietkau <nbd@nbd.name>
+ */
+#ifndef __MT76_TESTMODE_H
+#define __MT76_TESTMODE_H
+
+/**
+ * enum mt76_testmode_attr - testmode attributes inside NL80211_ATTR_TESTDATA
+ *
+ * @MT76_TM_ATTR_UNSPEC: (invalid attribute)
+ *
+ * @MT76_TM_ATTR_RESET: reset parameters to default (flag)
+ * @MT76_TM_ATTR_STATE: test state (u32), see &enum mt76_testmode_state
+ *
+ * @MT76_TM_ATTR_MTD_PART: mtd partition used for eeprom data (string)
+ * @MT76_TM_ATTR_MTD_OFFSET: offset of eeprom data within the partition (u32)
+ *
+ * @MT76_TM_ATTR_TX_COUNT: configured number of frames to send when setting
+ * state to MT76_TM_STATE_TX_FRAMES (u32)
+ * @MT76_TM_ATTR_TX_LENGTH: packet tx msdu length (u32)
+ * @MT76_TM_ATTR_TX_RATE_MODE: packet tx mode (u8, see &enum mt76_testmode_tx_mode)
+ * @MT76_TM_ATTR_TX_RATE_NSS: packet tx number of spatial streams (u8)
+ * @MT76_TM_ATTR_TX_RATE_IDX: packet tx rate/MCS index (u8)
+ * @MT76_TM_ATTR_TX_RATE_SGI: packet tx use short guard interval (u8)
+ * @MT76_TM_ATTR_TX_RATE_LDPC: packet tx enable LDPC (u8)
+ *
+ * @MT76_TM_ATTR_TX_ANTENNA: tx antenna mask (u8)
+ * @MT76_TM_ATTR_TX_POWER_CONTROL: enable tx power control (u8)
+ * @MT76_TM_ATTR_TX_POWER: per-antenna tx power array (nested, u8 attrs)
+ *
+ * @MT76_TM_ATTR_FREQ_OFFSET: RF frequency offset (u32)
+ *
+ * @MT76_TM_ATTR_STATS: statistics (nested, see &enum mt76_testmode_stats_attr)
+ */
+enum mt76_testmode_attr {
+ MT76_TM_ATTR_UNSPEC,
+
+ MT76_TM_ATTR_RESET,
+ MT76_TM_ATTR_STATE,
+
+ MT76_TM_ATTR_MTD_PART,
+ MT76_TM_ATTR_MTD_OFFSET,
+
+ MT76_TM_ATTR_TX_COUNT,
+ MT76_TM_ATTR_TX_LENGTH,
+ MT76_TM_ATTR_TX_RATE_MODE,
+ MT76_TM_ATTR_TX_RATE_NSS,
+ MT76_TM_ATTR_TX_RATE_IDX,
+ MT76_TM_ATTR_TX_RATE_SGI,
+ MT76_TM_ATTR_TX_RATE_LDPC,
+
+ MT76_TM_ATTR_TX_ANTENNA,
+ MT76_TM_ATTR_TX_POWER_CONTROL,
+ MT76_TM_ATTR_TX_POWER,
+
+ MT76_TM_ATTR_FREQ_OFFSET,
+
+ MT76_TM_ATTR_STATS,
+
+ /* keep last */
+ NUM_MT76_TM_ATTRS,
+ MT76_TM_ATTR_MAX = NUM_MT76_TM_ATTRS - 1,
+};
+
+/**
+ * enum mt76_testmode_stats_attr - statistics attributes
+ *
+ * @MT76_TM_STATS_ATTR_TX_PENDING: pending tx frames (u32)
+ * @MT76_TM_STATS_ATTR_TX_QUEUED: queued tx frames (u32)
+ * @MT76_TM_STATS_ATTR_TX_DONE: completed tx frames (u32)
+ *
+ * @MT76_TM_STATS_ATTR_RX_PACKETS: number of rx packets (u64)
+ * @MT76_TM_STATS_ATTR_RX_FCS_ERROR: number of rx packets with FCS error (u64)
+ * @MT76_TM_STATS_ATTR_LAST_RX: information about the last received packet
+ * see &enum mt76_testmode_rx_attr
+ */
+enum mt76_testmode_stats_attr {
+ MT76_TM_STATS_ATTR_UNSPEC,
+ MT76_TM_STATS_ATTR_PAD,
+
+ MT76_TM_STATS_ATTR_TX_PENDING,
+ MT76_TM_STATS_ATTR_TX_QUEUED,
+ MT76_TM_STATS_ATTR_TX_DONE,
+
+ MT76_TM_STATS_ATTR_RX_PACKETS,
+ MT76_TM_STATS_ATTR_RX_FCS_ERROR,
+ MT76_TM_STATS_ATTR_LAST_RX,
+
+ /* keep last */
+ NUM_MT76_TM_STATS_ATTRS,
+ MT76_TM_STATS_ATTR_MAX = NUM_MT76_TM_STATS_ATTRS - 1,
+};
+
+
+/**
+ * enum mt76_testmode_rx_attr - packet rx information
+ *
+ * @MT76_TM_RX_ATTR_FREQ_OFFSET: frequency offset (s32)
+ * @MT76_TM_RX_ATTR_RCPI: received channel power indicator (array, u8)
+ * @MT76_TM_RX_ATTR_IB_RSSI: internal inband RSSI (s8)
+ * @MT76_TM_RX_ATTR_WB_RSSI: internal wideband RSSI (s8)
+ */
+enum mt76_testmode_rx_attr {
+ MT76_TM_RX_ATTR_UNSPEC,
+
+ MT76_TM_RX_ATTR_FREQ_OFFSET,
+ MT76_TM_RX_ATTR_RCPI,
+ MT76_TM_RX_ATTR_IB_RSSI,
+ MT76_TM_RX_ATTR_WB_RSSI,
+
+ /* keep last */
+ NUM_MT76_TM_RX_ATTRS,
+ MT76_TM_RX_ATTR_MAX = NUM_MT76_TM_RX_ATTRS - 1,
+};
+
+/**
+ * enum mt76_testmode_state - phy test state
+ *
+ * @MT76_TM_STATE_OFF: test mode disabled (normal operation)
+ * @MT76_TM_STATE_IDLE: test mode enabled, but idle
+ * @MT76_TM_STATE_TX_FRAMES: send a fixed number of test frames
+ * @MT76_TM_STATE_RX_FRAMES: receive packets and keep statistics
+ */
+enum mt76_testmode_state {
+ MT76_TM_STATE_OFF,
+ MT76_TM_STATE_IDLE,
+ MT76_TM_STATE_TX_FRAMES,
+ MT76_TM_STATE_RX_FRAMES,
+
+ /* keep last */
+ NUM_MT76_TM_STATES,
+ MT76_TM_STATE_MAX = NUM_MT76_TM_STATES - 1,
+};
+
+/**
+ * enum mt76_testmode_tx_mode - packet tx phy mode
+ *
+ * @MT76_TM_TX_MODE_CCK: legacy CCK mode
+ * @MT76_TM_TX_MODE_OFDM: legacy OFDM mode
+ * @MT76_TM_TX_MODE_HT: 802.11n MCS
+ * @MT76_TM_TX_MODE_VHT: 802.11ac MCS
+ */
+enum mt76_testmode_tx_mode {
+ MT76_TM_TX_MODE_CCK,
+ MT76_TM_TX_MODE_OFDM,
+ MT76_TM_TX_MODE_HT,
+ MT76_TM_TX_MODE_VHT,
+
+ /* keep last */
+ NUM_MT76_TM_TX_MODES,
+ MT76_TM_TX_MODE_MAX = NUM_MT76_TM_TX_MODES - 1,
+};
+
+#endif
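
These attributes travel inside NL80211_ATTR_TESTDATA of an NL80211_CMD_TESTMODE request, so they are only reachable on kernels built with CONFIG_NL80211_TESTMODE and, per mt76_testmode_set_state(), only while the interface is running in monitor mode. A rough userspace sketch using libnl-genl follows (error handling omitted; note that mt76_testmode_cmd() reads MT76_TM_ATTR_STATE with nla_get_u32(), so it is sent as a u32 here):

#include <net/if.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>
#include "testmode.h"

static int example_start_tx_frames(const char *ifname, int count)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	struct nlattr *td;
	int family;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, "nl80211");

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NL80211_CMD_TESTMODE, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, if_nametoindex(ifname));

	/* vendor attributes are nested inside NL80211_ATTR_TESTDATA */
	td = nla_nest_start(msg, NL80211_ATTR_TESTDATA);
	nla_put_u32(msg, MT76_TM_ATTR_TX_COUNT, count);
	nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_MODE, MT76_TM_TX_MODE_OFDM);
	nla_put_u32(msg, MT76_TM_ATTR_STATE, MT76_TM_STATE_TX_FRAMES);
	nla_nest_end(msg, td);

	return nl_send_auto(sk, msg);
}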
diff --git a/drivers/net/wireless/mediatek/mt76/trace.c b/drivers/net/wireless/mediatek/mt76/trace.c
new file mode 100644
index 000000000..f199fcd2a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/trace.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(mac_txdone);
+EXPORT_TRACEPOINT_SYMBOL_GPL(dev_irq);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/trace.h b/drivers/net/wireless/mediatek/mt76/trace.h
new file mode 100644
index 000000000..c3d0ef8e2
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/trace.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#if !defined(__MT76_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MT76_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "mt76.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mt76
+
+#define MAXNAME 32
+#define DEV_ENTRY __array(char, wiphy_name, 32)
+#define DEVICE_ASSIGN strlcpy(__entry->wiphy_name, \
+ wiphy_name(dev->hw->wiphy), MAXNAME)
+#define DEV_PR_FMT "%s"
+#define DEV_PR_ARG __entry->wiphy_name
+
+#define REG_ENTRY __field(u32, reg) __field(u32, val)
+#define REG_ASSIGN __entry->reg = reg; __entry->val = val
+#define REG_PR_FMT " %04x=%08x"
+#define REG_PR_ARG __entry->reg, __entry->val
+
+#define TXID_ENTRY __field(u8, wcid) __field(u8, pktid)
+#define TXID_ASSIGN __entry->wcid = wcid; __entry->pktid = pktid
+#define TXID_PR_FMT " [%d:%d]"
+#define TXID_PR_ARG __entry->wcid, __entry->pktid
+
+DECLARE_EVENT_CLASS(dev_reg_evt,
+ TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ REG_ENTRY
+ ),
+ TP_fast_assign(
+ DEVICE_ASSIGN;
+ REG_ASSIGN;
+ ),
+ TP_printk(
+ DEV_PR_FMT REG_PR_FMT,
+ DEV_PR_ARG, REG_PR_ARG
+ )
+);
+
+DEFINE_EVENT(dev_reg_evt, reg_rr,
+ TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val)
+);
+
+DEFINE_EVENT(dev_reg_evt, reg_wr,
+ TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val)
+);
+
+TRACE_EVENT(dev_irq,
+ TP_PROTO(struct mt76_dev *dev, u32 val, u32 mask),
+
+ TP_ARGS(dev, val, mask),
+
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u32, val)
+ __field(u32, mask)
+ ),
+
+ TP_fast_assign(
+ DEVICE_ASSIGN;
+ __entry->val = val;
+ __entry->mask = mask;
+ ),
+
+ TP_printk(
+ DEV_PR_FMT " %08x & %08x",
+ DEV_PR_ARG, __entry->val, __entry->mask
+ )
+);
+
+DECLARE_EVENT_CLASS(dev_txid_evt,
+ TP_PROTO(struct mt76_dev *dev, u8 wcid, u8 pktid),
+ TP_ARGS(dev, wcid, pktid),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ TXID_ENTRY
+ ),
+ TP_fast_assign(
+ DEVICE_ASSIGN;
+ TXID_ASSIGN;
+ ),
+ TP_printk(
+ DEV_PR_FMT TXID_PR_FMT,
+ DEV_PR_ARG, TXID_PR_ARG
+ )
+);
+
+DEFINE_EVENT(dev_txid_evt, mac_txdone,
+ TP_PROTO(struct mt76_dev *dev, u8 wcid, u8 pktid),
+ TP_ARGS(dev, wcid, pktid)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
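
The dev_irq event above is emitted from a driver's interrupt path and shows up under the mt76 trace system in tracefs (events/mt76/dev_irq) once the tracepoints have been instantiated by trace.c. A minimal, hypothetical emitter; the register offset is a stand-in, not a real interrupt status register:

#include <linux/interrupt.h>
#include "trace.h"

/* Sketch only: offset 0x200 is a placeholder for a chip's INT_SOURCE register. */
static irqreturn_t example_isr(int irq, void *dev_instance)
{
	struct mt76_dev *dev = dev_instance;
	u32 mask = 0xffffffff;
	u32 intr = dev->bus->rr(dev, 0x200);

	trace_dev_irq(dev, intr, mask);

	return intr ? IRQ_HANDLED : IRQ_NONE;
}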
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
new file mode 100644
index 000000000..073c29eb2
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -0,0 +1,628 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#include "mt76.h"
+
+static int
+mt76_txq_get_qid(struct ieee80211_txq *txq)
+{
+ if (!txq->sta)
+ return MT_TXQ_BE;
+
+ return txq->ac;
+}
+
+void
+mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_txq *txq;
+ struct mt76_txq *mtxq;
+ u8 tid;
+
+ if (!sta || !ieee80211_is_data_qos(hdr->frame_control) ||
+ !ieee80211_is_data_present(hdr->frame_control))
+ return;
+
+ tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
+ txq = sta->txq[tid];
+ mtxq = (struct mt76_txq *)txq->drv_priv;
+ if (!mtxq->aggr)
+ return;
+
+ mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
+}
+EXPORT_SYMBOL_GPL(mt76_tx_check_agg_ssn);
+
+void
+mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
+ __acquires(&dev->status_list.lock)
+{
+ __skb_queue_head_init(list);
+ spin_lock_bh(&dev->status_list.lock);
+}
+EXPORT_SYMBOL_GPL(mt76_tx_status_lock);
+
+void
+mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
+ __releases(&dev->status_list.lock)
+{
+ struct ieee80211_hw *hw;
+ struct sk_buff *skb;
+
+ spin_unlock_bh(&dev->status_list.lock);
+
+ while ((skb = __skb_dequeue(list)) != NULL) {
+ hw = mt76_tx_status_get_hw(dev, skb);
+ ieee80211_tx_status(hw, skb);
+ }
+
+}
+EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);
+
+static void
+__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
+ struct sk_buff_head *list)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
+ u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE;
+
+ flags |= cb->flags;
+ cb->flags = flags;
+
+ if ((flags & done) != done)
+ return;
+
+ __skb_unlink(skb, &dev->status_list);
+
+ /* Tx status can be unreliable. If it fails, mark the frame as ACKed */
+ if (flags & MT_TX_CB_TXS_FAILED) {
+ ieee80211_tx_info_clear_status(info);
+ info->status.rates[0].idx = -1;
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ }
+
+ __skb_queue_tail(list, skb);
+}
+
+void
+mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
+ struct sk_buff_head *list)
+{
+ __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
+}
+EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done);
+
+int
+mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
+ int pid;
+
+ if (!wcid)
+ return MT_PACKET_ID_NO_ACK;
+
+ if (info->flags & IEEE80211_TX_CTL_NO_ACK)
+ return MT_PACKET_ID_NO_ACK;
+
+ if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
+ IEEE80211_TX_CTL_RATE_CTRL_PROBE)))
+ return MT_PACKET_ID_NO_SKB;
+
+ spin_lock_bh(&dev->status_list.lock);
+
+ memset(cb, 0, sizeof(*cb));
+ wcid->packet_id = (wcid->packet_id + 1) & MT_PACKET_ID_MASK;
+ if (wcid->packet_id == MT_PACKET_ID_NO_ACK ||
+ wcid->packet_id == MT_PACKET_ID_NO_SKB)
+ wcid->packet_id = MT_PACKET_ID_FIRST;
+
+ pid = wcid->packet_id;
+ cb->wcid = wcid->idx;
+ cb->pktid = pid;
+ cb->jiffies = jiffies;
+
+ __skb_queue_tail(&dev->status_list, skb);
+ spin_unlock_bh(&dev->status_list.lock);
+
+ return pid;
+}
+EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add);
+
+struct sk_buff *
+mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
+ struct sk_buff_head *list)
+{
+ struct sk_buff *skb, *tmp;
+
+ skb_queue_walk_safe(&dev->status_list, skb, tmp) {
+ struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
+
+ if (wcid && cb->wcid != wcid->idx)
+ continue;
+
+ if (cb->pktid == pktid)
+ return skb;
+
+ if (pktid >= 0 && !time_after(jiffies, cb->jiffies +
+ MT_TX_STATUS_SKB_TIMEOUT))
+ continue;
+
+ __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
+ MT_TX_CB_TXS_DONE, list);
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);
+
+void
+mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid, bool flush)
+{
+ struct sk_buff_head list;
+
+ mt76_tx_status_lock(dev, &list);
+ mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
+ mt76_tx_status_unlock(dev, &list);
+}
+EXPORT_SYMBOL_GPL(mt76_tx_status_check);
+
+static void
+mt76_tx_check_non_aql(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct mt76_wcid *wcid;
+ int pending;
+
+ if (info->tx_time_est)
+ return;
+
+ if (wcid_idx >= ARRAY_SIZE(dev->wcid))
+ return;
+
+ rcu_read_lock();
+
+ wcid = rcu_dereference(dev->wcid[wcid_idx]);
+ if (wcid) {
+ pending = atomic_dec_return(&wcid->non_aql_packets);
+ if (pending < 0)
+ atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
+ }
+
+ rcu_read_unlock();
+}
+
+void mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb)
+{
+ struct ieee80211_hw *hw;
+ struct sk_buff_head list;
+
+#ifdef CONFIG_NL80211_TESTMODE
+ if (skb == dev->test.tx_skb) {
+ dev->test.tx_done++;
+ if (dev->test.tx_queued == dev->test.tx_done)
+ wake_up(&dev->tx_wait);
+ }
+#endif
+
+ mt76_tx_check_non_aql(dev, wcid_idx, skb);
+
+ if (!skb->prev) {
+ hw = mt76_tx_status_get_hw(dev, skb);
+ ieee80211_free_txskb(hw, skb);
+ return;
+ }
+
+ mt76_tx_status_lock(dev, &list);
+ __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
+ mt76_tx_status_unlock(dev, &list);
+}
+EXPORT_SYMBOL_GPL(mt76_tx_complete_skb);
+
+static int
+__mt76_tx_queue_skb(struct mt76_dev *dev, int qid, struct sk_buff *skb,
+ struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+ bool *stop)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct mt76_queue *q;
+ bool non_aql;
+ int pending;
+ int idx;
+
+ non_aql = !info->tx_time_est;
+ idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, sta);
+ if (idx < 0 || !sta || !non_aql)
+ return idx;
+
+ wcid = (struct mt76_wcid *)sta->drv_priv;
+ q = dev->q_tx[qid];
+ q->entry[idx].wcid = wcid->idx;
+ pending = atomic_inc_return(&wcid->non_aql_packets);
+ if (stop && pending >= MT_MAX_NON_AQL_PKT)
+ *stop = true;
+
+ return idx;
+}
+
+void
+mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
+ struct mt76_wcid *wcid, struct sk_buff *skb)
+{
+ struct mt76_dev *dev = phy->dev;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct mt76_queue *q;
+ int qid = skb_get_queue_mapping(skb);
+ bool ext_phy = phy != &dev->phy;
+
+ if (mt76_testmode_enabled(dev)) {
+ ieee80211_free_txskb(phy->hw, skb);
+ return;
+ }
+
+ if (WARN_ON(qid >= MT_TXQ_PSD)) {
+ qid = MT_TXQ_BE;
+ skb_set_queue_mapping(skb, qid);
+ }
+
+ if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) &&
+ !ieee80211_is_data(hdr->frame_control) &&
+ !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
+ qid = MT_TXQ_PSD;
+ skb_set_queue_mapping(skb, qid);
+ }
+
+ if (wcid && !(wcid->tx_info & MT_WCID_TX_INFO_SET))
+ ieee80211_get_tx_rates(info->control.vif, sta, skb,
+ info->control.rates, 1);
+
+ if (ext_phy)
+ info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
+
+ q = dev->q_tx[qid];
+
+ spin_lock_bh(&q->lock);
+ __mt76_tx_queue_skb(dev, qid, skb, wcid, sta, NULL);
+ dev->queue_ops->kick(dev, q);
+
+ if (q->queued > q->ndesc - 8 && !q->stopped) {
+ ieee80211_stop_queue(phy->hw, skb_get_queue_mapping(skb));
+ q->stopped = true;
+ }
+
+ spin_unlock_bh(&q->lock);
+}
+EXPORT_SYMBOL_GPL(mt76_tx);
+
+static struct sk_buff *
+mt76_txq_dequeue(struct mt76_phy *phy, struct mt76_txq *mtxq)
+{
+ struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
+ struct ieee80211_tx_info *info;
+ bool ext_phy = phy != &phy->dev->phy;
+ struct sk_buff *skb;
+
+ skb = ieee80211_tx_dequeue(phy->hw, txq);
+ if (!skb)
+ return NULL;
+
+ info = IEEE80211_SKB_CB(skb);
+ if (ext_phy)
+ info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
+
+ return skb;
+}
+
+static void
+mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
+ struct sk_buff *skb, bool last)
+{
+ struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
+ if (last)
+ info->flags |= IEEE80211_TX_STATUS_EOSP |
+ IEEE80211_TX_CTL_REQ_TX_STATUS;
+
+ mt76_skb_set_moredata(skb, !last);
+ __mt76_tx_queue_skb(dev, MT_TXQ_PSD, skb, wcid, sta, NULL);
+}
+
+void
+mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+ u16 tids, int nframes,
+ enum ieee80211_frame_release_type reason,
+ bool more_data)
+{
+ struct mt76_phy *phy = hw->priv;
+ struct mt76_dev *dev = phy->dev;
+ struct sk_buff *last_skb = NULL;
+ struct mt76_queue *hwq = dev->q_tx[MT_TXQ_PSD];
+ int i;
+
+ spin_lock_bh(&hwq->lock);
+ for (i = 0; tids && nframes; i++, tids >>= 1) {
+ struct ieee80211_txq *txq = sta->txq[i];
+ struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;
+ struct sk_buff *skb;
+
+ if (!(tids & 1))
+ continue;
+
+ do {
+ skb = mt76_txq_dequeue(phy, mtxq);
+ if (!skb)
+ break;
+
+ nframes--;
+ if (last_skb)
+ mt76_queue_ps_skb(dev, sta, last_skb, false);
+
+ last_skb = skb;
+ } while (nframes);
+ }
+
+ if (last_skb) {
+ mt76_queue_ps_skb(dev, sta, last_skb, true);
+ dev->queue_ops->kick(dev, hwq);
+ } else {
+ ieee80211_sta_eosp(sta);
+ }
+
+ spin_unlock_bh(&hwq->lock);
+}
+EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);
+
+static int
+mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
+ struct mt76_txq *mtxq)
+{
+ struct mt76_dev *dev = phy->dev;
+ struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
+ enum mt76_txq_id qid = mt76_txq_get_qid(txq);
+ struct mt76_wcid *wcid = mtxq->wcid;
+ struct ieee80211_tx_info *info;
+ struct sk_buff *skb;
+ int n_frames = 1;
+ bool stop = false;
+ int idx;
+
+ if (test_bit(MT_WCID_FLAG_PS, &wcid->flags))
+ return 0;
+
+ if (atomic_read(&wcid->non_aql_packets) >= MT_MAX_NON_AQL_PKT)
+ return 0;
+
+ skb = mt76_txq_dequeue(phy, mtxq);
+ if (!skb)
+ return 0;
+
+ info = IEEE80211_SKB_CB(skb);
+ if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
+ ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
+ info->control.rates, 1);
+
+ idx = __mt76_tx_queue_skb(dev, qid, skb, wcid, txq->sta, &stop);
+ if (idx < 0)
+ return idx;
+
+ do {
+ if (test_bit(MT76_STATE_PM, &phy->state) ||
+ test_bit(MT76_RESET, &phy->state))
+ return -EBUSY;
+
+ if (stop)
+ break;
+
+ if (q->queued + MT_TXQ_FREE_THR >= q->ndesc)
+ break;
+
+ skb = mt76_txq_dequeue(phy, mtxq);
+ if (!skb)
+ break;
+
+ info = IEEE80211_SKB_CB(skb);
+ if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
+ ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
+ info->control.rates, 1);
+
+ idx = __mt76_tx_queue_skb(dev, qid, skb, wcid, txq->sta, &stop);
+ if (idx < 0)
+ break;
+
+ n_frames++;
+ } while (1);
+
+ dev->queue_ops->kick(dev, q);
+
+ return n_frames;
+}
+
+static int
+mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
+{
+ struct mt76_dev *dev = phy->dev;
+ struct mt76_queue *q = dev->q_tx[qid];
+ struct ieee80211_txq *txq;
+ struct mt76_txq *mtxq;
+ struct mt76_wcid *wcid;
+ int ret = 0;
+
+ spin_lock_bh(&q->lock);
+ while (1) {
+ if (test_bit(MT76_STATE_PM, &phy->state) ||
+ test_bit(MT76_RESET, &phy->state)) {
+ ret = -EBUSY;
+ break;
+ }
+
+ if (q->queued + MT_TXQ_FREE_THR >= q->ndesc)
+ break;
+
+ txq = ieee80211_next_txq(phy->hw, qid);
+ if (!txq)
+ break;
+
+ mtxq = (struct mt76_txq *)txq->drv_priv;
+ wcid = mtxq->wcid;
+ if (wcid && test_bit(MT_WCID_FLAG_PS, &wcid->flags))
+ continue;
+
+ if (mtxq->send_bar && mtxq->aggr) {
+ struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
+ struct ieee80211_sta *sta = txq->sta;
+ struct ieee80211_vif *vif = txq->vif;
+ u16 agg_ssn = mtxq->agg_ssn;
+ u8 tid = txq->tid;
+
+ mtxq->send_bar = false;
+ spin_unlock_bh(&q->lock);
+ ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
+ spin_lock_bh(&q->lock);
+ }
+
+ ret += mt76_txq_send_burst(phy, q, mtxq);
+ ieee80211_return_txq(phy->hw, txq, false);
+ }
+ spin_unlock_bh(&q->lock);
+
+ return ret;
+}
+
+void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
+{
+ int len;
+
+ if (qid >= 4)
+ return;
+
+ rcu_read_lock();
+
+ do {
+ ieee80211_txq_schedule_start(phy->hw, qid);
+ len = mt76_txq_schedule_list(phy, qid);
+ ieee80211_txq_schedule_end(phy->hw, qid);
+ } while (len > 0);
+
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(mt76_txq_schedule);
+
+void mt76_txq_schedule_all(struct mt76_phy *phy)
+{
+ int i;
+
+ for (i = 0; i <= MT_TXQ_BK; i++)
+ mt76_txq_schedule(phy, i);
+}
+EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);
+
+void mt76_tx_worker(struct mt76_worker *w)
+{
+ struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker);
+
+ mt76_txq_schedule_all(&dev->phy);
+ if (dev->phy2)
+ mt76_txq_schedule_all(dev->phy2);
+
+#ifdef CONFIG_NL80211_TESTMODE
+ if (dev->test.tx_pending)
+ mt76_testmode_tx_pending(dev);
+#endif
+}
+
+void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
+ bool send_bar)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
+ struct ieee80211_txq *txq = sta->txq[i];
+ struct mt76_queue *hwq;
+ struct mt76_txq *mtxq;
+
+ if (!txq)
+ continue;
+
+ hwq = dev->q_tx[mt76_txq_get_qid(txq)];
+ mtxq = (struct mt76_txq *)txq->drv_priv;
+
+ spin_lock_bh(&hwq->lock);
+ mtxq->send_bar = mtxq->aggr && send_bar;
+ spin_unlock_bh(&hwq->lock);
+ }
+}
+EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);
+
+void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
+{
+ struct mt76_phy *phy = hw->priv;
+ struct mt76_dev *dev = phy->dev;
+
+ if (!test_bit(MT76_STATE_RUNNING, &phy->state))
+ return;
+
+ mt76_worker_schedule(&dev->tx_worker);
+}
+EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);
+
+u8 mt76_ac_to_hwq(u8 ac)
+{
+ static const u8 wmm_queue_map[] = {
+ [IEEE80211_AC_BE] = 0,
+ [IEEE80211_AC_BK] = 1,
+ [IEEE80211_AC_VI] = 2,
+ [IEEE80211_AC_VO] = 3,
+ };
+
+ if (WARN_ON(ac >= IEEE80211_NUM_ACS))
+ return 0;
+
+ return wmm_queue_map[ac];
+}
+EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);
+
+int mt76_skb_adjust_pad(struct sk_buff *skb, int pad)
+{
+ struct sk_buff *iter, *last = skb;
+
+ /* The first packet of an A-MSDU burst keeps track of the whole burst
+ * length; both its length and the last packet's need updating.
+ */
+ skb_walk_frags(skb, iter) {
+ last = iter;
+ if (!iter->next) {
+ skb->data_len += pad;
+ skb->len += pad;
+ break;
+ }
+ }
+
+ if (skb_pad(last, pad))
+ return -ENOMEM;
+
+ __skb_put(last, pad);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_skb_adjust_pad);
+
+void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
+ struct mt76_queue_entry *e)
+{
+ if (e->skb)
+ dev->drv->tx_complete_skb(dev, e);
+
+ spin_lock_bh(&q->lock);
+ q->tail = (q->tail + 1) % q->ndesc;
+ q->queued--;
+ spin_unlock_bh(&q->lock);
+}
+EXPORT_SYMBOL_GPL(mt76_queue_tx_complete);
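
The status helpers in this file form a small round trip: mt76_tx_status_skb_add() assigns a packet id and parks the skb on dev->status_list, the driver echoes that id through its hardware tx descriptor, and the tx-status event handler looks the skb back up and completes it. A sketch of the event side, with the ACK flag fill-in standing in for whatever the hardware report actually carries:

#include "mt76.h"

/* Sketch only: built from the exported helpers above; the report handling
 * is a placeholder.
 */
static void example_handle_txs(struct mt76_dev *dev,
			       struct mt76_wcid *wcid, u8 pktid)
{
	struct sk_buff_head list;
	struct sk_buff *skb;

	mt76_tx_status_lock(dev, &list);

	/* stale entries time out inside the lookup and are completed as failed */
	skb = mt76_tx_status_skb_get(dev, wcid, pktid, &list);
	if (skb) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		info->flags |= IEEE80211_TX_STAT_ACK;	/* from the hw report */
		mt76_tx_status_skb_done(dev, skb, &list);
	}

	mt76_tx_status_unlock(dev, &list);	/* hands completed frames to mac80211 */
}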
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
new file mode 100644
index 000000000..f1ae9ff83
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -0,0 +1,1147 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include <linux/module.h>
+#include "mt76.h"
+#include "usb_trace.h"
+#include "dma.h"
+
+#define MT_VEND_REQ_MAX_RETRY 10
+#define MT_VEND_REQ_TOUT_MS 300
+
+static bool disable_usb_sg;
+module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
+MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");
+
+static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
+ u8 req_type, u16 val, u16 offset,
+ void *buf, size_t len)
+{
+ struct usb_interface *uintf = to_usb_interface(dev->dev);
+ struct usb_device *udev = interface_to_usbdev(uintf);
+ unsigned int pipe;
+ int i, ret;
+
+ lockdep_assert_held(&dev->usb.usb_ctrl_mtx);
+
+ pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
+ : usb_sndctrlpipe(udev, 0);
+ for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
+ if (test_bit(MT76_REMOVED, &dev->phy.state))
+ return -EIO;
+
+ ret = usb_control_msg(udev, pipe, req, req_type, val,
+ offset, buf, len, MT_VEND_REQ_TOUT_MS);
+ if (ret == -ENODEV)
+ set_bit(MT76_REMOVED, &dev->phy.state);
+ if (ret >= 0 || ret == -ENODEV)
+ return ret;
+ usleep_range(5000, 10000);
+ }
+
+ dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
+ req, offset, ret);
+ return ret;
+}
+
+int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
+ u8 req_type, u16 val, u16 offset,
+ void *buf, size_t len)
+{
+ int ret;
+
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ ret = __mt76u_vendor_request(dev, req, req_type,
+ val, offset, buf, len);
+ trace_usb_reg_wr(dev, offset, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76u_vendor_request);
+
+static u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u32 addr)
+{
+ struct mt76_usb *usb = &dev->usb;
+ u32 data = ~0;
+ int ret;
+
+ ret = __mt76u_vendor_request(dev, req,
+ USB_DIR_IN | USB_TYPE_VENDOR,
+ addr >> 16, addr, usb->data,
+ sizeof(__le32));
+ if (ret == sizeof(__le32))
+ data = get_unaligned_le32(usb->data);
+ trace_usb_reg_rr(dev, addr, data);
+
+ return data;
+}
+
+static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
+{
+ u8 req;
+
+ switch (addr & MT_VEND_TYPE_MASK) {
+ case MT_VEND_TYPE_EEPROM:
+ req = MT_VEND_READ_EEPROM;
+ break;
+ case MT_VEND_TYPE_CFG:
+ req = MT_VEND_READ_CFG;
+ break;
+ default:
+ req = MT_VEND_MULTI_READ;
+ break;
+ }
+
+ return ___mt76u_rr(dev, req, addr & ~MT_VEND_TYPE_MASK);
+}
+
+static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
+{
+ u32 ret;
+
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ ret = __mt76u_rr(dev, addr);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return ret;
+}
+
+static u32 mt76u_rr_ext(struct mt76_dev *dev, u32 addr)
+{
+ u32 ret;
+
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ ret = ___mt76u_rr(dev, MT_VEND_READ_EXT, addr);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return ret;
+}
+
+static void ___mt76u_wr(struct mt76_dev *dev, u8 req,
+ u32 addr, u32 val)
+{
+ struct mt76_usb *usb = &dev->usb;
+
+ put_unaligned_le32(val, usb->data);
+ __mt76u_vendor_request(dev, req,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ addr >> 16, addr, usb->data,
+ sizeof(__le32));
+ trace_usb_reg_wr(dev, addr, val);
+}
+
+static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
+{
+ u8 req;
+
+ switch (addr & MT_VEND_TYPE_MASK) {
+ case MT_VEND_TYPE_CFG:
+ req = MT_VEND_WRITE_CFG;
+ break;
+ default:
+ req = MT_VEND_MULTI_WRITE;
+ break;
+ }
+ ___mt76u_wr(dev, req, addr & ~MT_VEND_TYPE_MASK, val);
+}
+
+static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
+{
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ __mt76u_wr(dev, addr, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+}
+
+static void mt76u_wr_ext(struct mt76_dev *dev, u32 addr, u32 val)
+{
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ ___mt76u_wr(dev, MT_VEND_WRITE_EXT, addr, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+}
+
+static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
+ u32 mask, u32 val)
+{
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ val |= __mt76u_rr(dev, addr) & ~mask;
+ __mt76u_wr(dev, addr, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return val;
+}
+
+static u32 mt76u_rmw_ext(struct mt76_dev *dev, u32 addr,
+ u32 mask, u32 val)
+{
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ val |= ___mt76u_rr(dev, MT_VEND_READ_EXT, addr) & ~mask;
+ ___mt76u_wr(dev, MT_VEND_WRITE_EXT, addr, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return val;
+}
+
+static void mt76u_copy(struct mt76_dev *dev, u32 offset,
+ const void *data, int len)
+{
+ struct mt76_usb *usb = &dev->usb;
+ const u8 *val = data;
+ int ret;
+ int current_batch_size;
+ int i = 0;
+
+ /* Ensure that a multiple of 4 bytes is always copied,
+ * otherwise beacons can be corrupted.
+ * See: "mt76: round up length on mt76_wr_copy"
+ * Commit 850e8f6fbd5d0003b0
+ */
+ len = round_up(len, 4);
+
+ mutex_lock(&usb->usb_ctrl_mtx);
+ while (i < len) {
+ current_batch_size = min_t(int, usb->data_len, len - i);
+ memcpy(usb->data, val + i, current_batch_size);
+ ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ 0, offset + i, usb->data,
+ current_batch_size);
+ if (ret < 0)
+ break;
+
+ i += current_batch_size;
+ }
+ mutex_unlock(&usb->usb_ctrl_mtx);
+}
+
+static void mt76u_copy_ext(struct mt76_dev *dev, u32 offset,
+ const void *data, int len)
+{
+ struct mt76_usb *usb = &dev->usb;
+ int ret, i = 0, batch_len;
+ const u8 *val = data;
+
+ len = round_up(len, 4);
+ mutex_lock(&usb->usb_ctrl_mtx);
+ while (i < len) {
+ batch_len = min_t(int, usb->data_len, len - i);
+ memcpy(usb->data, val + i, batch_len);
+ ret = __mt76u_vendor_request(dev, MT_VEND_WRITE_EXT,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ (offset + i) >> 16, offset + i,
+ usb->data, batch_len);
+ if (ret < 0)
+ break;
+
+ i += batch_len;
+ }
+ mutex_unlock(&usb->usb_ctrl_mtx);
+}
+
+static void
+mt76u_read_copy_ext(struct mt76_dev *dev, u32 offset,
+ void *data, int len)
+{
+ struct mt76_usb *usb = &dev->usb;
+ int i = 0, batch_len, ret;
+ u8 *val = data;
+
+ len = round_up(len, 4);
+ mutex_lock(&usb->usb_ctrl_mtx);
+ while (i < len) {
+ batch_len = min_t(int, usb->data_len, len - i);
+ ret = __mt76u_vendor_request(dev, MT_VEND_READ_EXT,
+ USB_DIR_IN | USB_TYPE_VENDOR,
+ (offset + i) >> 16, offset + i,
+ usb->data, batch_len);
+ if (ret < 0)
+ break;
+
+ memcpy(val + i, usb->data, batch_len);
+ i += batch_len;
+ }
+ mutex_unlock(&usb->usb_ctrl_mtx);
+}
+
+void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
+ const u16 offset, const u32 val)
+{
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ __mt76u_vendor_request(dev, req,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ val & 0xffff, offset, NULL, 0);
+ __mt76u_vendor_request(dev, req,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ val >> 16, offset + 2, NULL, 0);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+}
+EXPORT_SYMBOL_GPL(mt76u_single_wr);
+
+static int
+mt76u_req_wr_rp(struct mt76_dev *dev, u32 base,
+ const struct mt76_reg_pair *data, int len)
+{
+ struct mt76_usb *usb = &dev->usb;
+
+ mutex_lock(&usb->usb_ctrl_mtx);
+ while (len > 0) {
+ __mt76u_wr(dev, base + data->reg, data->value);
+ len--;
+ data++;
+ }
+ mutex_unlock(&usb->usb_ctrl_mtx);
+
+ return 0;
+}
+
+static int
+mt76u_wr_rp(struct mt76_dev *dev, u32 base,
+ const struct mt76_reg_pair *data, int n)
+{
+ if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
+ return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
+ else
+ return mt76u_req_wr_rp(dev, base, data, n);
+}
+
+static int
+mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data,
+ int len)
+{
+ struct mt76_usb *usb = &dev->usb;
+
+ mutex_lock(&usb->usb_ctrl_mtx);
+ while (len > 0) {
+ data->value = __mt76u_rr(dev, base + data->reg);
+ len--;
+ data++;
+ }
+ mutex_unlock(&usb->usb_ctrl_mtx);
+
+ return 0;
+}
+
+static int
+mt76u_rd_rp(struct mt76_dev *dev, u32 base,
+ struct mt76_reg_pair *data, int n)
+{
+ if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
+ return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
+ else
+ return mt76u_req_rd_rp(dev, base, data, n);
+}
+
+static bool mt76u_check_sg(struct mt76_dev *dev)
+{
+ struct usb_interface *uintf = to_usb_interface(dev->dev);
+ struct usb_device *udev = interface_to_usbdev(uintf);
+
+ return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
+ (udev->bus->no_sg_constraint ||
+ udev->speed == USB_SPEED_WIRELESS));
+}
+
+static int
+mt76u_set_endpoints(struct usb_interface *intf,
+ struct mt76_usb *usb)
+{
+ struct usb_host_interface *intf_desc = intf->cur_altsetting;
+ struct usb_endpoint_descriptor *ep_desc;
+ int i, in_ep = 0, out_ep = 0;
+
+ for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
+ ep_desc = &intf_desc->endpoint[i].desc;
+
+ if (usb_endpoint_is_bulk_in(ep_desc) &&
+ in_ep < __MT_EP_IN_MAX) {
+ usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
+ in_ep++;
+ } else if (usb_endpoint_is_bulk_out(ep_desc) &&
+ out_ep < __MT_EP_OUT_MAX) {
+ usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
+ out_ep++;
+ }
+ }
+
+ if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
+ return -EINVAL;
+ return 0;
+}
+
+static int
+mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
+ int nsgs, gfp_t gfp)
+{
+ int i;
+
+ for (i = 0; i < nsgs; i++) {
+ struct page *page;
+ void *data;
+ int offset;
+
+ data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
+ if (!data)
+ break;
+
+ page = virt_to_head_page(data);
+ offset = data - page_address(page);
+ sg_set_page(&urb->sg[i], page, q->buf_size, offset);
+ }
+
+ if (i < nsgs) {
+ int j;
+
+ for (j = nsgs; j < urb->num_sgs; j++)
+ skb_free_frag(sg_virt(&urb->sg[j]));
+ urb->num_sgs = i;
+ }
+
+ urb->num_sgs = max_t(int, i, urb->num_sgs);
+ urb->transfer_buffer_length = urb->num_sgs * q->buf_size;
+ sg_init_marker(urb->sg, urb->num_sgs);
+
+ return i ? : -ENOMEM;
+}
+
+static int
+mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
+ struct urb *urb, int nsgs, gfp_t gfp)
+{
+ enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
+
+ if (qid == MT_RXQ_MAIN && dev->usb.sg_en)
+ return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);
+
+ urb->transfer_buffer_length = q->buf_size;
+ urb->transfer_buffer = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
+
+ return urb->transfer_buffer ? 0 : -ENOMEM;
+}
+
+static int
+mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e,
+ int sg_max_size)
+{
+ unsigned int size = sizeof(struct urb);
+
+ if (dev->usb.sg_en)
+ size += sg_max_size * sizeof(struct scatterlist);
+
+ e->urb = kzalloc(size, GFP_KERNEL);
+ if (!e->urb)
+ return -ENOMEM;
+
+ usb_init_urb(e->urb);
+
+ if (dev->usb.sg_en && sg_max_size > 0)
+ e->urb->sg = (struct scatterlist *)(e->urb + 1);
+
+ return 0;
+}
+
+static int
+mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
+ struct mt76_queue_entry *e)
+{
+ enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
+ int err, sg_size;
+
+ sg_size = qid == MT_RXQ_MAIN ? MT_RX_SG_MAX_SIZE : 0;
+ err = mt76u_urb_alloc(dev, e, sg_size);
+ if (err)
+ return err;
+
+ return mt76u_refill_rx(dev, q, e->urb, sg_size, GFP_KERNEL);
+}
+
+static void mt76u_urb_free(struct urb *urb)
+{
+ int i;
+
+ for (i = 0; i < urb->num_sgs; i++)
+ skb_free_frag(sg_virt(&urb->sg[i]));
+
+ if (urb->transfer_buffer)
+ skb_free_frag(urb->transfer_buffer);
+
+ usb_free_urb(urb);
+}
+
+static void
+mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
+ struct urb *urb, usb_complete_t complete_fn,
+ void *context)
+{
+ struct usb_interface *uintf = to_usb_interface(dev->dev);
+ struct usb_device *udev = interface_to_usbdev(uintf);
+ unsigned int pipe;
+
+ if (dir == USB_DIR_IN)
+ pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
+ else
+ pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);
+
+ urb->dev = udev;
+ urb->pipe = pipe;
+ urb->complete = complete_fn;
+ urb->context = context;
+}
+
+static struct urb *
+mt76u_get_next_rx_entry(struct mt76_queue *q)
+{
+ struct urb *urb = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&q->lock, flags);
+ if (q->queued > 0) {
+ urb = q->entry[q->tail].urb;
+ q->tail = (q->tail + 1) % q->ndesc;
+ q->queued--;
+ }
+ spin_unlock_irqrestore(&q->lock, flags);
+
+ return urb;
+}
+
+static int
+mt76u_get_rx_entry_len(struct mt76_dev *dev, u8 *data,
+ u32 data_len)
+{
+ u16 dma_len, min_len;
+
+ dma_len = get_unaligned_le16(data);
+ if (dev->drv->drv_flags & MT_DRV_RX_DMA_HDR)
+ return dma_len;
+
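+ /* Without MT_DRV_RX_DMA_HDR the transfer carries a DMA header, an RXWI
+ * descriptor and a trailing FCE info word around the payload, so reject
+ * entries that are too short or whose DMA length is not 4-byte aligned.
+ */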
+ min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN + MT_FCE_INFO_LEN;
+ if (data_len < min_len || !dma_len ||
+ dma_len + MT_DMA_HDR_LEN > data_len ||
+ (dma_len & 0x3))
+ return -EINVAL;
+ return dma_len;
+}
+
+static struct sk_buff *
+mt76u_build_rx_skb(struct mt76_dev *dev, void *data,
+ int len, int buf_size)
+{
+ int head_room, drv_flags = dev->drv->drv_flags;
+ struct sk_buff *skb;
+
+ head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
+ if (SKB_WITH_OVERHEAD(buf_size) < head_room + len) {
+ struct page *page;
+
+ /* slow path, not enough space for data and
+ * skb_shared_info
+ */
+ skb = alloc_skb(MT_SKB_HEAD_LEN, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+
+ skb_put_data(skb, data + head_room, MT_SKB_HEAD_LEN);
+ data += head_room + MT_SKB_HEAD_LEN;
+ page = virt_to_head_page(data);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ page, data - page_address(page),
+ len - MT_SKB_HEAD_LEN, buf_size);
+
+ return skb;
+ }
+
+ /* fast path */
+ skb = build_skb(data, buf_size);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, head_room);
+ __skb_put(skb, len);
+
+ return skb;
+}
+
+static int
+mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
+ int buf_size)
+{
+ u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer;
+ int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length;
+ int len, nsgs = 1, head_room, drv_flags = dev->drv->drv_flags;
+ struct sk_buff *skb;
+
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state))
+ return 0;
+
+ len = mt76u_get_rx_entry_len(dev, data, urb->actual_length);
+ if (len < 0)
+ return 0;
+
+ head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
+ data_len = min_t(int, len, data_len - head_room);
+ skb = mt76u_build_rx_skb(dev, data, data_len, buf_size);
+ if (!skb)
+ return 0;
+
+ len -= data_len;
+ while (len > 0 && nsgs < urb->num_sgs) {
+ data_len = min_t(int, len, urb->sg[nsgs].length);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ sg_page(&urb->sg[nsgs]),
+ urb->sg[nsgs].offset, data_len,
+ buf_size);
+ len -= data_len;
+ nsgs++;
+ }
+ dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);
+
+ return nsgs;
+}
+
+static void mt76u_complete_rx(struct urb *urb)
+{
+ struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
+ struct mt76_queue *q = urb->context;
+ unsigned long flags;
+
+ trace_rx_urb(dev, urb);
+
+ switch (urb->status) {
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ case -ENOENT:
+ return;
+ default:
+ dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
+ urb->status);
+ fallthrough;
+ case 0:
+ break;
+ }
+
+ spin_lock_irqsave(&q->lock, flags);
+ if (WARN_ONCE(q->entry[q->head].urb != urb, "rx urb mismatch"))
+ goto out;
+
+ q->head = (q->head + 1) % q->ndesc;
+ q->queued++;
+ tasklet_schedule(&dev->usb.rx_tasklet);
+out:
+ spin_unlock_irqrestore(&q->lock, flags);
+}
+
+static int
+mt76u_submit_rx_buf(struct mt76_dev *dev, enum mt76_rxq_id qid,
+ struct urb *urb)
+{
+ int ep = qid == MT_RXQ_MAIN ? MT_EP_IN_PKT_RX : MT_EP_IN_CMD_RESP;
+
+ mt76u_fill_bulk_urb(dev, USB_DIR_IN, ep, urb,
+ mt76u_complete_rx, &dev->q_rx[qid]);
+ trace_submit_urb(dev, urb);
+
+ return usb_submit_urb(urb, GFP_ATOMIC);
+}
+
+static void
+mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
+{
+ int qid = q - &dev->q_rx[MT_RXQ_MAIN];
+ struct urb *urb;
+ int err, count;
+
+ while (true) {
+ urb = mt76u_get_next_rx_entry(q);
+ if (!urb)
+ break;
+
+ count = mt76u_process_rx_entry(dev, urb, q->buf_size);
+ if (count > 0) {
+ err = mt76u_refill_rx(dev, q, urb, count, GFP_ATOMIC);
+ if (err < 0)
+ break;
+ }
+ mt76u_submit_rx_buf(dev, qid, urb);
+ }
+ if (qid == MT_RXQ_MAIN)
+ mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
+}
+
+static void mt76u_rx_tasklet(unsigned long data)
+{
+ struct mt76_dev *dev = (struct mt76_dev *)data;
+ int i;
+
+ rcu_read_lock();
+ mt76_for_each_q_rx(dev, i)
+ mt76u_process_rx_queue(dev, &dev->q_rx[i]);
+ rcu_read_unlock();
+}
+
+static int
+mt76u_submit_rx_buffers(struct mt76_dev *dev, enum mt76_rxq_id qid)
+{
+ struct mt76_queue *q = &dev->q_rx[qid];
+ unsigned long flags;
+ int i, err = 0;
+
+ spin_lock_irqsave(&q->lock, flags);
+ for (i = 0; i < q->ndesc; i++) {
+ err = mt76u_submit_rx_buf(dev, qid, q->entry[i].urb);
+ if (err < 0)
+ break;
+ }
+ q->head = q->tail = 0;
+ q->queued = 0;
+ spin_unlock_irqrestore(&q->lock, flags);
+
+ return err;
+}
+
+static int
+mt76u_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
+{
+ struct mt76_queue *q = &dev->q_rx[qid];
+ int i, err;
+
+ spin_lock_init(&q->lock);
+ q->entry = devm_kcalloc(dev->dev,
+ MT_NUM_RX_ENTRIES, sizeof(*q->entry),
+ GFP_KERNEL);
+ if (!q->entry)
+ return -ENOMEM;
+
+ q->ndesc = MT_NUM_RX_ENTRIES;
+ q->buf_size = PAGE_SIZE;
+
+ for (i = 0; i < q->ndesc; i++) {
+ err = mt76u_rx_urb_alloc(dev, q, &q->entry[i]);
+ if (err < 0)
+ return err;
+ }
+
+ return mt76u_submit_rx_buffers(dev, qid);
+}
+
+int mt76u_alloc_mcu_queue(struct mt76_dev *dev)
+{
+ return mt76u_alloc_rx_queue(dev, MT_RXQ_MCU);
+}
+EXPORT_SYMBOL_GPL(mt76u_alloc_mcu_queue);
+
+static void
+mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
+{
+ struct page *page;
+ int i;
+
+ for (i = 0; i < q->ndesc; i++)
+ mt76u_urb_free(q->entry[i].urb);
+
+ if (!q->rx_page.va)
+ return;
+
+ page = virt_to_page(q->rx_page.va);
+ __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+ memset(&q->rx_page, 0, sizeof(q->rx_page));
+}
+
+static void mt76u_free_rx(struct mt76_dev *dev)
+{
+ int i;
+
+ mt76_for_each_q_rx(dev, i)
+ mt76u_free_rx_queue(dev, &dev->q_rx[i]);
+}
+
+void mt76u_stop_rx(struct mt76_dev *dev)
+{
+ int i;
+
+ mt76_for_each_q_rx(dev, i) {
+ struct mt76_queue *q = &dev->q_rx[i];
+ int j;
+
+ for (j = 0; j < q->ndesc; j++)
+ usb_poison_urb(q->entry[j].urb);
+ }
+
+ tasklet_kill(&dev->usb.rx_tasklet);
+}
+EXPORT_SYMBOL_GPL(mt76u_stop_rx);
+
+int mt76u_resume_rx(struct mt76_dev *dev)
+{
+ int i;
+
+ mt76_for_each_q_rx(dev, i) {
+ struct mt76_queue *q = &dev->q_rx[i];
+ int err, j;
+
+ for (j = 0; j < q->ndesc; j++)
+ usb_unpoison_urb(q->entry[j].urb);
+
+ err = mt76u_submit_rx_buffers(dev, i);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76u_resume_rx);
+
+static void mt76u_tx_worker(struct mt76_worker *w)
+{
+ struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker);
+ struct mt76_queue_entry entry;
+ struct mt76_queue *q;
+ bool wake;
+ int i;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ q = dev->q_tx[i];
+
+ while (q->queued > 0) {
+ if (!q->entry[q->tail].done)
+ break;
+
+ entry = q->entry[q->tail];
+ q->entry[q->tail].done = false;
+
+ mt76_queue_tx_complete(dev, q, &entry);
+ }
+
+ wake = q->stopped && q->queued < q->ndesc - 8;
+ if (wake)
+ q->stopped = false;
+
+ if (!q->queued)
+ wake_up(&dev->tx_wait);
+
+ mt76_txq_schedule(&dev->phy, i);
+
+ if (dev->drv->tx_status_data &&
+ !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
+ queue_work(dev->wq, &dev->usb.stat_work);
+ if (wake)
+ ieee80211_wake_queue(dev->hw, i);
+ }
+}
+
+static void mt76u_tx_status_data(struct work_struct *work)
+{
+ struct mt76_usb *usb;
+ struct mt76_dev *dev;
+ u8 update = 1;
+ u16 count = 0;
+
+ usb = container_of(work, struct mt76_usb, stat_work);
+ dev = container_of(usb, struct mt76_dev, usb);
+
+ while (true) {
+ if (test_bit(MT76_REMOVED, &dev->phy.state))
+ break;
+
+ if (!dev->drv->tx_status_data(dev, &update))
+ break;
+ count++;
+ }
+
+ if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
+ queue_work(dev->wq, &usb->stat_work);
+ else
+ clear_bit(MT76_READING_STATS, &dev->phy.state);
+}
+
+static void mt76u_complete_tx(struct urb *urb)
+{
+ struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
+ struct mt76_queue_entry *e = urb->context;
+
+ if (mt76u_urb_error(urb))
+ dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
+ e->done = true;
+
+ mt76_worker_schedule(&dev->tx_worker);
+}
+
+static int
+mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
+ struct urb *urb)
+{
+ urb->transfer_buffer_length = skb->len;
+
+ if (!dev->usb.sg_en) {
+ urb->transfer_buffer = skb->data;
+ return 0;
+ }
+
+ sg_init_table(urb->sg, MT_TX_SG_MAX_SIZE);
+ urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
+ if (!urb->num_sgs)
+ return -ENOMEM;
+
+ return urb->num_sgs;
+}
+
+static int
+mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta)
+{
+ struct mt76_queue *q = dev->q_tx[qid];
+ struct mt76_tx_info tx_info = {
+ .skb = skb,
+ };
+ u16 idx = q->head;
+ int err;
+
+ if (q->queued == q->ndesc)
+ return -ENOSPC;
+
+ skb->prev = skb->next = NULL;
+ err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
+ if (err < 0)
+ return err;
+
+ err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb);
+ if (err < 0)
+ return err;
+
+ mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx),
+ q->entry[idx].urb, mt76u_complete_tx,
+ &q->entry[idx]);
+
+ q->head = (q->head + 1) % q->ndesc;
+ q->entry[idx].skb = tx_info.skb;
+ q->queued++;
+
+ return idx;
+}
+
+static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
+{
+ struct urb *urb;
+ int err;
+
+ while (q->first != q->head) {
+ urb = q->entry[q->first].urb;
+
+ trace_submit_urb(dev, urb);
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err < 0) {
+ if (err == -ENODEV)
+ set_bit(MT76_REMOVED, &dev->phy.state);
+ else
+ dev_err(dev->dev, "tx urb submit failed:%d\n",
+ err);
+ break;
+ }
+ q->first = (q->first + 1) % q->ndesc;
+ }
+}
+
+static u8 mt76u_ac_to_hwq(struct mt76_dev *dev, u8 ac)
+{
+ if (mt76_chip(dev) == 0x7663) {
+ static const u8 lmac_queue_map[] = {
+ /* ac to lmac mapping */
+ [IEEE80211_AC_BK] = 0,
+ [IEEE80211_AC_BE] = 1,
+ [IEEE80211_AC_VI] = 2,
+ [IEEE80211_AC_VO] = 4,
+ };
+
+ if (WARN_ON(ac >= ARRAY_SIZE(lmac_queue_map)))
+ return 1; /* BE */
+
+ return lmac_queue_map[ac];
+ }
+
+ return mt76_ac_to_hwq(ac);
+}
+
+static int mt76u_alloc_tx(struct mt76_dev *dev)
+{
+ struct mt76_queue *q;
+ int i, j, err;
+
+ for (i = 0; i <= MT_TXQ_PSD; i++) {
+ if (i >= IEEE80211_NUM_ACS) {
+ dev->q_tx[i] = dev->q_tx[0];
+ continue;
+ }
+
+ q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
+ if (!q)
+ return -ENOMEM;
+
+ spin_lock_init(&q->lock);
+ q->hw_idx = mt76u_ac_to_hwq(dev, i);
+ dev->q_tx[i] = q;
+
+ q->entry = devm_kcalloc(dev->dev,
+ MT_NUM_TX_ENTRIES, sizeof(*q->entry),
+ GFP_KERNEL);
+ if (!q->entry)
+ return -ENOMEM;
+
+ q->ndesc = MT_NUM_TX_ENTRIES;
+ for (j = 0; j < q->ndesc; j++) {
+ err = mt76u_urb_alloc(dev, &q->entry[j],
+ MT_TX_SG_MAX_SIZE);
+ if (err < 0)
+ return err;
+ }
+ }
+ return 0;
+}
+
+static void mt76u_free_tx(struct mt76_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ struct mt76_queue *q;
+ int j;
+
+ q = dev->q_tx[i];
+ if (!q)
+ continue;
+
+ for (j = 0; j < q->ndesc; j++)
+ usb_free_urb(q->entry[j].urb);
+ }
+}
+
+void mt76u_stop_tx(struct mt76_dev *dev)
+{
+ int ret;
+
+ ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(&dev->phy),
+ HZ / 5);
+ if (!ret) {
+ struct mt76_queue_entry entry;
+ struct mt76_queue *q;
+ int i, j;
+
+ dev_err(dev->dev, "timed out waiting for pending tx\n");
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ q = dev->q_tx[i];
+ if (!q)
+ continue;
+
+ for (j = 0; j < q->ndesc; j++)
+ usb_kill_urb(q->entry[j].urb);
+ }
+
+ mt76_worker_disable(&dev->tx_worker);
+
+ /* On device removal we might still queue skbs, but mt76u_tx_kick()
+ * will fail to submit the urb; clean up those skbs manually.
+ */
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ q = dev->q_tx[i];
+ if (!q)
+ continue;
+
+ while (q->queued > 0) {
+ entry = q->entry[q->tail];
+ q->entry[q->tail].done = false;
+ mt76_queue_tx_complete(dev, q, &entry);
+ }
+ }
+
+ mt76_worker_enable(&dev->tx_worker);
+ }
+
+ cancel_work_sync(&dev->usb.stat_work);
+ clear_bit(MT76_READING_STATS, &dev->phy.state);
+
+ mt76_tx_status_check(dev, NULL, true);
+}
+EXPORT_SYMBOL_GPL(mt76u_stop_tx);
+
+void mt76u_queues_deinit(struct mt76_dev *dev)
+{
+ mt76u_stop_rx(dev);
+ mt76u_stop_tx(dev);
+
+ mt76u_free_rx(dev);
+ mt76u_free_tx(dev);
+}
+EXPORT_SYMBOL_GPL(mt76u_queues_deinit);
+
+int mt76u_alloc_queues(struct mt76_dev *dev)
+{
+ int err;
+
+ err = mt76u_alloc_rx_queue(dev, MT_RXQ_MAIN);
+ if (err < 0)
+ return err;
+
+ return mt76u_alloc_tx(dev);
+}
+EXPORT_SYMBOL_GPL(mt76u_alloc_queues);
+
+static const struct mt76_queue_ops usb_queue_ops = {
+ .tx_queue_skb = mt76u_tx_queue_skb,
+ .kick = mt76u_tx_kick,
+};
+
+int mt76u_init(struct mt76_dev *dev,
+ struct usb_interface *intf, bool ext)
+{
+ static struct mt76_bus_ops mt76u_ops = {
+ .read_copy = mt76u_read_copy_ext,
+ .wr_rp = mt76u_wr_rp,
+ .rd_rp = mt76u_rd_rp,
+ .type = MT76_BUS_USB,
+ };
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct mt76_usb *usb = &dev->usb;
+ int err = -ENOMEM;
+
+ mt76u_ops.rr = ext ? mt76u_rr_ext : mt76u_rr;
+ mt76u_ops.wr = ext ? mt76u_wr_ext : mt76u_wr;
+ mt76u_ops.rmw = ext ? mt76u_rmw_ext : mt76u_rmw;
+ mt76u_ops.write_copy = ext ? mt76u_copy_ext : mt76u_copy;
+
+ dev->tx_worker.fn = mt76u_tx_worker;
+ tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
+ INIT_WORK(&usb->stat_work, mt76u_tx_status_data);
+
+ usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0), 1);
+ if (usb->data_len < 32)
+ usb->data_len = 32;
+
+ usb->data = devm_kmalloc(dev->dev, usb->data_len, GFP_KERNEL);
+ if (!usb->data)
+ goto error;
+
+ mutex_init(&usb->usb_ctrl_mtx);
+ dev->bus = &mt76u_ops;
+ dev->queue_ops = &usb_queue_ops;
+
+ dev_set_drvdata(&udev->dev, dev);
+
+ usb->sg_en = mt76u_check_sg(dev);
+
+ err = mt76u_set_endpoints(intf, usb);
+ if (err < 0)
+ goto error;
+
+ return 0;
+
+error:
+ destroy_workqueue(dev->wq);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mt76u_init);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/usb_trace.c b/drivers/net/wireless/mediatek/mt76/usb_trace.c
new file mode 100644
index 000000000..9942bdd61
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/usb_trace.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "usb_trace.h"
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/usb_trace.h b/drivers/net/wireless/mediatek/mt76/usb_trace.h
new file mode 100644
index 000000000..f5ab3215a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/usb_trace.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#if !defined(__MT76_USB_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MT76_USB_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "mt76.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mt76_usb
+
+#define MAXNAME 32
+#define DEV_ENTRY __array(char, wiphy_name, 32)
+#define DEV_ASSIGN strlcpy(__entry->wiphy_name, \
+ wiphy_name(dev->hw->wiphy), MAXNAME)
+#define DEV_PR_FMT "%s "
+#define DEV_PR_ARG __entry->wiphy_name
+
+#define REG_ENTRY __field(u32, reg) __field(u32, val)
+#define REG_ASSIGN __entry->reg = reg; __entry->val = val
+#define REG_PR_FMT "reg:0x%04x=0x%08x"
+#define REG_PR_ARG __entry->reg, __entry->val
+
+DECLARE_EVENT_CLASS(dev_reg_evt,
+ TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ REG_ENTRY
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ REG_ASSIGN;
+ ),
+ TP_printk(
+ DEV_PR_FMT REG_PR_FMT,
+ DEV_PR_ARG, REG_PR_ARG
+ )
+);
+
+DEFINE_EVENT(dev_reg_evt, usb_reg_rr,
+ TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val)
+);
+
+DEFINE_EVENT(dev_reg_evt, usb_reg_wr,
+ TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val)
+);
+
+DECLARE_EVENT_CLASS(urb_transfer,
+ TP_PROTO(struct mt76_dev *dev, struct urb *u),
+ TP_ARGS(dev, u),
+ TP_STRUCT__entry(
+ DEV_ENTRY __field(unsigned int, pipe) __field(u32, len)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->pipe = u->pipe;
+ __entry->len = u->transfer_buffer_length;
+ ),
+ TP_printk(DEV_PR_FMT "p:%08x len:%u",
+ DEV_PR_ARG, __entry->pipe, __entry->len)
+);
+
+DEFINE_EVENT(urb_transfer, submit_urb,
+ TP_PROTO(struct mt76_dev *dev, struct urb *u),
+ TP_ARGS(dev, u)
+);
+
+DEFINE_EVENT(urb_transfer, rx_urb,
+ TP_PROTO(struct mt76_dev *dev, struct urb *u),
+ TP_ARGS(dev, u)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE usb_trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/mediatek/mt76/util.c b/drivers/net/wireless/mediatek/mt76/util.c
new file mode 100644
index 000000000..581964425
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/util.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/module.h>
+#include "mt76.h"
+
+bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+ int timeout)
+{
+ u32 cur;
+
+ timeout /= 10;
+ do {
+ cur = __mt76_rr(dev, offset) & mask;
+ if (cur == val)
+ return true;
+
+ udelay(10);
+ } while (timeout-- > 0);
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(__mt76_poll);
+
+bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+ int timeout)
+{
+ u32 cur;
+
+ timeout /= 10;
+ do {
+ cur = __mt76_rr(dev, offset) & mask;
+ if (cur == val)
+ return true;
+
+ usleep_range(10000, 20000);
+ } while (timeout-- > 0);
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(__mt76_poll_msec);
+
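+ /* Scan the wcid bitmap one 32-bit word at a time, claim the first free
+ * slot and return its index, or -1 when the table is full.
+ */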
+int mt76_wcid_alloc(u32 *mask, int size)
+{
+ int i, idx = 0, cur;
+
+ for (i = 0; i < DIV_ROUND_UP(size, 32); i++) {
+ idx = ffs(~mask[i]);
+ if (!idx)
+ continue;
+
+ idx--;
+ cur = i * 32 + idx;
+ if (cur >= size)
+ break;
+
+ mask[i] |= BIT(idx);
+ return cur;
+ }
+
+ return -1;
+}
+EXPORT_SYMBOL_GPL(mt76_wcid_alloc);
+
+int mt76_get_min_avg_rssi(struct mt76_dev *dev, bool ext_phy)
+{
+ struct mt76_wcid *wcid;
+ int i, j, min_rssi = 0;
+ s8 cur_rssi;
+
+ local_bh_disable();
+ rcu_read_lock();
+
+ for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) {
+ u32 mask = dev->wcid_mask[i];
+ u32 phy_mask = dev->wcid_phy_mask[i];
+
+ if (!mask)
+ continue;
+
+ for (j = i * 32; mask; j++, mask >>= 1, phy_mask >>= 1) {
+ if (!(mask & 1))
+ continue;
+
+ if (!!(phy_mask & 1) != ext_phy)
+ continue;
+
+ wcid = rcu_dereference(dev->wcid[j]);
+ if (!wcid)
+ continue;
+
+ spin_lock(&dev->rx_lock);
+ if (wcid->inactive_count++ < 5)
+ cur_rssi = -ewma_signal_read(&wcid->rssi);
+ else
+ cur_rssi = 0;
+ spin_unlock(&dev->rx_lock);
+
+ if (cur_rssi < min_rssi)
+ min_rssi = cur_rssi;
+ }
+ }
+
+ rcu_read_unlock();
+ local_bh_enable();
+
+ return min_rssi;
+}
+EXPORT_SYMBOL_GPL(mt76_get_min_avg_rssi);
+
+int __mt76_worker_fn(void *ptr)
+{
+ struct mt76_worker *w = ptr;
+
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if (kthread_should_park()) {
+ kthread_parkme();
+ continue;
+ }
+
+ if (!test_and_clear_bit(MT76_WORKER_SCHEDULED, &w->state)) {
+ schedule();
+ continue;
+ }
+
+ set_bit(MT76_WORKER_RUNNING, &w->state);
+ set_current_state(TASK_RUNNING);
+ w->fn(w);
+ cond_resched();
+ clear_bit(MT76_WORKER_RUNNING, &w->state);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__mt76_worker_fn);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/util.h b/drivers/net/wireless/mediatek/mt76/util.h
new file mode 100644
index 000000000..1c363ea9a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/util.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
+ */
+
+#ifndef __MT76_UTIL_H
+#define __MT76_UTIL_H
+
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+#include <net/mac80211.h>
+
+struct mt76_worker
+{
+ struct task_struct *task;
+ void (*fn)(struct mt76_worker *);
+ unsigned long state;
+};
+
+enum {
+ MT76_WORKER_SCHEDULED,
+ MT76_WORKER_RUNNING,
+};
+
+#define MT76_INCR(_var, _size) \
+ (_var = (((_var) + 1) % (_size)))
+
+int mt76_wcid_alloc(u32 *mask, int size);
+
+static inline bool
+mt76_wcid_mask_test(u32 *mask, int idx)
+{
+ return mask[idx / 32] & BIT(idx % 32);
+}
+
+static inline void
+mt76_wcid_mask_set(u32 *mask, int idx)
+{
+ mask[idx / 32] |= BIT(idx % 32);
+}
+
+static inline void
+mt76_wcid_mask_clear(u32 *mask, int idx)
+{
+ mask[idx / 32] &= ~BIT(idx % 32);
+}
+
+static inline void
+mt76_skb_set_moredata(struct sk_buff *skb, bool enable)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+ if (enable)
+ hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+ else
+ hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+}
+
+int __mt76_worker_fn(void *ptr);
+
+static inline int
+mt76_worker_setup(struct ieee80211_hw *hw, struct mt76_worker *w,
+ void (*fn)(struct mt76_worker *),
+ const char *name)
+{
+ const char *dev_name = wiphy_name(hw->wiphy);
+ int ret;
+
+ if (fn)
+ w->fn = fn;
+ w->task = kthread_create(__mt76_worker_fn, w, "mt76-%s %s",
+ name, dev_name);
+
+ ret = PTR_ERR_OR_ZERO(w->task);
+ if (ret) {
+ w->task = NULL;
+ return ret;
+ }
+
+ wake_up_process(w->task);
+
+ return 0;
+}
+
+static inline void mt76_worker_schedule(struct mt76_worker *w)
+{
+ if (!w->task)
+ return;
+
+ if (!test_and_set_bit(MT76_WORKER_SCHEDULED, &w->state) &&
+ !test_bit(MT76_WORKER_RUNNING, &w->state))
+ wake_up_process(w->task);
+}
+
+static inline void mt76_worker_disable(struct mt76_worker *w)
+{
+ if (!w->task)
+ return;
+
+ kthread_park(w->task);
+ WRITE_ONCE(w->state, 0);
+}
+
+static inline void mt76_worker_enable(struct mt76_worker *w)
+{
+ if (!w->task)
+ return;
+
+ kthread_unpark(w->task);
+ mt76_worker_schedule(w);
+}
+
+static inline void mt76_worker_teardown(struct mt76_worker *w)
+{
+ if (!w->task)
+ return;
+
+ kthread_stop(w->task);
+ w->task = NULL;
+}
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/Kconfig b/drivers/net/wireless/mediatek/mt7601u/Kconfig
new file mode 100644
index 000000000..4a8b96280
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/Kconfig
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config MT7601U
+ tristate "MediaTek MT7601U (USB) support"
+ depends on MAC80211
+ depends on USB
+ help
+ This adds support for MT7601U-based wireless USB dongles.
diff --git a/drivers/net/wireless/mediatek/mt7601u/Makefile b/drivers/net/wireless/mediatek/mt7601u/Makefile
new file mode 100644
index 000000000..30f2391c7
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_MT7601U) += mt7601u.o
+
+mt7601u-objs = \
+ usb.o init.o main.o mcu.o trace.o dma.o core.o eeprom.o phy.o \
+ mac.o util.o debugfs.o tx.o
+
+CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/mediatek/mt7601u/core.c b/drivers/net/wireless/mediatek/mt7601u/core.c
new file mode 100644
index 000000000..907443b36
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/core.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#include "mt7601u.h"
+
+int mt7601u_wait_asic_ready(struct mt7601u_dev *dev)
+{
+ int i = 100;
+ u32 val;
+
+ do {
+ if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+ return -EIO;
+
+ val = mt7601u_rr(dev, MT_MAC_CSR0);
+ if (val && ~val)
+ return 0;
+
+ udelay(10);
+ } while (i--);
+
+ return -EIO;
+}
+
+bool mt76_poll(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val,
+ int timeout)
+{
+ u32 cur;
+
+ timeout /= 10;
+ do {
+ if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+ return false;
+
+ cur = mt7601u_rr(dev, offset) & mask;
+ if (cur == val)
+ return true;
+
+ udelay(10);
+ } while (timeout-- > 0);
+
+ dev_err(dev->dev, "Error: Time out with reg %08x\n", offset);
+
+ return false;
+}
+
+bool mt76_poll_msec(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val,
+ int timeout)
+{
+ u32 cur;
+
+ timeout /= 10;
+ do {
+ if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+ return false;
+
+ cur = mt7601u_rr(dev, offset) & mask;
+ if (cur == val)
+ return true;
+
+ msleep(10);
+ } while (timeout-- > 0);
+
+ dev_err(dev->dev, "Error: Time out with reg %08x\n", offset);
+
+ return false;
+}
diff --git a/drivers/net/wireless/mediatek/mt7601u/debugfs.c b/drivers/net/wireless/mediatek/mt7601u/debugfs.c
new file mode 100644
index 000000000..20669eacb
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/debugfs.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#include <linux/debugfs.h>
+
+#include "mt7601u.h"
+#include "eeprom.h"
+
+static int
+mt76_reg_set(void *data, u64 val)
+{
+ struct mt7601u_dev *dev = data;
+
+ mt76_wr(dev, dev->debugfs_reg, val);
+ return 0;
+}
+
+static int
+mt76_reg_get(void *data, u64 *val)
+{
+ struct mt7601u_dev *dev = data;
+
+ *val = mt76_rr(dev, dev->debugfs_reg);
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set, "0x%08llx\n");
+
+static int
+mt7601u_ampdu_stat_show(struct seq_file *file, void *data)
+{
+ struct mt7601u_dev *dev = file->private;
+ int i, j;
+
+#define stat_printf(grp, off, name) \
+ seq_printf(file, #name ":\t%llu\n", dev->stats.grp[off])
+
+ stat_printf(rx_stat, 0, rx_crc_err);
+ stat_printf(rx_stat, 1, rx_phy_err);
+ stat_printf(rx_stat, 2, rx_false_cca);
+ stat_printf(rx_stat, 3, rx_plcp_err);
+ stat_printf(rx_stat, 4, rx_fifo_overflow);
+ stat_printf(rx_stat, 5, rx_duplicate);
+
+ stat_printf(tx_stat, 0, tx_fail_cnt);
+ stat_printf(tx_stat, 1, tx_bcn_cnt);
+ stat_printf(tx_stat, 2, tx_success);
+ stat_printf(tx_stat, 3, tx_retransmit);
+ stat_printf(tx_stat, 4, tx_zero_len);
+ stat_printf(tx_stat, 5, tx_underflow);
+
+ stat_printf(aggr_stat, 0, non_aggr_tx);
+ stat_printf(aggr_stat, 1, aggr_tx);
+
+ stat_printf(zero_len_del, 0, tx_zero_len_del);
+ stat_printf(zero_len_del, 1, rx_zero_len_del);
+#undef stat_printf
+
+ seq_puts(file, "Aggregations stats:\n");
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < 8; j++)
+ seq_printf(file, "%08llx ",
+ dev->stats.aggr_n[i * 8 + j]);
+ seq_putc(file, '\n');
+ }
+
+ seq_printf(file, "recent average AMPDU len: %d\n",
+ atomic_read(&dev->avg_ampdu_len));
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mt7601u_ampdu_stat);
+
+static int
+mt7601u_eeprom_param_show(struct seq_file *file, void *data)
+{
+ struct mt7601u_dev *dev = file->private;
+ struct mt7601u_rate_power *rp = &dev->ee->power_rate_table;
+ struct tssi_data *td = &dev->ee->tssi_data;
+ int i;
+
+ seq_printf(file, "RF freq offset: %hhx\n", dev->ee->rf_freq_off);
+ seq_printf(file, "RSSI offset: %hhx %hhx\n",
+ dev->ee->rssi_offset[0], dev->ee->rssi_offset[1]);
+ seq_printf(file, "Reference temp: %hhx\n", dev->ee->ref_temp);
+ seq_printf(file, "LNA gain: %hhx\n", dev->ee->lna_gain);
+ seq_printf(file, "Reg channels: %hhu-%hhu\n", dev->ee->reg.start,
+ dev->ee->reg.start + dev->ee->reg.num - 1);
+
+ seq_puts(file, "Per rate power:\n");
+ for (i = 0; i < 2; i++)
+ seq_printf(file, "\t raw:%02hhx bw20:%02hhx bw40:%02hhx\n",
+ rp->cck[i].raw, rp->cck[i].bw20, rp->cck[i].bw40);
+ for (i = 0; i < 4; i++)
+ seq_printf(file, "\t raw:%02hhx bw20:%02hhx bw40:%02hhx\n",
+ rp->ofdm[i].raw, rp->ofdm[i].bw20, rp->ofdm[i].bw40);
+ for (i = 0; i < 4; i++)
+ seq_printf(file, "\t raw:%02hhx bw20:%02hhx bw40:%02hhx\n",
+ rp->ht[i].raw, rp->ht[i].bw20, rp->ht[i].bw40);
+
+ seq_puts(file, "Per channel power:\n");
+ for (i = 0; i < 7; i++)
+ seq_printf(file, "\t tx_power ch%u:%02hhx ch%u:%02hhx\n",
+ i * 2 + 1, dev->ee->chan_pwr[i * 2],
+ i * 2 + 2, dev->ee->chan_pwr[i * 2 + 1]);
+
+ if (!dev->ee->tssi_enabled)
+ return 0;
+
+ seq_puts(file, "TSSI:\n");
+ seq_printf(file, "\t slope:%02hhx\n", td->slope);
+ seq_printf(file, "\t offset=%02hhx %02hhx %02hhx\n",
+ td->offset[0], td->offset[1], td->offset[2]);
+ seq_printf(file, "\t delta_off:%08x\n", td->tx0_delta_offset);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mt7601u_eeprom_param);
+
+void mt7601u_init_debugfs(struct mt7601u_dev *dev)
+{
+ struct dentry *dir;
+
+ dir = debugfs_create_dir("mt7601u", dev->hw->wiphy->debugfsdir);
+ if (!dir)
+ return;
+
+ debugfs_create_u8("temperature", 0400, dir, &dev->raw_temp);
+ debugfs_create_u32("temp_mode", 0400, dir, &dev->temp_mode);
+
+ debugfs_create_u32("regidx", 0600, dir, &dev->debugfs_reg);
+ debugfs_create_file("regval", 0600, dir, dev, &fops_regval);
+ debugfs_create_file("ampdu_stat", 0400, dir, dev, &mt7601u_ampdu_stat_fops);
+ debugfs_create_file("eeprom_param", 0400, dir, dev, &mt7601u_eeprom_param_fops);
+}
diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c
new file mode 100644
index 000000000..8ba291abe
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/dma.c
@@ -0,0 +1,540 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#include "mt7601u.h"
+#include "dma.h"
+#include "usb.h"
+#include "trace.h"
+
+static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
+ struct mt7601u_dma_buf_rx *e, gfp_t gfp);
+
+static unsigned int ieee80211_get_hdrlen_from_buf(const u8 *data, unsigned len)
+{
+ const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data;
+ unsigned int hdrlen;
+
+ if (unlikely(len < 10))
+ return 0;
+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ if (unlikely(hdrlen > len))
+ return 0;
+ return hdrlen;
+}
+
+static struct sk_buff *
+mt7601u_rx_skb_from_seg(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi,
+ void *data, u32 seg_len, u32 truesize, struct page *p)
+{
+ struct sk_buff *skb;
+ u32 true_len, hdr_len = 0, copy, frag;
+
+ skb = alloc_skb(p ? 128 : seg_len, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+
+ true_len = mt76_mac_process_rx(dev, skb, data, rxwi);
+ if (!true_len || true_len > seg_len)
+ goto bad_frame;
+
+ hdr_len = ieee80211_get_hdrlen_from_buf(data, true_len);
+ if (!hdr_len)
+ goto bad_frame;
+
+ if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
+ skb_put_data(skb, data, hdr_len);
+
+ data += hdr_len + 2;
+ true_len -= hdr_len;
+ hdr_len = 0;
+ }
+
+ /* If not doing paged RX, the allocated skb will always have enough space */
+ copy = (true_len <= skb_tailroom(skb)) ? true_len : hdr_len + 8;
+ frag = true_len - copy;
+
+ skb_put_data(skb, data, copy);
+ data += copy;
+
+ if (frag) {
+ skb_add_rx_frag(skb, 0, p, data - page_address(p),
+ frag, truesize);
+ get_page(p);
+ }
+
+ return skb;
+
+bad_frame:
+ dev_err_ratelimited(dev->dev, "Error: incorrect frame len:%u hdr:%u\n",
+ true_len, hdr_len);
+ dev_kfree_skb(skb);
+ return NULL;
+}
+
+static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,
+ u32 seg_len, struct page *p)
+{
+ struct sk_buff *skb;
+ struct mt7601u_rxwi *rxwi;
+ u32 fce_info, truesize = seg_len;
+
+ /* The DMA_INFO field at the beginning of the segment contains only some
+ * of the information; the FCE descriptor at the end must be read as well.
+ */
+ fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN);
+ seg_len -= MT_FCE_INFO_LEN;
+
+ data += MT_DMA_HDR_LEN;
+ seg_len -= MT_DMA_HDR_LEN;
+
+ rxwi = (struct mt7601u_rxwi *) data;
+ data += sizeof(struct mt7601u_rxwi);
+ seg_len -= sizeof(struct mt7601u_rxwi);
+
+ if (unlikely(rxwi->zero[0] || rxwi->zero[1] || rxwi->zero[2]))
+ dev_err_once(dev->dev, "Error: RXWI zero fields are set\n");
+ if (unlikely(FIELD_GET(MT_RXD_INFO_TYPE, fce_info)))
+ dev_err_once(dev->dev, "Error: RX path seen a non-pkt urb\n");
+
+ trace_mt_rx(dev, rxwi, fce_info);
+
+ skb = mt7601u_rx_skb_from_seg(dev, rxwi, data, seg_len, truesize, p);
+ if (!skb)
+ return;
+
+ spin_lock(&dev->mac_lock);
+ ieee80211_rx(dev->hw, skb);
+ spin_unlock(&dev->mac_lock);
+}
+
+static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)
+{
+ u32 min_seg_len = MT_DMA_HDR_LEN + MT_RX_INFO_LEN +
+ sizeof(struct mt7601u_rxwi) + MT_FCE_INFO_LEN;
+ u16 dma_len = get_unaligned_le16(data);
+
+ if (data_len < min_seg_len ||
+ WARN_ON_ONCE(!dma_len) ||
+ WARN_ON_ONCE(dma_len + MT_DMA_HDRS > data_len) ||
+ WARN_ON_ONCE(dma_len & 0x3) ||
+ WARN_ON_ONCE(dma_len < min_seg_len))
+ return 0;
+
+ return MT_DMA_HDRS + dma_len;
+}
+
+static void
+mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
+{
+ u32 seg_len, data_len = e->urb->actual_length;
+ u8 *data = page_address(e->p);
+ struct page *new_p = NULL;
+ int cnt = 0;
+
+ if (!test_bit(MT7601U_STATE_INITIALIZED, &dev->state))
+ return;
+
+ /* Copy if there is very little data in the buffer. */
+ if (data_len > 512)
+ new_p = dev_alloc_pages(MT_RX_ORDER);
+
+ while ((seg_len = mt7601u_rx_next_seg_len(data, data_len))) {
+ mt7601u_rx_process_seg(dev, data, seg_len, new_p ? e->p : NULL);
+
+ data_len -= seg_len;
+ data += seg_len;
+ cnt++;
+ }
+
+ if (cnt > 1)
+ trace_mt_rx_dma_aggr(dev, cnt, !!new_p);
+
+ if (new_p) {
+ /* we have one extra ref from the allocator */
+ put_page(e->p);
+ e->p = new_p;
+ }
+}
+
+static struct mt7601u_dma_buf_rx *
+mt7601u_rx_get_pending_entry(struct mt7601u_dev *dev)
+{
+ struct mt7601u_rx_queue *q = &dev->rx_q;
+ struct mt7601u_dma_buf_rx *buf = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->rx_lock, flags);
+
+ if (!q->pending)
+ goto out;
+
+ buf = &q->e[q->start];
+ q->pending--;
+ q->start = (q->start + 1) % q->entries;
+out:
+ spin_unlock_irqrestore(&dev->rx_lock, flags);
+
+ return buf;
+}
+
+static void mt7601u_complete_rx(struct urb *urb)
+{
+ struct mt7601u_dev *dev = urb->context;
+ struct mt7601u_rx_queue *q = &dev->rx_q;
+ unsigned long flags;
+
+ /* do not schedule the rx tasklet if the urb has been unlinked
+ * or the device has been removed
+ */
+ switch (urb->status) {
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ case -ENOENT:
+ return;
+ default:
+ dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
+ urb->status);
+ fallthrough;
+ case 0:
+ break;
+ }
+
+ spin_lock_irqsave(&dev->rx_lock, flags);
+ if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
+ goto out;
+
+ q->end = (q->end + 1) % q->entries;
+ q->pending++;
+ tasklet_schedule(&dev->rx_tasklet);
+out:
+ spin_unlock_irqrestore(&dev->rx_lock, flags);
+}
+
+static void mt7601u_rx_tasklet(unsigned long data)
+{
+ struct mt7601u_dev *dev = (struct mt7601u_dev *) data;
+ struct mt7601u_dma_buf_rx *e;
+
+ while ((e = mt7601u_rx_get_pending_entry(dev))) {
+ if (e->urb->status)
+ continue;
+
+ mt7601u_rx_process_entry(dev, e);
+ mt7601u_submit_rx_buf(dev, e, GFP_ATOMIC);
+ }
+}
+
+static void mt7601u_complete_tx(struct urb *urb)
+{
+ struct mt7601u_tx_queue *q = urb->context;
+ struct mt7601u_dev *dev = q->dev;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ switch (urb->status) {
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ case -ENOENT:
+ return;
+ default:
+ dev_err_ratelimited(dev->dev, "tx urb failed: %d\n",
+ urb->status);
+ fallthrough;
+ case 0:
+ break;
+ }
+
+ spin_lock_irqsave(&dev->tx_lock, flags);
+ if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
+ goto out;
+
+ skb = q->e[q->start].skb;
+ q->e[q->start].skb = NULL;
+ trace_mt_tx_dma_done(dev, skb);
+
+ __skb_queue_tail(&dev->tx_skb_done, skb);
+ tasklet_schedule(&dev->tx_tasklet);
+
+ if (q->used == q->entries - q->entries / 8)
+ ieee80211_wake_queue(dev->hw, skb_get_queue_mapping(skb));
+
+ q->start = (q->start + 1) % q->entries;
+ q->used--;
+out:
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+}
+
+static void mt7601u_tx_tasklet(unsigned long data)
+{
+ struct mt7601u_dev *dev = (struct mt7601u_dev *) data;
+ struct sk_buff_head skbs;
+ unsigned long flags;
+
+ __skb_queue_head_init(&skbs);
+
+ spin_lock_irqsave(&dev->tx_lock, flags);
+
+ set_bit(MT7601U_STATE_MORE_STATS, &dev->state);
+ if (!test_and_set_bit(MT7601U_STATE_READING_STATS, &dev->state))
+ queue_delayed_work(dev->stat_wq, &dev->stat_work,
+ msecs_to_jiffies(10));
+
+ skb_queue_splice_init(&dev->tx_skb_done, &skbs);
+
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+
+ while (!skb_queue_empty(&skbs)) {
+ struct sk_buff *skb = __skb_dequeue(&skbs);
+
+ mt7601u_tx_status(dev, skb);
+ }
+}
+
+static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
+ struct sk_buff *skb, u8 ep)
+{
+ struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+ unsigned snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep]);
+ struct mt7601u_dma_buf_tx *e;
+ struct mt7601u_tx_queue *q = &dev->tx_q[ep];
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&dev->tx_lock, flags);
+
+ if (WARN_ON(q->entries <= q->used)) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ e = &q->e[q->end];
+ usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
+ mt7601u_complete_tx, q);
+ ret = usb_submit_urb(e->urb, GFP_ATOMIC);
+ if (ret) {
+ /* Special-handle ENODEV from TX urb submission because it will
+ * often be the first ENODEV we see after the device is removed.
+ */
+ if (ret == -ENODEV)
+ set_bit(MT7601U_STATE_REMOVED, &dev->state);
+ else
+ dev_err(dev->dev, "Error: TX urb submit failed:%d\n",
+ ret);
+ goto out;
+ }
+
+ q->end = (q->end + 1) % q->entries;
+ q->used++;
+ e->skb = skb;
+
+ if (q->used >= q->entries)
+ ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
+out:
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+
+ return ret;
+}
+
+/* Map hardware Q to USB endpoint number */
+static u8 q2ep(u8 qid)
+{
+ /* TODO: take management packets to queue 5 */
+ return qid + 1;
+}
+
+/* Map USB endpoint number to Q id in the DMA engine */
+static enum mt76_qsel ep2dmaq(u8 ep)
+{
+ if (ep == 5)
+ return MT_QSEL_MGMT;
+ return MT_QSEL_EDCA;
+}
+
+int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
+ struct mt76_wcid *wcid, int hw_q)
+{
+ u8 ep = q2ep(hw_q);
+ u32 dma_flags;
+ int ret;
+
+ dma_flags = MT_TXD_PKT_INFO_80211;
+ if (wcid->hw_key_idx == 0xff)
+ dma_flags |= MT_TXD_PKT_INFO_WIV;
+
+ ret = mt7601u_dma_skb_wrap_pkt(skb, ep2dmaq(ep), dma_flags);
+ if (ret)
+ return ret;
+
+ ret = mt7601u_dma_submit_tx(dev, skb, ep);
+ if (ret) {
+ ieee80211_free_txskb(dev->hw, skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mt7601u_kill_rx(struct mt7601u_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->rx_q.entries; i++)
+ usb_poison_urb(dev->rx_q.e[i].urb);
+}
+
+static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
+ struct mt7601u_dma_buf_rx *e, gfp_t gfp)
+{
+ struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+ u8 *buf = page_address(e->p);
+ unsigned pipe;
+ int ret;
+
+ pipe = usb_rcvbulkpipe(usb_dev, dev->in_eps[MT_EP_IN_PKT_RX]);
+
+ usb_fill_bulk_urb(e->urb, usb_dev, pipe, buf, MT_RX_URB_SIZE,
+ mt7601u_complete_rx, dev);
+
+ trace_mt_submit_urb(dev, e->urb);
+ ret = usb_submit_urb(e->urb, gfp);
+ if (ret)
+ dev_err(dev->dev, "Error: submit RX URB failed:%d\n", ret);
+
+ return ret;
+}
+
+static int mt7601u_submit_rx(struct mt7601u_dev *dev)
+{
+ int i, ret;
+
+ for (i = 0; i < dev->rx_q.entries; i++) {
+ ret = mt7601u_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mt7601u_free_rx(struct mt7601u_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->rx_q.entries; i++) {
+ __free_pages(dev->rx_q.e[i].p, MT_RX_ORDER);
+ usb_free_urb(dev->rx_q.e[i].urb);
+ }
+}
+
+static int mt7601u_alloc_rx(struct mt7601u_dev *dev)
+{
+ int i;
+
+ memset(&dev->rx_q, 0, sizeof(dev->rx_q));
+ dev->rx_q.dev = dev;
+ dev->rx_q.entries = N_RX_ENTRIES;
+
+ for (i = 0; i < N_RX_ENTRIES; i++) {
+ dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
+ dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER);
+
+ if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q)
+{
+ int i;
+
+ for (i = 0; i < q->entries; i++) {
+ usb_poison_urb(q->e[i].urb);
+ if (q->e[i].skb)
+ mt7601u_tx_status(q->dev, q->e[i].skb);
+ usb_free_urb(q->e[i].urb);
+ }
+}
+
+static void mt7601u_free_tx(struct mt7601u_dev *dev)
+{
+ int i;
+
+ if (!dev->tx_q)
+ return;
+
+ for (i = 0; i < __MT_EP_OUT_MAX; i++)
+ mt7601u_free_tx_queue(&dev->tx_q[i]);
+}
+
+static int mt7601u_alloc_tx_queue(struct mt7601u_dev *dev,
+ struct mt7601u_tx_queue *q)
+{
+ int i;
+
+ q->dev = dev;
+ q->entries = N_TX_ENTRIES;
+
+ for (i = 0; i < N_TX_ENTRIES; i++) {
+ q->e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!q->e[i].urb)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int mt7601u_alloc_tx(struct mt7601u_dev *dev)
+{
+ int i;
+
+ dev->tx_q = devm_kcalloc(dev->dev, __MT_EP_OUT_MAX,
+ sizeof(*dev->tx_q), GFP_KERNEL);
+ if (!dev->tx_q)
+ return -ENOMEM;
+
+ for (i = 0; i < __MT_EP_OUT_MAX; i++)
+ if (mt7601u_alloc_tx_queue(dev, &dev->tx_q[i]))
+ return -ENOMEM;
+
+ return 0;
+}
+
+int mt7601u_dma_init(struct mt7601u_dev *dev)
+{
+ int ret = -ENOMEM;
+
+ tasklet_init(&dev->tx_tasklet, mt7601u_tx_tasklet, (unsigned long) dev);
+ tasklet_init(&dev->rx_tasklet, mt7601u_rx_tasklet, (unsigned long) dev);
+
+ ret = mt7601u_alloc_tx(dev);
+ if (ret)
+ goto err;
+ ret = mt7601u_alloc_rx(dev);
+ if (ret)
+ goto err;
+
+ ret = mt7601u_submit_rx(dev);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ mt7601u_dma_cleanup(dev);
+ return ret;
+}
+
+void mt7601u_dma_cleanup(struct mt7601u_dev *dev)
+{
+ mt7601u_kill_rx(dev);
+
+ tasklet_kill(&dev->rx_tasklet);
+
+ mt7601u_free_rx(dev);
+ mt7601u_free_tx(dev);
+
+ tasklet_kill(&dev->tx_tasklet);
+}
diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.h b/drivers/net/wireless/mediatek/mt7601u/dma.h
new file mode 100644
index 000000000..81e559ec1
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/dma.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#ifndef __MT7601U_DMA_H
+#define __MT7601U_DMA_H
+
+#include <asm/unaligned.h>
+#include <linux/skbuff.h>
+
+#define MT_DMA_HDR_LEN 4
+#define MT_RX_INFO_LEN 4
+#define MT_FCE_INFO_LEN 4
+#define MT_DMA_HDRS (MT_DMA_HDR_LEN + MT_RX_INFO_LEN)
+
+/* Common Tx DMA descriptor fields */
+#define MT_TXD_INFO_LEN GENMASK(15, 0)
+#define MT_TXD_INFO_D_PORT GENMASK(29, 27)
+#define MT_TXD_INFO_TYPE GENMASK(31, 30)
+
+enum mt76_msg_port {
+ WLAN_PORT,
+ CPU_RX_PORT,
+ CPU_TX_PORT,
+ HOST_PORT,
+ VIRTUAL_CPU_RX_PORT,
+ VIRTUAL_CPU_TX_PORT,
+ DISCARD,
+};
+
+enum mt76_info_type {
+ DMA_PACKET,
+ DMA_COMMAND,
+};
+
+/* Tx DMA packet specific flags */
+#define MT_TXD_PKT_INFO_NEXT_VLD BIT(16)
+#define MT_TXD_PKT_INFO_TX_BURST BIT(17)
+#define MT_TXD_PKT_INFO_80211 BIT(19)
+#define MT_TXD_PKT_INFO_TSO BIT(20)
+#define MT_TXD_PKT_INFO_CSO BIT(21)
+#define MT_TXD_PKT_INFO_WIV BIT(24)
+#define MT_TXD_PKT_INFO_QSEL GENMASK(26, 25)
+
+enum mt76_qsel {
+ MT_QSEL_MGMT,
+ MT_QSEL_HCCA,
+ MT_QSEL_EDCA,
+ MT_QSEL_EDCA_2,
+};
+
+/* Tx DMA MCU command specific flags */
+#define MT_TXD_CMD_INFO_SEQ GENMASK(19, 16)
+#define MT_TXD_CMD_INFO_TYPE GENMASK(26, 20)
+
+static inline int mt7601u_dma_skb_wrap(struct sk_buff *skb,
+ enum mt76_msg_port d_port,
+ enum mt76_info_type type, u32 flags)
+{
+ u32 info;
+
+ /* Buffer layout:
+ * | 4B | xfer len | pad | 4B |
+ * | TXINFO | pkt/cmd | zero pad to 4B | zero |
+ *
+ * length field of TXINFO should be set to 'xfer len'.
+ */
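+ /* Worked example (illustrative only, following the code below): for a
+ * 26-byte packet the LEN field is set to 28, skb_push() prepends the
+ * 4-byte TXINFO word (skb->len = 30) and skb_put_padto() grows the
+ * buffer to 36 bytes: 4B TXINFO + 26B payload + 2B zero pad + 4B zero
+ * trailer.
+ */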
+
+ info = flags |
+ FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
+ FIELD_PREP(MT_TXD_INFO_D_PORT, d_port) |
+ FIELD_PREP(MT_TXD_INFO_TYPE, type);
+
+ put_unaligned_le32(info, skb_push(skb, sizeof(info)));
+ return skb_put_padto(skb, round_up(skb->len, 4) + 4);
+}
+
+static inline int
+mt7601u_dma_skb_wrap_pkt(struct sk_buff *skb, enum mt76_qsel qsel, u32 flags)
+{
+ flags |= FIELD_PREP(MT_TXD_PKT_INFO_QSEL, qsel);
+ return mt7601u_dma_skb_wrap(skb, WLAN_PORT, DMA_PACKET, flags);
+}
+
+/* Common Rx DMA descriptor fields */
+#define MT_RXD_INFO_LEN GENMASK(13, 0)
+#define MT_RXD_INFO_PCIE_INTR BIT(24)
+#define MT_RXD_INFO_QSEL GENMASK(26, 25)
+#define MT_RXD_INFO_PORT GENMASK(29, 27)
+#define MT_RXD_INFO_TYPE GENMASK(31, 30)
+
+/* Rx DMA packet specific flags */
+#define MT_RXD_PKT_INFO_UDP_ERR BIT(16)
+#define MT_RXD_PKT_INFO_TCP_ERR BIT(17)
+#define MT_RXD_PKT_INFO_IP_ERR BIT(18)
+#define MT_RXD_PKT_INFO_PKT_80211 BIT(19)
+#define MT_RXD_PKT_INFO_L3L4_DONE BIT(20)
+#define MT_RXD_PKT_INFO_MAC_LEN GENMASK(23, 21)
+
+/* Rx DMA MCU command specific flags */
+#define MT_RXD_CMD_INFO_SELF_GEN BIT(15)
+#define MT_RXD_CMD_INFO_CMD_SEQ GENMASK(19, 16)
+#define MT_RXD_CMD_INFO_EVT_TYPE GENMASK(23, 20)
+
+enum mt76_evt_type {
+ CMD_DONE,
+ CMD_ERROR,
+ CMD_RETRY,
+ EVENT_PWR_RSP,
+ EVENT_WOW_RSP,
+ EVENT_CARRIER_DETECT_RSP,
+ EVENT_DFS_DETECT_RSP,
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.c b/drivers/net/wireless/mediatek/mt7601u/eeprom.c
new file mode 100644
index 000000000..aa3b64902
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.c
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#include <linux/of.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+#include "mt7601u.h"
+#include "eeprom.h"
+#include "mac.h"
+
+static bool
+field_valid(u8 val)
+{
+ return val != 0xff;
+}
+
+static s8
+field_validate(u8 val)
+{
+ if (!field_valid(val))
+ return 0;
+
+ return val;
+}
+
+static int
+mt7601u_efuse_read(struct mt7601u_dev *dev, u16 addr, u8 *data,
+ enum mt7601u_eeprom_access_modes mode)
+{
+ u32 val;
+ int i;
+
+ val = mt76_rr(dev, MT_EFUSE_CTRL);
+ val &= ~(MT_EFUSE_CTRL_AIN |
+ MT_EFUSE_CTRL_MODE);
+ val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf) |
+ FIELD_PREP(MT_EFUSE_CTRL_MODE, mode) |
+ MT_EFUSE_CTRL_KICK;
+ mt76_wr(dev, MT_EFUSE_CTRL, val);
+
+ if (!mt76_poll(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
+ return -ETIMEDOUT;
+
+ val = mt76_rr(dev, MT_EFUSE_CTRL);
+ if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) {
+ /* Parts of the eeprom not in the usage map (0x80-0xc0,0xf0)
+ * will not return valid data, but that is expected.
+ */
+ memset(data, 0xff, 16);
+ return 0;
+ }
+
+ for (i = 0; i < 4; i++) {
+ val = mt76_rr(dev, MT_EFUSE_DATA(i));
+ put_unaligned_le32(val, data + 4 * i);
+ }
+
+ return 0;
+}
+
+static int
+mt7601u_efuse_physical_size_check(struct mt7601u_dev *dev)
+{
+ const int map_reads = DIV_ROUND_UP(MT_EFUSE_USAGE_MAP_SIZE, 16);
+ u8 data[round_up(MT_EFUSE_USAGE_MAP_SIZE, 16)];
+ int ret, i;
+ u32 start = 0, end = 0, cnt_free;
+
+ for (i = 0; i < map_reads; i++) {
+ ret = mt7601u_efuse_read(dev, MT_EE_USAGE_MAP_START + i * 16,
+ data + i * 16, MT_EE_PHYSICAL_READ);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < MT_EFUSE_USAGE_MAP_SIZE; i++)
+ if (!data[i]) {
+ if (!start)
+ start = MT_EE_USAGE_MAP_START + i;
+ end = MT_EE_USAGE_MAP_START + i;
+ }
+ cnt_free = end - start + 1;
+
+ if (MT_EFUSE_USAGE_MAP_SIZE - cnt_free < 5) {
+ dev_err(dev->dev, "Error: your device needs default EEPROM file and this driver doesn't support it!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static bool
+mt7601u_has_tssi(struct mt7601u_dev *dev, u8 *eeprom)
+{
+ u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1);
+
+ return (u16)~nic_conf1 && (nic_conf1 & MT_EE_NIC_CONF_1_TX_ALC_EN);
+}
+
+static void
+mt7601u_set_chip_cap(struct mt7601u_dev *dev, u8 *eeprom)
+{
+ u16 nic_conf0 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_0);
+ u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1);
+
+ if (!field_valid(nic_conf1 & 0xff))
+ nic_conf1 &= 0xff00;
+
+ dev->ee->tssi_enabled = mt7601u_has_tssi(dev, eeprom) &&
+ !(nic_conf1 & MT_EE_NIC_CONF_1_TEMP_TX_ALC);
+
+ if (nic_conf1 & MT_EE_NIC_CONF_1_HW_RF_CTRL)
+ dev_err(dev->dev,
+ "Error: this driver does not support HW RF ctrl\n");
+
+ if (!field_valid(nic_conf0 >> 8))
+ return;
+
+ if (FIELD_GET(MT_EE_NIC_CONF_0_RX_PATH, nic_conf0) > 1 ||
+ FIELD_GET(MT_EE_NIC_CONF_0_TX_PATH, nic_conf0) > 1)
+ dev_err(dev->dev,
+ "Error: device has more than 1 RX/TX stream!\n");
+}
+
+static void mt7601u_set_channel_target_power(struct mt7601u_dev *dev,
+ u8 *eeprom, u8 max_pwr)
+{
+ u8 trgt_pwr = eeprom[MT_EE_TX_TSSI_TARGET_POWER];
+
+ if (trgt_pwr > max_pwr || !trgt_pwr) {
+ dev_warn(dev->dev, "Error: EEPROM trgt power invalid %hhx!\n",
+ trgt_pwr);
+ trgt_pwr = 0x20;
+ }
+
+ memset(dev->ee->chan_pwr, trgt_pwr, sizeof(dev->ee->chan_pwr));
+}
+
+static void
+mt7601u_set_channel_power(struct mt7601u_dev *dev, u8 *eeprom)
+{
+ u32 i, val;
+ u8 max_pwr;
+
+ val = mt7601u_rr(dev, MT_TX_ALC_CFG_0);
+ max_pwr = FIELD_GET(MT_TX_ALC_CFG_0_LIMIT_0, val);
+
+ if (mt7601u_has_tssi(dev, eeprom)) {
+ mt7601u_set_channel_target_power(dev, eeprom, max_pwr);
+ return;
+ }
+
+ for (i = 0; i < 14; i++) {
+ s8 power = field_validate(eeprom[MT_EE_TX_POWER_OFFSET + i]);
+
+ if (power > max_pwr || power < 0)
+ power = MT7601U_DEFAULT_TX_POWER;
+
+ dev->ee->chan_pwr[i] = power;
+ }
+}
+
+static void
+mt7601u_set_country_reg(struct mt7601u_dev *dev, u8 *eeprom)
+{
+ /* Note: - region 31 is not valid for mt7601u (see rtmp_init.c)
+ * - comments in rtmp_def.h are incorrect (see rt_channel.c)
+ */
+ static const struct reg_channel_bounds chan_bounds[] = {
+ /* EEPROM country regions 0 - 7 */
+ { 1, 11 }, { 1, 13 }, { 10, 2 }, { 10, 4 },
+ { 14, 1 }, { 1, 14 }, { 3, 7 }, { 5, 9 },
+ /* EEPROM country regions 32 - 33 */
+ { 1, 11 }, { 1, 14 }
+ };
+ u8 val = eeprom[MT_EE_COUNTRY_REGION];
+ int idx = -1;
+
+ if (val < 8)
+ idx = val;
+ if (val > 31 && val < 33)
+ idx = val - 32 + 8;
+
+ if (idx != -1)
+ dev_info(dev->dev,
+ "EEPROM country region %02hhx (channels %hhd-%hhd)\n",
+ val, chan_bounds[idx].start,
+ chan_bounds[idx].start + chan_bounds[idx].num - 1);
+ else
+ idx = 5; /* channels 1 - 14 */
+
+ dev->ee->reg = chan_bounds[idx];
+
+ /* TODO: country region 33 is special - phy should be set to B-mode
+ * before entering channel 14 (see sta/connect.c)
+ */
+}
+
+static void
+mt7601u_set_rf_freq_off(struct mt7601u_dev *dev, u8 *eeprom)
+{
+ u8 comp;
+
+ dev->ee->rf_freq_off = field_validate(eeprom[MT_EE_FREQ_OFFSET]);
+ comp = field_validate(eeprom[MT_EE_FREQ_OFFSET_COMPENSATION]);
+
+ if (comp & BIT(7))
+ dev->ee->rf_freq_off -= comp & 0x7f;
+ else
+ dev->ee->rf_freq_off += comp;
+}
+
+static void
+mt7601u_set_rssi_offset(struct mt7601u_dev *dev, u8 *eeprom)
+{
+ int i;
+ s8 *rssi_offset = dev->ee->rssi_offset;
+
+ for (i = 0; i < 2; i++) {
+ rssi_offset[i] = eeprom[MT_EE_RSSI_OFFSET + i];
+
+ if (rssi_offset[i] < -10 || rssi_offset[i] > 10) {
+ dev_warn(dev->dev,
+ "Warning: EEPROM RSSI is invalid %02hhx\n",
+ rssi_offset[i]);
+ rssi_offset[i] = 0;
+ }
+ }
+}
+
+static void
+mt7601u_extra_power_over_mac(struct mt7601u_dev *dev)
+{
+ u32 val;
+
+ val = ((mt7601u_rr(dev, MT_TX_PWR_CFG_1) & 0x0000ff00) >> 8);
+ val |= ((mt7601u_rr(dev, MT_TX_PWR_CFG_2) & 0x0000ff00) << 8);
+ mt7601u_wr(dev, MT_TX_PWR_CFG_7, val);
+
+ val = ((mt7601u_rr(dev, MT_TX_PWR_CFG_4) & 0x0000ff00) >> 8);
+ mt7601u_wr(dev, MT_TX_PWR_CFG_9, val);
+}
+
+static void
+mt7601u_set_power_rate(struct power_per_rate *rate, s8 delta, u8 value)
+{
+ /* Invalid? Note: vendor driver does not handle this */
+ if (value == 0xff)
+ return;
+
+ rate->raw = s6_validate(value);
+ rate->bw20 = s6_to_int(value);
+ /* Note: vendor driver does cap the value to s6 right away */
+ rate->bw40 = rate->bw20 + delta;
+}
+
+static void
+mt7601u_save_power_rate(struct mt7601u_dev *dev, s8 delta, u32 val, int i)
+{
+ struct mt7601u_rate_power *t = &dev->ee->power_rate_table;
+
+ switch (i) {
+ case 0:
+ mt7601u_set_power_rate(&t->cck[0], delta, (val >> 0) & 0xff);
+ mt7601u_set_power_rate(&t->cck[1], delta, (val >> 8) & 0xff);
+ /* Save cck bw20 for fixups of channel 14 */
+ dev->ee->real_cck_bw20[0] = t->cck[0].bw20;
+ dev->ee->real_cck_bw20[1] = t->cck[1].bw20;
+
+ mt7601u_set_power_rate(&t->ofdm[0], delta, (val >> 16) & 0xff);
+ mt7601u_set_power_rate(&t->ofdm[1], delta, (val >> 24) & 0xff);
+ break;
+ case 1:
+ mt7601u_set_power_rate(&t->ofdm[2], delta, (val >> 0) & 0xff);
+ mt7601u_set_power_rate(&t->ofdm[3], delta, (val >> 8) & 0xff);
+ mt7601u_set_power_rate(&t->ht[0], delta, (val >> 16) & 0xff);
+ mt7601u_set_power_rate(&t->ht[1], delta, (val >> 24) & 0xff);
+ break;
+ case 2:
+ mt7601u_set_power_rate(&t->ht[2], delta, (val >> 0) & 0xff);
+ mt7601u_set_power_rate(&t->ht[3], delta, (val >> 8) & 0xff);
+ break;
+ }
+}
+
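+/* Decode the bw40 power-delta byte as interpreted here: BIT(7) marks the
+ * value as valid, BIT(6) selects a negative delta and the low bits hold
+ * the magnitude, capped at 8.
+ */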
+static s8
+get_delta(u8 val)
+{
+ s8 ret;
+
+ if (!field_valid(val) || !(val & BIT(7)))
+ return 0;
+
+ ret = val & 0x1f;
+ if (ret > 8)
+ ret = 8;
+ if (val & BIT(6))
+ ret = -ret;
+
+ return ret;
+}
+
+static void
+mt7601u_config_tx_power_per_rate(struct mt7601u_dev *dev, u8 *eeprom)
+{
+ u32 val;
+ s8 bw40_delta;
+ int i;
+
+ bw40_delta = get_delta(eeprom[MT_EE_TX_POWER_DELTA_BW40]);
+
+ for (i = 0; i < 5; i++) {
+ val = get_unaligned_le32(eeprom + MT_EE_TX_POWER_BYRATE(i));
+
+ mt7601u_save_power_rate(dev, bw40_delta, val, i);
+
+ if (~val)
+ mt7601u_wr(dev, MT_TX_PWR_CFG_0 + i * 4, val);
+ }
+
+ mt7601u_extra_power_over_mac(dev);
+}
+
+static void
+mt7601u_init_tssi_params(struct mt7601u_dev *dev, u8 *eeprom)
+{
+ struct tssi_data *d = &dev->ee->tssi_data;
+
+ if (!dev->ee->tssi_enabled)
+ return;
+
+ d->slope = eeprom[MT_EE_TX_TSSI_SLOPE];
+ d->tx0_delta_offset = eeprom[MT_EE_TX_TSSI_OFFSET] * 1024;
+ d->offset[0] = eeprom[MT_EE_TX_TSSI_OFFSET_GROUP];
+ d->offset[1] = eeprom[MT_EE_TX_TSSI_OFFSET_GROUP + 1];
+ d->offset[2] = eeprom[MT_EE_TX_TSSI_OFFSET_GROUP + 2];
+}
+
+int
+mt7601u_eeprom_init(struct mt7601u_dev *dev)
+{
+ u8 *eeprom;
+ int i, ret;
+
+ ret = mt7601u_efuse_physical_size_check(dev);
+ if (ret)
+ return ret;
+
+ dev->ee = devm_kzalloc(dev->dev, sizeof(*dev->ee), GFP_KERNEL);
+ if (!dev->ee)
+ return -ENOMEM;
+
+ eeprom = kmalloc(MT7601U_EEPROM_SIZE, GFP_KERNEL);
+ if (!eeprom)
+ return -ENOMEM;
+
+ for (i = 0; i + 16 <= MT7601U_EEPROM_SIZE; i += 16) {
+ ret = mt7601u_efuse_read(dev, i, eeprom + i, MT_EE_READ);
+ if (ret)
+ goto out;
+ }
+
+ if (eeprom[MT_EE_VERSION_EE] > MT7601U_EE_MAX_VER)
+ dev_warn(dev->dev,
+ "Warning: unsupported EEPROM version %02hhx\n",
+ eeprom[MT_EE_VERSION_EE]);
+ dev_info(dev->dev, "EEPROM ver:%02hhx fae:%02hhx\n",
+ eeprom[MT_EE_VERSION_EE], eeprom[MT_EE_VERSION_FAE]);
+
+ mt7601u_set_macaddr(dev, eeprom + MT_EE_MAC_ADDR);
+ mt7601u_set_chip_cap(dev, eeprom);
+ mt7601u_set_channel_power(dev, eeprom);
+ mt7601u_set_country_reg(dev, eeprom);
+ mt7601u_set_rf_freq_off(dev, eeprom);
+ mt7601u_set_rssi_offset(dev, eeprom);
+ dev->ee->ref_temp = eeprom[MT_EE_REF_TEMP];
+ dev->ee->lna_gain = eeprom[MT_EE_LNA_GAIN];
+
+ mt7601u_config_tx_power_per_rate(dev, eeprom);
+
+ mt7601u_init_tssi_params(dev, eeprom);
+out:
+ kfree(eeprom);
+ return ret;
+}
diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.h b/drivers/net/wireless/mediatek/mt7601u/eeprom.h
new file mode 100644
index 000000000..4d3fd49f2
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#ifndef __MT7601U_EEPROM_H
+#define __MT7601U_EEPROM_H
+
+struct mt7601u_dev;
+
+#define MT7601U_EE_MAX_VER 0x0d
+#define MT7601U_EEPROM_SIZE 256
+
+#define MT7601U_DEFAULT_TX_POWER 6
+
+enum mt76_eeprom_field {
+ MT_EE_CHIP_ID = 0x00,
+ MT_EE_VERSION_FAE = 0x02,
+ MT_EE_VERSION_EE = 0x03,
+ MT_EE_MAC_ADDR = 0x04,
+ MT_EE_NIC_CONF_0 = 0x34,
+ MT_EE_NIC_CONF_1 = 0x36,
+ MT_EE_COUNTRY_REGION = 0x39,
+ MT_EE_FREQ_OFFSET = 0x3a,
+ MT_EE_NIC_CONF_2 = 0x42,
+
+ MT_EE_LNA_GAIN = 0x44,
+ MT_EE_RSSI_OFFSET = 0x46,
+
+ MT_EE_TX_POWER_DELTA_BW40 = 0x50,
+ MT_EE_TX_POWER_OFFSET = 0x52,
+
+ MT_EE_TX_TSSI_SLOPE = 0x6e,
+ MT_EE_TX_TSSI_OFFSET_GROUP = 0x6f,
+ MT_EE_TX_TSSI_OFFSET = 0x76,
+
+ MT_EE_TX_TSSI_TARGET_POWER = 0xd0,
+ MT_EE_REF_TEMP = 0xd1,
+ MT_EE_FREQ_OFFSET_COMPENSATION = 0xdb,
+ MT_EE_TX_POWER_BYRATE_BASE = 0xde,
+
+ MT_EE_USAGE_MAP_START = 0x1e0,
+ MT_EE_USAGE_MAP_END = 0x1fc,
+};
+
+#define MT_EE_NIC_CONF_0_RX_PATH GENMASK(3, 0)
+#define MT_EE_NIC_CONF_0_TX_PATH GENMASK(7, 4)
+#define MT_EE_NIC_CONF_0_BOARD_TYPE GENMASK(13, 12)
+
+#define MT_EE_NIC_CONF_1_HW_RF_CTRL BIT(0)
+#define MT_EE_NIC_CONF_1_TEMP_TX_ALC BIT(1)
+#define MT_EE_NIC_CONF_1_LNA_EXT_2G BIT(2)
+#define MT_EE_NIC_CONF_1_LNA_EXT_5G BIT(3)
+#define MT_EE_NIC_CONF_1_TX_ALC_EN BIT(13)
+
+#define MT_EE_NIC_CONF_2_RX_STREAM GENMASK(3, 0)
+#define MT_EE_NIC_CONF_2_TX_STREAM GENMASK(7, 4)
+#define MT_EE_NIC_CONF_2_HW_ANTDIV BIT(8)
+#define MT_EE_NIC_CONF_2_XTAL_OPTION GENMASK(10, 9)
+#define MT_EE_NIC_CONF_2_TEMP_DISABLE BIT(11)
+#define MT_EE_NIC_CONF_2_COEX_METHOD GENMASK(15, 13)
+
+#define MT_EE_TX_POWER_BYRATE(i) (MT_EE_TX_POWER_BYRATE_BASE + \
+ (i) * 4)
+
+#define MT_EFUSE_USAGE_MAP_SIZE (MT_EE_USAGE_MAP_END - \
+ MT_EE_USAGE_MAP_START + 1)
+
+enum mt7601u_eeprom_access_modes {
+ MT_EE_READ = 0,
+ MT_EE_PHYSICAL_READ = 1,
+};
+
+struct power_per_rate {
+ u8 raw; /* validated s6 value */
+ s8 bw20; /* sign-extended int */
+ s8 bw40; /* sign-extended int */
+};
+
+/* Power per rate - one value per two rates */
+struct mt7601u_rate_power {
+ struct power_per_rate cck[2];
+ struct power_per_rate ofdm[4];
+ struct power_per_rate ht[4];
+};
+
+struct reg_channel_bounds {
+ u8 start;
+ u8 num;
+};
+
+struct mt7601u_eeprom_params {
+ bool tssi_enabled;
+ u8 rf_freq_off;
+ s8 rssi_offset[2];
+ s8 ref_temp;
+ s8 lna_gain;
+
+ u8 chan_pwr[14];
+ struct mt7601u_rate_power power_rate_table;
+ s8 real_cck_bw20[2];
+
+ /* TSSI stuff - only with internal TX ALC */
+ struct tssi_data {
+ int tx0_delta_offset;
+ u8 slope;
+ u8 offset[3];
+ } tssi_data;
+
+ struct reg_channel_bounds reg;
+};
+
+int mt7601u_eeprom_init(struct mt7601u_dev *dev);
+
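+/* TX power values are stored as 6-bit two's-complement numbers
+ * (-32..31); these helpers convert between the raw register format
+ * and plain integers.
+ */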
+static inline u32 s6_validate(u32 reg)
+{
+ WARN_ON(reg & ~GENMASK(5, 0));
+ return reg & GENMASK(5, 0);
+}
+
+static inline int s6_to_int(u32 reg)
+{
+ int s6;
+
+ s6 = s6_validate(reg);
+ if (s6 & BIT(5))
+ s6 -= BIT(6);
+
+ return s6;
+}
+
+static inline u32 int_to_s6(int val)
+{
+ if (val < -0x20)
+ return 0x20;
+ if (val > 0x1f)
+ return 0x1f;
+
+ return val & 0x3f;
+}
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/init.c b/drivers/net/wireless/mediatek/mt7601u/init.c
new file mode 100644
index 000000000..cada48800
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/init.c
@@ -0,0 +1,630 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#include "mt7601u.h"
+#include "eeprom.h"
+#include "trace.h"
+#include "mcu.h"
+
+#include "initvals.h"
+
+static void
+mt7601u_set_wlan_state(struct mt7601u_dev *dev, u32 val, bool enable)
+{
+ int i;
+
+ /* Note: we don't turn off WLAN_CLK because that makes the device
+ * not respond properly on the probe path.
+ * In case anyone (PSM?) wants to use this function we can
+ * bring the clock stuff back and fix up the probe path.
+ */
+
+ if (enable)
+ val |= (MT_WLAN_FUN_CTRL_WLAN_EN |
+ MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
+ else
+ val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN);
+
+ mt7601u_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+
+ if (enable) {
+ set_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state);
+ } else {
+ clear_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state);
+ return;
+ }
+
+ for (i = 200; i; i--) {
+ val = mt7601u_rr(dev, MT_CMB_CTRL);
+
+ if (val & MT_CMB_CTRL_XTAL_RDY && val & MT_CMB_CTRL_PLL_LD)
+ break;
+
+ udelay(20);
+ }
+
+ /* Note: the vendor driver tries to disable/enable wlan here and
+ * retry, but the code which does it is so buggy it must never
+ * have triggered, so don't bother.
+ */
+ if (!i)
+ dev_err(dev->dev, "Error: PLL and XTAL check failed!\n");
+}
+
+static void mt7601u_chip_onoff(struct mt7601u_dev *dev, bool enable, bool reset)
+{
+ u32 val;
+
+ mutex_lock(&dev->hw_atomic_mutex);
+
+ val = mt7601u_rr(dev, MT_WLAN_FUN_CTRL);
+
+ if (reset) {
+ val |= MT_WLAN_FUN_CTRL_GPIO_OUT_EN;
+ val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL;
+
+ if (val & MT_WLAN_FUN_CTRL_WLAN_EN) {
+ val |= (MT_WLAN_FUN_CTRL_WLAN_RESET |
+ MT_WLAN_FUN_CTRL_WLAN_RESET_RF);
+ mt7601u_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+
+ val &= ~(MT_WLAN_FUN_CTRL_WLAN_RESET |
+ MT_WLAN_FUN_CTRL_WLAN_RESET_RF);
+ }
+ }
+
+ mt7601u_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+
+ mt7601u_set_wlan_state(dev, val, enable);
+
+ mutex_unlock(&dev->hw_atomic_mutex);
+}
+
+static void mt7601u_reset_csr_bbp(struct mt7601u_dev *dev)
+{
+ mt7601u_wr(dev, MT_MAC_SYS_CTRL, (MT_MAC_SYS_CTRL_RESET_CSR |
+ MT_MAC_SYS_CTRL_RESET_BBP));
+ mt7601u_wr(dev, MT_USB_DMA_CFG, 0);
+ msleep(1);
+ mt7601u_wr(dev, MT_MAC_SYS_CTRL, 0);
+}
+
+static void mt7601u_init_usb_dma(struct mt7601u_dev *dev)
+{
+ u32 val;
+
+ val = FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, MT_USB_AGGR_TIMEOUT) |
+ FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_LMT,
+ MT_USB_AGGR_SIZE_LIMIT) |
+ MT_USB_DMA_CFG_RX_BULK_EN |
+ MT_USB_DMA_CFG_TX_BULK_EN;
+ if (dev->in_max_packet == 512)
+ val |= MT_USB_DMA_CFG_RX_BULK_AGG_EN;
+ mt7601u_wr(dev, MT_USB_DMA_CFG, val);
+
+ val |= MT_USB_DMA_CFG_UDMA_RX_WL_DROP;
+ mt7601u_wr(dev, MT_USB_DMA_CFG, val);
+ val &= ~MT_USB_DMA_CFG_UDMA_RX_WL_DROP;
+ mt7601u_wr(dev, MT_USB_DMA_CFG, val);
+}
+
+static int mt7601u_init_bbp(struct mt7601u_dev *dev)
+{
+ int ret;
+
+ ret = mt7601u_wait_bbp_ready(dev);
+ if (ret)
+ return ret;
+
+ ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, bbp_common_vals,
+ ARRAY_SIZE(bbp_common_vals));
+ if (ret)
+ return ret;
+
+ return mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, bbp_chip_vals,
+ ARRAY_SIZE(bbp_chip_vals));
+}
+
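+/* Each beacon slot's offset from MT_BEACON_BASE is programmed in units
+ * of 64 bytes, four slots packed one byte each per MT_BCN_OFFSET
+ * register.
+ */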
+static void
+mt76_init_beacon_offsets(struct mt7601u_dev *dev)
+{
+ u16 base = MT_BEACON_BASE;
+ u32 regs[4] = {};
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ u16 addr = dev->beacon_offsets[i];
+
+ regs[i / 4] |= ((addr - base) / 64) << (8 * (i % 4));
+ }
+
+ for (i = 0; i < 4; i++)
+ mt7601u_wr(dev, MT_BCN_OFFSET(i), regs[i]);
+}
+
+static int mt7601u_write_mac_initvals(struct mt7601u_dev *dev)
+{
+ int ret;
+
+ ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_WLAN, mac_common_vals,
+ ARRAY_SIZE(mac_common_vals));
+ if (ret)
+ return ret;
+ ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_WLAN,
+ mac_chip_vals, ARRAY_SIZE(mac_chip_vals));
+ if (ret)
+ return ret;
+
+ mt76_init_beacon_offsets(dev);
+
+ mt7601u_wr(dev, MT_AUX_CLK_CFG, 0);
+
+ return 0;
+}
+
+static int mt7601u_init_wcid_mem(struct mt7601u_dev *dev)
+{
+ u32 *vals;
+ int i, ret;
+
+ vals = kmalloc(sizeof(*vals) * N_WCIDS * 2, GFP_KERNEL);
+ if (!vals)
+ return -ENOMEM;
+
+ for (i = 0; i < N_WCIDS; i++) {
+ vals[i * 2] = 0xffffffff;
+ vals[i * 2 + 1] = 0x00ffffff;
+ }
+
+ ret = mt7601u_burst_write_regs(dev, MT_WCID_ADDR_BASE,
+ vals, N_WCIDS * 2);
+ kfree(vals);
+
+ return ret;
+}
+
+static int mt7601u_init_key_mem(struct mt7601u_dev *dev)
+{
+ u32 vals[4] = {};
+
+ return mt7601u_burst_write_regs(dev, MT_SKEY_MODE_BASE_0,
+ vals, ARRAY_SIZE(vals));
+}
+
+static int mt7601u_init_wcid_attr_mem(struct mt7601u_dev *dev)
+{
+ u32 *vals;
+ int i, ret;
+
+ vals = kmalloc(sizeof(*vals) * N_WCIDS * 2, GFP_KERNEL);
+ if (!vals)
+ return -ENOMEM;
+
+ for (i = 0; i < N_WCIDS * 2; i++)
+ vals[i] = 1;
+
+ ret = mt7601u_burst_write_regs(dev, MT_WCID_ATTR_BASE,
+ vals, N_WCIDS * 2);
+ kfree(vals);
+
+ return ret;
+}
+
+static void mt7601u_reset_counters(struct mt7601u_dev *dev)
+{
+ mt7601u_rr(dev, MT_RX_STA_CNT0);
+ mt7601u_rr(dev, MT_RX_STA_CNT1);
+ mt7601u_rr(dev, MT_RX_STA_CNT2);
+ mt7601u_rr(dev, MT_TX_STA_CNT0);
+ mt7601u_rr(dev, MT_TX_STA_CNT1);
+ mt7601u_rr(dev, MT_TX_STA_CNT2);
+}
+
+int mt7601u_mac_start(struct mt7601u_dev *dev)
+{
+ mt7601u_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
+
+ if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 200000))
+ return -ETIMEDOUT;
+
+ dev->rxfilter = MT_RX_FILTR_CFG_CRC_ERR |
+ MT_RX_FILTR_CFG_PHY_ERR | MT_RX_FILTR_CFG_PROMISC |
+ MT_RX_FILTR_CFG_VER_ERR | MT_RX_FILTR_CFG_DUP |
+ MT_RX_FILTR_CFG_CFACK | MT_RX_FILTR_CFG_CFEND |
+ MT_RX_FILTR_CFG_ACK | MT_RX_FILTR_CFG_CTS |
+ MT_RX_FILTR_CFG_RTS | MT_RX_FILTR_CFG_PSPOLL |
+ MT_RX_FILTR_CFG_BA | MT_RX_FILTR_CFG_CTRL_RSV;
+ mt7601u_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+ mt7601u_wr(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
+
+ if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 50))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void mt7601u_mac_stop_hw(struct mt7601u_dev *dev)
+{
+ int i, ok;
+
+ if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+ return;
+
+ mt76_clear(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_TIMER_EN |
+ MT_BEACON_TIME_CFG_SYNC_MODE | MT_BEACON_TIME_CFG_TBTT_EN |
+ MT_BEACON_TIME_CFG_BEACON_TX);
+
+ if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_BUSY, 0, 1000))
+ dev_warn(dev->dev, "Warning: TX DMA did not stop!\n");
+
+ /* Page count on TxQ */
+ i = 200;
+ while (i-- && ((mt76_rr(dev, 0x0438) & 0xffffffff) ||
+ (mt76_rr(dev, 0x0a30) & 0x000000ff) ||
+ (mt76_rr(dev, 0x0a34) & 0x00ff00ff)))
+ msleep(10);
+
+ if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_TX, 0, 1000))
+ dev_warn(dev->dev, "Warning: MAC TX did not stop!\n");
+
+ mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_RX |
+ MT_MAC_SYS_CTRL_ENABLE_TX);
+
+ /* Page count on RxQ */
+ ok = 0;
+ i = 200;
+ while (i--) {
+ if (!(mt76_rr(dev, MT_RXQ_STA) & 0x00ff0000) &&
+ !mt76_rr(dev, 0x0a30) &&
+ !mt76_rr(dev, 0x0a34)) {
+ if (ok++ > 5)
+ break;
+ continue;
+ }
+ msleep(1);
+ }
+
+ if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_RX, 0, 1000))
+ dev_warn(dev->dev, "Warning: MAC RX did not stop!\n");
+
+ if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_RX_BUSY, 0, 1000))
+ dev_warn(dev->dev, "Warning: RX DMA did not stop!\n");
+}
+
+void mt7601u_mac_stop(struct mt7601u_dev *dev)
+{
+ mt7601u_mac_stop_hw(dev);
+ flush_delayed_work(&dev->stat_work);
+ cancel_delayed_work_sync(&dev->stat_work);
+}
+
+static void mt7601u_stop_hardware(struct mt7601u_dev *dev)
+{
+ mt7601u_chip_onoff(dev, false, false);
+}
+
+int mt7601u_init_hardware(struct mt7601u_dev *dev)
+{
+ static const u16 beacon_offsets[16] = {
+ /* 512 bytes per beacon */
+ 0xc000, 0xc200, 0xc400, 0xc600,
+ 0xc800, 0xca00, 0xcc00, 0xce00,
+ 0xd000, 0xd200, 0xd400, 0xd600,
+ 0xd800, 0xda00, 0xdc00, 0xde00
+ };
+ int ret;
+
+ dev->beacon_offsets = beacon_offsets;
+
+ mt7601u_chip_onoff(dev, true, false);
+
+ ret = mt7601u_wait_asic_ready(dev);
+ if (ret)
+ goto err;
+ ret = mt7601u_mcu_init(dev);
+ if (ret)
+ goto err;
+
+ if (!mt76_poll_msec(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 100)) {
+ ret = -EIO;
+ goto err;
+ }
+
+ /* Wait for ASIC ready after FW load. */
+ ret = mt7601u_wait_asic_ready(dev);
+ if (ret)
+ goto err;
+
+ mt7601u_reset_csr_bbp(dev);
+ mt7601u_init_usb_dma(dev);
+
+ ret = mt7601u_mcu_cmd_init(dev);
+ if (ret)
+ goto err;
+ ret = mt7601u_dma_init(dev);
+ if (ret)
+ goto err_mcu;
+ ret = mt7601u_write_mac_initvals(dev);
+ if (ret)
+ goto err_rx;
+
+ if (!mt76_poll_msec(dev, MT_MAC_STATUS,
+ MT_MAC_STATUS_TX | MT_MAC_STATUS_RX, 0, 100)) {
+ ret = -EIO;
+ goto err_rx;
+ }
+
+ ret = mt7601u_init_bbp(dev);
+ if (ret)
+ goto err_rx;
+ ret = mt7601u_init_wcid_mem(dev);
+ if (ret)
+ goto err_rx;
+ ret = mt7601u_init_key_mem(dev);
+ if (ret)
+ goto err_rx;
+ ret = mt7601u_init_wcid_attr_mem(dev);
+ if (ret)
+ goto err_rx;
+
+ mt76_clear(dev, MT_BEACON_TIME_CFG, (MT_BEACON_TIME_CFG_TIMER_EN |
+ MT_BEACON_TIME_CFG_SYNC_MODE |
+ MT_BEACON_TIME_CFG_TBTT_EN |
+ MT_BEACON_TIME_CFG_BEACON_TX));
+
+ mt7601u_reset_counters(dev);
+
+ mt7601u_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
+
+ mt7601u_wr(dev, MT_TXOP_CTRL_CFG,
+ FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) |
+ FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58));
+
+ ret = mt7601u_eeprom_init(dev);
+ if (ret)
+ goto err_rx;
+
+ ret = mt7601u_phy_init(dev);
+ if (ret)
+ goto err_rx;
+
+ mt7601u_set_rx_path(dev, 0);
+ mt7601u_set_tx_dac(dev, 0);
+
+ mt7601u_mac_set_ctrlch(dev, false);
+ mt7601u_bbp_set_ctrlch(dev, false);
+ mt7601u_bbp_set_bw(dev, MT_BW_20);
+
+ return 0;
+
+err_rx:
+ mt7601u_dma_cleanup(dev);
+err_mcu:
+ mt7601u_mcu_cmd_deinit(dev);
+err:
+ mt7601u_chip_onoff(dev, false, false);
+ return ret;
+}
+
+void mt7601u_cleanup(struct mt7601u_dev *dev)
+{
+ if (!test_and_clear_bit(MT7601U_STATE_INITIALIZED, &dev->state))
+ return;
+
+ mt7601u_stop_hardware(dev);
+ mt7601u_dma_cleanup(dev);
+ mt7601u_mcu_cmd_deinit(dev);
+}
+
+struct mt7601u_dev *mt7601u_alloc_device(struct device *pdev)
+{
+ struct ieee80211_hw *hw;
+ struct mt7601u_dev *dev;
+
+ hw = ieee80211_alloc_hw(sizeof(*dev), &mt7601u_ops);
+ if (!hw)
+ return NULL;
+
+ dev = hw->priv;
+ dev->dev = pdev;
+ dev->hw = hw;
+ mutex_init(&dev->vendor_req_mutex);
+ mutex_init(&dev->reg_atomic_mutex);
+ mutex_init(&dev->hw_atomic_mutex);
+ mutex_init(&dev->mutex);
+ spin_lock_init(&dev->tx_lock);
+ spin_lock_init(&dev->rx_lock);
+ spin_lock_init(&dev->lock);
+ spin_lock_init(&dev->mac_lock);
+ spin_lock_init(&dev->con_mon_lock);
+ atomic_set(&dev->avg_ampdu_len, 1);
+ skb_queue_head_init(&dev->tx_skb_done);
+
+ dev->stat_wq = alloc_workqueue("mt7601u", WQ_UNBOUND, 0);
+ if (!dev->stat_wq) {
+ ieee80211_free_hw(hw);
+ return NULL;
+ }
+
+ return dev;
+}
+
+#define CHAN2G(_idx, _freq) { \
+ .band = NL80211_BAND_2GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 30, \
+}
+
+static const struct ieee80211_channel mt76_channels_2ghz[] = {
+ CHAN2G(1, 2412),
+ CHAN2G(2, 2417),
+ CHAN2G(3, 2422),
+ CHAN2G(4, 2427),
+ CHAN2G(5, 2432),
+ CHAN2G(6, 2437),
+ CHAN2G(7, 2442),
+ CHAN2G(8, 2447),
+ CHAN2G(9, 2452),
+ CHAN2G(10, 2457),
+ CHAN2G(11, 2462),
+ CHAN2G(12, 2467),
+ CHAN2G(13, 2472),
+ CHAN2G(14, 2484),
+};
+
+#define CCK_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE, \
+ .hw_value = (MT_PHY_TYPE_CCK << 8) | _idx, \
+ .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx), \
+}
+
+#define OFDM_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx, \
+ .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx, \
+}
+
+static struct ieee80211_rate mt76_rates[] = {
+ CCK_RATE(0, 10),
+ CCK_RATE(1, 20),
+ CCK_RATE(2, 55),
+ CCK_RATE(3, 110),
+ OFDM_RATE(0, 60),
+ OFDM_RATE(1, 90),
+ OFDM_RATE(2, 120),
+ OFDM_RATE(3, 180),
+ OFDM_RATE(4, 240),
+ OFDM_RATE(5, 360),
+ OFDM_RATE(6, 480),
+ OFDM_RATE(7, 540),
+};
+
+static int
+mt76_init_sband(struct mt7601u_dev *dev, struct ieee80211_supported_band *sband,
+ const struct ieee80211_channel *chan, int n_chan,
+ struct ieee80211_rate *rates, int n_rates)
+{
+ struct ieee80211_sta_ht_cap *ht_cap;
+ void *chanlist;
+ int size;
+
+ size = n_chan * sizeof(*chan);
+ chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
+ if (!chanlist)
+ return -ENOMEM;
+
+ sband->channels = chanlist;
+ sband->n_channels = n_chan;
+ sband->bitrates = rates;
+ sband->n_bitrates = n_rates;
+
+ ht_cap = &sband->ht_cap;
+ ht_cap->ht_supported = true;
+ ht_cap->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_GRN_FLD |
+ IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_SGI_40 |
+ (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+
+ ht_cap->mcs.rx_mask[0] = 0xff;
+ ht_cap->mcs.rx_mask[4] = 0x1;
+ ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+ ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_2;
+
+ dev->chandef.chan = &sband->channels[0];
+
+ return 0;
+}
+
+static int
+mt76_init_sband_2g(struct mt7601u_dev *dev)
+{
+ dev->sband_2g = devm_kzalloc(dev->dev, sizeof(*dev->sband_2g),
+ GFP_KERNEL);
+ if (!dev->sband_2g)
+ return -ENOMEM;
+
+ dev->hw->wiphy->bands[NL80211_BAND_2GHZ] = dev->sband_2g;
+
+ WARN_ON(dev->ee->reg.start - 1 + dev->ee->reg.num >
+ ARRAY_SIZE(mt76_channels_2ghz));
+
+ return mt76_init_sband(dev, dev->sband_2g,
+ &mt76_channels_2ghz[dev->ee->reg.start - 1],
+ dev->ee->reg.num,
+ mt76_rates, ARRAY_SIZE(mt76_rates));
+}
+
+int mt7601u_register_device(struct mt7601u_dev *dev)
+{
+ struct ieee80211_hw *hw = dev->hw;
+ struct wiphy *wiphy = hw->wiphy;
+ int ret;
+
+ /* Reserve WCID 0 for mcast - thanks to this the AP's WCID will go
+ * to entry no. 1, as it does in the vendor driver.
+ */
+ dev->wcid_mask[0] |= 1;
+
+ /* init fake wcid for monitor interfaces */
+ dev->mon_wcid = devm_kmalloc(dev->dev, sizeof(*dev->mon_wcid),
+ GFP_KERNEL);
+ if (!dev->mon_wcid)
+ return -ENOMEM;
+ dev->mon_wcid->idx = 0xff;
+ dev->mon_wcid->hw_key_idx = -1;
+
+ SET_IEEE80211_DEV(hw, dev->dev);
+
+ hw->queues = 4;
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
+ ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
+ ieee80211_hw_set(hw, MFP_CAPABLE);
+ hw->max_rates = 1;
+ hw->max_report_rates = 7;
+ hw->max_rate_tries = 1;
+
+ hw->sta_data_size = sizeof(struct mt76_sta);
+ hw->vif_data_size = sizeof(struct mt76_vif);
+
+ SET_IEEE80211_PERM_ADDR(hw, dev->macaddr);
+
+ wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
+ ret = mt76_init_sband_2g(dev);
+ if (ret)
+ return ret;
+
+ INIT_DELAYED_WORK(&dev->mac_work, mt7601u_mac_work);
+ INIT_DELAYED_WORK(&dev->stat_work, mt7601u_tx_stat);
+
+ ret = ieee80211_register_hw(hw);
+ if (ret)
+ return ret;
+
+ mt7601u_init_debugfs(dev);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt7601u/initvals.h b/drivers/net/wireless/mediatek/mt7601u/initvals.h
new file mode 100644
index 000000000..59acbf570
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/initvals.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#ifndef __MT7601U_INITVALS_H
+#define __MT7601U_INITVALS_H
+
+static const struct mt76_reg_pair bbp_common_vals[] = {
+ { 65, 0x2c },
+ { 66, 0x38 },
+ { 68, 0x0b },
+ { 69, 0x12 },
+ { 70, 0x0a },
+ { 73, 0x10 },
+ { 81, 0x37 },
+ { 82, 0x62 },
+ { 83, 0x6a },
+ { 84, 0x99 },
+ { 86, 0x00 },
+ { 91, 0x04 },
+ { 92, 0x00 },
+ { 103, 0x00 },
+ { 105, 0x05 },
+ { 106, 0x35 },
+};
+
+static const struct mt76_reg_pair bbp_chip_vals[] = {
+ { 1, 0x04 }, { 4, 0x40 }, { 20, 0x06 }, { 31, 0x08 },
+ /* CCK Tx Control */
+ { 178, 0xff },
+ /* AGC/Sync controls */
+ { 66, 0x14 }, { 68, 0x8b }, { 69, 0x12 }, { 70, 0x09 },
+ { 73, 0x11 }, { 75, 0x60 }, { 76, 0x44 }, { 84, 0x9a },
+ { 86, 0x38 }, { 91, 0x07 }, { 92, 0x02 },
+ /* Rx Path Controls */
+ { 99, 0x50 }, { 101, 0x00 }, { 103, 0xc0 }, { 104, 0x92 },
+ { 105, 0x3c }, { 106, 0x03 }, { 128, 0x12 },
+ /* Change RXWI content: Gain Report */
+ { 142, 0x04 }, { 143, 0x37 },
+ /* Change RXWI content: Antenna Report */
+ { 142, 0x03 }, { 143, 0x99 },
+ /* Calibration Index Register */
+ /* CCK Receiver Control */
+ { 160, 0xeb }, { 161, 0xc4 }, { 162, 0x77 }, { 163, 0xf9 },
+ { 164, 0x88 }, { 165, 0x80 }, { 166, 0xff }, { 167, 0xe4 },
+ /* Added AGC controls - these AGC/GLRT registers are accessed
+ * through R195 and R196.
+ */
+ { 195, 0x00 }, { 196, 0x00 },
+ { 195, 0x01 }, { 196, 0x04 },
+ { 195, 0x02 }, { 196, 0x20 },
+ { 195, 0x03 }, { 196, 0x0a },
+ { 195, 0x06 }, { 196, 0x16 },
+ { 195, 0x07 }, { 196, 0x05 },
+ { 195, 0x08 }, { 196, 0x37 },
+ { 195, 0x0a }, { 196, 0x15 },
+ { 195, 0x0b }, { 196, 0x17 },
+ { 195, 0x0c }, { 196, 0x06 },
+ { 195, 0x0d }, { 196, 0x09 },
+ { 195, 0x0e }, { 196, 0x05 },
+ { 195, 0x0f }, { 196, 0x09 },
+ { 195, 0x10 }, { 196, 0x20 },
+ { 195, 0x20 }, { 196, 0x17 },
+ { 195, 0x21 }, { 196, 0x06 },
+ { 195, 0x22 }, { 196, 0x09 },
+ { 195, 0x23 }, { 196, 0x17 },
+ { 195, 0x24 }, { 196, 0x06 },
+ { 195, 0x25 }, { 196, 0x09 },
+ { 195, 0x26 }, { 196, 0x17 },
+ { 195, 0x27 }, { 196, 0x06 },
+ { 195, 0x28 }, { 196, 0x09 },
+ { 195, 0x29 }, { 196, 0x05 },
+ { 195, 0x2a }, { 196, 0x09 },
+ { 195, 0x80 }, { 196, 0x8b },
+ { 195, 0x81 }, { 196, 0x12 },
+ { 195, 0x82 }, { 196, 0x09 },
+ { 195, 0x83 }, { 196, 0x17 },
+ { 195, 0x84 }, { 196, 0x11 },
+ { 195, 0x85 }, { 196, 0x00 },
+ { 195, 0x86 }, { 196, 0x00 },
+ { 195, 0x87 }, { 196, 0x18 },
+ { 195, 0x88 }, { 196, 0x60 },
+ { 195, 0x89 }, { 196, 0x44 },
+ { 195, 0x8a }, { 196, 0x8b },
+ { 195, 0x8b }, { 196, 0x8b },
+ { 195, 0x8c }, { 196, 0x8b },
+ { 195, 0x8d }, { 196, 0x8b },
+ { 195, 0x8e }, { 196, 0x09 },
+ { 195, 0x8f }, { 196, 0x09 },
+ { 195, 0x90 }, { 196, 0x09 },
+ { 195, 0x91 }, { 196, 0x09 },
+ { 195, 0x92 }, { 196, 0x11 },
+ { 195, 0x93 }, { 196, 0x11 },
+ { 195, 0x94 }, { 196, 0x11 },
+ { 195, 0x95 }, { 196, 0x11 },
+ /* PPAD */
+ { 47, 0x80 }, { 60, 0x80 }, { 150, 0xd2 }, { 151, 0x32 },
+ { 152, 0x23 }, { 153, 0x41 }, { 154, 0x00 }, { 155, 0x4f },
+ { 253, 0x7e }, { 195, 0x30 }, { 196, 0x32 }, { 195, 0x31 },
+ { 196, 0x23 }, { 195, 0x32 }, { 196, 0x45 }, { 195, 0x35 },
+ { 196, 0x4a }, { 195, 0x36 }, { 196, 0x5a }, { 195, 0x37 },
+ { 196, 0x5a },
+};
+
+static const struct mt76_reg_pair mac_common_vals[] = {
+ { MT_LEGACY_BASIC_RATE, 0x0000013f },
+ { MT_HT_BASIC_RATE, 0x00008003 },
+ { MT_MAC_SYS_CTRL, 0x00000000 },
+ { MT_RX_FILTR_CFG, 0x00017f97 },
+ { MT_BKOFF_SLOT_CFG, 0x00000209 },
+ { MT_TX_SW_CFG0, 0x00000000 },
+ { MT_TX_SW_CFG1, 0x00080606 },
+ { MT_TX_LINK_CFG, 0x00001020 },
+ { MT_TX_TIMEOUT_CFG, 0x000a2090 },
+ { MT_MAX_LEN_CFG, 0x00003fff },
+ { MT_PBF_TX_MAX_PCNT, 0x1fbf1f1f },
+ { MT_PBF_RX_MAX_PCNT, 0x0000009f },
+ { MT_TX_RETRY_CFG, 0x47d01f0f },
+ { MT_AUTO_RSP_CFG, 0x00000013 },
+ { MT_CCK_PROT_CFG, 0x05740003 },
+ { MT_OFDM_PROT_CFG, 0x05740003 },
+ { MT_MM40_PROT_CFG, 0x03f44084 },
+ { MT_GF20_PROT_CFG, 0x01744004 },
+ { MT_GF40_PROT_CFG, 0x03f44084 },
+ { MT_MM20_PROT_CFG, 0x01744004 },
+ { MT_TXOP_CTRL_CFG, 0x0000583f },
+ { MT_TX_RTS_CFG, 0x01092b20 },
+ { MT_EXP_ACK_TIME, 0x002400ca },
+ { MT_TXOP_HLDR_ET, 0x00000002 },
+ { MT_XIFS_TIME_CFG, 0x33a41010 },
+ { MT_PWR_PIN_CFG, 0x00000000 },
+ { MT_PN_PAD_MODE, 0x00000001 },
+};
+
+static const struct mt76_reg_pair mac_chip_vals[] = {
+ { MT_TSO_CTRL, 0x00006050 },
+ { MT_BCN_OFFSET(0), 0x18100800 },
+ { MT_BCN_OFFSET(1), 0x38302820 },
+ { MT_PBF_SYS_CTRL, 0x00080c00 },
+ { MT_PBF_CFG, 0x7f723c1f },
+ { MT_FCE_PSE_CTRL, 0x00000001 },
+ { MT_PAUSE_ENABLE_CONTROL1, 0x00000000 },
+ { MT_TX0_RF_GAIN_CORR, 0x003b0005 },
+ { MT_TX0_RF_GAIN_ATTEN, 0x00006900 },
+ { MT_TX0_BB_GAIN_ATTEN, 0x00000400 },
+ { MT_TX_ALC_VGA3, 0x00060006 },
+ { MT_TX_SW_CFG0, 0x00000402 },
+ { MT_TX_SW_CFG1, 0x00000000 },
+ { MT_TX_SW_CFG2, 0x00000000 },
+ { MT_HEADER_TRANS_CTRL_REG, 0x00000000 },
+ { MT_FCE_CSO, 0x0000030f },
+ { MT_FCE_PARAMETERS, 0x00256f0f },
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/initvals_phy.h b/drivers/net/wireless/mediatek/mt7601u/initvals_phy.h
new file mode 100644
index 000000000..55cfedb24
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/initvals_phy.h
@@ -0,0 +1,283 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#ifndef __MT7601U_PHY_INITVALS_H
+#define __MT7601U_PHY_INITVALS_H
+
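+/* RF registers are accessed through the MCU memory map: the bank
+ * number is encoded in bits 16+ of the address and the register
+ * number in the low bits.
+ */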
+#define RF_REG_PAIR(bank, reg, value) \
+ { MT_MCU_MEMMAP_RF | (bank) << 16 | (reg), value }
+
+static const struct mt76_reg_pair rf_central[] = {
+ /* Bank 0 - for central blocks: BG, PLL, XTAL, LO, ADC/DAC */
+ RF_REG_PAIR(0, 0, 0x02),
+ RF_REG_PAIR(0, 1, 0x01),
+ RF_REG_PAIR(0, 2, 0x11),
+ RF_REG_PAIR(0, 3, 0xff),
+ RF_REG_PAIR(0, 4, 0x0a),
+ RF_REG_PAIR(0, 5, 0x20),
+ RF_REG_PAIR(0, 6, 0x00),
+ /* B/G */
+ RF_REG_PAIR(0, 7, 0x00),
+ RF_REG_PAIR(0, 8, 0x00),
+ RF_REG_PAIR(0, 9, 0x00),
+ RF_REG_PAIR(0, 10, 0x00),
+ RF_REG_PAIR(0, 11, 0x21),
+ /* XO */
+ RF_REG_PAIR(0, 13, 0x00), /* 40mhz xtal */
+ /* RF_REG_PAIR(0, 13, 0x13), */ /* 20mhz xtal */
+ RF_REG_PAIR(0, 14, 0x7c),
+ RF_REG_PAIR(0, 15, 0x22),
+ RF_REG_PAIR(0, 16, 0x80),
+ /* PLL */
+ RF_REG_PAIR(0, 17, 0x99),
+ RF_REG_PAIR(0, 18, 0x99),
+ RF_REG_PAIR(0, 19, 0x09),
+ RF_REG_PAIR(0, 20, 0x50),
+ RF_REG_PAIR(0, 21, 0xb0),
+ RF_REG_PAIR(0, 22, 0x00),
+ RF_REG_PAIR(0, 23, 0xc5),
+ RF_REG_PAIR(0, 24, 0xfc),
+ RF_REG_PAIR(0, 25, 0x40),
+ RF_REG_PAIR(0, 26, 0x4d),
+ RF_REG_PAIR(0, 27, 0x02),
+ RF_REG_PAIR(0, 28, 0x72),
+ RF_REG_PAIR(0, 29, 0x01),
+ RF_REG_PAIR(0, 30, 0x00),
+ RF_REG_PAIR(0, 31, 0x00),
+ /* test ports */
+ RF_REG_PAIR(0, 32, 0x00),
+ RF_REG_PAIR(0, 33, 0x00),
+ RF_REG_PAIR(0, 34, 0x23),
+ RF_REG_PAIR(0, 35, 0x01), /* change setting to reduce spurs */
+ RF_REG_PAIR(0, 36, 0x00),
+ RF_REG_PAIR(0, 37, 0x00),
+ /* ADC/DAC */
+ RF_REG_PAIR(0, 38, 0x00),
+ RF_REG_PAIR(0, 39, 0x20),
+ RF_REG_PAIR(0, 40, 0x00),
+ RF_REG_PAIR(0, 41, 0xd0),
+ RF_REG_PAIR(0, 42, 0x1b),
+ RF_REG_PAIR(0, 43, 0x02),
+ RF_REG_PAIR(0, 44, 0x00),
+};
+
+static const struct mt76_reg_pair rf_channel[] = {
+ RF_REG_PAIR(4, 0, 0x01),
+ RF_REG_PAIR(4, 1, 0x00),
+ RF_REG_PAIR(4, 2, 0x00),
+ RF_REG_PAIR(4, 3, 0x00),
+ /* LDO */
+ RF_REG_PAIR(4, 4, 0x00),
+ RF_REG_PAIR(4, 5, 0x08),
+ RF_REG_PAIR(4, 6, 0x00),
+ /* RX */
+ RF_REG_PAIR(4, 7, 0x5b),
+ RF_REG_PAIR(4, 8, 0x52),
+ RF_REG_PAIR(4, 9, 0xb6),
+ RF_REG_PAIR(4, 10, 0x57),
+ RF_REG_PAIR(4, 11, 0x33),
+ RF_REG_PAIR(4, 12, 0x22),
+ RF_REG_PAIR(4, 13, 0x3d),
+ RF_REG_PAIR(4, 14, 0x3e),
+ RF_REG_PAIR(4, 15, 0x13),
+ RF_REG_PAIR(4, 16, 0x22),
+ RF_REG_PAIR(4, 17, 0x23),
+ RF_REG_PAIR(4, 18, 0x02),
+ RF_REG_PAIR(4, 19, 0xa4),
+ RF_REG_PAIR(4, 20, 0x01),
+ RF_REG_PAIR(4, 21, 0x12),
+ RF_REG_PAIR(4, 22, 0x80),
+ RF_REG_PAIR(4, 23, 0xb3),
+ RF_REG_PAIR(4, 24, 0x00), /* reserved */
+ RF_REG_PAIR(4, 25, 0x00), /* reserved */
+ RF_REG_PAIR(4, 26, 0x00), /* reserved */
+ RF_REG_PAIR(4, 27, 0x00), /* reserved */
+ /* LOGEN */
+ RF_REG_PAIR(4, 28, 0x18),
+ RF_REG_PAIR(4, 29, 0xee),
+ RF_REG_PAIR(4, 30, 0x6b),
+ RF_REG_PAIR(4, 31, 0x31),
+ RF_REG_PAIR(4, 32, 0x5d),
+ RF_REG_PAIR(4, 33, 0x00), /* reserved */
+ /* TX */
+ RF_REG_PAIR(4, 34, 0x96),
+ RF_REG_PAIR(4, 35, 0x55),
+ RF_REG_PAIR(4, 36, 0x08),
+ RF_REG_PAIR(4, 37, 0xbb),
+ RF_REG_PAIR(4, 38, 0xb3),
+ RF_REG_PAIR(4, 39, 0xb3),
+ RF_REG_PAIR(4, 40, 0x03),
+ RF_REG_PAIR(4, 41, 0x00), /* reserved */
+ RF_REG_PAIR(4, 42, 0x00), /* reserved */
+ RF_REG_PAIR(4, 43, 0xc5),
+ RF_REG_PAIR(4, 44, 0xc5),
+ RF_REG_PAIR(4, 45, 0xc5),
+ RF_REG_PAIR(4, 46, 0x07),
+ RF_REG_PAIR(4, 47, 0xa8),
+ RF_REG_PAIR(4, 48, 0xef),
+ RF_REG_PAIR(4, 49, 0x1a),
+ /* PA */
+ RF_REG_PAIR(4, 54, 0x07),
+ RF_REG_PAIR(4, 55, 0xa7),
+ RF_REG_PAIR(4, 56, 0xcc),
+ RF_REG_PAIR(4, 57, 0x14),
+ RF_REG_PAIR(4, 58, 0x07),
+ RF_REG_PAIR(4, 59, 0xa8),
+ RF_REG_PAIR(4, 60, 0xd7),
+ RF_REG_PAIR(4, 61, 0x10),
+ RF_REG_PAIR(4, 62, 0x1c),
+ RF_REG_PAIR(4, 63, 0x00), /* reserved */
+};
+
+static const struct mt76_reg_pair rf_vga[] = {
+ RF_REG_PAIR(5, 0, 0x47),
+ RF_REG_PAIR(5, 1, 0x00),
+ RF_REG_PAIR(5, 2, 0x00),
+ RF_REG_PAIR(5, 3, 0x08),
+ RF_REG_PAIR(5, 4, 0x04),
+ RF_REG_PAIR(5, 5, 0x20),
+ RF_REG_PAIR(5, 6, 0x3a),
+ RF_REG_PAIR(5, 7, 0x3a),
+ RF_REG_PAIR(5, 8, 0x00),
+ RF_REG_PAIR(5, 9, 0x00),
+ RF_REG_PAIR(5, 10, 0x10),
+ RF_REG_PAIR(5, 11, 0x10),
+ RF_REG_PAIR(5, 12, 0x10),
+ RF_REG_PAIR(5, 13, 0x10),
+ RF_REG_PAIR(5, 14, 0x10),
+ RF_REG_PAIR(5, 15, 0x20),
+ RF_REG_PAIR(5, 16, 0x22),
+ RF_REG_PAIR(5, 17, 0x7c),
+ RF_REG_PAIR(5, 18, 0x00),
+ RF_REG_PAIR(5, 19, 0x00),
+ RF_REG_PAIR(5, 20, 0x00),
+ RF_REG_PAIR(5, 21, 0xf1),
+ RF_REG_PAIR(5, 22, 0x11),
+ RF_REG_PAIR(5, 23, 0x02),
+ RF_REG_PAIR(5, 24, 0x41),
+ RF_REG_PAIR(5, 25, 0x20),
+ RF_REG_PAIR(5, 26, 0x00),
+ RF_REG_PAIR(5, 27, 0xd7),
+ RF_REG_PAIR(5, 28, 0xa2),
+ RF_REG_PAIR(5, 29, 0x20),
+ RF_REG_PAIR(5, 30, 0x49),
+ RF_REG_PAIR(5, 31, 0x20),
+ RF_REG_PAIR(5, 32, 0x04),
+ RF_REG_PAIR(5, 33, 0xf1),
+ RF_REG_PAIR(5, 34, 0xa1),
+ RF_REG_PAIR(5, 35, 0x01),
+ RF_REG_PAIR(5, 41, 0x00),
+ RF_REG_PAIR(5, 42, 0x00),
+ RF_REG_PAIR(5, 43, 0x00),
+ RF_REG_PAIR(5, 44, 0x00),
+ RF_REG_PAIR(5, 45, 0x00),
+ RF_REG_PAIR(5, 46, 0x00),
+ RF_REG_PAIR(5, 47, 0x00),
+ RF_REG_PAIR(5, 48, 0x00),
+ RF_REG_PAIR(5, 49, 0x00),
+ RF_REG_PAIR(5, 50, 0x00),
+ RF_REG_PAIR(5, 51, 0x00),
+ RF_REG_PAIR(5, 52, 0x00),
+ RF_REG_PAIR(5, 53, 0x00),
+ RF_REG_PAIR(5, 54, 0x00),
+ RF_REG_PAIR(5, 55, 0x00),
+ RF_REG_PAIR(5, 56, 0x00),
+ RF_REG_PAIR(5, 57, 0x00),
+ RF_REG_PAIR(5, 58, 0x31),
+ RF_REG_PAIR(5, 59, 0x31),
+ RF_REG_PAIR(5, 60, 0x0a),
+ RF_REG_PAIR(5, 61, 0x02),
+ RF_REG_PAIR(5, 62, 0x00),
+ RF_REG_PAIR(5, 63, 0x00),
+};
+
+/* TODO: BBP178 is set to 0xff for "CCK CH14 OBW" which overrides the settings
+ * from channel switching. Seems stupid at best.
+ */
+static const struct mt76_reg_pair bbp_high_temp[] = {
+ { 75, 0x60 },
+ { 92, 0x02 },
+ { 178, 0xff }, /* For CCK CH14 OBW */
+ { 195, 0x88 }, { 196, 0x60 },
+}, bbp_high_temp_bw20[] = {
+ { 69, 0x12 },
+ { 91, 0x07 },
+ { 195, 0x23 }, { 196, 0x17 },
+ { 195, 0x24 }, { 196, 0x06 },
+ { 195, 0x81 }, { 196, 0x12 },
+ { 195, 0x83 }, { 196, 0x17 },
+}, bbp_high_temp_bw40[] = {
+ { 69, 0x15 },
+ { 91, 0x04 },
+ { 195, 0x23 }, { 196, 0x12 },
+ { 195, 0x24 }, { 196, 0x08 },
+ { 195, 0x81 }, { 196, 0x15 },
+ { 195, 0x83 }, { 196, 0x16 },
+}, bbp_low_temp[] = {
+ { 178, 0xff }, /* For CCK CH14 OBW */
+}, bbp_low_temp_bw20[] = {
+ { 69, 0x12 },
+ { 75, 0x5e },
+ { 91, 0x07 },
+ { 92, 0x02 },
+ { 195, 0x23 }, { 196, 0x17 },
+ { 195, 0x24 }, { 196, 0x06 },
+ { 195, 0x81 }, { 196, 0x12 },
+ { 195, 0x83 }, { 196, 0x17 },
+ { 195, 0x88 }, { 196, 0x5e },
+}, bbp_low_temp_bw40[] = {
+ { 69, 0x15 },
+ { 75, 0x5c },
+ { 91, 0x04 },
+ { 92, 0x03 },
+ { 195, 0x23 }, { 196, 0x10 },
+ { 195, 0x24 }, { 196, 0x08 },
+ { 195, 0x81 }, { 196, 0x15 },
+ { 195, 0x83 }, { 196, 0x16 },
+ { 195, 0x88 }, { 196, 0x5b },
+}, bbp_normal_temp[] = {
+ { 75, 0x60 },
+ { 92, 0x02 },
+ { 178, 0xff }, /* For CCK CH14 OBW */
+ { 195, 0x88 }, { 196, 0x60 },
+}, bbp_normal_temp_bw20[] = {
+ { 69, 0x12 },
+ { 91, 0x07 },
+ { 195, 0x23 }, { 196, 0x17 },
+ { 195, 0x24 }, { 196, 0x06 },
+ { 195, 0x81 }, { 196, 0x12 },
+ { 195, 0x83 }, { 196, 0x17 },
+}, bbp_normal_temp_bw40[] = {
+ { 69, 0x15 },
+ { 91, 0x04 },
+ { 195, 0x23 }, { 196, 0x12 },
+ { 195, 0x24 }, { 196, 0x08 },
+ { 195, 0x81 }, { 196, 0x15 },
+ { 195, 0x83 }, { 196, 0x16 },
+};
+
+#define BBP_TABLE(arr) { arr, ARRAY_SIZE(arr), }
+
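+/* The outer index selects the temperature mode (0 = normal, 1 = high,
+ * 2 = low), the inner one the table: 0 = BW20, 1 = BW40, 2 = the
+ * bandwidth-independent values for that mode.
+ */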
+static const struct reg_table {
+ const struct mt76_reg_pair *regs;
+ size_t n;
+} bbp_mode_table[3][3] = {
+ {
+ BBP_TABLE(bbp_normal_temp_bw20),
+ BBP_TABLE(bbp_normal_temp_bw40),
+ BBP_TABLE(bbp_normal_temp),
+ }, {
+ BBP_TABLE(bbp_high_temp_bw20),
+ BBP_TABLE(bbp_high_temp_bw40),
+ BBP_TABLE(bbp_high_temp),
+ }, {
+ BBP_TABLE(bbp_low_temp_bw20),
+ BBP_TABLE(bbp_low_temp_bw40),
+ BBP_TABLE(bbp_low_temp),
+ }
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/mac.c b/drivers/net/wireless/mediatek/mt7601u/mac.c
new file mode 100644
index 000000000..d2ee1aaa3
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/mac.c
@@ -0,0 +1,593 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#include "mt7601u.h"
+#include "trace.h"
+#include <linux/etherdevice.h>
+
+void mt7601u_set_macaddr(struct mt7601u_dev *dev, const u8 *addr)
+{
+ ether_addr_copy(dev->macaddr, addr);
+
+ if (!is_valid_ether_addr(dev->macaddr)) {
+ eth_random_addr(dev->macaddr);
+ dev_info(dev->dev,
+ "Invalid MAC address, using random address %pM\n",
+ dev->macaddr);
+ }
+
+ mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->macaddr));
+ mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(dev->macaddr + 4) |
+ FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
+}
+
+static void
+mt76_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate)
+{
+ u8 idx = FIELD_GET(MT_TXWI_RATE_MCS, rate);
+
+ txrate->idx = 0;
+ txrate->flags = 0;
+ txrate->count = 1;
+
+ switch (FIELD_GET(MT_TXWI_RATE_PHY_MODE, rate)) {
+ case MT_PHY_TYPE_OFDM:
+ txrate->idx = idx + 4;
+ return;
+ case MT_PHY_TYPE_CCK:
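+ /* Short-preamble CCK rates are reported with an index offset of 8. */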
+ if (idx >= 8)
+ idx -= 8;
+
+ txrate->idx = idx;
+ return;
+ case MT_PHY_TYPE_HT_GF:
+ txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+ fallthrough;
+ case MT_PHY_TYPE_HT:
+ txrate->flags |= IEEE80211_TX_RC_MCS;
+ txrate->idx = idx;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ if (FIELD_GET(MT_TXWI_RATE_BW, rate) == MT_PHY_BW_40)
+ txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+
+ if (rate & MT_TXWI_RATE_SGI)
+ txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
+}
+
+static void
+mt76_mac_fill_tx_status(struct mt7601u_dev *dev, struct ieee80211_tx_info *info,
+ struct mt76_tx_status *st)
+{
+ struct ieee80211_tx_rate *rate = info->status.rates;
+ int cur_idx, last_rate;
+ int i;
+
+ last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
+ mt76_mac_process_tx_rate(&rate[last_rate], st->rate);
+ if (last_rate < IEEE80211_TX_MAX_RATES - 1)
+ rate[last_rate + 1].idx = -1;
+
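+ /* The hardware reports only the final rate and the retry count;
+ * reconstruct the earlier attempts assuming one rate step down
+ * per retry.
+ */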
+ cur_idx = rate[last_rate].idx + st->retry;
+ for (i = 0; i <= last_rate; i++) {
+ rate[i].flags = rate[last_rate].flags;
+ rate[i].idx = max_t(int, 0, cur_idx - i);
+ rate[i].count = 1;
+ }
+
+ if (last_rate > 0)
+ rate[last_rate - 1].count = st->retry + 1 - last_rate;
+
+ info->status.ampdu_len = 1;
+ info->status.ampdu_ack_len = st->success;
+
+ if (st->is_probe)
+ info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
+
+ if (st->aggr)
+ info->flags |= IEEE80211_TX_CTL_AMPDU |
+ IEEE80211_TX_STAT_AMPDU;
+
+ if (!st->ack_req)
+ info->flags |= IEEE80211_TX_CTL_NO_ACK;
+ else if (st->success)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+}
+
+u16 mt76_mac_tx_rate_val(struct mt7601u_dev *dev,
+ const struct ieee80211_tx_rate *rate, u8 *nss_val)
+{
+ u16 rateval;
+ u8 phy, rate_idx;
+ u8 nss = 1;
+ u8 bw = 0;
+
+ if (rate->flags & IEEE80211_TX_RC_MCS) {
+ rate_idx = rate->idx;
+ nss = 1 + (rate->idx >> 3);
+ phy = MT_PHY_TYPE_HT;
+ if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
+ phy = MT_PHY_TYPE_HT_GF;
+ if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ bw = 1;
+ } else {
+ const struct ieee80211_rate *r;
+ int band = dev->chandef.chan->band;
+ u16 val;
+
+ r = &dev->hw->wiphy->bands[band]->bitrates[rate->idx];
+ if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+ val = r->hw_value_short;
+ else
+ val = r->hw_value;
+
+ phy = val >> 8;
+ rate_idx = val & 0xff;
+ bw = 0;
+ }
+
+ rateval = FIELD_PREP(MT_RXWI_RATE_MCS, rate_idx);
+ rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
+ rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
+ if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+ rateval |= MT_RXWI_RATE_SGI;
+
+ *nss_val = nss;
+ return rateval;
+}
+
+void mt76_mac_wcid_set_rate(struct mt7601u_dev *dev, struct mt76_wcid *wcid,
+ const struct ieee80211_tx_rate *rate)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ wcid->tx_rate = mt76_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
+ wcid->tx_rate_set = true;
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+struct mt76_tx_status mt7601u_mac_fetch_tx_status(struct mt7601u_dev *dev)
+{
+ struct mt76_tx_status stat = {};
+ u32 val;
+
+ val = mt7601u_rr(dev, MT_TX_STAT_FIFO);
+ stat.valid = !!(val & MT_TX_STAT_FIFO_VALID);
+ stat.success = !!(val & MT_TX_STAT_FIFO_SUCCESS);
+ stat.aggr = !!(val & MT_TX_STAT_FIFO_AGGR);
+ stat.ack_req = !!(val & MT_TX_STAT_FIFO_ACKREQ);
+ stat.pktid = FIELD_GET(MT_TX_STAT_FIFO_PID_TYPE, val);
+ stat.wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, val);
+ stat.rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, val);
+
+ return stat;
+}
+
+void mt76_send_tx_status(struct mt7601u_dev *dev, struct mt76_tx_status *stat)
+{
+ struct ieee80211_tx_info info = {};
+ struct ieee80211_sta *sta = NULL;
+ struct mt76_wcid *wcid = NULL;
+ void *msta;
+
+ rcu_read_lock();
+ if (stat->wcid < ARRAY_SIZE(dev->wcid))
+ wcid = rcu_dereference(dev->wcid[stat->wcid]);
+
+ if (wcid) {
+ msta = container_of(wcid, struct mt76_sta, wcid);
+ sta = container_of(msta, struct ieee80211_sta,
+ drv_priv);
+ }
+
+ mt76_mac_fill_tx_status(dev, &info, stat);
+
+ spin_lock_bh(&dev->mac_lock);
+ ieee80211_tx_status_noskb(dev->hw, sta, &info);
+ spin_unlock_bh(&dev->mac_lock);
+
+ rcu_read_unlock();
+}
+
+void mt7601u_mac_set_protection(struct mt7601u_dev *dev, bool legacy_prot,
+ int ht_mode)
+{
+ int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION;
+ bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+ u32 prot[6];
+ bool ht_rts[4] = {};
+ int i;
+
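+ /* Build the six consecutive protection registers starting at
+ * MT_CCK_PROT_CFG: entries 0/1 cover legacy CCK/OFDM frames,
+ * entries 2-5 the HT 20/40 MHz cases.
+ */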
+ prot[0] = MT_PROT_NAV_SHORT |
+ MT_PROT_TXOP_ALLOW_ALL |
+ MT_PROT_RTS_THR_EN;
+ prot[1] = prot[0];
+ if (legacy_prot)
+ prot[1] |= MT_PROT_CTRL_CTS2SELF;
+
+ prot[2] = prot[4] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_BW20;
+ prot[3] = prot[5] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_ALL;
+
+ if (legacy_prot) {
+ prot[2] |= MT_PROT_RATE_CCK_11;
+ prot[3] |= MT_PROT_RATE_CCK_11;
+ prot[4] |= MT_PROT_RATE_CCK_11;
+ prot[5] |= MT_PROT_RATE_CCK_11;
+ } else {
+ prot[2] |= MT_PROT_RATE_OFDM_24;
+ prot[3] |= MT_PROT_RATE_DUP_OFDM_24;
+ prot[4] |= MT_PROT_RATE_OFDM_24;
+ prot[5] |= MT_PROT_RATE_DUP_OFDM_24;
+ }
+
+ switch (mode) {
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
+ break;
+
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
+ ht_rts[0] = ht_rts[1] = ht_rts[2] = ht_rts[3] = true;
+ break;
+
+ case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
+ ht_rts[1] = ht_rts[3] = true;
+ break;
+
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
+ ht_rts[0] = ht_rts[1] = ht_rts[2] = ht_rts[3] = true;
+ break;
+ }
+
+ if (non_gf)
+ ht_rts[2] = ht_rts[3] = true;
+
+ for (i = 0; i < 4; i++)
+ if (ht_rts[i])
+ prot[i + 2] |= MT_PROT_CTRL_RTS_CTS;
+
+ for (i = 0; i < 6; i++)
+ mt7601u_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]);
+}
+
+void mt7601u_mac_set_short_preamble(struct mt7601u_dev *dev, bool short_preamb)
+{
+ if (short_preamb)
+ mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
+ else
+ mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
+}
+
+void mt7601u_mac_config_tsf(struct mt7601u_dev *dev, bool enable, int interval)
+{
+ u32 val = mt7601u_rr(dev, MT_BEACON_TIME_CFG);
+
+ val &= ~(MT_BEACON_TIME_CFG_TIMER_EN |
+ MT_BEACON_TIME_CFG_SYNC_MODE |
+ MT_BEACON_TIME_CFG_TBTT_EN);
+
+ if (!enable) {
+ mt7601u_wr(dev, MT_BEACON_TIME_CFG, val);
+ return;
+ }
+
+ val &= ~MT_BEACON_TIME_CFG_INTVAL;
+ val |= FIELD_PREP(MT_BEACON_TIME_CFG_INTVAL, interval << 4) |
+ MT_BEACON_TIME_CFG_TIMER_EN |
+ MT_BEACON_TIME_CFG_SYNC_MODE |
+ MT_BEACON_TIME_CFG_TBTT_EN;
+}
+
+static void mt7601u_check_mac_err(struct mt7601u_dev *dev)
+{
+ u32 val = mt7601u_rr(dev, 0x10f4);
+
+ if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5))))
+ return;
+
+ dev_err(dev->dev, "Error: MAC specific condition occurred\n");
+
+ mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
+ udelay(10);
+ mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
+}
+
+void mt7601u_mac_work(struct work_struct *work)
+{
+ struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev,
+ mac_work.work);
+ struct {
+ u32 addr_base;
+ u32 span;
+ u64 *stat_base;
+ } spans[] = {
+ { MT_RX_STA_CNT0, 3, dev->stats.rx_stat },
+ { MT_TX_STA_CNT0, 3, dev->stats.tx_stat },
+ { MT_TX_AGG_STAT, 1, dev->stats.aggr_stat },
+ { MT_MPDU_DENSITY_CNT, 1, dev->stats.zero_len_del },
+ { MT_TX_AGG_CNT_BASE0, 8, &dev->stats.aggr_n[0] },
+ { MT_TX_AGG_CNT_BASE1, 8, &dev->stats.aggr_n[16] },
+ };
+ u32 sum, n;
+ int i, j, k;
+
+ /* Note: using MCU_RANDOM_READ is actually slower than reading all the
+ * registers by hand. The MCU takes ca. 20ms to complete a read of 24
+ * registers, while reading them one by one takes roughly
+ * 24*200us =~ 5ms.
+ */
+
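+ /* Each counter register packs two 16-bit counters; accumulate both
+ * halves. The TX_AGG_CNT registers form a histogram of A-MPDU
+ * lengths, used below to keep a running average.
+ */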
+ k = 0;
+ n = 0;
+ sum = 0;
+ for (i = 0; i < ARRAY_SIZE(spans); i++)
+ for (j = 0; j < spans[i].span; j++) {
+ u32 val = mt7601u_rr(dev, spans[i].addr_base + j * 4);
+
+ spans[i].stat_base[j * 2] += val & 0xffff;
+ spans[i].stat_base[j * 2 + 1] += val >> 16;
+
+ /* Calculate average AMPDU length */
+ if (spans[i].addr_base != MT_TX_AGG_CNT_BASE0 &&
+ spans[i].addr_base != MT_TX_AGG_CNT_BASE1)
+ continue;
+
+ n += (val >> 16) + (val & 0xffff);
+ sum += (val & 0xffff) * (1 + k * 2) +
+ (val >> 16) * (2 + k * 2);
+ k++;
+ }
+
+ atomic_set(&dev->avg_ampdu_len, n ? DIV_ROUND_CLOSEST(sum, n) : 1);
+
+ mt7601u_check_mac_err(dev);
+
+ ieee80211_queue_delayed_work(dev->hw, &dev->mac_work, 10 * HZ);
+}
+
+void
+mt7601u_mac_wcid_setup(struct mt7601u_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
+{
+ u8 zmac[ETH_ALEN] = {};
+ u32 attr;
+
+ attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
+ FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
+
+ mt76_wr(dev, MT_WCID_ATTR(idx), attr);
+
+ if (mac)
+ memcpy(zmac, mac, sizeof(zmac));
+
+ mt7601u_addr_wr(dev, MT_WCID_ADDR(idx), zmac);
+}
+
+void mt7601u_mac_set_ampdu_factor(struct mt7601u_dev *dev)
+{
+ struct ieee80211_sta *sta;
+ struct mt76_wcid *wcid;
+ void *msta;
+ u8 min_factor = 3;
+ int i;
+
+ rcu_read_lock();
+ for (i = 0; i < ARRAY_SIZE(dev->wcid); i++) {
+ wcid = rcu_dereference(dev->wcid[i]);
+ if (!wcid)
+ continue;
+
+ msta = container_of(wcid, struct mt76_sta, wcid);
+ sta = container_of(msta, struct ieee80211_sta, drv_priv);
+
+ min_factor = min(min_factor, sta->ht_cap.ampdu_factor);
+ }
+ rcu_read_unlock();
+
+ mt7601u_wr(dev, MT_MAX_LEN_CFG, 0xa0fff |
+ FIELD_PREP(MT_MAX_LEN_CFG_AMPDU, min_factor));
+}
+
+static void
+mt76_mac_process_rate(struct ieee80211_rx_status *status, u16 rate)
+{
+ u8 idx = FIELD_GET(MT_RXWI_RATE_MCS, rate);
+
+ switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
+ case MT_PHY_TYPE_OFDM:
+ if (WARN_ON(idx >= 8))
+ idx = 0;
+ idx += 4;
+
+ status->rate_idx = idx;
+ return;
+ case MT_PHY_TYPE_CCK:
+ if (idx >= 8) {
+ idx -= 8;
+ status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
+ }
+
+ if (WARN_ON(idx >= 4))
+ idx = 0;
+
+ status->rate_idx = idx;
+ return;
+ case MT_PHY_TYPE_HT_GF:
+ status->enc_flags |= RX_ENC_FLAG_HT_GF;
+ fallthrough;
+ case MT_PHY_TYPE_HT:
+ status->encoding = RX_ENC_HT;
+ status->rate_idx = idx;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ if (rate & MT_RXWI_RATE_SGI)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+ if (rate & MT_RXWI_RATE_STBC)
+ status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;
+
+ if (rate & MT_RXWI_RATE_BW)
+ status->bw = RATE_INFO_BW_40;
+}
+
+static void
+mt7601u_rx_monitor_beacon(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi,
+ u16 rate, int rssi)
+{
+ dev->bcn_freq_off = rxwi->freq_off;
+ dev->bcn_phy_mode = FIELD_GET(MT_RXWI_RATE_PHY, rate);
+ ewma_rssi_add(&dev->avg_rssi, -rssi);
+}
+
+static int
+mt7601u_rx_is_our_beacon(struct mt7601u_dev *dev, u8 *data)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;
+
+ return ieee80211_is_beacon(hdr->frame_control) &&
+ ether_addr_equal(hdr->addr2, dev->ap_bssid);
+}
+
+u32 mt76_mac_process_rx(struct mt7601u_dev *dev, struct sk_buff *skb,
+ u8 *data, void *rxi)
+{
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ struct mt7601u_rxwi *rxwi = rxi;
+ u32 len, ctl = le32_to_cpu(rxwi->ctl);
+ u16 rate = le16_to_cpu(rxwi->rate);
+ int rssi;
+
+ len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
+ if (len < 10)
+ return 0;
+
+ if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_DECRYPT)) {
+ status->flag |= RX_FLAG_DECRYPTED;
+ status->flag |= RX_FLAG_MMIC_STRIPPED;
+ status->flag |= RX_FLAG_MIC_STRIPPED;
+ status->flag |= RX_FLAG_ICV_STRIPPED;
+ status->flag |= RX_FLAG_IV_STRIPPED;
+ }
+ /* let mac80211 take care of PN validation since apparently
+ * the hardware does not support it
+ */
+ if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_PN_LEN))
+ status->flag &= ~RX_FLAG_IV_STRIPPED;
+
+ status->chains = BIT(0);
+ rssi = mt7601u_phy_get_rssi(dev, rxwi, rate);
+ status->chain_signal[0] = status->signal = rssi;
+ status->freq = dev->chandef.chan->center_freq;
+ status->band = dev->chandef.chan->band;
+
+ mt76_mac_process_rate(status, rate);
+
+ spin_lock_bh(&dev->con_mon_lock);
+ if (mt7601u_rx_is_our_beacon(dev, data))
+ mt7601u_rx_monitor_beacon(dev, rxwi, rate, rssi);
+ else if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_U2M))
+ ewma_rssi_add(&dev->avg_rssi, -rssi);
+ spin_unlock_bh(&dev->con_mon_lock);
+
+ return len;
+}
+
+static enum mt76_cipher_type
+mt76_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
+{
+ memset(key_data, 0, 32);
+ if (!key)
+ return MT_CIPHER_NONE;
+
+ if (key->keylen > 32)
+ return MT_CIPHER_NONE;
+
+ memcpy(key_data, key->key, key->keylen);
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return MT_CIPHER_WEP40;
+ case WLAN_CIPHER_SUITE_WEP104:
+ return MT_CIPHER_WEP104;
+ case WLAN_CIPHER_SUITE_TKIP:
+ return MT_CIPHER_TKIP;
+ case WLAN_CIPHER_SUITE_CCMP:
+ return MT_CIPHER_AES_CCMP;
+ default:
+ return MT_CIPHER_NONE;
+ }
+}
+
+int mt76_mac_wcid_set_key(struct mt7601u_dev *dev, u8 idx,
+ struct ieee80211_key_conf *key)
+{
+ enum mt76_cipher_type cipher;
+ u8 key_data[32];
+ u8 iv_data[8];
+ u32 val;
+
+ cipher = mt76_mac_get_key_info(key, key_data);
+ if (cipher == MT_CIPHER_NONE && key)
+ return -EINVAL;
+
+ trace_set_key(dev, idx);
+
+ mt7601u_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
+
+ memset(iv_data, 0, sizeof(iv_data));
+ if (key) {
+ iv_data[3] = key->keyidx << 6;
+ if (cipher >= MT_CIPHER_TKIP) {
+ /* Note: start with 1 to comply with the spec
+ * (see comment in common/cmm_wpa.c:4291).
+ */
+ iv_data[0] |= 1;
+ iv_data[3] |= 0x20;
+ }
+ }
+ mt7601u_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
+
+ val = mt7601u_rr(dev, MT_WCID_ATTR(idx));
+ val &= ~MT_WCID_ATTR_PKEY_MODE & ~MT_WCID_ATTR_PKEY_MODE_EXT;
+ val |= FIELD_PREP(MT_WCID_ATTR_PKEY_MODE, cipher & 7) |
+ FIELD_PREP(MT_WCID_ATTR_PKEY_MODE_EXT, cipher >> 3);
+ val &= ~MT_WCID_ATTR_PAIRWISE;
+ val |= MT_WCID_ATTR_PAIRWISE *
+ !!(key && key->flags & IEEE80211_KEY_FLAG_PAIRWISE);
+ mt7601u_wr(dev, MT_WCID_ATTR(idx), val);
+
+ return 0;
+}
+
+int mt76_mac_shared_key_setup(struct mt7601u_dev *dev, u8 vif_idx, u8 key_idx,
+ struct ieee80211_key_conf *key)
+{
+ enum mt76_cipher_type cipher;
+ u8 key_data[32];
+ u32 val;
+
+ cipher = mt76_mac_get_key_info(key, key_data);
+ if (cipher == MT_CIPHER_NONE && key)
+ return -EINVAL;
+
+ trace_set_shared_key(dev, vif_idx, key_idx);
+
+ mt7601u_wr_copy(dev, MT_SKEY(vif_idx, key_idx),
+ key_data, sizeof(key_data));
+
+ val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
+ val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
+ val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
+ mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt7601u/mac.h b/drivers/net/wireless/mediatek/mt7601u/mac.h
new file mode 100644
index 000000000..54bd4fac5
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/mac.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#ifndef __MT76_MAC_H
+#define __MT76_MAC_H
+
+struct mt76_tx_status {
+ u8 valid:1;
+ u8 success:1;
+ u8 aggr:1;
+ u8 ack_req:1;
+ u8 is_probe:1;
+ u8 wcid;
+ u8 pktid;
+ u8 retry;
+ u16 rate;
+} __packed __aligned(2);
+
+/* Note: values in the original "RSSI" and "SNR" fields are not actually what
+ * they are called for MT7601U; the names used by this driver are educated
+ * guesses (see vendor mac/ral_omac.c).
+ */
+struct mt7601u_rxwi {
+ __le32 rxinfo;
+
+ __le32 ctl;
+
+ __le16 frag_sn;
+ __le16 rate;
+
+ u8 unknown;
+ u8 zero[3];
+
+ u8 snr;
+ u8 ant;
+ u8 gain;
+ u8 freq_off;
+
+ __le32 resv2;
+ __le32 expert_ant;
+} __packed __aligned(4);
+
+#define MT_RXINFO_BA BIT(0)
+#define MT_RXINFO_DATA BIT(1)
+#define MT_RXINFO_NULL BIT(2)
+#define MT_RXINFO_FRAG BIT(3)
+#define MT_RXINFO_U2M BIT(4)
+#define MT_RXINFO_MULTICAST BIT(5)
+#define MT_RXINFO_BROADCAST BIT(6)
+#define MT_RXINFO_MYBSS BIT(7)
+#define MT_RXINFO_CRCERR BIT(8)
+#define MT_RXINFO_ICVERR BIT(9)
+#define MT_RXINFO_MICERR BIT(10)
+#define MT_RXINFO_AMSDU BIT(11)
+#define MT_RXINFO_HTC BIT(12)
+#define MT_RXINFO_RSSI BIT(13)
+#define MT_RXINFO_L2PAD BIT(14)
+#define MT_RXINFO_AMPDU BIT(15)
+#define MT_RXINFO_DECRYPT BIT(16)
+#define MT_RXINFO_BSSIDX3 BIT(17)
+#define MT_RXINFO_WAPI_KEY BIT(18)
+#define MT_RXINFO_PN_LEN GENMASK(21, 19)
+#define MT_RXINFO_SW_PKT_80211 BIT(22)
+#define MT_RXINFO_TCP_SUM_BYPASS BIT(28)
+#define MT_RXINFO_IP_SUM_BYPASS BIT(29)
+#define MT_RXINFO_TCP_SUM_ERR BIT(30)
+#define MT_RXINFO_IP_SUM_ERR BIT(31)
+
+#define MT_RXWI_CTL_WCID GENMASK(7, 0)
+#define MT_RXWI_CTL_KEY_IDX GENMASK(9, 8)
+#define MT_RXWI_CTL_BSS_IDX GENMASK(12, 10)
+#define MT_RXWI_CTL_UDF GENMASK(15, 13)
+#define MT_RXWI_CTL_MPDU_LEN GENMASK(27, 16)
+#define MT_RXWI_CTL_TID GENMASK(31, 28)
+
+#define MT_RXWI_FRAG GENMASK(3, 0)
+#define MT_RXWI_SN GENMASK(15, 4)
+
+#define MT_RXWI_RATE_MCS GENMASK(6, 0)
+#define MT_RXWI_RATE_BW BIT(7)
+#define MT_RXWI_RATE_SGI BIT(8)
+#define MT_RXWI_RATE_STBC GENMASK(10, 9)
+#define MT_RXWI_RATE_ETXBF BIT(11)
+#define MT_RXWI_RATE_SND BIT(12)
+#define MT_RXWI_RATE_ITXBF BIT(13)
+#define MT_RXWI_RATE_PHY GENMASK(15, 14)
+
+#define MT_RXWI_GAIN_RSSI_VAL GENMASK(5, 0)
+#define MT_RXWI_GAIN_RSSI_LNA_ID GENMASK(7, 6)
+#define MT_RXWI_ANT_AUX_LNA BIT(7)
+
+#define MT_RXWI_EANT_ENC_ANT_ID GENMASK(7, 0)
+
+enum mt76_phy_type {
+ MT_PHY_TYPE_CCK,
+ MT_PHY_TYPE_OFDM,
+ MT_PHY_TYPE_HT,
+ MT_PHY_TYPE_HT_GF,
+};
+
+enum mt76_phy_bandwidth {
+ MT_PHY_BW_20,
+ MT_PHY_BW_40,
+};
+
+struct mt76_txwi {
+ __le16 flags;
+ __le16 rate_ctl;
+
+ u8 ack_ctl;
+ u8 wcid;
+ __le16 len_ctl;
+
+ __le32 iv;
+
+ __le32 eiv;
+
+ u8 aid;
+ u8 txstream;
+ __le16 ctl;
+} __packed __aligned(4);
+
+#define MT_TXWI_FLAGS_FRAG BIT(0)
+#define MT_TXWI_FLAGS_MMPS BIT(1)
+#define MT_TXWI_FLAGS_CFACK BIT(2)
+#define MT_TXWI_FLAGS_TS BIT(3)
+#define MT_TXWI_FLAGS_AMPDU BIT(4)
+#define MT_TXWI_FLAGS_MPDU_DENSITY GENMASK(7, 5)
+#define MT_TXWI_FLAGS_TXOP GENMASK(9, 8)
+#define MT_TXWI_FLAGS_CWMIN GENMASK(12, 10)
+#define MT_TXWI_FLAGS_NO_RATE_FALLBACK BIT(13)
+#define MT_TXWI_FLAGS_TX_RPT BIT(14)
+#define MT_TXWI_FLAGS_TX_RATE_LUT BIT(15)
+
+#define MT_TXWI_RATE_MCS GENMASK(6, 0)
+#define MT_TXWI_RATE_BW BIT(7)
+#define MT_TXWI_RATE_SGI BIT(8)
+#define MT_TXWI_RATE_STBC GENMASK(10, 9)
+#define MT_TXWI_RATE_PHY_MODE GENMASK(15, 14)
+
+#define MT_TXWI_ACK_CTL_REQ BIT(0)
+#define MT_TXWI_ACK_CTL_NSEQ BIT(1)
+#define MT_TXWI_ACK_CTL_BA_WINDOW GENMASK(7, 2)
+
+#define MT_TXWI_LEN_BYTE_CNT GENMASK(11, 0)
+#define MT_TXWI_LEN_PKTID GENMASK(15, 12)
+
+#define MT_TXWI_CTL_TX_POWER_ADJ GENMASK(3, 0)
+#define MT_TXWI_CTL_CHAN_CHECK_PKT BIT(4)
+#define MT_TXWI_CTL_PIFS_REV BIT(6)
+
+u32 mt76_mac_process_rx(struct mt7601u_dev *dev, struct sk_buff *skb,
+ u8 *data, void *rxi);
+int mt76_mac_wcid_set_key(struct mt7601u_dev *dev, u8 idx,
+ struct ieee80211_key_conf *key);
+void mt76_mac_wcid_set_rate(struct mt7601u_dev *dev, struct mt76_wcid *wcid,
+ const struct ieee80211_tx_rate *rate);
+
+int mt76_mac_shared_key_setup(struct mt7601u_dev *dev, u8 vif_idx, u8 key_idx,
+ struct ieee80211_key_conf *key);
+u16 mt76_mac_tx_rate_val(struct mt7601u_dev *dev,
+ const struct ieee80211_tx_rate *rate, u8 *nss_val);
+struct mt76_tx_status
+mt7601u_mac_fetch_tx_status(struct mt7601u_dev *dev);
+void mt76_send_tx_status(struct mt7601u_dev *dev, struct mt76_tx_status *stat);
+void mt7601u_set_macaddr(struct mt7601u_dev *dev, const u8 *addr);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c
new file mode 100644
index 000000000..671d8897a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/main.c
@@ -0,0 +1,426 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#include "mt7601u.h"
+#include "mac.h"
+#include <linux/etherdevice.h>
+
+static int mt7601u_start(struct ieee80211_hw *hw)
+{
+ struct mt7601u_dev *dev = hw->priv;
+ int ret;
+
+ mutex_lock(&dev->mutex);
+
+ ret = mt7601u_mac_start(dev);
+ if (ret)
+ goto out;
+
+ ieee80211_queue_delayed_work(dev->hw, &dev->mac_work,
+ MT_CALIBRATE_INTERVAL);
+ ieee80211_queue_delayed_work(dev->hw, &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+out:
+ mutex_unlock(&dev->mutex);
+ return ret;
+}
+
+static void mt7601u_stop(struct ieee80211_hw *hw)
+{
+ struct mt7601u_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mutex);
+
+ cancel_delayed_work_sync(&dev->cal_work);
+ cancel_delayed_work_sync(&dev->mac_work);
+ mt7601u_mac_stop(dev);
+
+ mutex_unlock(&dev->mutex);
+}
+
+static int mt7601u_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt7601u_dev *dev = hw->priv;
+ struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+ unsigned int idx = 0;
+ unsigned int wcid = GROUP_WCID(idx);
+
+ /* Note: for AP do the AP-STA things mt76 does:
+ * - beacon offsets
+ * - do mac address tricks
+ * - shift vif idx
+ */
+ mvif->idx = idx;
+
+ if (!ether_addr_equal(dev->macaddr, vif->addr))
+ mt7601u_set_macaddr(dev, vif->addr);
+
+ if (dev->wcid_mask[wcid / BITS_PER_LONG] & BIT(wcid % BITS_PER_LONG))
+ return -ENOSPC;
+ dev->wcid_mask[wcid / BITS_PER_LONG] |= BIT(wcid % BITS_PER_LONG);
+ mvif->group_wcid.idx = wcid;
+ mvif->group_wcid.hw_key_idx = -1;
+
+ return 0;
+}
+
+static void mt7601u_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt7601u_dev *dev = hw->priv;
+ struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+ unsigned int wcid = mvif->group_wcid.idx;
+
+ dev->wcid_mask[wcid / BITS_PER_LONG] &= ~BIT(wcid % BITS_PER_LONG);
+}
+
+static int mt7601u_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct mt7601u_dev *dev = hw->priv;
+ int ret = 0;
+
+ mutex_lock(&dev->mutex);
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ ieee80211_stop_queues(hw);
+ ret = mt7601u_phy_set_channel(dev, &hw->conf.chandef);
+ ieee80211_wake_queues(hw);
+ }
+
+ mutex_unlock(&dev->mutex);
+
+ return ret;
+}
+
+static void
+mt76_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast)
+{
+ struct mt7601u_dev *dev = hw->priv;
+ u32 flags = 0;
+
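+/* Map a mac80211 FIF_* flag onto the matching RX_FILTR_CFG drop bits:
+ * the hardware drop bit is set whenever the flag is not requested.
+ */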
+#define MT76_FILTER(_flag, _hw) do { \
+ flags |= *total_flags & FIF_##_flag; \
+ dev->rxfilter &= ~(_hw); \
+ dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
+ } while (0)
+
+ mutex_lock(&dev->mutex);
+
+ dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
+
+ MT76_FILTER(OTHER_BSS, MT_RX_FILTR_CFG_PROMISC);
+ MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
+ MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
+ MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK |
+ MT_RX_FILTR_CFG_CTS |
+ MT_RX_FILTR_CFG_CFEND |
+ MT_RX_FILTR_CFG_CFACK |
+ MT_RX_FILTR_CFG_BA |
+ MT_RX_FILTR_CFG_CTRL_RSV);
+ MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
+
+ *total_flags = flags;
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+ mutex_unlock(&dev->mutex);
+}
+
+static void
+mt7601u_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u32 changed)
+{
+ struct mt7601u_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mutex);
+
+ if (changed & BSS_CHANGED_ASSOC)
+ mt7601u_phy_con_cal_onoff(dev, info);
+
+ if (changed & BSS_CHANGED_BSSID) {
+ mt7601u_addr_wr(dev, MT_MAC_BSSID_DW0, info->bssid);
+
+ /* Note: this is a hack because beacon_int is not changed
+ * on leave nor is any more appropriate event generated.
+ * rt2x00 doesn't seem to be bothered though.
+ */
+ if (is_zero_ether_addr(info->bssid))
+ mt7601u_mac_config_tsf(dev, false, 0);
+ }
+
+ if (changed & BSS_CHANGED_BASIC_RATES) {
+ mt7601u_wr(dev, MT_LEGACY_BASIC_RATE, info->basic_rates);
+ mt7601u_wr(dev, MT_HT_FBK_CFG0, 0x65432100);
+ mt7601u_wr(dev, MT_HT_FBK_CFG1, 0xedcba980);
+ mt7601u_wr(dev, MT_LG_FBK_CFG0, 0xedcba988);
+ mt7601u_wr(dev, MT_LG_FBK_CFG1, 0x00002100);
+ }
+
+ if (changed & BSS_CHANGED_BEACON_INT)
+ mt7601u_mac_config_tsf(dev, true, info->beacon_int);
+
+ if (changed & BSS_CHANGED_HT || changed & BSS_CHANGED_ERP_CTS_PROT)
+ mt7601u_mac_set_protection(dev, info->use_cts_prot,
+ info->ht_operation_mode);
+
+ if (changed & BSS_CHANGED_ERP_PREAMBLE)
+ mt7601u_mac_set_short_preamble(dev, info->use_short_preamble);
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ int slottime = info->use_short_slot ? 9 : 20;
+
+ mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG,
+ MT_BKOFF_SLOT_CFG_SLOTTIME, slottime);
+ }
+
+ if (changed & BSS_CHANGED_ASSOC)
+ mt7601u_phy_recalibrate_after_assoc(dev);
+
+ mutex_unlock(&dev->mutex);
+}
+
+static int
+mt76_wcid_alloc(struct mt7601u_dev *dev)
+{
+ int i, idx = 0;
+
+ for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) {
+ idx = ffs(~dev->wcid_mask[i]);
+ if (!idx)
+ continue;
+
+ idx--;
+ dev->wcid_mask[i] |= BIT(idx);
+ break;
+ }
+
+ idx = i * BITS_PER_LONG + idx;
+ if (idx > 119)
+ return -1;
+
+ return idx;
+}
+
+static int
+mt7601u_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7601u_dev *dev = hw->priv;
+ struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+ struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+ int ret = 0;
+ int idx = 0;
+
+ mutex_lock(&dev->mutex);
+
+ idx = mt76_wcid_alloc(dev);
+ if (idx < 0) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ msta->wcid.idx = idx;
+ msta->wcid.hw_key_idx = -1;
+ mt7601u_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
+ mt76_clear(dev, MT_WCID_DROP(idx), MT_WCID_DROP_MASK(idx));
+ rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
+ mt7601u_mac_set_ampdu_factor(dev);
+
+out:
+ mutex_unlock(&dev->mutex);
+
+ return ret;
+}
+
+static int
+mt7601u_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7601u_dev *dev = hw->priv;
+ struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+ int idx = msta->wcid.idx;
+
+ mutex_lock(&dev->mutex);
+ rcu_assign_pointer(dev->wcid[idx], NULL);
+ mt76_set(dev, MT_WCID_DROP(idx), MT_WCID_DROP_MASK(idx));
+ dev->wcid_mask[idx / BITS_PER_LONG] &= ~BIT(idx % BITS_PER_LONG);
+ mt7601u_mac_wcid_setup(dev, idx, 0, NULL);
+ mt7601u_mac_set_ampdu_factor(dev);
+ mutex_unlock(&dev->mutex);
+
+ return 0;
+}
+
+static void
+mt7601u_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum sta_notify_cmd cmd, struct ieee80211_sta *sta)
+{
+}
+
+static void
+mt7601u_sw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *mac_addr)
+{
+ struct mt7601u_dev *dev = hw->priv;
+
+ mt7601u_agc_save(dev);
+ set_bit(MT7601U_STATE_SCANNING, &dev->state);
+}
+
+static void
+mt7601u_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt7601u_dev *dev = hw->priv;
+
+ mt7601u_agc_restore(dev);
+ clear_bit(MT7601U_STATE_SCANNING, &dev->state);
+
+ ieee80211_queue_delayed_work(dev->hw, &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+ if (dev->freq_cal.enabled)
+ ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work,
+ MT_FREQ_CAL_INIT_DELAY);
+}
+
+static int
+mt7601u_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct mt7601u_dev *dev = hw->priv;
+ struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+ struct mt76_sta *msta = sta ? (struct mt76_sta *) sta->drv_priv : NULL;
+ struct mt76_wcid *wcid = msta ? &msta->wcid : &mvif->group_wcid;
+ int idx = key->keyidx;
+ int ret;
+
+ /* fall back to sw encryption for unsupported ciphers */
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (cmd == SET_KEY) {
+ key->hw_key_idx = wcid->idx;
+ wcid->hw_key_idx = idx;
+ } else {
+ if (idx == wcid->hw_key_idx)
+ wcid->hw_key_idx = -1;
+
+ key = NULL;
+ }
+
+ if (!msta) {
+ if (key || wcid->hw_key_idx == idx) {
+ ret = mt76_mac_wcid_set_key(dev, wcid->idx, key);
+ if (ret)
+ return ret;
+ }
+
+ return mt76_mac_shared_key_setup(dev, mvif->idx, idx, key);
+ }
+
+ return mt76_mac_wcid_set_key(dev, msta->wcid.idx, key);
+}
+
+static int mt7601u_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ struct mt7601u_dev *dev = hw->priv;
+
+ mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, value);
+
+ return 0;
+}
+
+static int
+mt76_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ struct mt7601u_dev *dev = hw->priv;
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
+ u16 ssn = params->ssn;
+ struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+
+ WARN_ON(msta->wcid.idx > GROUP_WCID(0));
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4,
+ BIT(16 + tid));
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ ieee80211_send_bar(vif, sta->addr, tid, msta->agg_ssn[tid]);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ msta->agg_ssn[tid] = ssn << 4;
+ return IEEE80211_AMPDU_TX_START_IMMEDIATE;
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ }
+
+ return 0;
+}
+
+static void
+mt76_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7601u_dev *dev = hw->priv;
+ struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+ struct ieee80211_sta_rates *rates;
+ struct ieee80211_tx_rate rate = {};
+
+ rcu_read_lock();
+ rates = rcu_dereference(sta->rates);
+
+ if (!rates)
+ goto out;
+
+ rate.idx = rates->rate[0].idx;
+ rate.flags = rates->rate[0].flags;
+ mt76_mac_wcid_set_rate(dev, &msta->wcid, &rate);
+
+out:
+ rcu_read_unlock();
+}
+
+const struct ieee80211_ops mt7601u_ops = {
+ .tx = mt7601u_tx,
+ .start = mt7601u_start,
+ .stop = mt7601u_stop,
+ .add_interface = mt7601u_add_interface,
+ .remove_interface = mt7601u_remove_interface,
+ .config = mt7601u_config,
+ .configure_filter = mt76_configure_filter,
+ .bss_info_changed = mt7601u_bss_info_changed,
+ .sta_add = mt7601u_sta_add,
+ .sta_remove = mt7601u_sta_remove,
+ .sta_notify = mt7601u_sta_notify,
+ .set_key = mt7601u_set_key,
+ .conf_tx = mt7601u_conf_tx,
+ .sw_scan_start = mt7601u_sw_scan,
+ .sw_scan_complete = mt7601u_sw_scan_complete,
+ .ampdu_action = mt76_ampdu_action,
+ .sta_rate_tbl_update = mt76_sta_rate_tbl_update,
+ .set_rts_threshold = mt7601u_set_rts_threshold,
+};
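
[Editor's note - illustrative aside, not part of the patch.] The MT76_FILTER macro in mt76_configure_filter() above is easy to misread: mac80211's FIF_* flags mean "please pass this traffic up", while the MT_RX_FILTR_CFG_* bits mean "drop this traffic in hardware", so a hardware drop bit ends up set exactly when the corresponding FIF flag is absent. The minimal standalone sketch below shows that mapping for one flag; FIF_FCSFAIL and HW_DROP_CRC_ERR here are stand-in constants for the real mac80211 flag and register bit, not values taken from the driver.

#include <stdio.h>

#define FIF_FCSFAIL      0x01   /* stand-in for the mac80211 flag */
#define HW_DROP_CRC_ERR  0x100  /* stand-in for MT_RX_FILTR_CFG_CRC_ERR */

int main(void)
{
	unsigned int total_flags = FIF_FCSFAIL;  /* mac80211 wants CRC-error frames */
	unsigned int flags = 0, rxfilter = 0;

	/* same shape as MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR) */
	flags |= total_flags & FIF_FCSFAIL;                     /* what we can honour */
	rxfilter &= ~HW_DROP_CRC_ERR;                           /* clear the old setting */
	rxfilter |= !(flags & FIF_FCSFAIL) * HW_DROP_CRC_ERR;   /* drop only if not wanted */

	/* prints rxfilter=0: CRC-error frames are passed up to mac80211 */
	printf("rxfilter=%#x\n", rxfilter);
	return 0;
}
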
diff --git a/drivers/net/wireless/mediatek/mt7601u/mcu.c b/drivers/net/wireless/mediatek/mt7601u/mcu.c
new file mode 100644
index 000000000..1b5cc271a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/mcu.c
@@ -0,0 +1,535 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/usb.h>
+#include <linux/skbuff.h>
+
+#include "mt7601u.h"
+#include "dma.h"
+#include "mcu.h"
+#include "usb.h"
+#include "trace.h"
+
+#define MCU_FW_URB_MAX_PAYLOAD 0x3800
+#define MCU_FW_URB_SIZE (MCU_FW_URB_MAX_PAYLOAD + 12)
+#define MCU_RESP_URB_SIZE 1024
+
+static inline int firmware_running(struct mt7601u_dev *dev)
+{
+ return mt7601u_rr(dev, MT_MCU_COM_REG0) == 1;
+}
+
+static inline void skb_put_le32(struct sk_buff *skb, u32 val)
+{
+ put_unaligned_le32(val, skb_put(skb, 4));
+}
+
+static inline void mt7601u_dma_skb_wrap_cmd(struct sk_buff *skb,
+ u8 seq, enum mcu_cmd cmd)
+{
+ WARN_ON(mt7601u_dma_skb_wrap(skb, CPU_TX_PORT, DMA_COMMAND,
+ FIELD_PREP(MT_TXD_CMD_INFO_SEQ, seq) |
+ FIELD_PREP(MT_TXD_CMD_INFO_TYPE, cmd)));
+}
+
+static inline void trace_mt_mcu_msg_send_cs(struct mt7601u_dev *dev,
+ struct sk_buff *skb, bool need_resp)
+{
+ u32 i, csum = 0;
+
+ for (i = 0; i < skb->len / 4; i++)
+ csum ^= get_unaligned_le32(skb->data + i * 4);
+
+ trace_mt_mcu_msg_send(dev, skb, csum, need_resp);
+}
+
+static struct sk_buff *mt7601u_mcu_msg_alloc(const void *data, int len)
+{
+ struct sk_buff *skb;
+
+ WARN_ON(len % 4); /* if length is not divisible by 4 we need to pad */
+
+ skb = alloc_skb(len + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+ if (skb) {
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+ skb_put_data(skb, data, len);
+ }
+
+ return skb;
+}
+
+static int mt7601u_mcu_wait_resp(struct mt7601u_dev *dev, u8 seq)
+{
+ struct urb *urb = dev->mcu.resp.urb;
+ u32 rxfce;
+ int urb_status, ret, i = 5;
+
+ while (i--) {
+ if (!wait_for_completion_timeout(&dev->mcu.resp_cmpl,
+ msecs_to_jiffies(300))) {
+ dev_warn(dev->dev, "Warning: %s retrying\n", __func__);
+ continue;
+ }
+
+ /* Make copies of important data before reusing the urb */
+ rxfce = get_unaligned_le32(dev->mcu.resp.buf);
+ urb_status = urb->status * mt7601u_urb_has_error(urb);
+
+ ret = mt7601u_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
+ &dev->mcu.resp, GFP_KERNEL,
+ mt7601u_complete_urb,
+ &dev->mcu.resp_cmpl);
+ if (ret)
+ return ret;
+
+ if (urb_status)
+ dev_err(dev->dev, "Error: MCU resp urb failed:%d\n",
+ urb_status);
+
+ if (FIELD_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce) == seq &&
+ FIELD_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce) == CMD_DONE)
+ return 0;
+
+ dev_err(dev->dev, "Error: MCU resp evt:%lx seq:%hhx-%lx!\n",
+ FIELD_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce),
+ seq, FIELD_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce));
+ }
+
+ dev_err(dev->dev, "Error: %s timed out\n", __func__);
+ return -ETIMEDOUT;
+}
+
+static int
+mt7601u_mcu_msg_send(struct mt7601u_dev *dev, struct sk_buff *skb,
+ enum mcu_cmd cmd, bool wait_resp)
+{
+ struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+ unsigned cmd_pipe = usb_sndbulkpipe(usb_dev,
+ dev->out_eps[MT_EP_OUT_INBAND_CMD]);
+ int sent, ret;
+ u8 seq = 0;
+
+ if (test_bit(MT7601U_STATE_REMOVED, &dev->state)) {
+ consume_skb(skb);
+ return 0;
+ }
+
+ mutex_lock(&dev->mcu.mutex);
+
+ if (wait_resp)
+ while (!seq)
+ seq = ++dev->mcu.msg_seq & 0xf;
+
+ mt7601u_dma_skb_wrap_cmd(skb, seq, cmd);
+
+ if (dev->mcu.resp_cmpl.done)
+ dev_err(dev->dev, "Error: MCU response pre-completed!\n");
+
+ trace_mt_mcu_msg_send_cs(dev, skb, wait_resp);
+ trace_mt_submit_urb_sync(dev, cmd_pipe, skb->len);
+ ret = usb_bulk_msg(usb_dev, cmd_pipe, skb->data, skb->len, &sent, 500);
+ if (ret) {
+ dev_err(dev->dev, "Error: send MCU cmd failed:%d\n", ret);
+ goto out;
+ }
+ if (sent != skb->len)
+ dev_err(dev->dev, "Error: %s sent != skb->len\n", __func__);
+
+ if (wait_resp)
+ ret = mt7601u_mcu_wait_resp(dev, seq);
+out:
+ mutex_unlock(&dev->mcu.mutex);
+
+ consume_skb(skb);
+
+ return ret;
+}
+
+static int mt7601u_mcu_function_select(struct mt7601u_dev *dev,
+ enum mcu_function func, u32 val)
+{
+ struct sk_buff *skb;
+ struct {
+ __le32 id;
+ __le32 value;
+ } __packed __aligned(4) msg = {
+ .id = cpu_to_le32(func),
+ .value = cpu_to_le32(val),
+ };
+
+ skb = mt7601u_mcu_msg_alloc(&msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+ return mt7601u_mcu_msg_send(dev, skb, CMD_FUN_SET_OP, func == 5);
+}
+
+int mt7601u_mcu_tssi_read_kick(struct mt7601u_dev *dev, int use_hvga)
+{
+ int ret;
+
+ if (!test_bit(MT7601U_STATE_MCU_RUNNING, &dev->state))
+ return 0;
+
+ ret = mt7601u_mcu_function_select(dev, ATOMIC_TSSI_SETTING,
+ use_hvga);
+ if (ret) {
+ dev_warn(dev->dev, "Warning: MCU TSSI read kick failed\n");
+ return ret;
+ }
+
+ dev->tssi_read_trig = true;
+
+ return 0;
+}
+
+int
+mt7601u_mcu_calibrate(struct mt7601u_dev *dev, enum mcu_calibrate cal, u32 val)
+{
+ struct sk_buff *skb;
+ struct {
+ __le32 id;
+ __le32 value;
+ } __packed __aligned(4) msg = {
+ .id = cpu_to_le32(cal),
+ .value = cpu_to_le32(val),
+ };
+
+ skb = mt7601u_mcu_msg_alloc(&msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+ return mt7601u_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP, true);
+}
+
+int mt7601u_write_reg_pairs(struct mt7601u_dev *dev, u32 base,
+ const struct mt76_reg_pair *data, int n)
+{
+ const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 8;
+ struct sk_buff *skb;
+ int cnt, i, ret;
+
+ if (!n)
+ return 0;
+
+ cnt = min(max_vals_per_cmd, n);
+
+ skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+
+ for (i = 0; i < cnt; i++) {
+ skb_put_le32(skb, base + data[i].reg);
+ skb_put_le32(skb, data[i].value);
+ }
+
+ ret = mt7601u_mcu_msg_send(dev, skb, CMD_RANDOM_WRITE, cnt == n);
+ if (ret)
+ return ret;
+
+ return mt7601u_write_reg_pairs(dev, base, data + cnt, n - cnt);
+}
+
+int mt7601u_burst_write_regs(struct mt7601u_dev *dev, u32 offset,
+ const u32 *data, int n)
+{
+ const int max_regs_per_cmd = INBAND_PACKET_MAX_LEN / 4 - 1;
+ struct sk_buff *skb;
+ int cnt, i, ret;
+
+ if (!n)
+ return 0;
+
+ cnt = min(max_regs_per_cmd, n);
+
+ skb = alloc_skb(cnt * 4 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+
+ skb_put_le32(skb, MT_MCU_MEMMAP_WLAN + offset);
+ for (i = 0; i < cnt; i++)
+ skb_put_le32(skb, data[i]);
+
+ ret = mt7601u_mcu_msg_send(dev, skb, CMD_BURST_WRITE, cnt == n);
+ if (ret)
+ return ret;
+
+ return mt7601u_burst_write_regs(dev, offset + cnt * 4,
+ data + cnt, n - cnt);
+}
+
+struct mt76_fw_header {
+ __le32 ilm_len;
+ __le32 dlm_len;
+ __le16 build_ver;
+ __le16 fw_ver;
+ u8 pad[4];
+ char build_time[16];
+};
+
+struct mt76_fw {
+ struct mt76_fw_header hdr;
+ u8 ivb[MT_MCU_IVB_SIZE];
+ u8 ilm[];
+};
+
+static int __mt7601u_dma_fw(struct mt7601u_dev *dev,
+ const struct mt7601u_dma_buf *dma_buf,
+ const void *data, u32 len, u32 dst_addr)
+{
+ DECLARE_COMPLETION_ONSTACK(cmpl);
+ struct mt7601u_dma_buf buf = *dma_buf; /* we need to fake length */
+ __le32 reg;
+ u32 val;
+ int ret;
+
+ reg = cpu_to_le32(FIELD_PREP(MT_TXD_INFO_TYPE, DMA_PACKET) |
+ FIELD_PREP(MT_TXD_INFO_D_PORT, CPU_TX_PORT) |
+ FIELD_PREP(MT_TXD_INFO_LEN, len));
+ memcpy(buf.buf, &reg, sizeof(reg));
+ memcpy(buf.buf + sizeof(reg), data, len);
+ memset(buf.buf + sizeof(reg) + len, 0, 8);
+
+ ret = mt7601u_vendor_single_wr(dev, MT_VEND_WRITE_FCE,
+ MT_FCE_DMA_ADDR, dst_addr);
+ if (ret)
+ return ret;
+ len = roundup(len, 4);
+ ret = mt7601u_vendor_single_wr(dev, MT_VEND_WRITE_FCE,
+ MT_FCE_DMA_LEN, len << 16);
+ if (ret)
+ return ret;
+
+ buf.len = MT_DMA_HDR_LEN + len + 4;
+ ret = mt7601u_usb_submit_buf(dev, USB_DIR_OUT, MT_EP_OUT_INBAND_CMD,
+ &buf, GFP_KERNEL,
+ mt7601u_complete_urb, &cmpl);
+ if (ret)
+ return ret;
+
+ if (!wait_for_completion_timeout(&cmpl, msecs_to_jiffies(1000))) {
+ dev_err(dev->dev, "Error: firmware upload timed out\n");
+ usb_kill_urb(buf.urb);
+ return -ETIMEDOUT;
+ }
+ if (mt7601u_urb_has_error(buf.urb)) {
+ dev_err(dev->dev, "Error: firmware upload urb failed:%d\n",
+ buf.urb->status);
+ return buf.urb->status;
+ }
+
+ val = mt7601u_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
+ val++;
+ mt7601u_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);
+
+ return 0;
+}
+
+static int
+mt7601u_dma_fw(struct mt7601u_dev *dev, struct mt7601u_dma_buf *dma_buf,
+ const void *data, int len, u32 dst_addr)
+{
+ int n, ret;
+
+ if (len == 0)
+ return 0;
+
+ n = min(MCU_FW_URB_MAX_PAYLOAD, len);
+ ret = __mt7601u_dma_fw(dev, dma_buf, data, n, dst_addr);
+ if (ret)
+ return ret;
+
+ if (!mt76_poll_msec(dev, MT_MCU_COM_REG1, BIT(31), BIT(31), 500))
+ return -ETIMEDOUT;
+
+ return mt7601u_dma_fw(dev, dma_buf, data + n, len - n, dst_addr + n);
+}
+
+static int
+mt7601u_upload_firmware(struct mt7601u_dev *dev, const struct mt76_fw *fw)
+{
+ struct mt7601u_dma_buf dma_buf;
+ void *ivb;
+ u32 ilm_len, dlm_len;
+ int i, ret;
+
+ ivb = kmemdup(fw->ivb, sizeof(fw->ivb), GFP_KERNEL);
+ if (!ivb)
+ return -ENOMEM;
+ if (mt7601u_usb_alloc_buf(dev, MCU_FW_URB_SIZE, &dma_buf)) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ilm_len = le32_to_cpu(fw->hdr.ilm_len) - sizeof(fw->ivb);
+ dev_dbg(dev->dev, "loading FW - ILM %u + IVB %zu\n",
+ ilm_len, sizeof(fw->ivb));
+ ret = mt7601u_dma_fw(dev, &dma_buf, fw->ilm, ilm_len, sizeof(fw->ivb));
+ if (ret)
+ goto error;
+
+ dlm_len = le32_to_cpu(fw->hdr.dlm_len);
+ dev_dbg(dev->dev, "loading FW - DLM %u\n", dlm_len);
+ ret = mt7601u_dma_fw(dev, &dma_buf, fw->ilm + ilm_len,
+ dlm_len, MT_MCU_DLM_OFFSET);
+ if (ret)
+ goto error;
+
+ ret = mt7601u_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT,
+ 0x12, 0, ivb, sizeof(fw->ivb));
+ if (ret < 0)
+ goto error;
+ ret = 0;
+
+ for (i = 100; i && !firmware_running(dev); i--)
+ msleep(10);
+ if (!i) {
+ ret = -ETIMEDOUT;
+ goto error;
+ }
+
+ dev_dbg(dev->dev, "Firmware running!\n");
+error:
+ kfree(ivb);
+ mt7601u_usb_free_buf(dev, &dma_buf);
+
+ return ret;
+}
+
+static int mt7601u_load_firmware(struct mt7601u_dev *dev)
+{
+ const struct firmware *fw;
+ const struct mt76_fw_header *hdr;
+ int len, ret;
+ u32 val;
+
+ mt7601u_wr(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN |
+ MT_USB_DMA_CFG_TX_BULK_EN));
+
+ if (firmware_running(dev))
+ return firmware_request_cache(dev->dev, MT7601U_FIRMWARE);
+
+ ret = request_firmware(&fw, MT7601U_FIRMWARE, dev->dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr))
+ goto err_inv_fw;
+
+ hdr = (const struct mt76_fw_header *) fw->data;
+
+ if (le32_to_cpu(hdr->ilm_len) <= MT_MCU_IVB_SIZE)
+ goto err_inv_fw;
+
+ len = sizeof(*hdr);
+ len += le32_to_cpu(hdr->ilm_len);
+ len += le32_to_cpu(hdr->dlm_len);
+
+ if (fw->size != len)
+ goto err_inv_fw;
+
+ val = le16_to_cpu(hdr->fw_ver);
+ dev_info(dev->dev,
+ "Firmware Version: %d.%d.%02d Build: %x Build time: %.16s\n",
+ (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf,
+ le16_to_cpu(hdr->build_ver), hdr->build_time);
+
+ len = le32_to_cpu(hdr->ilm_len);
+
+ mt7601u_wr(dev, 0x94c, 0);
+ mt7601u_wr(dev, MT_FCE_PSE_CTRL, 0);
+
+ mt7601u_vendor_reset(dev);
+ msleep(5);
+
+ mt7601u_wr(dev, 0xa44, 0);
+ mt7601u_wr(dev, 0x230, 0x84210);
+ mt7601u_wr(dev, 0x400, 0x80c00);
+ mt7601u_wr(dev, 0x800, 1);
+
+ mt7601u_rmw(dev, MT_PBF_CFG, 0, (MT_PBF_CFG_TX0Q_EN |
+ MT_PBF_CFG_TX1Q_EN |
+ MT_PBF_CFG_TX2Q_EN |
+ MT_PBF_CFG_TX3Q_EN));
+
+ mt7601u_wr(dev, MT_FCE_PSE_CTRL, 1);
+
+ mt7601u_wr(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN |
+ MT_USB_DMA_CFG_TX_BULK_EN));
+ val = mt76_set(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_CLR);
+ val &= ~MT_USB_DMA_CFG_TX_CLR;
+ mt7601u_wr(dev, MT_USB_DMA_CFG, val);
+
+ /* FCE tx_fs_base_ptr */
+ mt7601u_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
+ /* FCE tx_fs_max_cnt */
+ mt7601u_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 1);
+ /* FCE pdma enable */
+ mt7601u_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
+ /* FCE skip_fs_en */
+ mt7601u_wr(dev, MT_FCE_SKIP_FS, 3);
+
+ ret = mt7601u_upload_firmware(dev, (const struct mt76_fw *)fw->data);
+
+ release_firmware(fw);
+
+ return ret;
+
+err_inv_fw:
+ dev_err(dev->dev, "Invalid firmware image\n");
+ release_firmware(fw);
+ return -ENOENT;
+}
+
+int mt7601u_mcu_init(struct mt7601u_dev *dev)
+{
+ int ret;
+
+ mutex_init(&dev->mcu.mutex);
+
+ ret = mt7601u_load_firmware(dev);
+ if (ret)
+ return ret;
+
+ set_bit(MT7601U_STATE_MCU_RUNNING, &dev->state);
+
+ return 0;
+}
+
+int mt7601u_mcu_cmd_init(struct mt7601u_dev *dev)
+{
+ int ret;
+
+ ret = mt7601u_mcu_function_select(dev, Q_SELECT, 1);
+ if (ret)
+ return ret;
+
+ init_completion(&dev->mcu.resp_cmpl);
+ if (mt7601u_usb_alloc_buf(dev, MCU_RESP_URB_SIZE, &dev->mcu.resp)) {
+ mt7601u_usb_free_buf(dev, &dev->mcu.resp);
+ return -ENOMEM;
+ }
+
+ ret = mt7601u_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
+ &dev->mcu.resp, GFP_KERNEL,
+ mt7601u_complete_urb, &dev->mcu.resp_cmpl);
+ if (ret) {
+ mt7601u_usb_free_buf(dev, &dev->mcu.resp);
+ return ret;
+ }
+
+ return 0;
+}
+
+void mt7601u_mcu_cmd_deinit(struct mt7601u_dev *dev)
+{
+ usb_kill_urb(dev->mcu.resp.urb);
+ mt7601u_usb_free_buf(dev, &dev->mcu.resp);
+}
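
[Editor's note - illustrative aside, not part of the patch.] mt7601u_write_reg_pairs() above builds its CMD_RANDOM_WRITE payload as a flat run of little-endian (address, value) 32-bit words, at most INBAND_PACKET_MAX_LEN / 8 pairs per command, recursing for the remainder. The short userspace sketch below reproduces just that byte layout; the register offsets are hypothetical examples and the buffer handling is simplified, so it only illustrates the framing rather than the driver code.

#include <stdint.h>
#include <stdio.h>

static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;
}

int main(void)
{
	/* hypothetical register writes, just to show the layout */
	struct { uint32_t reg, value; } pairs[] = {
		{ 0x0404, 0x00000001 },
		{ 0x1004, 0x0000000c },
	};
	uint32_t base = 0x00410000;      /* MT_MCU_MEMMAP_WLAN */
	uint8_t payload[sizeof(pairs)];  /* 8 bytes per pair */
	size_t i, n = sizeof(pairs) / sizeof(pairs[0]);

	for (i = 0; i < n; i++) {
		put_le32(payload + i * 8, base + pairs[i].reg);
		put_le32(payload + i * 8 + 4, pairs[i].value);
	}

	printf("%zu pairs -> %zu payload bytes\n", n, sizeof(payload));
	return 0;
}
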
diff --git a/drivers/net/wireless/mediatek/mt7601u/mcu.h b/drivers/net/wireless/mediatek/mt7601u/mcu.h
new file mode 100644
index 000000000..56cdd31aa
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/mcu.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#ifndef __MT7601U_MCU_H
+#define __MT7601U_MCU_H
+
+struct mt7601u_dev;
+
+/* Register definitions */
+#define MT_MCU_RESET_CTL 0x070C
+#define MT_MCU_INT_LEVEL 0x0718
+#define MT_MCU_COM_REG0 0x0730
+#define MT_MCU_COM_REG1 0x0734
+#define MT_MCU_COM_REG2 0x0738
+#define MT_MCU_COM_REG3 0x073C
+
+#define MT_MCU_IVB_SIZE 0x40
+#define MT_MCU_DLM_OFFSET 0x80000
+
+#define MT_MCU_MEMMAP_WLAN 0x00410000
+#define MT_MCU_MEMMAP_BBP 0x40000000
+#define MT_MCU_MEMMAP_RF 0x80000000
+
+#define INBAND_PACKET_MAX_LEN 192
+
+enum mcu_cmd {
+ CMD_FUN_SET_OP = 1,
+ CMD_LOAD_CR = 2,
+ CMD_INIT_GAIN_OP = 3,
+ CMD_DYNC_VGA_OP = 6,
+ CMD_TDLS_CH_SW = 7,
+ CMD_BURST_WRITE = 8,
+ CMD_READ_MODIFY_WRITE = 9,
+ CMD_RANDOM_READ = 10,
+ CMD_BURST_READ = 11,
+ CMD_RANDOM_WRITE = 12,
+ CMD_LED_MODE_OP = 16,
+ CMD_POWER_SAVING_OP = 20,
+ CMD_WOW_CONFIG = 21,
+ CMD_WOW_QUERY = 22,
+ CMD_WOW_FEATURE = 24,
+ CMD_CARRIER_DETECT_OP = 28,
+ CMD_RADOR_DETECT_OP = 29,
+ CMD_SWITCH_CHANNEL_OP = 30,
+ CMD_CALIBRATION_OP = 31,
+ CMD_BEACON_OP = 32,
+ CMD_ANTENNA_OP = 33,
+};
+
+enum mcu_function {
+ Q_SELECT = 1,
+ ATOMIC_TSSI_SETTING = 5,
+};
+
+enum mcu_power_mode {
+ RADIO_OFF = 0x30,
+ RADIO_ON = 0x31,
+ RADIO_OFF_AUTO_WAKEUP = 0x32,
+ RADIO_OFF_ADVANCE = 0x33,
+ RADIO_ON_ADVANCE = 0x34,
+};
+
+enum mcu_calibrate {
+ MCU_CAL_R = 1,
+ MCU_CAL_DCOC,
+ MCU_CAL_LC,
+ MCU_CAL_LOFT,
+ MCU_CAL_TXIQ,
+ MCU_CAL_BW,
+ MCU_CAL_DPD,
+ MCU_CAL_RXIQ,
+ MCU_CAL_TXDCOC,
+};
+
+int mt7601u_mcu_init(struct mt7601u_dev *dev);
+	/* Note: for AP mode, do the AP-STA things mt76 does:
+	 * - beacon offsets
+	 * - do mac address tricks
+	 * - shift vif idx
+	 */
+int mt7601u_mcu_tssi_read_kick(struct mt7601u_dev *dev, int use_hvga);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/mt7601u.h b/drivers/net/wireless/mediatek/mt7601u/mt7601u.h
new file mode 100644
index 000000000..a122f1dd3
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/mt7601u.h
@@ -0,0 +1,392 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#ifndef MT7601U_H
+#define MT7601U_H
+
+#include <linux/bitfield.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/usb.h>
+#include <linux/completion.h>
+#include <net/mac80211.h>
+#include <linux/debugfs.h>
+#include <linux/average.h>
+
+#include "regs.h"
+
+#define MT_CALIBRATE_INTERVAL (4 * HZ)
+
+#define MT_FREQ_CAL_INIT_DELAY (30 * HZ)
+#define MT_FREQ_CAL_CHECK_INTERVAL (10 * HZ)
+#define MT_FREQ_CAL_ADJ_INTERVAL (HZ / 2)
+
+#define MT_BBP_REG_VERSION 0x00
+
+#define MT_USB_AGGR_SIZE_LIMIT 28 /* * 1024B */
+#define MT_USB_AGGR_TIMEOUT 0x80 /* * 33ns */
+#define MT_RX_ORDER 3
+#define MT_RX_URB_SIZE (PAGE_SIZE << MT_RX_ORDER)
+
+struct mt7601u_dma_buf {
+ struct urb *urb;
+ void *buf;
+ dma_addr_t dma;
+ size_t len;
+};
+
+struct mt7601u_mcu {
+ struct mutex mutex;
+
+ u8 msg_seq;
+
+ struct mt7601u_dma_buf resp;
+ struct completion resp_cmpl;
+};
+
+struct mt7601u_freq_cal {
+ struct delayed_work work;
+ u8 freq;
+ bool enabled;
+ bool adjusting;
+};
+
+struct mac_stats {
+ u64 rx_stat[6];
+ u64 tx_stat[6];
+ u64 aggr_stat[2];
+ u64 aggr_n[32];
+ u64 zero_len_del[2];
+};
+
+#define N_RX_ENTRIES 16
+struct mt7601u_rx_queue {
+ struct mt7601u_dev *dev;
+
+ struct mt7601u_dma_buf_rx {
+ struct urb *urb;
+ struct page *p;
+ } e[N_RX_ENTRIES];
+
+ unsigned int start;
+ unsigned int end;
+ unsigned int entries;
+ unsigned int pending;
+};
+
+#define N_TX_ENTRIES 64
+
+struct mt7601u_tx_queue {
+ struct mt7601u_dev *dev;
+
+ struct mt7601u_dma_buf_tx {
+ struct urb *urb;
+ struct sk_buff *skb;
+ } e[N_TX_ENTRIES];
+
+ unsigned int start;
+ unsigned int end;
+ unsigned int entries;
+ unsigned int used;
+ unsigned int fifo_seq;
+};
+
+/* WCID allocation:
+ * 0: mcast wcid
+ * 1: bssid wcid
+ * 1...: STAs
+ * ...7e: group wcids
+ * 7f: reserved
+ */
+#define N_WCIDS 128
+#define GROUP_WCID(idx) (N_WCIDS - 2 - idx)
+
+struct mt7601u_eeprom_params;
+
+#define MT_EE_TEMPERATURE_SLOPE 39
+#define MT_FREQ_OFFSET_INVALID -128
+
+enum mt_temp_mode {
+ MT_TEMP_MODE_NORMAL,
+ MT_TEMP_MODE_HIGH,
+ MT_TEMP_MODE_LOW,
+};
+
+enum mt_bw {
+ MT_BW_20,
+ MT_BW_40,
+};
+
+enum {
+ MT7601U_STATE_INITIALIZED,
+ MT7601U_STATE_REMOVED,
+ MT7601U_STATE_WLAN_RUNNING,
+ MT7601U_STATE_MCU_RUNNING,
+ MT7601U_STATE_SCANNING,
+ MT7601U_STATE_READING_STATS,
+ MT7601U_STATE_MORE_STATS,
+};
+
+DECLARE_EWMA(rssi, 10, 4);
+
+/**
+ * struct mt7601u_dev - adapter structure
+ * @lock: protects @wcid->tx_rate.
+ * @mac_lock: locks out mac80211's tx status and rx paths.
+ * @tx_lock: protects @tx_q and changes of MT7601U_STATE_*_STATS
+ * flags in @state.
+ * @rx_lock: protects @rx_q.
+ * @con_mon_lock: protects @ap_bssid, @bcn_*, @avg_rssi.
+ * @mutex: ensures exclusive access from mac80211 callbacks.
+ * @vendor_req_mutex: protects @vend_buf, ensures atomicity of read/write
+ * accesses
+ * @reg_atomic_mutex: ensures atomicity of indirect register accesses
+ * (accesses to RF and BBP).
+ * @hw_atomic_mutex: ensures exclusive access to HW during critical
+ * operations (power management, channel switch).
+ */
+struct mt7601u_dev {
+ struct ieee80211_hw *hw;
+ struct device *dev;
+
+ unsigned long state;
+
+ struct mutex mutex;
+
+ unsigned long wcid_mask[N_WCIDS / BITS_PER_LONG];
+
+ struct cfg80211_chan_def chandef;
+ struct ieee80211_supported_band *sband_2g;
+
+ struct mt7601u_mcu mcu;
+
+ struct delayed_work cal_work;
+ struct delayed_work mac_work;
+
+ struct workqueue_struct *stat_wq;
+ struct delayed_work stat_work;
+
+ struct mt76_wcid *mon_wcid;
+ struct mt76_wcid __rcu *wcid[N_WCIDS];
+
+ spinlock_t lock;
+ spinlock_t mac_lock;
+
+ const u16 *beacon_offsets;
+
+ u8 macaddr[ETH_ALEN];
+ struct mt7601u_eeprom_params *ee;
+
+ struct mutex vendor_req_mutex;
+ void *vend_buf;
+
+ struct mutex reg_atomic_mutex;
+ struct mutex hw_atomic_mutex;
+
+ u32 rxfilter;
+ u32 debugfs_reg;
+
+ u8 out_eps[8];
+ u8 in_eps[8];
+ u16 out_max_packet;
+ u16 in_max_packet;
+
+ /* TX */
+ spinlock_t tx_lock;
+ struct tasklet_struct tx_tasklet;
+ struct mt7601u_tx_queue *tx_q;
+ struct sk_buff_head tx_skb_done;
+
+ atomic_t avg_ampdu_len;
+
+ /* RX */
+ spinlock_t rx_lock;
+ struct tasklet_struct rx_tasklet;
+ struct mt7601u_rx_queue rx_q;
+
+ /* Connection monitoring things */
+ spinlock_t con_mon_lock;
+ u8 ap_bssid[ETH_ALEN];
+
+ s8 bcn_freq_off;
+ u8 bcn_phy_mode;
+
+ struct ewma_rssi avg_rssi;
+
+ u8 agc_save;
+
+ struct mt7601u_freq_cal freq_cal;
+
+ bool tssi_read_trig;
+
+ s8 tssi_init;
+ s8 tssi_init_hvga;
+ s16 tssi_init_hvga_offset_db;
+
+ int prev_pwr_diff;
+
+ enum mt_temp_mode temp_mode;
+ int curr_temp;
+ int dpd_temp;
+ s8 raw_temp;
+ bool pll_lock_protect;
+
+ u8 bw;
+ bool chan_ext_below;
+
+ /* PA mode */
+ u32 rf_pa_mode[2];
+
+ struct mac_stats stats;
+};
+
+struct mt7601u_tssi_params {
+ char tssi0;
+ int trgt_power;
+};
+
+struct mt76_wcid {
+ u8 idx;
+ u8 hw_key_idx;
+
+ u16 tx_rate;
+ bool tx_rate_set;
+ u8 tx_rate_nss;
+};
+
+struct mt76_vif {
+ u8 idx;
+
+ struct mt76_wcid group_wcid;
+};
+
+struct mt76_sta {
+ struct mt76_wcid wcid;
+ u16 agg_ssn[IEEE80211_NUM_TIDS];
+};
+
+struct mt76_reg_pair {
+ u32 reg;
+ u32 value;
+};
+
+struct mt7601u_rxwi;
+
+extern const struct ieee80211_ops mt7601u_ops;
+
+void mt7601u_init_debugfs(struct mt7601u_dev *dev);
+
+u32 mt7601u_rr(struct mt7601u_dev *dev, u32 offset);
+void mt7601u_wr(struct mt7601u_dev *dev, u32 offset, u32 val);
+u32 mt7601u_rmw(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val);
+u32 mt7601u_rmc(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val);
+void mt7601u_wr_copy(struct mt7601u_dev *dev, u32 offset,
+ const void *data, int len);
+
+int mt7601u_wait_asic_ready(struct mt7601u_dev *dev);
+bool mt76_poll(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val,
+ int timeout);
+bool mt76_poll_msec(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val,
+ int timeout);
+
+/* Compatibility with mt76 */
+#define mt76_rmw_field(_dev, _reg, _field, _val) \
+ mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
+
+static inline u32 mt76_rr(struct mt7601u_dev *dev, u32 offset)
+{
+ return mt7601u_rr(dev, offset);
+}
+
+static inline void mt76_wr(struct mt7601u_dev *dev, u32 offset, u32 val)
+{
+ return mt7601u_wr(dev, offset, val);
+}
+
+static inline u32
+mt76_rmw(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val)
+{
+ return mt7601u_rmw(dev, offset, mask, val);
+}
+
+static inline u32 mt76_set(struct mt7601u_dev *dev, u32 offset, u32 val)
+{
+ return mt76_rmw(dev, offset, 0, val);
+}
+
+static inline u32 mt76_clear(struct mt7601u_dev *dev, u32 offset, u32 val)
+{
+ return mt76_rmw(dev, offset, val, 0);
+}
+
+int mt7601u_write_reg_pairs(struct mt7601u_dev *dev, u32 base,
+ const struct mt76_reg_pair *data, int len);
+int mt7601u_burst_write_regs(struct mt7601u_dev *dev, u32 offset,
+ const u32 *data, int n);
+void mt7601u_addr_wr(struct mt7601u_dev *dev, const u32 offset, const u8 *addr);
+
+/* Init */
+struct mt7601u_dev *mt7601u_alloc_device(struct device *dev);
+int mt7601u_init_hardware(struct mt7601u_dev *dev);
+int mt7601u_register_device(struct mt7601u_dev *dev);
+void mt7601u_cleanup(struct mt7601u_dev *dev);
+
+int mt7601u_mac_start(struct mt7601u_dev *dev);
+void mt7601u_mac_stop(struct mt7601u_dev *dev);
+
+/* PHY */
+int mt7601u_phy_init(struct mt7601u_dev *dev);
+int mt7601u_wait_bbp_ready(struct mt7601u_dev *dev);
+void mt7601u_set_rx_path(struct mt7601u_dev *dev, u8 path);
+void mt7601u_set_tx_dac(struct mt7601u_dev *dev, u8 path);
+int mt7601u_bbp_set_bw(struct mt7601u_dev *dev, int bw);
+void mt7601u_agc_save(struct mt7601u_dev *dev);
+void mt7601u_agc_restore(struct mt7601u_dev *dev);
+int mt7601u_phy_set_channel(struct mt7601u_dev *dev,
+ struct cfg80211_chan_def *chandef);
+void mt7601u_phy_recalibrate_after_assoc(struct mt7601u_dev *dev);
+int mt7601u_phy_get_rssi(struct mt7601u_dev *dev,
+ struct mt7601u_rxwi *rxwi, u16 rate);
+void mt7601u_phy_con_cal_onoff(struct mt7601u_dev *dev,
+ struct ieee80211_bss_conf *info);
+
+/* MAC */
+void mt7601u_mac_work(struct work_struct *work);
+void mt7601u_mac_set_protection(struct mt7601u_dev *dev, bool legacy_prot,
+ int ht_mode);
+void mt7601u_mac_set_short_preamble(struct mt7601u_dev *dev, bool short_preamb);
+void mt7601u_mac_config_tsf(struct mt7601u_dev *dev, bool enable, int interval);
+void
+mt7601u_mac_wcid_setup(struct mt7601u_dev *dev, u8 idx, u8 vif_idx, u8 *mac);
+void mt7601u_mac_set_ampdu_factor(struct mt7601u_dev *dev);
+
+/* TX */
+void mt7601u_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+ struct sk_buff *skb);
+int mt7601u_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params);
+void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb);
+void mt7601u_tx_stat(struct work_struct *work);
+
+/* util */
+void mt76_remove_hdr_pad(struct sk_buff *skb);
+int mt76_insert_hdr_pad(struct sk_buff *skb);
+
+u32 mt7601u_bbp_set_ctrlch(struct mt7601u_dev *dev, bool below);
+
+static inline u32 mt7601u_mac_set_ctrlch(struct mt7601u_dev *dev, bool below)
+{
+ return mt7601u_rmc(dev, MT_TX_BAND_CFG, 1, below);
+}
+
+int mt7601u_dma_init(struct mt7601u_dev *dev);
+void mt7601u_dma_cleanup(struct mt7601u_dev *dev);
+
+int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
+ struct mt76_wcid *wcid, int hw_q);
+
+#endif
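
[Editor's note - illustrative aside, not part of the patch.] The WCID allocation comment and the GROUP_WCID() macro above describe a 128-entry table whose top slots hold the per-vif group WCIDs plus one reserved entry, which appears to be why mt76_wcid_alloc() in main.c rejects anything past index 119. Below is a standalone sketch of that first-free-bit allocation, assuming a hosted C environment and using the GCC/Clang __builtin_ffsl builtin in place of the kernel's ffs() helper.

#include <stdio.h>
#include <limits.h>

#define N_WCIDS   128
#define LONG_BITS (sizeof(unsigned long) * CHAR_BIT)

static int wcid_alloc(unsigned long *mask, size_t words)
{
	size_t i;
	int idx = 0;

	for (i = 0; i < words; i++) {
		idx = __builtin_ffsl(~mask[i]);  /* first clear bit, 1-based */
		if (!idx)
			continue;                /* this word is full */
		idx--;
		mask[i] |= 1UL << idx;
		break;
	}
	if (i == words)
		return -1;                       /* whole table is full */

	idx += i * LONG_BITS;
	return idx > 119 ? -1 : idx;             /* top slots are group/reserved WCIDs */
}

int main(void)
{
	unsigned long mask[N_WCIDS / LONG_BITS] = { 0x3 };  /* WCIDs 0 and 1 taken */

	printf("allocated WCID %d\n", wcid_alloc(mask, N_WCIDS / LONG_BITS));
	return 0;
}
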
diff --git a/drivers/net/wireless/mediatek/mt7601u/phy.c b/drivers/net/wireless/mediatek/mt7601u/phy.c
new file mode 100644
index 000000000..28db24a2b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/phy.c
@@ -0,0 +1,1252 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#include "mt7601u.h"
+#include "mcu.h"
+#include "eeprom.h"
+#include "trace.h"
+#include "initvals_phy.h"
+
+#include <linux/etherdevice.h>
+
+static void mt7601u_agc_reset(struct mt7601u_dev *dev);
+
+static int
+mt7601u_rf_wr(struct mt7601u_dev *dev, u8 bank, u8 offset, u8 value)
+{
+ int ret = 0;
+
+ if (WARN_ON(!test_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state)) ||
+ WARN_ON(offset > 63))
+ return -EINVAL;
+ if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+ return 0;
+
+ mutex_lock(&dev->reg_atomic_mutex);
+
+ if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ mt7601u_wr(dev, MT_RF_CSR_CFG,
+ FIELD_PREP(MT_RF_CSR_CFG_DATA, value) |
+ FIELD_PREP(MT_RF_CSR_CFG_REG_BANK, bank) |
+ FIELD_PREP(MT_RF_CSR_CFG_REG_ID, offset) |
+ MT_RF_CSR_CFG_WR |
+ MT_RF_CSR_CFG_KICK);
+ trace_rf_write(dev, bank, offset, value);
+out:
+ mutex_unlock(&dev->reg_atomic_mutex);
+
+ if (ret < 0)
+ dev_err(dev->dev, "Error: RF write %02hhx:%02hhx failed:%d!!\n",
+ bank, offset, ret);
+
+ return ret;
+}
+
+static int
+mt7601u_rf_rr(struct mt7601u_dev *dev, u8 bank, u8 offset)
+{
+ int ret = -ETIMEDOUT;
+ u32 val;
+
+ if (WARN_ON(!test_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state)) ||
+ WARN_ON(offset > 63))
+ return -EINVAL;
+ if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+ return 0xff;
+
+ mutex_lock(&dev->reg_atomic_mutex);
+
+ if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100))
+ goto out;
+
+ mt7601u_wr(dev, MT_RF_CSR_CFG,
+ FIELD_PREP(MT_RF_CSR_CFG_REG_BANK, bank) |
+ FIELD_PREP(MT_RF_CSR_CFG_REG_ID, offset) |
+ MT_RF_CSR_CFG_KICK);
+
+ if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100))
+ goto out;
+
+ val = mt7601u_rr(dev, MT_RF_CSR_CFG);
+ if (FIELD_GET(MT_RF_CSR_CFG_REG_ID, val) == offset &&
+ FIELD_GET(MT_RF_CSR_CFG_REG_BANK, val) == bank) {
+ ret = FIELD_GET(MT_RF_CSR_CFG_DATA, val);
+ trace_rf_read(dev, bank, offset, ret);
+ }
+out:
+ mutex_unlock(&dev->reg_atomic_mutex);
+
+ if (ret < 0)
+ dev_err(dev->dev, "Error: RF read %02hhx:%02hhx failed:%d!!\n",
+ bank, offset, ret);
+
+ return ret;
+}
+
+static int
+mt7601u_rf_rmw(struct mt7601u_dev *dev, u8 bank, u8 offset, u8 mask, u8 val)
+{
+ int ret;
+
+ ret = mt7601u_rf_rr(dev, bank, offset);
+ if (ret < 0)
+ return ret;
+ val |= ret & ~mask;
+ ret = mt7601u_rf_wr(dev, bank, offset, val);
+ if (ret)
+ return ret;
+
+ return val;
+}
+
+static int
+mt7601u_rf_set(struct mt7601u_dev *dev, u8 bank, u8 offset, u8 val)
+{
+ return mt7601u_rf_rmw(dev, bank, offset, 0, val);
+}
+
+static int
+mt7601u_rf_clear(struct mt7601u_dev *dev, u8 bank, u8 offset, u8 mask)
+{
+ return mt7601u_rf_rmw(dev, bank, offset, mask, 0);
+}
+
+static void mt7601u_bbp_wr(struct mt7601u_dev *dev, u8 offset, u8 val)
+{
+ if (WARN_ON(!test_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state)) ||
+ test_bit(MT7601U_STATE_REMOVED, &dev->state))
+ return;
+
+ mutex_lock(&dev->reg_atomic_mutex);
+
+ if (!mt76_poll(dev, MT_BBP_CSR_CFG, MT_BBP_CSR_CFG_BUSY, 0, 1000)) {
+ dev_err(dev->dev, "Error: BBP write %02hhx failed!!\n", offset);
+ goto out;
+ }
+
+ mt7601u_wr(dev, MT_BBP_CSR_CFG,
+ FIELD_PREP(MT_BBP_CSR_CFG_VAL, val) |
+ FIELD_PREP(MT_BBP_CSR_CFG_REG_NUM, offset) |
+ MT_BBP_CSR_CFG_RW_MODE | MT_BBP_CSR_CFG_BUSY);
+ trace_bbp_write(dev, offset, val);
+out:
+ mutex_unlock(&dev->reg_atomic_mutex);
+}
+
+static int mt7601u_bbp_rr(struct mt7601u_dev *dev, u8 offset)
+{
+ u32 val;
+ int ret = -ETIMEDOUT;
+
+ if (WARN_ON(!test_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state)))
+ return -EINVAL;
+ if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+ return 0xff;
+
+ mutex_lock(&dev->reg_atomic_mutex);
+
+ if (!mt76_poll(dev, MT_BBP_CSR_CFG, MT_BBP_CSR_CFG_BUSY, 0, 1000))
+ goto out;
+
+ mt7601u_wr(dev, MT_BBP_CSR_CFG,
+ FIELD_PREP(MT_BBP_CSR_CFG_REG_NUM, offset) |
+ MT_BBP_CSR_CFG_RW_MODE | MT_BBP_CSR_CFG_BUSY |
+ MT_BBP_CSR_CFG_READ);
+
+ if (!mt76_poll(dev, MT_BBP_CSR_CFG, MT_BBP_CSR_CFG_BUSY, 0, 1000))
+ goto out;
+
+ val = mt7601u_rr(dev, MT_BBP_CSR_CFG);
+ if (FIELD_GET(MT_BBP_CSR_CFG_REG_NUM, val) == offset) {
+ ret = FIELD_GET(MT_BBP_CSR_CFG_VAL, val);
+ trace_bbp_read(dev, offset, ret);
+ }
+out:
+ mutex_unlock(&dev->reg_atomic_mutex);
+
+ if (ret < 0)
+ dev_err(dev->dev, "Error: BBP read %02hhx failed:%d!!\n",
+ offset, ret);
+
+ return ret;
+}
+
+static int mt7601u_bbp_rmw(struct mt7601u_dev *dev, u8 offset, u8 mask, u8 val)
+{
+ int ret;
+
+ ret = mt7601u_bbp_rr(dev, offset);
+ if (ret < 0)
+ return ret;
+ val |= ret & ~mask;
+ mt7601u_bbp_wr(dev, offset, val);
+
+ return val;
+}
+
+static u8 mt7601u_bbp_rmc(struct mt7601u_dev *dev, u8 offset, u8 mask, u8 val)
+{
+ int ret;
+
+ ret = mt7601u_bbp_rr(dev, offset);
+ if (ret < 0)
+ return ret;
+ val |= ret & ~mask;
+ if (ret != val)
+ mt7601u_bbp_wr(dev, offset, val);
+
+ return val;
+}
+
+int mt7601u_wait_bbp_ready(struct mt7601u_dev *dev)
+{
+ int i = 20;
+ u8 val;
+
+ do {
+ val = mt7601u_bbp_rr(dev, MT_BBP_REG_VERSION);
+ if (val && val != 0xff)
+ break;
+ } while (--i);
+
+ if (!i) {
+ dev_err(dev->dev, "Error: BBP is not ready\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+u32 mt7601u_bbp_set_ctrlch(struct mt7601u_dev *dev, bool below)
+{
+ return mt7601u_bbp_rmc(dev, 3, 0x20, below ? 0x20 : 0);
+}
+
+int mt7601u_phy_get_rssi(struct mt7601u_dev *dev,
+ struct mt7601u_rxwi *rxwi, u16 rate)
+{
+ static const s8 lna[2][2][3] = {
+ /* main LNA */ {
+ /* bw20 */ { -2, 15, 33 },
+ /* bw40 */ { 0, 16, 34 }
+ },
+ /* aux LNA */ {
+ /* bw20 */ { -2, 15, 33 },
+ /* bw40 */ { -2, 16, 34 }
+ }
+ };
+ int bw = FIELD_GET(MT_RXWI_RATE_BW, rate);
+ int aux_lna = FIELD_GET(MT_RXWI_ANT_AUX_LNA, rxwi->ant);
+ int lna_id = FIELD_GET(MT_RXWI_GAIN_RSSI_LNA_ID, rxwi->gain);
+ int val;
+
+ if (lna_id) /* LNA id can be 0, 2, 3. */
+ lna_id--;
+
+ val = 8;
+ val -= lna[aux_lna][bw][lna_id];
+ val -= FIELD_GET(MT_RXWI_GAIN_RSSI_VAL, rxwi->gain);
+ val -= dev->ee->lna_gain;
+ val -= dev->ee->rssi_offset[0];
+
+ return val;
+}
+
+static void mt7601u_vco_cal(struct mt7601u_dev *dev)
+{
+ mt7601u_rf_wr(dev, 0, 4, 0x0a);
+ mt7601u_rf_wr(dev, 0, 5, 0x20);
+ mt7601u_rf_set(dev, 0, 4, BIT(7));
+ msleep(2);
+}
+
+static int mt7601u_set_bw_filter(struct mt7601u_dev *dev, bool cal)
+{
+ u32 filter = 0;
+ int ret;
+
+ if (!cal)
+ filter |= 0x10000;
+ if (dev->bw != MT_BW_20)
+ filter |= 0x00100;
+
+ /* TX */
+ ret = mt7601u_mcu_calibrate(dev, MCU_CAL_BW, filter | 1);
+ if (ret)
+ return ret;
+ /* RX */
+ return mt7601u_mcu_calibrate(dev, MCU_CAL_BW, filter);
+}
+
+static int mt7601u_load_bbp_temp_table_bw(struct mt7601u_dev *dev)
+{
+ const struct reg_table *t;
+
+ if (WARN_ON(dev->temp_mode > MT_TEMP_MODE_LOW))
+ return -EINVAL;
+
+ t = &bbp_mode_table[dev->temp_mode][dev->bw];
+
+ return mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, t->regs, t->n);
+}
+
+static int mt7601u_bbp_temp(struct mt7601u_dev *dev, int mode, const char *name)
+{
+ const struct reg_table *t;
+ int ret;
+
+ if (dev->temp_mode == mode)
+ return 0;
+
+ dev->temp_mode = mode;
+ trace_temp_mode(dev, mode);
+
+ t = bbp_mode_table[dev->temp_mode];
+ ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP,
+ t[2].regs, t[2].n);
+ if (ret)
+ return ret;
+
+ return mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP,
+ t[dev->bw].regs, t[dev->bw].n);
+}
+
+static void mt7601u_apply_ch14_fixup(struct mt7601u_dev *dev, int hw_chan)
+{
+ struct mt7601u_rate_power *t = &dev->ee->power_rate_table;
+
+ if (hw_chan != 14 || dev->bw != MT_BW_20) {
+ mt7601u_bbp_rmw(dev, 4, 0x20, 0);
+ mt7601u_bbp_wr(dev, 178, 0xff);
+
+ t->cck[0].bw20 = dev->ee->real_cck_bw20[0];
+ t->cck[1].bw20 = dev->ee->real_cck_bw20[1];
+ } else { /* Apply CH14 OBW fixup */
+ mt7601u_bbp_wr(dev, 4, 0x60);
+ mt7601u_bbp_wr(dev, 178, 0);
+
+ /* Note: vendor code is buggy here for negative values */
+ t->cck[0].bw20 = dev->ee->real_cck_bw20[0] - 2;
+ t->cck[1].bw20 = dev->ee->real_cck_bw20[1] - 2;
+ }
+}
+
+static int __mt7601u_phy_set_channel(struct mt7601u_dev *dev,
+ struct cfg80211_chan_def *chandef)
+{
+#define FREQ_PLAN_REGS 4
+ static const u8 freq_plan[14][FREQ_PLAN_REGS] = {
+ { 0x99, 0x99, 0x09, 0x50 },
+ { 0x46, 0x44, 0x0a, 0x50 },
+ { 0xec, 0xee, 0x0a, 0x50 },
+ { 0x99, 0x99, 0x0b, 0x50 },
+ { 0x46, 0x44, 0x08, 0x51 },
+ { 0xec, 0xee, 0x08, 0x51 },
+ { 0x99, 0x99, 0x09, 0x51 },
+ { 0x46, 0x44, 0x0a, 0x51 },
+ { 0xec, 0xee, 0x0a, 0x51 },
+ { 0x99, 0x99, 0x0b, 0x51 },
+ { 0x46, 0x44, 0x08, 0x52 },
+ { 0xec, 0xee, 0x08, 0x52 },
+ { 0x99, 0x99, 0x09, 0x52 },
+ { 0x33, 0x33, 0x0b, 0x52 },
+ };
+ struct mt76_reg_pair channel_freq_plan[FREQ_PLAN_REGS] = {
+ { 17, 0 }, { 18, 0 }, { 19, 0 }, { 20, 0 },
+ };
+ struct mt76_reg_pair bbp_settings[3] = {
+ { 62, 0x37 - dev->ee->lna_gain },
+ { 63, 0x37 - dev->ee->lna_gain },
+ { 64, 0x37 - dev->ee->lna_gain },
+ };
+
+ struct ieee80211_channel *chan = chandef->chan;
+ enum nl80211_channel_type chan_type =
+ cfg80211_get_chandef_type(chandef);
+ struct mt7601u_rate_power *t = &dev->ee->power_rate_table;
+ int chan_idx;
+ bool chan_ext_below;
+ u8 bw;
+ int i, ret;
+
+ bw = MT_BW_20;
+ chan_ext_below = (chan_type == NL80211_CHAN_HT40MINUS);
+ chan_idx = chan->hw_value - 1;
+
+ if (chandef->width == NL80211_CHAN_WIDTH_40) {
+ bw = MT_BW_40;
+
+ if (chan_idx > 1 && chan_type == NL80211_CHAN_HT40MINUS)
+ chan_idx -= 2;
+ else if (chan_idx < 12 && chan_type == NL80211_CHAN_HT40PLUS)
+ chan_idx += 2;
+ else
+ dev_err(dev->dev, "Error: invalid 40MHz channel!!\n");
+ }
+
+ if (bw != dev->bw || chan_ext_below != dev->chan_ext_below) {
+ dev_dbg(dev->dev, "Info: switching HT mode bw:%d below:%d\n",
+ bw, chan_ext_below);
+
+ mt7601u_bbp_set_bw(dev, bw);
+
+ mt7601u_bbp_set_ctrlch(dev, chan_ext_below);
+ mt7601u_mac_set_ctrlch(dev, chan_ext_below);
+ dev->chan_ext_below = chan_ext_below;
+ }
+
+ for (i = 0; i < FREQ_PLAN_REGS; i++)
+ channel_freq_plan[i].value = freq_plan[chan_idx][i];
+
+ ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_RF,
+ channel_freq_plan, FREQ_PLAN_REGS);
+ if (ret)
+ return ret;
+
+ mt7601u_rmw(dev, MT_TX_ALC_CFG_0, 0x3f3f,
+ dev->ee->chan_pwr[chan_idx] & 0x3f);
+
+ ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP,
+ bbp_settings, ARRAY_SIZE(bbp_settings));
+ if (ret)
+ return ret;
+
+ mt7601u_vco_cal(dev);
+ mt7601u_bbp_set_bw(dev, bw);
+ ret = mt7601u_set_bw_filter(dev, false);
+ if (ret)
+ return ret;
+
+ mt7601u_apply_ch14_fixup(dev, chan->hw_value);
+ mt7601u_wr(dev, MT_TX_PWR_CFG_0, int_to_s6(t->ofdm[1].bw20) << 24 |
+ int_to_s6(t->ofdm[0].bw20) << 16 |
+ int_to_s6(t->cck[1].bw20) << 8 |
+ int_to_s6(t->cck[0].bw20));
+
+ if (test_bit(MT7601U_STATE_SCANNING, &dev->state))
+ mt7601u_agc_reset(dev);
+
+ dev->chandef = *chandef;
+
+ return 0;
+}
+
+int mt7601u_phy_set_channel(struct mt7601u_dev *dev,
+ struct cfg80211_chan_def *chandef)
+{
+ int ret;
+
+ cancel_delayed_work_sync(&dev->cal_work);
+ cancel_delayed_work_sync(&dev->freq_cal.work);
+
+ mutex_lock(&dev->hw_atomic_mutex);
+ ret = __mt7601u_phy_set_channel(dev, chandef);
+ mutex_unlock(&dev->hw_atomic_mutex);
+ if (ret)
+ return ret;
+
+ if (test_bit(MT7601U_STATE_SCANNING, &dev->state))
+ return 0;
+
+ ieee80211_queue_delayed_work(dev->hw, &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+ if (dev->freq_cal.enabled)
+ ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work,
+ MT_FREQ_CAL_INIT_DELAY);
+ return 0;
+}
+
+#define BBP_R47_FLAG GENMASK(2, 0)
+#define BBP_R47_F_TSSI 0
+#define BBP_R47_F_PKT_T 1
+#define BBP_R47_F_TX_RATE 2
+#define BBP_R47_F_TEMP 4
+/**
+ * mt7601u_bbp_r47_get - read value through BBP R47/R49 pair
+ * @dev: pointer to adapter structure
+ * @reg: value of BBP R47 before the operation
+ * @flag: one of the BBP_R47_F_* flags
+ *
+ * Convenience helper for reading values through BBP R47/R49 pair.
+ * Takes old value of BBP R47 as @reg, because callers usually have it
+ * cached already.
+ *
+ * Return: value of BBP R49.
+ */
+static u8 mt7601u_bbp_r47_get(struct mt7601u_dev *dev, u8 reg, u8 flag)
+{
+ flag |= reg & ~BBP_R47_FLAG;
+ mt7601u_bbp_wr(dev, 47, flag);
+ usleep_range(500, 700);
+ return mt7601u_bbp_rr(dev, 49);
+}
+
+static s8 mt7601u_read_bootup_temp(struct mt7601u_dev *dev)
+{
+ u8 bbp_val, temp;
+ u32 rf_bp, rf_set;
+ int i;
+
+ rf_set = mt7601u_rr(dev, MT_RF_SETTING_0);
+ rf_bp = mt7601u_rr(dev, MT_RF_BYPASS_0);
+
+ mt7601u_wr(dev, MT_RF_BYPASS_0, 0);
+ mt7601u_wr(dev, MT_RF_SETTING_0, 0x00000010);
+ mt7601u_wr(dev, MT_RF_BYPASS_0, 0x00000010);
+
+ bbp_val = mt7601u_bbp_rmw(dev, 47, 0, 0x10);
+
+ mt7601u_bbp_wr(dev, 22, 0x40);
+
+ for (i = 100; i && (bbp_val & 0x10); i--)
+ bbp_val = mt7601u_bbp_rr(dev, 47);
+
+ temp = mt7601u_bbp_r47_get(dev, bbp_val, BBP_R47_F_TEMP);
+
+ mt7601u_bbp_wr(dev, 22, 0);
+
+ bbp_val = mt7601u_bbp_rr(dev, 21);
+ bbp_val |= 0x02;
+ mt7601u_bbp_wr(dev, 21, bbp_val);
+ bbp_val &= ~0x02;
+ mt7601u_bbp_wr(dev, 21, bbp_val);
+
+ mt7601u_wr(dev, MT_RF_BYPASS_0, 0);
+ mt7601u_wr(dev, MT_RF_SETTING_0, rf_set);
+ mt7601u_wr(dev, MT_RF_BYPASS_0, rf_bp);
+
+ trace_read_temp(dev, temp);
+ return temp;
+}
+
+static s8 mt7601u_read_temp(struct mt7601u_dev *dev)
+{
+ int i;
+ u8 val;
+ s8 temp;
+
+ val = mt7601u_bbp_rmw(dev, 47, 0x7f, 0x10);
+
+	/* Note: this rarely succeeds and temp can change even if it fails. */
+ for (i = 100; i && (val & 0x10); i--)
+ val = mt7601u_bbp_rr(dev, 47);
+
+ temp = mt7601u_bbp_r47_get(dev, val, BBP_R47_F_TEMP);
+
+ trace_read_temp(dev, temp);
+ return temp;
+}
+
+static void mt7601u_rxdc_cal(struct mt7601u_dev *dev)
+{
+ static const struct mt76_reg_pair intro[] = {
+ { 158, 0x8d }, { 159, 0xfc },
+ { 158, 0x8c }, { 159, 0x4c },
+ }, outro[] = {
+ { 158, 0x8d }, { 159, 0xe0 },
+ };
+ u32 mac_ctrl;
+ int i, ret;
+
+ mac_ctrl = mt7601u_rr(dev, MT_MAC_SYS_CTRL);
+ mt7601u_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_RX);
+
+ ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP,
+ intro, ARRAY_SIZE(intro));
+ if (ret)
+ dev_err(dev->dev, "%s intro failed:%d\n", __func__, ret);
+
+ for (i = 20; i; i--) {
+ usleep_range(300, 500);
+
+ mt7601u_bbp_wr(dev, 158, 0x8c);
+ if (mt7601u_bbp_rr(dev, 159) == 0x0c)
+ break;
+ }
+ if (!i)
+ dev_err(dev->dev, "%s timed out\n", __func__);
+
+ mt7601u_wr(dev, MT_MAC_SYS_CTRL, 0);
+
+ ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP,
+ outro, ARRAY_SIZE(outro));
+ if (ret)
+ dev_err(dev->dev, "%s outro failed:%d\n", __func__, ret);
+
+ mt7601u_wr(dev, MT_MAC_SYS_CTRL, mac_ctrl);
+}
+
+void mt7601u_phy_recalibrate_after_assoc(struct mt7601u_dev *dev)
+{
+ mt7601u_mcu_calibrate(dev, MCU_CAL_DPD, dev->curr_temp);
+
+ mt7601u_rxdc_cal(dev);
+}
+
+/* Note: function copied from vendor driver */
+static s16 lin2dBd(u16 linear)
+{
+ short exp = 0;
+ unsigned int mantisa;
+ int app, dBd;
+
+ if (WARN_ON(!linear))
+ return -10000;
+
+ mantisa = linear;
+
+ exp = fls(mantisa) - 16;
+ if (exp > 0)
+ mantisa >>= exp;
+ else
+ mantisa <<= abs(exp);
+
+ if (mantisa <= 0xb800)
+ app = (mantisa + (mantisa >> 3) + (mantisa >> 4) - 0x9600);
+ else
+ app = (mantisa - (mantisa >> 3) - (mantisa >> 6) - 0x5a00);
+ if (app < 0)
+ app = 0;
+
+ dBd = ((15 + exp) << 15) + app;
+ dBd = (dBd << 2) + (dBd << 1) + (dBd >> 6) + (dBd >> 7);
+ dBd = (dBd >> 10);
+
+ return dBd;
+}
+
+static void
+mt7601u_set_initial_tssi(struct mt7601u_dev *dev, s16 tssi_db, s16 tssi_hvga_db)
+{
+ struct tssi_data *d = &dev->ee->tssi_data;
+ int init_offset;
+
+ init_offset = -((tssi_db * d->slope + d->offset[1]) / 4096) + 10;
+
+ mt76_rmw(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
+ int_to_s6(init_offset) & MT_TX_ALC_CFG_1_TEMP_COMP);
+}
+
+static void mt7601u_tssi_dc_gain_cal(struct mt7601u_dev *dev)
+{
+ u8 rf_vga, rf_mixer, bbp_r47;
+ int i, j;
+ s8 res[4];
+ s16 tssi_init_db, tssi_init_hvga_db;
+
+ mt7601u_wr(dev, MT_RF_SETTING_0, 0x00000030);
+ mt7601u_wr(dev, MT_RF_BYPASS_0, 0x000c0030);
+ mt7601u_wr(dev, MT_MAC_SYS_CTRL, 0);
+
+ mt7601u_bbp_wr(dev, 58, 0);
+ mt7601u_bbp_wr(dev, 241, 0x2);
+ mt7601u_bbp_wr(dev, 23, 0x8);
+ bbp_r47 = mt7601u_bbp_rr(dev, 47);
+
+ /* Set VGA gain */
+ rf_vga = mt7601u_rf_rr(dev, 5, 3);
+ mt7601u_rf_wr(dev, 5, 3, 8);
+
+ /* Mixer disable */
+ rf_mixer = mt7601u_rf_rr(dev, 4, 39);
+ mt7601u_rf_wr(dev, 4, 39, 0);
+
+ for (i = 0; i < 4; i++) {
+ mt7601u_rf_wr(dev, 4, 39, (i & 1) ? rf_mixer : 0);
+
+ mt7601u_bbp_wr(dev, 23, (i < 2) ? 0x08 : 0x02);
+ mt7601u_rf_wr(dev, 5, 3, (i < 2) ? 0x08 : 0x11);
+
+ /* BBP TSSI initial and soft reset */
+ mt7601u_bbp_wr(dev, 22, 0);
+ mt7601u_bbp_wr(dev, 244, 0);
+
+ mt7601u_bbp_wr(dev, 21, 1);
+ udelay(1);
+ mt7601u_bbp_wr(dev, 21, 0);
+
+ /* TSSI measurement */
+ mt7601u_bbp_wr(dev, 47, 0x50);
+ mt7601u_bbp_wr(dev, (i & 1) ? 244 : 22, (i & 1) ? 0x31 : 0x40);
+
+ for (j = 20; j; j--)
+ if (!(mt7601u_bbp_rr(dev, 47) & 0x10))
+ break;
+ if (!j)
+ dev_err(dev->dev, "%s timed out\n", __func__);
+
+ /* TSSI read */
+ mt7601u_bbp_wr(dev, 47, 0x40);
+ res[i] = mt7601u_bbp_rr(dev, 49);
+ }
+
+ tssi_init_db = lin2dBd((short)res[1] - res[0]);
+ tssi_init_hvga_db = lin2dBd(((short)res[3] - res[2]) * 4);
+ dev->tssi_init = res[0];
+ dev->tssi_init_hvga = res[2];
+ dev->tssi_init_hvga_offset_db = tssi_init_hvga_db - tssi_init_db;
+
+ dev_dbg(dev->dev,
+ "TSSI_init:%hhx db:%hx hvga:%hhx hvga_db:%hx off_db:%hx\n",
+ dev->tssi_init, tssi_init_db, dev->tssi_init_hvga,
+ tssi_init_hvga_db, dev->tssi_init_hvga_offset_db);
+
+ mt7601u_bbp_wr(dev, 22, 0);
+ mt7601u_bbp_wr(dev, 244, 0);
+
+ mt7601u_bbp_wr(dev, 21, 1);
+ udelay(1);
+ mt7601u_bbp_wr(dev, 21, 0);
+
+ mt7601u_wr(dev, MT_RF_BYPASS_0, 0);
+ mt7601u_wr(dev, MT_RF_SETTING_0, 0);
+
+ mt7601u_rf_wr(dev, 5, 3, rf_vga);
+ mt7601u_rf_wr(dev, 4, 39, rf_mixer);
+ mt7601u_bbp_wr(dev, 47, bbp_r47);
+
+ mt7601u_set_initial_tssi(dev, tssi_init_db, tssi_init_hvga_db);
+}
+
+static int mt7601u_temp_comp(struct mt7601u_dev *dev, bool on)
+{
+ int ret, temp, hi_temp = 400, lo_temp = -200;
+
+ temp = (dev->raw_temp - dev->ee->ref_temp) * MT_EE_TEMPERATURE_SLOPE;
+ dev->curr_temp = temp;
+
+ /* DPD Calibration */
+ if (temp - dev->dpd_temp > 450 || temp - dev->dpd_temp < -450) {
+ dev->dpd_temp = temp;
+
+ ret = mt7601u_mcu_calibrate(dev, MCU_CAL_DPD, dev->dpd_temp);
+ if (ret)
+ return ret;
+
+ mt7601u_vco_cal(dev);
+
+ dev_dbg(dev->dev, "Recalibrate DPD\n");
+ }
+
+ /* PLL Lock Protect */
+ if (temp < -50 && !dev->pll_lock_protect) { /* < 20C */
+ dev->pll_lock_protect = true;
+
+ mt7601u_rf_wr(dev, 4, 4, 6);
+ mt7601u_rf_clear(dev, 4, 10, 0x30);
+
+ dev_dbg(dev->dev, "PLL lock protect on - too cold\n");
+ } else if (temp > 50 && dev->pll_lock_protect) { /* > 30C */
+ dev->pll_lock_protect = false;
+
+ mt7601u_rf_wr(dev, 4, 4, 0);
+ mt7601u_rf_rmw(dev, 4, 10, 0x30, 0x10);
+
+ dev_dbg(dev->dev, "PLL lock protect off\n");
+ }
+
+ if (on) {
+ hi_temp -= 50;
+ lo_temp -= 50;
+ }
+
+ /* BBP CR for H, L, N temperature */
+ if (temp > hi_temp)
+ return mt7601u_bbp_temp(dev, MT_TEMP_MODE_HIGH, "high");
+ else if (temp > lo_temp)
+ return mt7601u_bbp_temp(dev, MT_TEMP_MODE_NORMAL, "normal");
+ else
+ return mt7601u_bbp_temp(dev, MT_TEMP_MODE_LOW, "low");
+}
+
+/* Note: this is used only with TSSI, so we can just use trgt_pwr from eeprom. */
+static int mt7601u_current_tx_power(struct mt7601u_dev *dev)
+{
+ return dev->ee->chan_pwr[dev->chandef.chan->hw_value - 1];
+}
+
+static bool mt7601u_use_hvga(struct mt7601u_dev *dev)
+{
+ return !(mt7601u_current_tx_power(dev) > 20);
+}
+
+static s16
+mt7601u_phy_rf_pa_mode_val(struct mt7601u_dev *dev, int phy_mode, int tx_rate)
+{
+ static const s16 decode_tb[] = { 0, 8847, -5734, -5734 };
+ u32 reg;
+
+ switch (phy_mode) {
+ case MT_PHY_TYPE_OFDM:
+ tx_rate += 4;
+ fallthrough;
+ case MT_PHY_TYPE_CCK:
+ reg = dev->rf_pa_mode[0];
+ break;
+ default:
+ reg = dev->rf_pa_mode[1];
+ break;
+ }
+
+ return decode_tb[(reg >> (tx_rate * 2)) & 0x3];
+}
+
+static struct mt7601u_tssi_params
+mt7601u_tssi_params_get(struct mt7601u_dev *dev)
+{
+ static const u8 ofdm_pkt2rate[8] = { 6, 4, 2, 0, 7, 5, 3, 1 };
+ static const int static_power[4] = { 0, -49152, -98304, 49152 };
+ struct mt7601u_tssi_params p;
+ u8 bbp_r47, pkt_type, tx_rate;
+ struct power_per_rate *rate_table;
+
+ bbp_r47 = mt7601u_bbp_rr(dev, 47);
+
+ p.tssi0 = mt7601u_bbp_r47_get(dev, bbp_r47, BBP_R47_F_TSSI);
+ dev->raw_temp = mt7601u_bbp_r47_get(dev, bbp_r47, BBP_R47_F_TEMP);
+ pkt_type = mt7601u_bbp_r47_get(dev, bbp_r47, BBP_R47_F_PKT_T);
+
+ p.trgt_power = mt7601u_current_tx_power(dev);
+
+ switch (pkt_type & 0x03) {
+ case MT_PHY_TYPE_CCK:
+ tx_rate = (pkt_type >> 4) & 0x03;
+ rate_table = dev->ee->power_rate_table.cck;
+ break;
+
+ case MT_PHY_TYPE_OFDM:
+ tx_rate = ofdm_pkt2rate[(pkt_type >> 4) & 0x07];
+ rate_table = dev->ee->power_rate_table.ofdm;
+ break;
+
+ default:
+ tx_rate = mt7601u_bbp_r47_get(dev, bbp_r47, BBP_R47_F_TX_RATE);
+ tx_rate &= 0x7f;
+ rate_table = dev->ee->power_rate_table.ht;
+ break;
+ }
+
+ if (dev->bw == MT_BW_20)
+ p.trgt_power += rate_table[tx_rate / 2].bw20;
+ else
+ p.trgt_power += rate_table[tx_rate / 2].bw40;
+
+ p.trgt_power <<= 12;
+
+ dev_dbg(dev->dev, "tx_rate:%02hhx pwr:%08x\n", tx_rate, p.trgt_power);
+
+ p.trgt_power += mt7601u_phy_rf_pa_mode_val(dev, pkt_type & 0x03,
+ tx_rate);
+
+ /* Channel 14, cck, bw20 */
+ if ((pkt_type & 0x03) == MT_PHY_TYPE_CCK) {
+ if (mt7601u_bbp_rr(dev, 4) & 0x20)
+ p.trgt_power += mt7601u_bbp_rr(dev, 178) ? 18022 : 9830;
+ else
+ p.trgt_power += mt7601u_bbp_rr(dev, 178) ? 819 : 24576;
+ }
+
+ p.trgt_power += static_power[mt7601u_bbp_rr(dev, 1) & 0x03];
+
+ p.trgt_power += dev->ee->tssi_data.tx0_delta_offset;
+
+ dev_dbg(dev->dev,
+ "tssi:%02hhx t_power:%08x temp:%02hhx pkt_type:%02hhx\n",
+ p.tssi0, p.trgt_power, dev->raw_temp, pkt_type);
+
+ return p;
+}
+
+static bool mt7601u_tssi_read_ready(struct mt7601u_dev *dev)
+{
+ return !(mt7601u_bbp_rr(dev, 47) & 0x10);
+}
+
+static int mt7601u_tssi_cal(struct mt7601u_dev *dev)
+{
+ struct mt7601u_tssi_params params;
+ int curr_pwr, diff_pwr;
+ char tssi_offset;
+ s8 tssi_init;
+ s16 tssi_m_dc, tssi_db;
+ bool hvga;
+ u32 val;
+
+ if (!dev->ee->tssi_enabled)
+ return 0;
+
+ hvga = mt7601u_use_hvga(dev);
+ if (!dev->tssi_read_trig)
+ return mt7601u_mcu_tssi_read_kick(dev, hvga);
+
+ if (!mt7601u_tssi_read_ready(dev))
+ return 0;
+
+ params = mt7601u_tssi_params_get(dev);
+
+ tssi_init = (hvga ? dev->tssi_init_hvga : dev->tssi_init);
+ tssi_m_dc = params.tssi0 - tssi_init;
+ tssi_db = lin2dBd(tssi_m_dc);
+ dev_dbg(dev->dev, "tssi dc:%04hx db:%04hx hvga:%d\n",
+ tssi_m_dc, tssi_db, hvga);
+
+ if (dev->chandef.chan->hw_value < 5)
+ tssi_offset = dev->ee->tssi_data.offset[0];
+ else if (dev->chandef.chan->hw_value < 9)
+ tssi_offset = dev->ee->tssi_data.offset[1];
+ else
+ tssi_offset = dev->ee->tssi_data.offset[2];
+
+ if (hvga)
+ tssi_db -= dev->tssi_init_hvga_offset_db;
+
+ curr_pwr = tssi_db * dev->ee->tssi_data.slope + (tssi_offset << 9);
+ diff_pwr = params.trgt_power - curr_pwr;
+ dev_dbg(dev->dev, "Power curr:%08x diff:%08x\n", curr_pwr, diff_pwr);
+
+ if (params.tssi0 > 126 && diff_pwr > 0) {
+ dev_err(dev->dev, "Error: TSSI upper saturation\n");
+ diff_pwr = 0;
+ }
+ if (params.tssi0 - tssi_init < 1 && diff_pwr < 0) {
+ dev_err(dev->dev, "Error: TSSI lower saturation\n");
+ diff_pwr = 0;
+ }
+
+ if ((dev->prev_pwr_diff ^ diff_pwr) < 0 && abs(diff_pwr) < 4096 &&
+ (abs(diff_pwr) > abs(dev->prev_pwr_diff) ||
+ (diff_pwr > 0 && diff_pwr == -dev->prev_pwr_diff)))
+ diff_pwr = 0;
+ else
+ dev->prev_pwr_diff = diff_pwr;
+
+ diff_pwr += (diff_pwr > 0) ? 2048 : -2048;
+ diff_pwr /= 4096;
+
+ dev_dbg(dev->dev, "final diff: %08x\n", diff_pwr);
+
+ val = mt7601u_rr(dev, MT_TX_ALC_CFG_1);
+ curr_pwr = s6_to_int(FIELD_GET(MT_TX_ALC_CFG_1_TEMP_COMP, val));
+ diff_pwr += curr_pwr;
+ val = (val & ~MT_TX_ALC_CFG_1_TEMP_COMP) | int_to_s6(diff_pwr);
+ mt7601u_wr(dev, MT_TX_ALC_CFG_1, val);
+
+ return mt7601u_mcu_tssi_read_kick(dev, hvga);
+}
+
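+/* Base AGC value written to BBP R66: an EEPROM LNA gain of 8 yields 0x34
+ * and every additional unit of LNA gain adds 2.
+ */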
+static u8 mt7601u_agc_default(struct mt7601u_dev *dev)
+{
+ return (dev->ee->lna_gain - 8) * 2 + 0x34;
+}
+
+static void mt7601u_agc_reset(struct mt7601u_dev *dev)
+{
+ u8 agc = mt7601u_agc_default(dev);
+
+ mt7601u_bbp_wr(dev, 66, agc);
+}
+
+void mt7601u_agc_save(struct mt7601u_dev *dev)
+{
+ dev->agc_save = mt7601u_bbp_rr(dev, 66);
+}
+
+void mt7601u_agc_restore(struct mt7601u_dev *dev)
+{
+ mt7601u_bbp_wr(dev, 66, dev->agc_save);
+}
+
+static void mt7601u_agc_tune(struct mt7601u_dev *dev)
+{
+ u8 val = mt7601u_agc_default(dev);
+ long avg_rssi;
+
+ if (test_bit(MT7601U_STATE_SCANNING, &dev->state))
+ return;
+
+	/* Note: only in STA mode and not dozing; perhaps do this only if
+	 *	 there have been enough RSSI updates since the last run?
+	 *	 RSSI updates only come from beacons and U2M frames, so this
+	 *	 should work...
+	 */
+ spin_lock_bh(&dev->con_mon_lock);
+ avg_rssi = ewma_rssi_read(&dev->avg_rssi);
+ spin_unlock_bh(&dev->con_mon_lock);
+ if (avg_rssi == 0)
+ return;
+
+ avg_rssi = -avg_rssi;
+ if (avg_rssi <= -70)
+ val -= 0x20;
+ else if (avg_rssi <= -60)
+ val -= 0x10;
+
+ if (val != mt7601u_bbp_rr(dev, 66))
+ mt7601u_bbp_wr(dev, 66, val);
+
+	/* TODO: also try resetting if a lot of beacons have been lost
+	 *       (see the RTMPSetAGCInitValue() call in mlme.c).
+	 */
+}
+
+static void mt7601u_phy_calibrate(struct work_struct *work)
+{
+ struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev,
+ cal_work.work);
+
+ mt7601u_agc_tune(dev);
+ mt7601u_tssi_cal(dev);
+ /* If TSSI calibration was run it already updated temperature. */
+ if (!dev->ee->tssi_enabled)
+ dev->raw_temp = mt7601u_read_temp(dev);
+ mt7601u_temp_comp(dev, true); /* TODO: find right value for @on */
+
+ ieee80211_queue_delayed_work(dev->hw, &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+}
+
+static unsigned long
+__mt7601u_phy_freq_cal(struct mt7601u_dev *dev, s8 last_offset, u8 phy_mode)
+{
+ u8 activate_threshold, deactivate_threshold;
+
+ trace_freq_cal_offset(dev, phy_mode, last_offset);
+
+ /* No beacons received - reschedule soon */
+ if (last_offset == MT_FREQ_OFFSET_INVALID)
+ return MT_FREQ_CAL_ADJ_INTERVAL;
+
+ switch (phy_mode) {
+ case MT_PHY_TYPE_CCK:
+ activate_threshold = 19;
+ deactivate_threshold = 5;
+ break;
+ case MT_PHY_TYPE_OFDM:
+ activate_threshold = 102;
+ deactivate_threshold = 32;
+ break;
+ case MT_PHY_TYPE_HT:
+ case MT_PHY_TYPE_HT_GF:
+ activate_threshold = 82;
+ deactivate_threshold = 20;
+ break;
+ default:
+ WARN_ON(1);
+ return MT_FREQ_CAL_CHECK_INTERVAL;
+ }
+
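+	/* Hysteresis: start adjusting once the beacon frequency offset
+	 * exceeds the activate threshold and stop again only after it has
+	 * dropped back to the deactivate threshold.
+	 */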
+ if (abs(last_offset) >= activate_threshold)
+ dev->freq_cal.adjusting = true;
+ else if (abs(last_offset) <= deactivate_threshold)
+ dev->freq_cal.adjusting = false;
+
+ if (!dev->freq_cal.adjusting)
+ return MT_FREQ_CAL_CHECK_INTERVAL;
+
+ if (last_offset > deactivate_threshold) {
+ if (dev->freq_cal.freq > 0)
+ dev->freq_cal.freq--;
+ else
+ dev->freq_cal.adjusting = false;
+ } else if (last_offset < -deactivate_threshold) {
+ if (dev->freq_cal.freq < 0xbf)
+ dev->freq_cal.freq++;
+ else
+ dev->freq_cal.adjusting = false;
+ }
+
+ trace_freq_cal_adjust(dev, dev->freq_cal.freq);
+ mt7601u_rf_wr(dev, 0, 12, dev->freq_cal.freq);
+ mt7601u_vco_cal(dev);
+
+ return dev->freq_cal.adjusting ? MT_FREQ_CAL_ADJ_INTERVAL :
+ MT_FREQ_CAL_CHECK_INTERVAL;
+}
+
+static void mt7601u_phy_freq_cal(struct work_struct *work)
+{
+ struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev,
+ freq_cal.work.work);
+ s8 last_offset;
+ u8 phy_mode;
+ unsigned long delay;
+
+ spin_lock_bh(&dev->con_mon_lock);
+ last_offset = dev->bcn_freq_off;
+ phy_mode = dev->bcn_phy_mode;
+ spin_unlock_bh(&dev->con_mon_lock);
+
+ delay = __mt7601u_phy_freq_cal(dev, last_offset, phy_mode);
+ ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work, delay);
+
+ spin_lock_bh(&dev->con_mon_lock);
+ dev->bcn_freq_off = MT_FREQ_OFFSET_INVALID;
+ spin_unlock_bh(&dev->con_mon_lock);
+}
+
+void mt7601u_phy_con_cal_onoff(struct mt7601u_dev *dev,
+ struct ieee80211_bss_conf *info)
+{
+ if (!info->assoc)
+ cancel_delayed_work_sync(&dev->freq_cal.work);
+
+ /* Start/stop collecting beacon data */
+ spin_lock_bh(&dev->con_mon_lock);
+ ether_addr_copy(dev->ap_bssid, info->bssid);
+ ewma_rssi_init(&dev->avg_rssi);
+ dev->bcn_freq_off = MT_FREQ_OFFSET_INVALID;
+ spin_unlock_bh(&dev->con_mon_lock);
+
+ dev->freq_cal.freq = dev->ee->rf_freq_off;
+ dev->freq_cal.enabled = info->assoc;
+ dev->freq_cal.adjusting = false;
+
+ if (info->assoc)
+ ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work,
+ MT_FREQ_CAL_INIT_DELAY);
+}
+
+static int mt7601u_init_cal(struct mt7601u_dev *dev)
+{
+ u32 mac_ctrl;
+ int ret;
+
+ dev->raw_temp = mt7601u_read_bootup_temp(dev);
+ dev->curr_temp = (dev->raw_temp - dev->ee->ref_temp) *
+ MT_EE_TEMPERATURE_SLOPE;
+ dev->dpd_temp = dev->curr_temp;
+
+ mac_ctrl = mt7601u_rr(dev, MT_MAC_SYS_CTRL);
+
+ ret = mt7601u_mcu_calibrate(dev, MCU_CAL_R, 0);
+ if (ret)
+ return ret;
+
+ ret = mt7601u_rf_rr(dev, 0, 4);
+ if (ret < 0)
+ return ret;
+ ret |= 0x80;
+ ret = mt7601u_rf_wr(dev, 0, 4, ret);
+ if (ret)
+ return ret;
+ msleep(2);
+
+ ret = mt7601u_mcu_calibrate(dev, MCU_CAL_TXDCOC, 0);
+ if (ret)
+ return ret;
+
+ mt7601u_rxdc_cal(dev);
+
+ ret = mt7601u_set_bw_filter(dev, true);
+ if (ret)
+ return ret;
+ ret = mt7601u_mcu_calibrate(dev, MCU_CAL_LOFT, 0);
+ if (ret)
+ return ret;
+ ret = mt7601u_mcu_calibrate(dev, MCU_CAL_TXIQ, 0);
+ if (ret)
+ return ret;
+ ret = mt7601u_mcu_calibrate(dev, MCU_CAL_RXIQ, 0);
+ if (ret)
+ return ret;
+ ret = mt7601u_mcu_calibrate(dev, MCU_CAL_DPD, dev->dpd_temp);
+ if (ret)
+ return ret;
+
+ mt7601u_rxdc_cal(dev);
+
+ mt7601u_tssi_dc_gain_cal(dev);
+
+ mt7601u_wr(dev, MT_MAC_SYS_CTRL, mac_ctrl);
+
+ mt7601u_temp_comp(dev, true);
+
+ return 0;
+}
+
+int mt7601u_bbp_set_bw(struct mt7601u_dev *dev, int bw)
+{
+ u32 val, old;
+
+ if (bw == dev->bw) {
+ /* Vendor driver does the rmc even when no change is needed. */
+ mt7601u_bbp_rmc(dev, 4, 0x18, bw == MT_BW_20 ? 0 : 0x10);
+
+ return 0;
+ }
+ dev->bw = bw;
+
+ /* Stop MAC for the time of bw change */
+ old = mt7601u_rr(dev, MT_MAC_SYS_CTRL);
+ val = old & ~(MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
+ mt7601u_wr(dev, MT_MAC_SYS_CTRL, val);
+ mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_TX | MT_MAC_STATUS_RX,
+ 0, 500000);
+
+ mt7601u_bbp_rmc(dev, 4, 0x18, bw == MT_BW_20 ? 0 : 0x10);
+
+ mt7601u_wr(dev, MT_MAC_SYS_CTRL, old);
+
+ return mt7601u_load_bbp_temp_table_bw(dev);
+}
+
+/**
+ * mt7601u_set_rx_path - set rx path in BBP
+ * @dev: pointer to adapter structure
+ * @path: rx path to set; values are 0-based
+ */
+void mt7601u_set_rx_path(struct mt7601u_dev *dev, u8 path)
+{
+ mt7601u_bbp_rmw(dev, 3, 0x18, path << 3);
+}
+
+/**
+ * mt7601u_set_tx_dac - set which tx DAC to use
+ * @dev: pointer to adapter structure
+ * @dac: DAC index, values are 0-based
+ */
+void mt7601u_set_tx_dac(struct mt7601u_dev *dev, u8 dac)
+{
+ mt7601u_bbp_rmc(dev, 1, 0x18, dac << 3);
+}
+
+int mt7601u_phy_init(struct mt7601u_dev *dev)
+{
+ int ret;
+
+ dev->rf_pa_mode[0] = mt7601u_rr(dev, MT_RF_PA_MODE_CFG0);
+ dev->rf_pa_mode[1] = mt7601u_rr(dev, MT_RF_PA_MODE_CFG1);
+
+ ret = mt7601u_rf_wr(dev, 0, 12, dev->ee->rf_freq_off);
+ if (ret)
+ return ret;
+ ret = mt7601u_write_reg_pairs(dev, 0, rf_central,
+ ARRAY_SIZE(rf_central));
+ if (ret)
+ return ret;
+ ret = mt7601u_write_reg_pairs(dev, 0, rf_channel,
+ ARRAY_SIZE(rf_channel));
+ if (ret)
+ return ret;
+ ret = mt7601u_write_reg_pairs(dev, 0, rf_vga, ARRAY_SIZE(rf_vga));
+ if (ret)
+ return ret;
+
+ ret = mt7601u_init_cal(dev);
+ if (ret)
+ return ret;
+
+ dev->prev_pwr_diff = 100;
+
+ INIT_DELAYED_WORK(&dev->cal_work, mt7601u_phy_calibrate);
+ INIT_DELAYED_WORK(&dev->freq_cal.work, mt7601u_phy_freq_cal);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt7601u/regs.h b/drivers/net/wireless/mediatek/mt7601u/regs.h
new file mode 100644
index 000000000..e75522788
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/regs.h
@@ -0,0 +1,627 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#ifndef __MT76_REGS_H
+#define __MT76_REGS_H
+
+#include <linux/bitops.h>
+
+#define MT_ASIC_VERSION 0x0000
+
+#define MT76XX_REV_E3 0x22
+#define MT76XX_REV_E4 0x33
+
+#define MT_CMB_CTRL 0x0020
+#define MT_CMB_CTRL_XTAL_RDY BIT(22)
+#define MT_CMB_CTRL_PLL_LD BIT(23)
+
+#define MT_EFUSE_CTRL 0x0024
+#define MT_EFUSE_CTRL_AOUT GENMASK(5, 0)
+#define MT_EFUSE_CTRL_MODE GENMASK(7, 6)
+#define MT_EFUSE_CTRL_LDO_OFF_TIME GENMASK(13, 8)
+#define MT_EFUSE_CTRL_LDO_ON_TIME GENMASK(15, 14)
+#define MT_EFUSE_CTRL_AIN GENMASK(25, 16)
+#define MT_EFUSE_CTRL_KICK BIT(30)
+#define MT_EFUSE_CTRL_SEL BIT(31)
+
+#define MT_EFUSE_DATA_BASE 0x0028
+#define MT_EFUSE_DATA(_n) (MT_EFUSE_DATA_BASE + ((_n) << 2))
+
+#define MT_COEXCFG0 0x0040
+#define MT_COEXCFG0_COEX_EN BIT(0)
+
+#define MT_WLAN_FUN_CTRL 0x0080
+#define MT_WLAN_FUN_CTRL_WLAN_EN BIT(0)
+#define MT_WLAN_FUN_CTRL_WLAN_CLK_EN BIT(1)
+#define MT_WLAN_FUN_CTRL_WLAN_RESET_RF BIT(2)
+
+#define MT_WLAN_FUN_CTRL_WLAN_RESET BIT(3) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_CSR_F20M_CKEN BIT(3) /* MT76x2 */
+
+#define MT_WLAN_FUN_CTRL_PCIE_CLK_REQ BIT(4)
+#define MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL BIT(5)
+#define MT_WLAN_FUN_CTRL_INV_ANT_SEL BIT(6)
+#define MT_WLAN_FUN_CTRL_WAKE_HOST BIT(7)
+
+#define MT_WLAN_FUN_CTRL_THERM_RST BIT(8) /* MT76x2 */
+#define MT_WLAN_FUN_CTRL_THERM_CKEN BIT(9) /* MT76x2 */
+
+#define MT_WLAN_FUN_CTRL_GPIO_IN GENMASK(15, 8) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_GPIO_OUT GENMASK(23, 16) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_GPIO_OUT_EN GENMASK(31, 24) /* MT76x0 */
+
+#define MT_XO_CTRL0 0x0100
+#define MT_XO_CTRL1 0x0104
+#define MT_XO_CTRL2 0x0108
+#define MT_XO_CTRL3 0x010c
+#define MT_XO_CTRL4 0x0110
+
+#define MT_XO_CTRL5 0x0114
+#define MT_XO_CTRL5_C2_VAL GENMASK(14, 8)
+
+#define MT_XO_CTRL6 0x0118
+#define MT_XO_CTRL6_C2_CTRL GENMASK(14, 8)
+
+#define MT_XO_CTRL7 0x011c
+
+#define MT_WLAN_MTC_CTRL 0x10148
+#define MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP BIT(0)
+#define MT_WLAN_MTC_CTRL_PWR_ACK BIT(12)
+#define MT_WLAN_MTC_CTRL_PWR_ACK_S BIT(13)
+#define MT_WLAN_MTC_CTRL_BBP_MEM_PD GENMASK(19, 16)
+#define MT_WLAN_MTC_CTRL_PBF_MEM_PD BIT(20)
+#define MT_WLAN_MTC_CTRL_FCE_MEM_PD BIT(21)
+#define MT_WLAN_MTC_CTRL_TSO_MEM_PD BIT(22)
+#define MT_WLAN_MTC_CTRL_BBP_MEM_RB BIT(24)
+#define MT_WLAN_MTC_CTRL_PBF_MEM_RB BIT(25)
+#define MT_WLAN_MTC_CTRL_FCE_MEM_RB BIT(26)
+#define MT_WLAN_MTC_CTRL_TSO_MEM_RB BIT(27)
+#define MT_WLAN_MTC_CTRL_STATE_UP BIT(28)
+
+#define MT_INT_SOURCE_CSR 0x0200
+#define MT_INT_MASK_CSR 0x0204
+
+#define MT_INT_RX_DONE(_n) BIT(_n)
+#define MT_INT_RX_DONE_ALL GENMASK(1, 0)
+#define MT_INT_TX_DONE_ALL GENMASK(13, 4)
+#define MT_INT_TX_DONE(_n) BIT(_n + 4)
+#define MT_INT_RX_COHERENT BIT(16)
+#define MT_INT_TX_COHERENT BIT(17)
+#define MT_INT_ANY_COHERENT BIT(18)
+#define MT_INT_MCU_CMD BIT(19)
+#define MT_INT_TBTT BIT(20)
+#define MT_INT_PRE_TBTT BIT(21)
+#define MT_INT_TX_STAT BIT(22)
+#define MT_INT_AUTO_WAKEUP BIT(23)
+#define MT_INT_GPTIMER BIT(24)
+#define MT_INT_RXDELAYINT BIT(26)
+#define MT_INT_TXDELAYINT BIT(27)
+
+#define MT_WPDMA_GLO_CFG 0x0208
+#define MT_WPDMA_GLO_CFG_TX_DMA_EN BIT(0)
+#define MT_WPDMA_GLO_CFG_TX_DMA_BUSY BIT(1)
+#define MT_WPDMA_GLO_CFG_RX_DMA_EN BIT(2)
+#define MT_WPDMA_GLO_CFG_RX_DMA_BUSY BIT(3)
+#define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE GENMASK(5, 4)
+#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE BIT(6)
+#define MT_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7)
+#define MT_WPDMA_GLO_CFG_HDR_SEG_LEN GENMASK(15, 8)
+#define MT_WPDMA_GLO_CFG_CLK_GATE_DIS BIT(30)
+#define MT_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31)
+
+#define MT_WPDMA_RST_IDX 0x020c
+
+#define MT_WPDMA_DELAY_INT_CFG 0x0210
+
+#define MT_WMM_AIFSN 0x0214
+#define MT_WMM_AIFSN_MASK GENMASK(3, 0)
+#define MT_WMM_AIFSN_SHIFT(_n) ((_n) * 4)
+
+#define MT_WMM_CWMIN 0x0218
+#define MT_WMM_CWMIN_MASK GENMASK(3, 0)
+#define MT_WMM_CWMIN_SHIFT(_n) ((_n) * 4)
+
+#define MT_WMM_CWMAX 0x021c
+#define MT_WMM_CWMAX_MASK GENMASK(3, 0)
+#define MT_WMM_CWMAX_SHIFT(_n) ((_n) * 4)
+
+#define MT_WMM_TXOP_BASE 0x0220
+#define MT_WMM_TXOP(_n) (MT_WMM_TXOP_BASE + (((_n) / 2) << 2))
+#define MT_WMM_TXOP_SHIFT(_n) ((_n & 1) * 16)
+#define MT_WMM_TXOP_MASK GENMASK(15, 0)
+
+#define MT_FCE_DMA_ADDR 0x0230
+#define MT_FCE_DMA_LEN 0x0234
+
+#define MT_USB_DMA_CFG 0x238
+#define MT_USB_DMA_CFG_RX_BULK_AGG_TOUT GENMASK(7, 0)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_LMT GENMASK(15, 8)
+#define MT_USB_DMA_CFG_PHY_CLR BIT(16)
+#define MT_USB_DMA_CFG_TX_CLR BIT(19)
+#define MT_USB_DMA_CFG_TXOP_HALT BIT(20)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_EN BIT(21)
+#define MT_USB_DMA_CFG_RX_BULK_EN BIT(22)
+#define MT_USB_DMA_CFG_TX_BULK_EN BIT(23)
+#define MT_USB_DMA_CFG_UDMA_RX_WL_DROP BIT(25)
+#define MT_USB_DMA_CFG_EP_OUT_VALID GENMASK(29, 27)
+#define MT_USB_DMA_CFG_RX_BUSY BIT(30)
+#define MT_USB_DMA_CFG_TX_BUSY BIT(31)
+
+#define MT_TSO_CTRL 0x0250
+#define MT_HEADER_TRANS_CTRL_REG 0x0260
+
+#define MT_US_CYC_CFG 0x02a4
+#define MT_US_CYC_CNT GENMASK(7, 0)
+
+#define MT_TX_RING_BASE 0x0300
+#define MT_RX_RING_BASE 0x03c0
+#define MT_RING_SIZE 0x10
+
+#define MT_TX_HW_QUEUE_MCU 8
+#define MT_TX_HW_QUEUE_MGMT 9
+
+#define MT_PBF_SYS_CTRL 0x0400
+#define MT_PBF_SYS_CTRL_MCU_RESET BIT(0)
+#define MT_PBF_SYS_CTRL_DMA_RESET BIT(1)
+#define MT_PBF_SYS_CTRL_MAC_RESET BIT(2)
+#define MT_PBF_SYS_CTRL_PBF_RESET BIT(3)
+#define MT_PBF_SYS_CTRL_ASY_RESET BIT(4)
+
+#define MT_PBF_CFG 0x0404
+#define MT_PBF_CFG_TX0Q_EN BIT(0)
+#define MT_PBF_CFG_TX1Q_EN BIT(1)
+#define MT_PBF_CFG_TX2Q_EN BIT(2)
+#define MT_PBF_CFG_TX3Q_EN BIT(3)
+#define MT_PBF_CFG_RX0Q_EN BIT(4)
+#define MT_PBF_CFG_RX_DROP_EN BIT(8)
+
+#define MT_PBF_TX_MAX_PCNT 0x0408
+#define MT_PBF_RX_MAX_PCNT 0x040c
+
+#define MT_BCN_OFFSET_BASE 0x041c
+#define MT_BCN_OFFSET(_n) (MT_BCN_OFFSET_BASE + ((_n) << 2))
+
+#define MT_RXQ_STA 0x0430
+#define MT_TXQ_STA 0x0434
+
+#define MT_RF_CSR_CFG 0x0500
+#define MT_RF_CSR_CFG_DATA GENMASK(7, 0)
+#define MT_RF_CSR_CFG_REG_ID GENMASK(13, 8)
+#define MT_RF_CSR_CFG_REG_BANK GENMASK(17, 14)
+#define MT_RF_CSR_CFG_WR BIT(30)
+#define MT_RF_CSR_CFG_KICK BIT(31)
+
+#define MT_RF_BYPASS_0 0x0504
+#define MT_RF_BYPASS_1 0x0508
+#define MT_RF_SETTING_0 0x050c
+
+#define MT_RF_DATA_WRITE 0x0524
+
+#define MT_RF_CTRL 0x0528
+#define MT_RF_CTRL_ADDR GENMASK(11, 0)
+#define MT_RF_CTRL_WRITE BIT(12)
+#define MT_RF_CTRL_BUSY BIT(13)
+#define MT_RF_CTRL_IDX BIT(16)
+
+#define MT_RF_DATA_READ 0x052c
+
+#define MT_FCE_PSE_CTRL 0x0800
+#define MT_FCE_PARAMETERS 0x0804
+#define MT_FCE_CSO 0x0808
+
+#define MT_FCE_L2_STUFF 0x080c
+#define MT_FCE_L2_STUFF_HT_L2_EN BIT(0)
+#define MT_FCE_L2_STUFF_QOS_L2_EN BIT(1)
+#define MT_FCE_L2_STUFF_RX_STUFF_EN BIT(2)
+#define MT_FCE_L2_STUFF_TX_STUFF_EN BIT(3)
+#define MT_FCE_L2_STUFF_WR_MPDU_LEN_EN BIT(4)
+#define MT_FCE_L2_STUFF_MVINV_BSWAP BIT(5)
+#define MT_FCE_L2_STUFF_TS_CMD_QSEL_EN GENMASK(15, 8)
+#define MT_FCE_L2_STUFF_TS_LEN_EN GENMASK(23, 16)
+#define MT_FCE_L2_STUFF_OTHER_PORT GENMASK(25, 24)
+
+#define MT_FCE_WLAN_FLOW_CONTROL1 0x0824
+
+#define MT_TX_CPU_FROM_FCE_BASE_PTR 0x09a0
+#define MT_TX_CPU_FROM_FCE_MAX_COUNT 0x09a4
+#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX 0x09a8
+
+#define MT_FCE_PDMA_GLOBAL_CONF 0x09c4
+
+#define MT_PAUSE_ENABLE_CONTROL1 0x0a38
+
+#define MT_FCE_SKIP_FS 0x0a6c
+
+#define MT_MAC_CSR0 0x1000
+
+#define MT_MAC_SYS_CTRL 0x1004
+#define MT_MAC_SYS_CTRL_RESET_CSR BIT(0)
+#define MT_MAC_SYS_CTRL_RESET_BBP BIT(1)
+#define MT_MAC_SYS_CTRL_ENABLE_TX BIT(2)
+#define MT_MAC_SYS_CTRL_ENABLE_RX BIT(3)
+
+#define MT_MAC_ADDR_DW0 0x1008
+#define MT_MAC_ADDR_DW1 0x100c
+#define MT_MAC_ADDR_DW1_U2ME_MASK GENMASK(23, 16)
+
+#define MT_MAC_BSSID_DW0 0x1010
+#define MT_MAC_BSSID_DW1 0x1014
+#define MT_MAC_BSSID_DW1_ADDR GENMASK(15, 0)
+#define MT_MAC_BSSID_DW1_MBSS_MODE GENMASK(17, 16)
+#define MT_MAC_BSSID_DW1_MBEACON_N GENMASK(20, 18)
+#define MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT BIT(21)
+#define MT_MAC_BSSID_DW1_MBSS_MODE_B2 BIT(22)
+#define MT_MAC_BSSID_DW1_MBEACON_N_B3 BIT(23)
+#define MT_MAC_BSSID_DW1_MBSS_IDX_BYTE GENMASK(26, 24)
+
+#define MT_MAX_LEN_CFG 0x1018
+#define MT_MAX_LEN_CFG_AMPDU GENMASK(13, 12)
+
+#define MT_BBP_CSR_CFG 0x101c
+#define MT_BBP_CSR_CFG_VAL GENMASK(7, 0)
+#define MT_BBP_CSR_CFG_REG_NUM GENMASK(15, 8)
+#define MT_BBP_CSR_CFG_READ BIT(16)
+#define MT_BBP_CSR_CFG_BUSY BIT(17)
+#define MT_BBP_CSR_CFG_PAR_DUR BIT(18)
+#define MT_BBP_CSR_CFG_RW_MODE BIT(19)
+
+#define MT_AMPDU_MAX_LEN_20M1S 0x1030
+#define MT_AMPDU_MAX_LEN_20M2S 0x1034
+#define MT_AMPDU_MAX_LEN_40M1S 0x1038
+#define MT_AMPDU_MAX_LEN_40M2S 0x103c
+#define MT_AMPDU_MAX_LEN 0x1040
+
+#define MT_WCID_DROP_BASE 0x106c
+#define MT_WCID_DROP(_n) (MT_WCID_DROP_BASE + ((_n) >> 5) * 4)
+#define MT_WCID_DROP_MASK(_n) BIT((_n) % 32)
+
+#define MT_BCN_BYPASS_MASK 0x108c
+
+#define MT_MAC_APC_BSSID_BASE 0x1090
+#define MT_MAC_APC_BSSID_L(_n) (MT_MAC_APC_BSSID_BASE + ((_n) * 8))
+#define MT_MAC_APC_BSSID_H(_n) (MT_MAC_APC_BSSID_BASE + ((_n) * 8 + 4))
+#define MT_MAC_APC_BSSID_H_ADDR GENMASK(15, 0)
+#define MT_MAC_APC_BSSID0_H_EN BIT(16)
+
+#define MT_XIFS_TIME_CFG 0x1100
+#define MT_XIFS_TIME_CFG_CCK_SIFS GENMASK(7, 0)
+#define MT_XIFS_TIME_CFG_OFDM_SIFS GENMASK(15, 8)
+#define MT_XIFS_TIME_CFG_OFDM_XIFS GENMASK(19, 16)
+#define MT_XIFS_TIME_CFG_EIFS GENMASK(28, 20)
+#define MT_XIFS_TIME_CFG_BB_RXEND_EN BIT(29)
+
+#define MT_BKOFF_SLOT_CFG 0x1104
+#define MT_BKOFF_SLOT_CFG_SLOTTIME GENMASK(7, 0)
+#define MT_BKOFF_SLOT_CFG_CC_DELAY GENMASK(11, 8)
+
+#define MT_BEACON_TIME_CFG 0x1114
+#define MT_BEACON_TIME_CFG_INTVAL GENMASK(15, 0)
+#define MT_BEACON_TIME_CFG_TIMER_EN BIT(16)
+#define MT_BEACON_TIME_CFG_SYNC_MODE GENMASK(18, 17)
+#define MT_BEACON_TIME_CFG_TBTT_EN BIT(19)
+#define MT_BEACON_TIME_CFG_BEACON_TX BIT(20)
+#define MT_BEACON_TIME_CFG_TSF_COMP GENMASK(31, 24)
+
+#define MT_TBTT_SYNC_CFG 0x1118
+#define MT_TBTT_TIMER_CFG 0x1124
+
+#define MT_INT_TIMER_CFG 0x1128
+#define MT_INT_TIMER_CFG_PRE_TBTT GENMASK(15, 0)
+#define MT_INT_TIMER_CFG_GP_TIMER GENMASK(31, 16)
+
+#define MT_INT_TIMER_EN 0x112c
+#define MT_INT_TIMER_EN_PRE_TBTT_EN BIT(0)
+#define MT_INT_TIMER_EN_GP_TIMER_EN BIT(1)
+
+#define MT_MAC_STATUS 0x1200
+#define MT_MAC_STATUS_TX BIT(0)
+#define MT_MAC_STATUS_RX BIT(1)
+
+#define MT_PWR_PIN_CFG 0x1204
+#define MT_AUX_CLK_CFG 0x120c
+
+#define MT_BB_PA_MODE_CFG0 0x1214
+#define MT_BB_PA_MODE_CFG1 0x1218
+#define MT_RF_PA_MODE_CFG0 0x121c
+#define MT_RF_PA_MODE_CFG1 0x1220
+
+#define MT_RF_PA_MODE_ADJ0 0x1228
+#define MT_RF_PA_MODE_ADJ1 0x122c
+
+#define MT_DACCLK_EN_DLY_CFG 0x1264
+
+#define MT_EDCA_CFG_BASE 0x1300
+#define MT_EDCA_CFG_AC(_n) (MT_EDCA_CFG_BASE + ((_n) << 2))
+#define MT_EDCA_CFG_TXOP GENMASK(7, 0)
+#define MT_EDCA_CFG_AIFSN GENMASK(11, 8)
+#define MT_EDCA_CFG_CWMIN GENMASK(15, 12)
+#define MT_EDCA_CFG_CWMAX GENMASK(19, 16)
+
+#define MT_TX_PWR_CFG_0 0x1314
+#define MT_TX_PWR_CFG_1 0x1318
+#define MT_TX_PWR_CFG_2 0x131c
+#define MT_TX_PWR_CFG_3 0x1320
+#define MT_TX_PWR_CFG_4 0x1324
+
+#define MT_TX_BAND_CFG 0x132c
+#define MT_TX_BAND_CFG_UPPER_40M BIT(0)
+#define MT_TX_BAND_CFG_5G BIT(1)
+#define MT_TX_BAND_CFG_2G BIT(2)
+
+#define MT_HT_FBK_TO_LEGACY 0x1384
+#define MT_TX_MPDU_ADJ_INT 0x1388
+
+#define MT_TX_PWR_CFG_7 0x13d4
+#define MT_TX_PWR_CFG_8 0x13d8
+#define MT_TX_PWR_CFG_9 0x13dc
+
+#define MT_TX_SW_CFG0 0x1330
+#define MT_TX_SW_CFG1 0x1334
+#define MT_TX_SW_CFG2 0x1338
+
+#define MT_TXOP_CTRL_CFG 0x1340
+#define MT_TXOP_TRUN_EN GENMASK(5, 0)
+#define MT_TXOP_EXT_CCA_DLY GENMASK(15, 8)
+#define MT_TXOP_CTRL
+
+#define MT_TX_RTS_CFG 0x1344
+#define MT_TX_RTS_CFG_RETRY_LIMIT GENMASK(7, 0)
+#define MT_TX_RTS_CFG_THRESH GENMASK(23, 8)
+#define MT_TX_RTS_FALLBACK BIT(24)
+
+#define MT_TX_TIMEOUT_CFG 0x1348
+#define MT_TX_RETRY_CFG 0x134c
+#define MT_TX_LINK_CFG 0x1350
+#define MT_HT_FBK_CFG0 0x1354
+#define MT_HT_FBK_CFG1 0x1358
+#define MT_LG_FBK_CFG0 0x135c
+#define MT_LG_FBK_CFG1 0x1360
+
+#define MT_CCK_PROT_CFG 0x1364
+#define MT_OFDM_PROT_CFG 0x1368
+#define MT_MM20_PROT_CFG 0x136c
+#define MT_MM40_PROT_CFG 0x1370
+#define MT_GF20_PROT_CFG 0x1374
+#define MT_GF40_PROT_CFG 0x1378
+
+#define MT_PROT_RATE GENMASK(15, 0)
+#define MT_PROT_CTRL_RTS_CTS BIT(16)
+#define MT_PROT_CTRL_CTS2SELF BIT(17)
+#define MT_PROT_NAV_SHORT BIT(18)
+#define MT_PROT_NAV_LONG BIT(19)
+#define MT_PROT_TXOP_ALLOW_CCK BIT(20)
+#define MT_PROT_TXOP_ALLOW_OFDM BIT(21)
+#define MT_PROT_TXOP_ALLOW_MM20 BIT(22)
+#define MT_PROT_TXOP_ALLOW_MM40 BIT(23)
+#define MT_PROT_TXOP_ALLOW_GF20 BIT(24)
+#define MT_PROT_TXOP_ALLOW_GF40 BIT(25)
+#define MT_PROT_RTS_THR_EN BIT(26)
+#define MT_PROT_RATE_CCK_11 0x0003
+#define MT_PROT_RATE_OFDM_6 0x4000
+#define MT_PROT_RATE_OFDM_24 0x4004
+#define MT_PROT_RATE_DUP_OFDM_24 0x4084
+#define MT_PROT_TXOP_ALLOW_ALL GENMASK(25, 20)
+#define MT_PROT_TXOP_ALLOW_BW20 (MT_PROT_TXOP_ALLOW_ALL & \
+ ~MT_PROT_TXOP_ALLOW_MM40 & \
+ ~MT_PROT_TXOP_ALLOW_GF40)
+
+#define MT_EXP_ACK_TIME 0x1380
+
+#define MT_TX_PWR_CFG_0_EXT 0x1390
+#define MT_TX_PWR_CFG_1_EXT 0x1394
+
+#define MT_TX_FBK_LIMIT 0x1398
+#define MT_TX_FBK_LIMIT_MPDU_FBK GENMASK(7, 0)
+#define MT_TX_FBK_LIMIT_AMPDU_FBK GENMASK(15, 8)
+#define MT_TX_FBK_LIMIT_MPDU_UP_CLEAR BIT(16)
+#define MT_TX_FBK_LIMIT_AMPDU_UP_CLEAR BIT(17)
+#define MT_TX_FBK_LIMIT_RATE_LUT BIT(18)
+
+#define MT_TX0_RF_GAIN_CORR 0x13a0
+#define MT_TX1_RF_GAIN_CORR 0x13a4
+#define MT_TX0_RF_GAIN_ATTEN 0x13a8
+
+#define MT_TX_ALC_CFG_0 0x13b0
+#define MT_TX_ALC_CFG_0_CH_INIT_0 GENMASK(5, 0)
+#define MT_TX_ALC_CFG_0_CH_INIT_1 GENMASK(13, 8)
+#define MT_TX_ALC_CFG_0_LIMIT_0 GENMASK(21, 16)
+#define MT_TX_ALC_CFG_0_LIMIT_1 GENMASK(29, 24)
+
+#define MT_TX_ALC_CFG_1 0x13b4
+#define MT_TX_ALC_CFG_1_TEMP_COMP GENMASK(5, 0)
+
+#define MT_TX_ALC_CFG_2 0x13a8
+#define MT_TX_ALC_CFG_2_TEMP_COMP GENMASK(5, 0)
+
+#define MT_TX0_BB_GAIN_ATTEN 0x13c0
+
+#define MT_TX_ALC_VGA3 0x13c8
+
+#define MT_TX_PROT_CFG6 0x13e0
+#define MT_TX_PROT_CFG7 0x13e4
+#define MT_TX_PROT_CFG8 0x13e8
+
+#define MT_PIFS_TX_CFG 0x13ec
+
+#define MT_RX_FILTR_CFG 0x1400
+
+#define MT_RX_FILTR_CFG_CRC_ERR BIT(0)
+#define MT_RX_FILTR_CFG_PHY_ERR BIT(1)
+#define MT_RX_FILTR_CFG_PROMISC BIT(2)
+#define MT_RX_FILTR_CFG_OTHER_BSS BIT(3)
+#define MT_RX_FILTR_CFG_VER_ERR BIT(4)
+#define MT_RX_FILTR_CFG_MCAST BIT(5)
+#define MT_RX_FILTR_CFG_BCAST BIT(6)
+#define MT_RX_FILTR_CFG_DUP BIT(7)
+#define MT_RX_FILTR_CFG_CFACK BIT(8)
+#define MT_RX_FILTR_CFG_CFEND BIT(9)
+#define MT_RX_FILTR_CFG_ACK BIT(10)
+#define MT_RX_FILTR_CFG_CTS BIT(11)
+#define MT_RX_FILTR_CFG_RTS BIT(12)
+#define MT_RX_FILTR_CFG_PSPOLL BIT(13)
+#define MT_RX_FILTR_CFG_BA BIT(14)
+#define MT_RX_FILTR_CFG_BAR BIT(15)
+#define MT_RX_FILTR_CFG_CTRL_RSV BIT(16)
+
+#define MT_AUTO_RSP_CFG 0x1404
+
+#define MT_AUTO_RSP_PREAMB_SHORT BIT(4)
+
+#define MT_LEGACY_BASIC_RATE 0x1408
+#define MT_HT_BASIC_RATE 0x140c
+
+#define MT_RX_PARSER_CFG 0x1418
+#define MT_RX_PARSER_RX_SET_NAV_ALL BIT(0)
+
+#define MT_EXT_CCA_CFG 0x141c
+#define MT_EXT_CCA_CFG_CCA0 GENMASK(1, 0)
+#define MT_EXT_CCA_CFG_CCA1 GENMASK(3, 2)
+#define MT_EXT_CCA_CFG_CCA2 GENMASK(5, 4)
+#define MT_EXT_CCA_CFG_CCA3 GENMASK(7, 6)
+#define MT_EXT_CCA_CFG_CCA_MASK GENMASK(11, 8)
+#define MT_EXT_CCA_CFG_ED_CCA_MASK GENMASK(15, 12)
+
+#define MT_TX_SW_CFG3 0x1478
+
+#define MT_PN_PAD_MODE 0x150c
+
+#define MT_TXOP_HLDR_ET 0x1608
+
+#define MT_PROT_AUTO_TX_CFG 0x1648
+
+#define MT_RX_STA_CNT0 0x1700
+#define MT_RX_STA_CNT1 0x1704
+#define MT_RX_STA_CNT2 0x1708
+#define MT_TX_STA_CNT0 0x170c
+#define MT_TX_STA_CNT1 0x1710
+#define MT_TX_STA_CNT2 0x1714
+
+/* The vendor driver defines the content of the second word of STAT_FIFO as
+ * follows:
+ *	MT_TX_STAT_FIFO_RATE	GENMASK(26, 16)
+ *	MT_TX_STAT_FIFO_ETXBF	BIT(27)
+ *	MT_TX_STAT_FIFO_SND	BIT(28)
+ *	MT_TX_STAT_FIFO_ITXBF	BIT(29)
+ * However, tests show that bits 16-31 have the same layout as the TXWI
+ * rate_ctl field, with the rate set to the rate at which the frame was
+ * acked.
+ */
+#define MT_TX_STAT_FIFO 0x1718
+#define MT_TX_STAT_FIFO_VALID BIT(0)
+#define MT_TX_STAT_FIFO_PID_TYPE GENMASK(4, 1)
+#define MT_TX_STAT_FIFO_SUCCESS BIT(5)
+#define MT_TX_STAT_FIFO_AGGR BIT(6)
+#define MT_TX_STAT_FIFO_ACKREQ BIT(7)
+#define MT_TX_STAT_FIFO_WCID GENMASK(15, 8)
+#define MT_TX_STAT_FIFO_RATE GENMASK(31, 16)
+
+#define MT_TX_AGG_STAT 0x171c
+
+#define MT_TX_AGG_CNT_BASE0 0x1720
+
+#define MT_MPDU_DENSITY_CNT 0x1740
+
+#define MT_TX_AGG_CNT_BASE1 0x174c
+
+#define MT_TX_AGG_CNT(_id) ((_id) < 8 ? \
+ MT_TX_AGG_CNT_BASE0 + ((_id) << 2) : \
+ MT_TX_AGG_CNT_BASE1 + ((_id - 8) << 2))
+
+#define MT_TX_STAT_FIFO_EXT 0x1798
+#define MT_TX_STAT_FIFO_EXT_RETRY GENMASK(7, 0)
+
+#define MT_BBP_CORE_BASE 0x2000
+#define MT_BBP_IBI_BASE 0x2100
+#define MT_BBP_AGC_BASE 0x2300
+#define MT_BBP_TXC_BASE 0x2400
+#define MT_BBP_RXC_BASE 0x2500
+#define MT_BBP_TXO_BASE 0x2600
+#define MT_BBP_TXBE_BASE 0x2700
+#define MT_BBP_RXFE_BASE 0x2800
+#define MT_BBP_RXO_BASE 0x2900
+#define MT_BBP_DFS_BASE 0x2a00
+#define MT_BBP_TR_BASE 0x2b00
+#define MT_BBP_CAL_BASE 0x2c00
+#define MT_BBP_DSC_BASE 0x2e00
+#define MT_BBP_PFMU_BASE 0x2f00
+
+#define MT_BBP(_type, _n) (MT_BBP_##_type##_BASE + ((_n) << 2))
+
+#define MT_BBP_CORE_R1_BW GENMASK(4, 3)
+
+#define MT_BBP_AGC_R0_CTRL_CHAN GENMASK(9, 8)
+#define MT_BBP_AGC_R0_BW GENMASK(14, 12)
+
+/* AGC, R4/R5 */
+#define MT_BBP_AGC_LNA_GAIN GENMASK(21, 16)
+
+/* AGC, R8/R9 */
+#define MT_BBP_AGC_GAIN GENMASK(14, 8)
+
+#define MT_BBP_AGC20_RSSI0 GENMASK(7, 0)
+#define MT_BBP_AGC20_RSSI1 GENMASK(15, 8)
+
+#define MT_BBP_TXBE_R0_CTRL_CHAN GENMASK(1, 0)
+
+#define MT_WCID_ADDR_BASE 0x1800
+#define MT_WCID_ADDR(_n) (MT_WCID_ADDR_BASE + (_n) * 8)
+
+#define MT_SRAM_BASE 0x4000
+
+#define MT_WCID_KEY_BASE 0x8000
+#define MT_WCID_KEY(_n) (MT_WCID_KEY_BASE + (_n) * 32)
+
+#define MT_WCID_IV_BASE 0xa000
+#define MT_WCID_IV(_n) (MT_WCID_IV_BASE + (_n) * 8)
+
+#define MT_WCID_ATTR_BASE 0xa800
+#define MT_WCID_ATTR(_n) (MT_WCID_ATTR_BASE + (_n) * 4)
+
+#define MT_WCID_ATTR_PAIRWISE BIT(0)
+#define MT_WCID_ATTR_PKEY_MODE GENMASK(3, 1)
+#define MT_WCID_ATTR_BSS_IDX GENMASK(6, 4)
+#define MT_WCID_ATTR_RXWI_UDF GENMASK(9, 7)
+#define MT_WCID_ATTR_PKEY_MODE_EXT BIT(10)
+#define MT_WCID_ATTR_BSS_IDX_EXT BIT(11)
+#define MT_WCID_ATTR_WAPI_MCBC BIT(15)
+#define MT_WCID_ATTR_WAPI_KEYID GENMASK(31, 24)
+
+#define MT_SKEY_BASE_0 0xac00
+#define MT_SKEY_BASE_1 0xb400
+#define MT_SKEY_0(_bss, _idx) \
+ (MT_SKEY_BASE_0 + (4 * (_bss) + _idx) * 32)
+#define MT_SKEY_1(_bss, _idx) \
+ (MT_SKEY_BASE_1 + (4 * ((_bss) & 7) + _idx) * 32)
+#define MT_SKEY(_bss, _idx) \
+ ((_bss & 8) ? MT_SKEY_1(_bss, _idx) : MT_SKEY_0(_bss, _idx))
+
+#define MT_SKEY_MODE_BASE_0 0xb000
+#define MT_SKEY_MODE_BASE_1 0xb3f0
+#define MT_SKEY_MODE_0(_bss) \
+ (MT_SKEY_MODE_BASE_0 + ((_bss / 2) << 2))
+#define MT_SKEY_MODE_1(_bss) \
+ (MT_SKEY_MODE_BASE_1 + ((((_bss) & 7) / 2) << 2))
+#define MT_SKEY_MODE(_bss) \
+ ((_bss & 8) ? MT_SKEY_MODE_1(_bss) : MT_SKEY_MODE_0(_bss))
+#define MT_SKEY_MODE_MASK GENMASK(3, 0)
+#define MT_SKEY_MODE_SHIFT(_bss, _idx) (4 * ((_idx) + 4 * (_bss & 1)))
+
+#define MT_BEACON_BASE 0xc000
+
+#define MT_TEMP_SENSOR 0x1d000
+#define MT_TEMP_SENSOR_VAL GENMASK(6, 0)
+
+enum mt76_cipher_type {
+ MT_CIPHER_NONE,
+ MT_CIPHER_WEP40,
+ MT_CIPHER_WEP104,
+ MT_CIPHER_TKIP,
+ MT_CIPHER_AES_CCMP,
+ MT_CIPHER_CKIP40,
+ MT_CIPHER_CKIP104,
+ MT_CIPHER_CKIP128,
+ MT_CIPHER_WAPI,
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/trace.c b/drivers/net/wireless/mediatek/mt7601u/trace.c
new file mode 100644
index 000000000..42df68a54
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/trace.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/trace.h b/drivers/net/wireless/mediatek/mt7601u/trace.h
new file mode 100644
index 000000000..5a6ba0150
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/trace.h
@@ -0,0 +1,392 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#if !defined(__MT7601U_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MT7601U_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "mt7601u.h"
+#include "mac.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mt7601u
+
+#define MAXNAME 32
+#define DEV_ENTRY __array(char, wiphy_name, 32)
+#define DEV_ASSIGN strlcpy(__entry->wiphy_name, \
+ wiphy_name(dev->hw->wiphy), MAXNAME)
+#define DEV_PR_FMT "%s "
+#define DEV_PR_ARG __entry->wiphy_name
+
+#define REG_ENTRY __field(u32, reg) __field(u32, val)
+#define REG_ASSIGN __entry->reg = reg; __entry->val = val
+#define REG_PR_FMT "%04x=%08x"
+#define REG_PR_ARG __entry->reg, __entry->val
+
+DECLARE_EVENT_CLASS(dev_reg_evtu,
+ TP_PROTO(struct mt7601u_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ REG_ENTRY
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ REG_ASSIGN;
+ ),
+ TP_printk(
+ DEV_PR_FMT REG_PR_FMT,
+ DEV_PR_ARG, REG_PR_ARG
+ )
+);
+
+DEFINE_EVENT(dev_reg_evtu, reg_read,
+ TP_PROTO(struct mt7601u_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val)
+);
+
+DEFINE_EVENT(dev_reg_evtu, reg_write,
+ TP_PROTO(struct mt7601u_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val)
+);
+
+TRACE_EVENT(mt_submit_urb,
+ TP_PROTO(struct mt7601u_dev *dev, struct urb *u),
+ TP_ARGS(dev, u),
+ TP_STRUCT__entry(
+ DEV_ENTRY __field(unsigned, pipe) __field(u32, len)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->pipe = u->pipe;
+ __entry->len = u->transfer_buffer_length;
+ ),
+ TP_printk(DEV_PR_FMT "p:%08x len:%u",
+ DEV_PR_ARG, __entry->pipe, __entry->len)
+);
+
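+/* Helper for synchronous transfers: build a throw-away URB on the stack so
+ * the mt_submit_urb tracepoint can be reused.
+ */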
+#define trace_mt_submit_urb_sync(__dev, __pipe, __len) ({ \
+ struct urb u; \
+ u.pipe = __pipe; \
+ u.transfer_buffer_length = __len; \
+ trace_mt_submit_urb(__dev, &u); \
+})
+
+TRACE_EVENT(mt_mcu_msg_send,
+ TP_PROTO(struct mt7601u_dev *dev,
+ struct sk_buff *skb, u32 csum, bool resp),
+ TP_ARGS(dev, skb, csum, resp),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u32, info)
+ __field(u32, csum)
+ __field(bool, resp)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->info = *(u32 *)skb->data;
+ __entry->csum = csum;
+ __entry->resp = resp;
+ ),
+ TP_printk(DEV_PR_FMT "i:%08x c:%08x r:%d",
+ DEV_PR_ARG, __entry->info, __entry->csum, __entry->resp)
+);
+
+TRACE_EVENT(mt_vend_req,
+ TP_PROTO(struct mt7601u_dev *dev, unsigned pipe, u8 req, u8 req_type,
+ u16 val, u16 offset, void *buf, size_t buflen, int ret),
+ TP_ARGS(dev, pipe, req, req_type, val, offset, buf, buflen, ret),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(unsigned, pipe) __field(u8, req) __field(u8, req_type)
+ __field(u16, val) __field(u16, offset) __field(void*, buf)
+ __field(int, buflen) __field(int, ret)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->pipe = pipe;
+ __entry->req = req;
+ __entry->req_type = req_type;
+ __entry->val = val;
+ __entry->offset = offset;
+ __entry->buf = buf;
+ __entry->buflen = buflen;
+ __entry->ret = ret;
+ ),
+ TP_printk(DEV_PR_FMT
+ "%d p:%08x req:%02hhx %02hhx val:%04hx %04hx buf:%d %d",
+ DEV_PR_ARG, __entry->ret, __entry->pipe, __entry->req,
+ __entry->req_type, __entry->val, __entry->offset,
+ !!__entry->buf, __entry->buflen)
+);
+
+TRACE_EVENT(ee_read,
+ TP_PROTO(struct mt7601u_dev *dev, int offset, u16 val),
+ TP_ARGS(dev, offset, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(int, o) __field(u16, v)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->o = offset;
+ __entry->v = val;
+ ),
+ TP_printk(DEV_PR_FMT "%04x=%04x", DEV_PR_ARG, __entry->o, __entry->v)
+);
+
+DECLARE_EVENT_CLASS(dev_rf_reg_evt,
+ TP_PROTO(struct mt7601u_dev *dev, u8 bank, u8 reg, u8 val),
+ TP_ARGS(dev, bank, reg, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u8, bank)
+ __field(u8, reg)
+ __field(u8, val)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ REG_ASSIGN;
+ __entry->bank = bank;
+ ),
+ TP_printk(
+ DEV_PR_FMT "%02hhx:%02hhx=%02hhx",
+ DEV_PR_ARG, __entry->bank, __entry->reg, __entry->val
+ )
+);
+
+DEFINE_EVENT(dev_rf_reg_evt, rf_read,
+ TP_PROTO(struct mt7601u_dev *dev, u8 bank, u8 reg, u8 val),
+ TP_ARGS(dev, bank, reg, val)
+);
+
+DEFINE_EVENT(dev_rf_reg_evt, rf_write,
+ TP_PROTO(struct mt7601u_dev *dev, u8 bank, u8 reg, u8 val),
+ TP_ARGS(dev, bank, reg, val)
+);
+
+DECLARE_EVENT_CLASS(dev_bbp_reg_evt,
+ TP_PROTO(struct mt7601u_dev *dev, u8 reg, u8 val),
+ TP_ARGS(dev, reg, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u8, reg)
+ __field(u8, val)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ REG_ASSIGN;
+ ),
+ TP_printk(
+ DEV_PR_FMT "%02hhx=%02hhx",
+ DEV_PR_ARG, __entry->reg, __entry->val
+ )
+);
+
+DEFINE_EVENT(dev_bbp_reg_evt, bbp_read,
+ TP_PROTO(struct mt7601u_dev *dev, u8 reg, u8 val),
+ TP_ARGS(dev, reg, val)
+);
+
+DEFINE_EVENT(dev_bbp_reg_evt, bbp_write,
+ TP_PROTO(struct mt7601u_dev *dev, u8 reg, u8 val),
+ TP_ARGS(dev, reg, val)
+);
+
+DECLARE_EVENT_CLASS(dev_simple_evt,
+ TP_PROTO(struct mt7601u_dev *dev, u8 val),
+ TP_ARGS(dev, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u8, val)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->val = val;
+ ),
+ TP_printk(
+ DEV_PR_FMT "%02hhx", DEV_PR_ARG, __entry->val
+ )
+);
+
+DEFINE_EVENT(dev_simple_evt, temp_mode,
+ TP_PROTO(struct mt7601u_dev *dev, u8 val),
+ TP_ARGS(dev, val)
+);
+
+DEFINE_EVENT(dev_simple_evt, read_temp,
+ TP_PROTO(struct mt7601u_dev *dev, u8 val),
+ TP_ARGS(dev, val)
+);
+
+DEFINE_EVENT(dev_simple_evt, freq_cal_adjust,
+ TP_PROTO(struct mt7601u_dev *dev, u8 val),
+ TP_ARGS(dev, val)
+);
+
+TRACE_EVENT(freq_cal_offset,
+ TP_PROTO(struct mt7601u_dev *dev, u8 phy_mode, s8 freq_off),
+ TP_ARGS(dev, phy_mode, freq_off),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u8, phy_mode)
+ __field(s8, freq_off)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->phy_mode = phy_mode;
+ __entry->freq_off = freq_off;
+ ),
+ TP_printk(DEV_PR_FMT "phy:%02hhx off:%02hhx",
+ DEV_PR_ARG, __entry->phy_mode, __entry->freq_off)
+);
+
+TRACE_EVENT(mt_rx,
+ TP_PROTO(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi, u32 f),
+ TP_ARGS(dev, rxwi, f),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field_struct(struct mt7601u_rxwi, rxwi)
+ __field(u32, fce_info)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->rxwi = *rxwi;
+ __entry->fce_info = f;
+ ),
+ TP_printk(DEV_PR_FMT "rxi:%08x ctl:%08x frag_sn:%04hx rate:%04hx "
+ "uknw:%02hhx z:%02hhx%02hhx%02hhx snr:%02hhx "
+ "ant:%02hhx gain:%02hhx freq_o:%02hhx "
+ "r:%08x ea:%08x fce:%08x", DEV_PR_ARG,
+ le32_to_cpu(__entry->rxwi.rxinfo),
+ le32_to_cpu(__entry->rxwi.ctl),
+ le16_to_cpu(__entry->rxwi.frag_sn),
+ le16_to_cpu(__entry->rxwi.rate),
+ __entry->rxwi.unknown,
+ __entry->rxwi.zero[0], __entry->rxwi.zero[1],
+ __entry->rxwi.zero[2],
+ __entry->rxwi.snr, __entry->rxwi.ant,
+ __entry->rxwi.gain, __entry->rxwi.freq_off,
+ __entry->rxwi.resv2, __entry->rxwi.expert_ant,
+ __entry->fce_info)
+);
+
+TRACE_EVENT(mt_tx,
+ TP_PROTO(struct mt7601u_dev *dev, struct sk_buff *skb,
+ struct mt76_sta *sta, struct mt76_txwi *h),
+ TP_ARGS(dev, skb, sta, h),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field_struct(struct mt76_txwi, h)
+ __field(struct sk_buff *, skb)
+ __field(struct mt76_sta *, sta)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->h = *h;
+ __entry->skb = skb;
+ __entry->sta = sta;
+ ),
+ TP_printk(DEV_PR_FMT "skb:%p sta:%p flg:%04hx rate_ctl:%04hx "
+ "ack:%02hhx wcid:%02hhx len_ctl:%05hx", DEV_PR_ARG,
+ __entry->skb, __entry->sta,
+ le16_to_cpu(__entry->h.flags),
+ le16_to_cpu(__entry->h.rate_ctl),
+ __entry->h.ack_ctl, __entry->h.wcid,
+ le16_to_cpu(__entry->h.len_ctl))
+);
+
+TRACE_EVENT(mt_tx_dma_done,
+ TP_PROTO(struct mt7601u_dev *dev, struct sk_buff *skb),
+ TP_ARGS(dev, skb),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(struct sk_buff *, skb)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->skb = skb;
+ ),
+ TP_printk(DEV_PR_FMT "%p", DEV_PR_ARG, __entry->skb)
+);
+
+TRACE_EVENT(mt_tx_status_cleaned,
+ TP_PROTO(struct mt7601u_dev *dev, int cleaned),
+ TP_ARGS(dev, cleaned),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(int, cleaned)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->cleaned = cleaned;
+ ),
+ TP_printk(DEV_PR_FMT "%d", DEV_PR_ARG, __entry->cleaned)
+);
+
+TRACE_EVENT(mt_tx_status,
+ TP_PROTO(struct mt7601u_dev *dev, u32 stat1, u32 stat2),
+ TP_ARGS(dev, stat1, stat2),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u32, stat1) __field(u32, stat2)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->stat1 = stat1;
+ __entry->stat2 = stat2;
+ ),
+ TP_printk(DEV_PR_FMT "%08x %08x",
+ DEV_PR_ARG, __entry->stat1, __entry->stat2)
+);
+
+TRACE_EVENT(mt_rx_dma_aggr,
+ TP_PROTO(struct mt7601u_dev *dev, int cnt, bool paged),
+ TP_ARGS(dev, cnt, paged),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u8, cnt)
+ __field(bool, paged)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->cnt = cnt;
+ __entry->paged = paged;
+ ),
+ TP_printk(DEV_PR_FMT "cnt:%d paged:%d",
+ DEV_PR_ARG, __entry->cnt, __entry->paged)
+);
+
+DEFINE_EVENT(dev_simple_evt, set_key,
+ TP_PROTO(struct mt7601u_dev *dev, u8 val),
+ TP_ARGS(dev, val)
+);
+
+TRACE_EVENT(set_shared_key,
+ TP_PROTO(struct mt7601u_dev *dev, u8 vid, u8 key),
+ TP_ARGS(dev, vid, key),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u8, vid)
+ __field(u8, key)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->vid = vid;
+ __entry->key = key;
+ ),
+	TP_printk(DEV_PR_FMT "vid:%02hhx key:%02hhx",
+		  DEV_PR_ARG, __entry->vid, __entry->key)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/mediatek/mt7601u/tx.c b/drivers/net/wireless/mediatek/mt7601u/tx.c
new file mode 100644
index 000000000..f3dff8319
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/tx.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#include "mt7601u.h"
+#include "trace.h"
+
+enum mt76_txq_id {
+ MT_TXQ_VO = IEEE80211_AC_VO,
+ MT_TXQ_VI = IEEE80211_AC_VI,
+ MT_TXQ_BE = IEEE80211_AC_BE,
+ MT_TXQ_BK = IEEE80211_AC_BK,
+ MT_TXQ_PSD,
+ MT_TXQ_MCU,
+ __MT_TXQ_MAX
+};
+
+/* Hardware uses mirrored order of queues with Q0 having the highest priority */
+static u8 q2hwq(u8 q)
+{
+ return q ^ 0x3;
+}
+
+/* Take mac80211 Q id from the skb and translate it to hardware Q id */
+static u8 skb2q(struct sk_buff *skb)
+{
+ int qid = skb_get_queue_mapping(skb);
+
+ if (WARN_ON(qid >= MT_TXQ_PSD)) {
+ qid = MT_TXQ_BE;
+ skb_set_queue_mapping(skb, qid);
+ }
+
+ return q2hwq(qid);
+}
+
+/* Note: TX retry reporting is a bit broken.
+ * Retries are reported only once per AMPDU and often come a frame early,
+ * i.e. they are reported in the last status preceding the AMPDU. Apart
+ * from the fact that it's hard to know the length of the AMPDU (which is
+ * needed to know how many consecutive frames the retries should be
+ * applied to), if the status comes early on a full FIFO it gets lost and
+ * the retries of the whole AMPDU become invisible.
+ * As a work-around, encode the desired rate in the PKT_ID of the TX
+ * descriptor and based on that guess the retries (every rate is tried
+ * once).
+ * The only downside here is that for MCS0 we have to rely solely on
+ * transmission failures, as no retries can ever be reported.
+ * Not having to read EXT_FIFO has the nice effect of doubling the number
+ * of reports which can be fetched.
+ * Also, the vendor driver never uses the EXT_FIFO register, so it may be
+ * undertested.
+ */
+static u8 mt7601u_tx_pktid_enc(struct mt7601u_dev *dev, u8 rate, bool is_probe)
+{
+ u8 encoded = (rate + 1) + is_probe * 8;
+
+	/* Because PKT_ID 0 disables status reporting, only 15 values are
+	 * available but 16 are needed (8 MCS * 2 for encoding is_probe),
+	 * so we need to cram two rates together: MCS0 and MCS7 with
+	 * is_probe share PKT_ID 9.
+	 */
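+	/* For example: rate 0 without is_probe encodes to PKT_ID 1, rate 7
+	 * without is_probe to 8, and rate 0 or 7 with is_probe both to 9;
+	 * mt7601u_tx_pktid_dec() tells the latter two apart by the
+	 * effective rate reported in the TX status.
+	 */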
+ if (is_probe && rate == 7)
+ return encoded - 7;
+
+ return encoded;
+}
+
+static void
+mt7601u_tx_pktid_dec(struct mt7601u_dev *dev, struct mt76_tx_status *stat)
+{
+ u8 req_rate = stat->pktid;
+ u8 eff_rate = stat->rate & 0x7;
+
+ req_rate -= 1;
+
+ if (req_rate > 7) {
+ stat->is_probe = true;
+ req_rate -= 8;
+
+ /* Decide between MCS0 and MCS7 which share pktid 9 */
+ if (!req_rate && eff_rate)
+ req_rate = 7;
+ }
+
+ stat->retry = req_rate - eff_rate;
+}
+
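+/* Undo the TX-side additions: pull the TXWI and the 4 bytes of DMA framing
+ * in front of the frame, drop the header alignment pad (if one was
+ * inserted) and trim back to the original packet length.
+ */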
+static void mt7601u_tx_skb_remove_dma_overhead(struct sk_buff *skb,
+ struct ieee80211_tx_info *info)
+{
+ int pkt_len = (unsigned long)info->status.status_driver_data[0];
+
+ skb_pull(skb, sizeof(struct mt76_txwi) + 4);
+ if (ieee80211_get_hdrlen_from_skb(skb) % 4)
+ mt76_remove_hdr_pad(skb);
+
+ skb_trim(skb, pkt_len);
+}
+
+void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ mt7601u_tx_skb_remove_dma_overhead(skb, info);
+
+ ieee80211_tx_info_clear_status(info);
+ info->status.rates[0].idx = -1;
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ spin_lock_bh(&dev->mac_lock);
+ ieee80211_tx_status(dev->hw, skb);
+ spin_unlock_bh(&dev->mac_lock);
+}
+
+static int mt7601u_skb_rooms(struct mt7601u_dev *dev, struct sk_buff *skb)
+{
+ int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
+ u32 need_head;
+
+ need_head = sizeof(struct mt76_txwi) + 4;
+ if (hdr_len % 4)
+ need_head += 2;
+
+ return skb_cow(skb, need_head);
+}
+
+static struct mt76_txwi *
+mt7601u_push_txwi(struct mt7601u_dev *dev, struct sk_buff *skb,
+ struct ieee80211_sta *sta, struct mt76_wcid *wcid,
+ int pkt_len)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_rate *rate = &info->control.rates[0];
+ struct mt76_txwi *txwi;
+ unsigned long flags;
+ bool is_probe;
+ u32 pkt_id;
+ u16 rate_ctl;
+ u8 nss;
+
+ txwi = skb_push(skb, sizeof(struct mt76_txwi));
+ memset(txwi, 0, sizeof(*txwi));
+
+ if (!wcid->tx_rate_set)
+ ieee80211_get_tx_rates(info->control.vif, sta, skb,
+ info->control.rates, 1);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (rate->idx < 0 || !rate->count)
+ rate_ctl = wcid->tx_rate;
+ else
+ rate_ctl = mt76_mac_tx_rate_val(dev, rate, &nss);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ txwi->rate_ctl = cpu_to_le16(rate_ctl);
+
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+ txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+ txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
+
+ if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
+ u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
+
+ ba_size <<= sta->ht_cap.ampdu_factor;
+ ba_size = min_t(int, 63, ba_size);
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+ ba_size = 0;
+ txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
+
+ txwi->flags =
+ cpu_to_le16(MT_TXWI_FLAGS_AMPDU |
+ FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
+ sta->ht_cap.ampdu_density));
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+ txwi->flags = 0;
+ }
+
+ txwi->wcid = wcid->idx;
+
+ is_probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
+ pkt_id = mt7601u_tx_pktid_enc(dev, rate_ctl & 0x7, is_probe);
+ pkt_len |= FIELD_PREP(MT_TXWI_LEN_PKTID, pkt_id);
+ txwi->len_ctl = cpu_to_le16(pkt_len);
+
+ return txwi;
+}
+
+void mt7601u_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct mt7601u_dev *dev = hw->priv;
+ struct ieee80211_vif *vif = info->control.vif;
+ struct ieee80211_sta *sta = control->sta;
+ struct mt76_sta *msta = NULL;
+ struct mt76_wcid *wcid = dev->mon_wcid;
+ struct mt76_txwi *txwi;
+ int pkt_len = skb->len;
+ int hw_q = skb2q(skb);
+
+ BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
+ info->status.status_driver_data[0] = (void *)(unsigned long)pkt_len;
+
+ if (mt7601u_skb_rooms(dev, skb) || mt76_insert_hdr_pad(skb)) {
+ ieee80211_free_txskb(dev->hw, skb);
+ return;
+ }
+
+ if (sta) {
+ msta = (struct mt76_sta *) sta->drv_priv;
+ wcid = &msta->wcid;
+ } else if (vif) {
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+
+ wcid = &mvif->group_wcid;
+ }
+
+ txwi = mt7601u_push_txwi(dev, skb, sta, wcid, pkt_len);
+
+ if (mt7601u_dma_enqueue_tx(dev, skb, wcid, hw_q))
+ return;
+
+ trace_mt_tx(dev, skb, msta, txwi);
+}
+
+void mt7601u_tx_stat(struct work_struct *work)
+{
+ struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev,
+ stat_work.work);
+ struct mt76_tx_status stat;
+ unsigned long flags;
+ int cleaned = 0;
+
+ while (!test_bit(MT7601U_STATE_REMOVED, &dev->state)) {
+ stat = mt7601u_mac_fetch_tx_status(dev);
+ if (!stat.valid)
+ break;
+
+ mt7601u_tx_pktid_dec(dev, &stat);
+ mt76_send_tx_status(dev, &stat);
+
+ cleaned++;
+ }
+ trace_mt_tx_status_cleaned(dev, cleaned);
+
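+	/* If anything was cleaned poll again shortly, otherwise re-run only
+	 * if more statuses were flagged (MT7601U_STATE_MORE_STATS) in the
+	 * meantime.
+	 */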
+ spin_lock_irqsave(&dev->tx_lock, flags);
+ if (cleaned)
+ queue_delayed_work(dev->stat_wq, &dev->stat_work,
+ msecs_to_jiffies(10));
+ else if (test_and_clear_bit(MT7601U_STATE_MORE_STATS, &dev->state))
+ queue_delayed_work(dev->stat_wq, &dev->stat_work,
+ msecs_to_jiffies(20));
+ else
+ clear_bit(MT7601U_STATE_READING_STATS, &dev->state);
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+}
+
+int mt7601u_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params)
+{
+ struct mt7601u_dev *dev = hw->priv;
+ u8 cw_min = 5, cw_max = 10, hw_q = q2hwq(queue);
+ u32 val;
+
+ /* TODO: should we do funny things with the parameters?
+ * See what mt7601u_set_default_edca() used to do in init.c.
+ */
+
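+	/* cw_min/cw_max are programmed as exponents, so use fls() to turn
+	 * mac80211's (2^n - 1) style values into n.
+	 */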
+ if (params->cw_min)
+ cw_min = fls(params->cw_min);
+ if (params->cw_max)
+ cw_max = fls(params->cw_max);
+
+ WARN_ON(params->txop > 0xff);
+ WARN_ON(params->aifs > 0xf);
+ WARN_ON(cw_min > 0xf);
+ WARN_ON(cw_max > 0xf);
+
+ val = FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
+ FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
+ FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
+	/* TODO: based on the user-controlled EnableTxBurst variable the
+	 * vendor driver sets a really long TXOP on AC0 (see connect.c:2009),
+	 * but only when connected? When not connected it should be 0.
+	 */
+ if (!hw_q)
+ val |= 0x60;
+ else
+ val |= FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop);
+ mt76_wr(dev, MT_EDCA_CFG_AC(hw_q), val);
+
+ val = mt76_rr(dev, MT_WMM_TXOP(hw_q));
+ val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(hw_q));
+ val |= params->txop << MT_WMM_TXOP_SHIFT(hw_q);
+ mt76_wr(dev, MT_WMM_TXOP(hw_q), val);
+
+ val = mt76_rr(dev, MT_WMM_AIFSN);
+ val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(hw_q));
+ val |= params->aifs << MT_WMM_AIFSN_SHIFT(hw_q);
+ mt76_wr(dev, MT_WMM_AIFSN, val);
+
+ val = mt76_rr(dev, MT_WMM_CWMIN);
+ val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(hw_q));
+ val |= cw_min << MT_WMM_CWMIN_SHIFT(hw_q);
+ mt76_wr(dev, MT_WMM_CWMIN, val);
+
+ val = mt76_rr(dev, MT_WMM_CWMAX);
+ val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(hw_q));
+ val |= cw_max << MT_WMM_CWMAX_SHIFT(hw_q);
+ mt76_wr(dev, MT_WMM_CWMAX, val);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt7601u/usb.c b/drivers/net/wireless/mediatek/mt7601u/usb.c
new file mode 100644
index 000000000..cc772045d
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/usb.c
@@ -0,0 +1,381 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include "mt7601u.h"
+#include "usb.h"
+#include "trace.h"
+
+static const struct usb_device_id mt7601u_device_table[] = {
+ { USB_DEVICE(0x0b05, 0x17d3) },
+ { USB_DEVICE(0x0e8d, 0x760a) },
+ { USB_DEVICE(0x0e8d, 0x760b) },
+ { USB_DEVICE(0x13d3, 0x3431) },
+ { USB_DEVICE(0x13d3, 0x3434) },
+ { USB_DEVICE(0x148f, 0x7601) },
+ { USB_DEVICE(0x148f, 0x760a) },
+ { USB_DEVICE(0x148f, 0x760b) },
+ { USB_DEVICE(0x148f, 0x760c) },
+ { USB_DEVICE(0x148f, 0x760d) },
+ { USB_DEVICE(0x2001, 0x3d04) },
+ { USB_DEVICE(0x2717, 0x4106) },
+ { USB_DEVICE(0x2955, 0x0001) },
+ { USB_DEVICE(0x2955, 0x1001) },
+ { USB_DEVICE(0x2955, 0x1003) },
+ { USB_DEVICE(0x2a5f, 0x1000) },
+ { USB_DEVICE(0x7392, 0x7710) },
+ { 0, }
+};
+
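+/* Allocate an URB and a coherent DMA buffer for it. Note: returns true on
+ * failure, not an error code.
+ */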
+bool mt7601u_usb_alloc_buf(struct mt7601u_dev *dev, size_t len,
+ struct mt7601u_dma_buf *buf)
+{
+ struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+
+ buf->len = len;
+ buf->urb = usb_alloc_urb(0, GFP_KERNEL);
+ buf->buf = usb_alloc_coherent(usb_dev, buf->len, GFP_KERNEL, &buf->dma);
+
+ return !buf->urb || !buf->buf;
+}
+
+void mt7601u_usb_free_buf(struct mt7601u_dev *dev, struct mt7601u_dma_buf *buf)
+{
+ struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+
+ usb_free_coherent(usb_dev, buf->len, buf->buf, buf->dma);
+ usb_free_urb(buf->urb);
+}
+
+int mt7601u_usb_submit_buf(struct mt7601u_dev *dev, int dir, int ep_idx,
+ struct mt7601u_dma_buf *buf, gfp_t gfp,
+ usb_complete_t complete_fn, void *context)
+{
+ struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+ unsigned pipe;
+ int ret;
+
+ if (dir == USB_DIR_IN)
+ pipe = usb_rcvbulkpipe(usb_dev, dev->in_eps[ep_idx]);
+ else
+ pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep_idx]);
+
+ usb_fill_bulk_urb(buf->urb, usb_dev, pipe, buf->buf, buf->len,
+ complete_fn, context);
+ buf->urb->transfer_dma = buf->dma;
+ buf->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ trace_mt_submit_urb(dev, buf->urb);
+ ret = usb_submit_urb(buf->urb, gfp);
+ if (ret)
+ dev_err(dev->dev, "Error: submit URB dir:%d ep:%d failed:%d\n",
+ dir, ep_idx, ret);
+ return ret;
+}
+
+void mt7601u_complete_urb(struct urb *urb)
+{
+ struct completion *cmpl = urb->context;
+
+ complete(cmpl);
+}
+
+int mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
+ const u8 direction, const u16 val, const u16 offset,
+ void *buf, const size_t buflen)
+{
+ int i, ret;
+ struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+ const u8 req_type = direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
+ const unsigned int pipe = (direction == USB_DIR_IN) ?
+ usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0);
+
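+	/* Retry transient control transfer failures; -ENODEV means the
+	 * device is gone, so mark it removed and give up immediately.
+	 */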
+ for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
+ ret = usb_control_msg(usb_dev, pipe, req, req_type,
+ val, offset, buf, buflen,
+ MT_VEND_REQ_TOUT_MS);
+ trace_mt_vend_req(dev, pipe, req, req_type, val, offset,
+ buf, buflen, ret);
+
+ if (ret == -ENODEV)
+ set_bit(MT7601U_STATE_REMOVED, &dev->state);
+ if (ret >= 0 || ret == -ENODEV)
+ return ret;
+
+ msleep(5);
+ }
+
+ dev_err(dev->dev, "Vendor request req:%02x off:%04x failed:%d\n",
+ req, offset, ret);
+
+ return ret;
+}
+
+void mt7601u_vendor_reset(struct mt7601u_dev *dev)
+{
+ mt7601u_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT,
+ MT_VEND_DEV_MODE_RESET, 0, NULL, 0);
+}
+
+/* should be called with vendor_req_mutex held */
+static u32 __mt7601u_rr(struct mt7601u_dev *dev, u32 offset)
+{
+ int ret;
+ u32 val = ~0;
+
+ WARN_ONCE(offset > USHRT_MAX, "read high off:%08x", offset);
+
+ ret = mt7601u_vendor_request(dev, MT_VEND_MULTI_READ, USB_DIR_IN,
+ 0, offset, dev->vend_buf, MT_VEND_BUF);
+ if (ret == MT_VEND_BUF)
+ val = get_unaligned_le32(dev->vend_buf);
+ else if (ret > 0)
+ dev_err(dev->dev, "Error: wrong size read:%d off:%08x\n",
+ ret, offset);
+
+ trace_reg_read(dev, offset, val);
+ return val;
+}
+
+u32 mt7601u_rr(struct mt7601u_dev *dev, u32 offset)
+{
+ u32 ret;
+
+ mutex_lock(&dev->vendor_req_mutex);
+ ret = __mt7601u_rr(dev, offset);
+ mutex_unlock(&dev->vendor_req_mutex);
+
+ return ret;
+}
+
+/* should be called with vendor_req_mutex held */
+static int __mt7601u_vendor_single_wr(struct mt7601u_dev *dev, const u8 req,
+ const u16 offset, const u32 val)
+{
+ int ret = mt7601u_vendor_request(dev, req, USB_DIR_OUT,
+ val & 0xffff, offset, NULL, 0);
+ if (!ret)
+ ret = mt7601u_vendor_request(dev, req, USB_DIR_OUT,
+ val >> 16, offset + 2, NULL, 0);
+ trace_reg_write(dev, offset, val);
+ return ret;
+}
+
+int mt7601u_vendor_single_wr(struct mt7601u_dev *dev, const u8 req,
+ const u16 offset, const u32 val)
+{
+ int ret;
+
+ mutex_lock(&dev->vendor_req_mutex);
+ ret = __mt7601u_vendor_single_wr(dev, req, offset, val);
+ mutex_unlock(&dev->vendor_req_mutex);
+
+ return ret;
+}
+
+void mt7601u_wr(struct mt7601u_dev *dev, u32 offset, u32 val)
+{
+ WARN_ONCE(offset > USHRT_MAX, "write high off:%08x", offset);
+
+ mt7601u_vendor_single_wr(dev, MT_VEND_WRITE, offset, val);
+}
+
+u32 mt7601u_rmw(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val)
+{
+ mutex_lock(&dev->vendor_req_mutex);
+ val |= __mt7601u_rr(dev, offset) & ~mask;
+ __mt7601u_vendor_single_wr(dev, MT_VEND_WRITE, offset, val);
+ mutex_unlock(&dev->vendor_req_mutex);
+
+ return val;
+}
+
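+/* Like mt7601u_rmw() but skips the register write when the combined value
+ * is identical to what the register already holds.
+ */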
+u32 mt7601u_rmc(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val)
+{
+ u32 reg;
+
+ mutex_lock(&dev->vendor_req_mutex);
+ reg = __mt7601u_rr(dev, offset);
+ val |= reg & ~mask;
+ if (reg != val)
+ __mt7601u_vendor_single_wr(dev, MT_VEND_WRITE,
+ offset, val);
+ mutex_unlock(&dev->vendor_req_mutex);
+
+ return val;
+}
+
+void mt7601u_wr_copy(struct mt7601u_dev *dev, u32 offset,
+ const void *data, int len)
+{
+ WARN_ONCE(offset & 3, "unaligned write copy off:%08x", offset);
+ WARN_ONCE(len & 3, "short write copy off:%08x", offset);
+
+ mt7601u_burst_write_regs(dev, offset, data, len / 4);
+}
+
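+/* Write a 6-byte MAC address as two 32-bit register writes; the upper
+ * 16 bits of the second word are written as zero.
+ */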
+void mt7601u_addr_wr(struct mt7601u_dev *dev, const u32 offset, const u8 *addr)
+{
+ mt7601u_wr(dev, offset, get_unaligned_le32(addr));
+ mt7601u_wr(dev, offset + 4, addr[4] | addr[5] << 8);
+}
+
+static int mt7601u_assign_pipes(struct usb_interface *usb_intf,
+ struct mt7601u_dev *dev)
+{
+ struct usb_endpoint_descriptor *ep_desc;
+ struct usb_host_interface *intf_desc = usb_intf->cur_altsetting;
+ unsigned i, ep_i = 0, ep_o = 0;
+
+ BUILD_BUG_ON(sizeof(dev->in_eps) < __MT_EP_IN_MAX);
+ BUILD_BUG_ON(sizeof(dev->out_eps) < __MT_EP_OUT_MAX);
+
+ for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
+ ep_desc = &intf_desc->endpoint[i].desc;
+
+ if (usb_endpoint_is_bulk_in(ep_desc) &&
+ ep_i++ < __MT_EP_IN_MAX) {
+ dev->in_eps[ep_i - 1] = usb_endpoint_num(ep_desc);
+ dev->in_max_packet = usb_endpoint_maxp(ep_desc);
+			/* Note: this is ignored by the USB subsystem but the
+			 * vendor code does it. We can drop this at some
+			 * point.
+			 */
+ dev->in_eps[ep_i - 1] |= USB_DIR_IN;
+ } else if (usb_endpoint_is_bulk_out(ep_desc) &&
+ ep_o++ < __MT_EP_OUT_MAX) {
+ dev->out_eps[ep_o - 1] = usb_endpoint_num(ep_desc);
+ dev->out_max_packet = usb_endpoint_maxp(ep_desc);
+ }
+ }
+
+ if (ep_i != __MT_EP_IN_MAX || ep_o != __MT_EP_OUT_MAX) {
+ dev_err(dev->dev, "Error: wrong pipe number in:%d out:%d\n",
+ ep_i, ep_o);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mt7601u_probe(struct usb_interface *usb_intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
+ struct mt7601u_dev *dev;
+ u32 asic_rev, mac_rev;
+ int ret;
+
+ dev = mt7601u_alloc_device(&usb_intf->dev);
+ if (!dev)
+ return -ENOMEM;
+
+ usb_dev = usb_get_dev(usb_dev);
+ usb_reset_device(usb_dev);
+
+ usb_set_intfdata(usb_intf, dev);
+
+ dev->vend_buf = devm_kmalloc(dev->dev, MT_VEND_BUF, GFP_KERNEL);
+ if (!dev->vend_buf) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = mt7601u_assign_pipes(usb_intf, dev);
+ if (ret)
+ goto err;
+ ret = mt7601u_wait_asic_ready(dev);
+ if (ret)
+ goto err;
+
+ asic_rev = mt7601u_rr(dev, MT_ASIC_VERSION);
+ mac_rev = mt7601u_rr(dev, MT_MAC_CSR0);
+ dev_info(dev->dev, "ASIC revision: %08x MAC revision: %08x\n",
+ asic_rev, mac_rev);
+ if ((asic_rev >> 16) != 0x7601) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ /* Note: vendor driver skips this check for MT7601U */
+ if (!(mt7601u_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL))
+ dev_warn(dev->dev, "Warning: eFUSE not present\n");
+
+ ret = mt7601u_init_hardware(dev);
+ if (ret)
+ goto err;
+ ret = mt7601u_register_device(dev);
+ if (ret)
+ goto err_hw;
+
+ set_bit(MT7601U_STATE_INITIALIZED, &dev->state);
+
+ return 0;
+err_hw:
+ mt7601u_cleanup(dev);
+err:
+ usb_set_intfdata(usb_intf, NULL);
+ usb_put_dev(interface_to_usbdev(usb_intf));
+
+ destroy_workqueue(dev->stat_wq);
+ ieee80211_free_hw(dev->hw);
+ return ret;
+}
+
+static void mt7601u_disconnect(struct usb_interface *usb_intf)
+{
+ struct mt7601u_dev *dev = usb_get_intfdata(usb_intf);
+
+ ieee80211_unregister_hw(dev->hw);
+ mt7601u_cleanup(dev);
+
+ usb_set_intfdata(usb_intf, NULL);
+ usb_put_dev(interface_to_usbdev(usb_intf));
+
+ destroy_workqueue(dev->stat_wq);
+ ieee80211_free_hw(dev->hw);
+}
+
+static int mt7601u_suspend(struct usb_interface *usb_intf, pm_message_t state)
+{
+ struct mt7601u_dev *dev = usb_get_intfdata(usb_intf);
+
+ mt7601u_cleanup(dev);
+
+ return 0;
+}
+
+static int mt7601u_resume(struct usb_interface *usb_intf)
+{
+ struct mt7601u_dev *dev = usb_get_intfdata(usb_intf);
+ int ret;
+
+ ret = mt7601u_init_hardware(dev);
+ if (ret)
+ return ret;
+
+ set_bit(MT7601U_STATE_INITIALIZED, &dev->state);
+
+ return 0;
+}
+
+MODULE_DEVICE_TABLE(usb, mt7601u_device_table);
+MODULE_FIRMWARE(MT7601U_FIRMWARE);
+MODULE_LICENSE("GPL");
+
+static struct usb_driver mt7601u_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mt7601u_device_table,
+ .probe = mt7601u_probe,
+ .disconnect = mt7601u_disconnect,
+ .suspend = mt7601u_suspend,
+ .resume = mt7601u_resume,
+ .reset_resume = mt7601u_resume,
+ .soft_unbind = 1,
+ .disable_hub_initiated_lpm = 1,
+};
+module_usb_driver(mt7601u_driver);
diff --git a/drivers/net/wireless/mediatek/mt7601u/usb.h b/drivers/net/wireless/mediatek/mt7601u/usb.h
new file mode 100644
index 000000000..9fdf35970
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/usb.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#ifndef __MT7601U_USB_H
+#define __MT7601U_USB_H
+
+#include "mt7601u.h"
+
+#define MT7601U_FIRMWARE "mt7601u.bin"
+
+#define MT_VEND_REQ_MAX_RETRY 10
+#define MT_VEND_REQ_TOUT_MS 300
+
+#define MT_VEND_DEV_MODE_RESET 1
+
+#define MT_VEND_BUF sizeof(__le32)
+
+enum mt_vendor_req {
+ MT_VEND_DEV_MODE = 1,
+ MT_VEND_WRITE = 2,
+ MT_VEND_MULTI_READ = 7,
+ MT_VEND_WRITE_FCE = 0x42,
+};
+
+enum mt_usb_ep_in {
+ MT_EP_IN_PKT_RX,
+ MT_EP_IN_CMD_RESP,
+ __MT_EP_IN_MAX,
+};
+
+enum mt_usb_ep_out {
+ MT_EP_OUT_INBAND_CMD,
+ MT_EP_OUT_AC_BK,
+ MT_EP_OUT_AC_BE,
+ MT_EP_OUT_AC_VI,
+ MT_EP_OUT_AC_VO,
+ MT_EP_OUT_HCCA,
+ __MT_EP_OUT_MAX,
+};
+
+static inline struct usb_device *mt7601u_to_usb_dev(struct mt7601u_dev *mt7601u)
+{
+ return interface_to_usbdev(to_usb_interface(mt7601u->dev));
+}
+
+static inline bool mt7601u_urb_has_error(struct urb *urb)
+{
+ return urb->status &&
+ urb->status != -ENOENT &&
+ urb->status != -ECONNRESET &&
+ urb->status != -ESHUTDOWN;
+}
+
+bool mt7601u_usb_alloc_buf(struct mt7601u_dev *dev, size_t len,
+ struct mt7601u_dma_buf *buf);
+void mt7601u_usb_free_buf(struct mt7601u_dev *dev, struct mt7601u_dma_buf *buf);
+int mt7601u_usb_submit_buf(struct mt7601u_dev *dev, int dir, int ep_idx,
+ struct mt7601u_dma_buf *buf, gfp_t gfp,
+ usb_complete_t complete_fn, void *context);
+void mt7601u_complete_urb(struct urb *urb);
+
+int mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
+ const u8 direction, const u16 val, const u16 offset,
+ void *buf, const size_t buflen);
+void mt7601u_vendor_reset(struct mt7601u_dev *dev);
+int mt7601u_vendor_single_wr(struct mt7601u_dev *dev, const u8 req,
+ const u16 offset, const u32 val);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/util.c b/drivers/net/wireless/mediatek/mt7601u/util.c
new file mode 100644
index 000000000..050c2dd9d
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/util.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ */
+
+#include "mt7601u.h"
+
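+/* Remove the 2-byte alignment pad between the 802.11 header and the
+ * payload: shift the header towards the payload and trim the skb head.
+ */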
+void mt76_remove_hdr_pad(struct sk_buff *skb)
+{
+ int len = ieee80211_get_hdrlen_from_skb(skb);
+
+ memmove(skb->data + 2, skb->data, len);
+ skb_pull(skb, 2);
+}
+
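+/* When the 802.11 header length is not a multiple of 4, insert 2 bytes of
+ * zero padding between the header and the payload so the payload stays
+ * 4-byte aligned; the header is shifted towards the skb head to make room.
+ */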
+int mt76_insert_hdr_pad(struct sk_buff *skb)
+{
+ int len = ieee80211_get_hdrlen_from_skb(skb);
+ int ret;
+
+ if (len % 4 == 0)
+ return 0;
+
+ ret = skb_cow(skb, 2);
+ if (ret)
+ return ret;
+
+ skb_push(skb, 2);
+ memmove(skb->data, skb->data + 2, len);
+
+ skb->data[len] = 0;
+ skb->data[len + 1] = 0;
+ return 0;
+}