From 2c3c1048746a4622d8c89a29670120dc8fab93c4 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sun, 7 Apr 2024 20:49:45 +0200
Subject: Adding upstream version 6.1.76.

Signed-off-by: Daniel Baumann
---
 drivers/net/wireless/mediatek/mt76/mt7615/Kconfig | 56 +
 drivers/net/wireless/mediatek/mt76/mt7615/Makefile | 20 +
 .../net/wireless/mediatek/mt76/mt7615/debugfs.c | 611 +++++
 drivers/net/wireless/mediatek/mt76/mt7615/dma.c | 315 +++
 drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c | 353 +++
 drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h | 116 +
 drivers/net/wireless/mediatek/mt76/mt7615/init.c | 561 +++++
 drivers/net/wireless/mediatek/mt76/mt7615/mac.c | 2363 ++++++++++++++++++
 drivers/net/wireless/mediatek/mt76/mt7615/mac.h | 337 +++
 drivers/net/wireless/mediatek/mt76/mt7615/main.c | 1346 ++++++++++
 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c | 2570 ++++++++++++++++++++
 drivers/net/wireless/mediatek/mt76/mt7615/mcu.h | 254 ++
 drivers/net/wireless/mediatek/mt76/mt7615/mmio.c | 292 +++
 drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h | 563 +++++
 .../wireless/mediatek/mt76/mt7615/mt7615_trace.h | 56 +
 drivers/net/wireless/mediatek/mt76/mt7615/pci.c | 202 ++
 .../net/wireless/mediatek/mt76/mt7615/pci_init.c | 186 ++
 .../net/wireless/mediatek/mt76/mt7615/pci_mac.c | 293 +++
 drivers/net/wireless/mediatek/mt76/mt7615/regs.h | 609 +++++
 drivers/net/wireless/mediatek/mt76/mt7615/sdio.c | 257 ++
 .../net/wireless/mediatek/mt76/mt7615/sdio_mcu.c | 181 ++
 drivers/net/wireless/mediatek/mt76/mt7615/soc.c | 72 +
 .../net/wireless/mediatek/mt76/mt7615/testmode.c | 376 +++
 drivers/net/wireless/mediatek/mt76/mt7615/trace.c | 12 +
 drivers/net/wireless/mediatek/mt76/mt7615/usb.c | 285 +++
 .../net/wireless/mediatek/mt76/mt7615/usb_mcu.c | 101 +
 .../net/wireless/mediatek/mt76/mt7615/usb_sdio.c | 352 +++
 27 files changed, 12739 insertions(+)
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7615/Kconfig
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7615/Makefile
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7615/dma.c
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7615/init.c
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7615/mac.c
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7615/mac.h
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7615/main.c
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7615/mt7615_trace.h
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7615/pci.c
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7615/regs.h
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7615/soc.c
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7615/testmode.c
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7615/trace.c
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7615/usb.c
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
(limited to 'drivers/net/wireless/mediatek/mt76/mt7615')

diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig
new file mode 100644
index 000000000..30fba36ff
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config MT7615_COMMON
+	tristate
+	select WANT_DEV_COREDUMP
+	select MT76_CONNAC_LIB
+
+config MT7615E
+	tristate "MediaTek MT7615E and MT7663E (PCIe) support"
+	select MT7615_COMMON
+	depends on MAC80211
+	depends on PCI
+	help
+	  This adds support for MT7615-based wireless PCIe devices,
+	  which support concurrent dual-band operation at both 5GHz
+	  and 2.4GHz, IEEE 802.11ac 4x4:4SS 1733Mbps PHY rate, wave2
+	  MU-MIMO up to 4 users/group and 160MHz channels.
+
+	  To compile this driver as a module, choose M here.
+
+config MT7622_WMAC
+	bool "MT7622 (SoC) WMAC support"
+	depends on MT7615E
+	depends on ARCH_MEDIATEK || COMPILE_TEST
+	select REGMAP
+	default y
+	help
+	  This adds support for the built-in WMAC on MT7622 SoC devices,
+	  which has the same feature set as an MT7615 but is limited to
+	  the 2.4 GHz band only.
+
+config MT7663_USB_SDIO_COMMON
+	tristate
+	select MT7615_COMMON
+
+config MT7663U
+	tristate "MediaTek MT7663U (USB) support"
+	select MT76_USB
+	select MT7663_USB_SDIO_COMMON
+	depends on MAC80211
+	depends on USB
+	help
+	  This adds support for MT7663U 802.11ac 2x2:2 wireless devices.
+
+	  To compile this driver as a module, choose M here.
+
+config MT7663S
+	tristate "MediaTek MT7663S (SDIO) support"
+	select MT76_SDIO
+	select MT7663_USB_SDIO_COMMON
+	depends on MAC80211
+	depends on MMC
+	help
+	  This adds support for MT7663S 802.11ac 2x2:2 wireless devices.
+
+	  To compile this driver as a module, choose M here.
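As a rough illustration of how these options are meant to be combined at build time (the core symbols CONFIG_MT76_CORE and CONFIG_MT76_CONNAC_LIB are assumed from the surrounding mt76 Kconfig, not from this patch), a minimal .config fragment that builds the PCIe and USB flavours as modules might look like the following; the exact set depends on the rest of the kernel configuration:

CONFIG_MAC80211=m
CONFIG_MT76_CORE=m
CONFIG_MT76_CONNAC_LIB=m
CONFIG_MT7615_COMMON=m
CONFIG_MT7615E=m
CONFIG_MT7663_USB_SDIO_COMMON=m
CONFIG_MT7663U=m

MT7615_COMMON and MT7663_USB_SDIO_COMMON have no user-visible prompt; they are selected automatically by MT7615E and MT7663U respectively.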
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/Makefile b/drivers/net/wireless/mediatek/mt76/mt7615/Makefile
new file mode 100644
index 000000000..2b97b9dde
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/Makefile
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: ISC
+
+obj-$(CONFIG_MT7615_COMMON) += mt7615-common.o
+obj-$(CONFIG_MT7615E) += mt7615e.o
+obj-$(CONFIG_MT7663_USB_SDIO_COMMON) += mt7663-usb-sdio-common.o
+obj-$(CONFIG_MT7663U) += mt7663u.o
+obj-$(CONFIG_MT7663S) += mt7663s.o
+
+CFLAGS_trace.o := -I$(src)
+
+mt7615-common-y := main.o init.o mcu.o eeprom.o mac.o \
+		   debugfs.o trace.o
+mt7615-common-$(CONFIG_NL80211_TESTMODE) += testmode.o
+
+mt7615e-y := pci.o pci_init.o dma.o pci_mac.o mmio.o
+mt7615e-$(CONFIG_MT7622_WMAC) += soc.o
+
+mt7663-usb-sdio-common-y := usb_sdio.o
+mt7663u-y := usb.o usb_mcu.o
+mt7663s-y := sdio.o sdio_mcu.o
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
new file mode 100644
index 000000000..c26b45a09
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
@@ -0,0 +1,611 @@
+// SPDX-License-Identifier: ISC
+
+#include "mt7615.h"
+
+static int
+mt7615_reg_set(void *data, u64 val)
+{
+	struct mt7615_dev *dev = data;
+
+	mt7615_mutex_acquire(dev);
+	mt76_wr(dev, dev->mt76.debugfs_reg, val);
+	mt7615_mutex_release(dev);
+
+	return 0;
+}
+
+static int
+mt7615_reg_get(void *data, u64 *val)
+{
+	struct mt7615_dev *dev = data;
+
+	mt7615_mutex_acquire(dev);
+	*val = mt76_rr(dev, dev->mt76.debugfs_reg);
+	mt7615_mutex_release(dev);
+
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mt7615_reg_get, mt7615_reg_set,
+			 "0x%08llx\n");
+
+static int
+mt7615_radar_pattern_set(void *data, u64 val)
+{
+	struct mt7615_dev *dev = data;
+	int err;
+
+	if (!mt7615_wait_for_mcu_init(dev))
+		return 0;
+
+	mt7615_mutex_acquire(dev);
+	err = mt7615_mcu_rdd_send_pattern(dev);
+	mt7615_mutex_release(dev);
+
+	return err;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_radar_pattern, NULL,
+			 mt7615_radar_pattern_set, "%lld\n");
+
+static int mt7615_config(void *data, u64 val)
+{
+	struct mt7615_dev *dev = data;
+	int ret;
+
+	mt7615_mutex_acquire(dev);
+	ret = mt76_connac_mcu_chip_config(&dev->mt76);
+	mt7615_mutex_release(dev);
+
+	return ret;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_config, NULL, mt7615_config, "%lld\n");
+
+static int
+mt7615_scs_set(void *data, u64 val)
+{
+	struct mt7615_dev *dev = data;
+	struct mt7615_phy *ext_phy;
+
+	if (!mt7615_wait_for_mcu_init(dev))
+		return 0;
+
+	mt7615_mac_set_scs(&dev->phy, val);
+	ext_phy = mt7615_ext_phy(dev);
+	if (ext_phy)
+		mt7615_mac_set_scs(ext_phy, val);
+
+	return 0;
+}
+
+static int
+mt7615_scs_get(void *data, u64 *val)
+{
+	struct mt7615_dev *dev = data;
+
+	*val = dev->phy.scs_en;
+
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_scs, mt7615_scs_get,
+			 mt7615_scs_set, "%lld\n");
+
+static int
+mt7615_pm_set(void *data, u64 val)
+{
+	struct mt7615_dev *dev = data;
+	struct mt76_connac_pm *pm = &dev->pm;
+	int ret = 0;
+
+	if (!mt7615_wait_for_mcu_init(dev))
+		return 0;
+
+	if (!mt7615_firmware_offload(dev) || mt76_is_usb(&dev->mt76))
+		return -EOPNOTSUPP;
+
+	mutex_lock(&dev->mt76.mutex);
+
+	if (val == pm->enable)
+		goto out;
+
+	if (dev->phy.n_beacon_vif) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	if (!pm->enable) {
+		pm->stats.last_wake_event = jiffies;
+		pm->stats.last_doze_event = jiffies;
+	}
+	/* make sure the chip is awake here and ps_work is scheduled
+	 * just at the end of this routine.
+ */ + pm->enable = false; + mt76_connac_pm_wake(&dev->mphy, pm); + + pm->enable = val; + mt76_connac_power_save_sched(&dev->mphy, pm); +out: + mutex_unlock(&dev->mt76.mutex); + + return ret; +} + +static int +mt7615_pm_get(void *data, u64 *val) +{ + struct mt7615_dev *dev = data; + + *val = dev->pm.enable; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_pm, mt7615_pm_get, mt7615_pm_set, "%lld\n"); + +static int +mt7615_pm_stats(struct seq_file *s, void *data) +{ + struct mt7615_dev *dev = dev_get_drvdata(s->private); + struct mt76_connac_pm *pm = &dev->pm; + unsigned long awake_time = pm->stats.awake_time; + unsigned long doze_time = pm->stats.doze_time; + + if (!test_bit(MT76_STATE_PM, &dev->mphy.state)) + awake_time += jiffies - pm->stats.last_wake_event; + else + doze_time += jiffies - pm->stats.last_doze_event; + + seq_printf(s, "awake time: %14u\ndoze time: %15u\n", + jiffies_to_msecs(awake_time), + jiffies_to_msecs(doze_time)); + + return 0; +} + +static int +mt7615_pm_idle_timeout_set(void *data, u64 val) +{ + struct mt7615_dev *dev = data; + + dev->pm.idle_timeout = msecs_to_jiffies(val); + + return 0; +} + +static int +mt7615_pm_idle_timeout_get(void *data, u64 *val) +{ + struct mt7615_dev *dev = data; + + *val = jiffies_to_msecs(dev->pm.idle_timeout); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_pm_idle_timeout, mt7615_pm_idle_timeout_get, + mt7615_pm_idle_timeout_set, "%lld\n"); + +static int +mt7615_dbdc_set(void *data, u64 val) +{ + struct mt7615_dev *dev = data; + + if (!mt7615_wait_for_mcu_init(dev)) + return 0; + + if (val) + mt7615_register_ext_phy(dev); + else + mt7615_unregister_ext_phy(dev); + + return 0; +} + +static int +mt7615_dbdc_get(void *data, u64 *val) +{ + struct mt7615_dev *dev = data; + + *val = !!mt7615_ext_phy(dev); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_dbdc, mt7615_dbdc_get, + mt7615_dbdc_set, "%lld\n"); + +static int +mt7615_fw_debug_set(void *data, u64 val) +{ + struct mt7615_dev *dev = data; + + if (!mt7615_wait_for_mcu_init(dev)) + return 0; + + dev->fw_debug = val; + + mt7615_mutex_acquire(dev); + mt7615_mcu_fw_log_2_host(dev, dev->fw_debug ? 2 : 0); + mt7615_mutex_release(dev); + + return 0; +} + +static int +mt7615_fw_debug_get(void *data, u64 *val) +{ + struct mt7615_dev *dev = data; + + *val = dev->fw_debug; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_fw_debug, mt7615_fw_debug_get, + mt7615_fw_debug_set, "%lld\n"); + +static int +mt7615_reset_test_set(void *data, u64 val) +{ + struct mt7615_dev *dev = data; + struct sk_buff *skb; + + if (!mt7615_wait_for_mcu_init(dev)) + return 0; + + skb = alloc_skb(1, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + skb_put(skb, 1); + + mt7615_mutex_acquire(dev); + mt76_tx_queue_skb_raw(dev, dev->mphy.q_tx[0], skb, 0); + mt7615_mutex_release(dev); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_reset_test, NULL, + mt7615_reset_test_set, "%lld\n"); + +static void +mt7615_ampdu_stat_read_phy(struct mt7615_phy *phy, + struct seq_file *file) +{ + struct mt7615_dev *dev = file->private; + u32 reg = is_mt7663(&dev->mt76) ? 
MT_MIB_ARNG(0) : MT_AGG_ASRCR0; + bool ext_phy = phy != &dev->phy; + int bound[7], i, range; + + if (!phy) + return; + + range = mt76_rr(dev, reg); + for (i = 0; i < 4; i++) + bound[i] = MT_AGG_ASRCR_RANGE(range, i) + 1; + + range = mt76_rr(dev, reg + 4); + for (i = 0; i < 3; i++) + bound[i + 4] = MT_AGG_ASRCR_RANGE(range, i) + 1; + + seq_printf(file, "\nPhy %d\n", ext_phy); + + seq_printf(file, "Length: %8d | ", bound[0]); + for (i = 0; i < ARRAY_SIZE(bound) - 1; i++) + seq_printf(file, "%3d -%3d | ", + bound[i], bound[i + 1]); + seq_puts(file, "\nCount: "); + + range = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0; + for (i = 0; i < ARRAY_SIZE(bound); i++) + seq_printf(file, "%8d | ", dev->mt76.aggr_stats[i + range]); + seq_puts(file, "\n"); + + seq_printf(file, "BA miss count: %d\n", phy->mib.ba_miss_cnt); + seq_printf(file, "PER: %ld.%1ld%%\n", + phy->mib.aggr_per / 10, phy->mib.aggr_per % 10); +} + +static int +mt7615_ampdu_stat_show(struct seq_file *file, void *data) +{ + struct mt7615_dev *dev = file->private; + + mt7615_mutex_acquire(dev); + + mt7615_ampdu_stat_read_phy(&dev->phy, file); + mt7615_ampdu_stat_read_phy(mt7615_ext_phy(dev), file); + + mt7615_mutex_release(dev); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mt7615_ampdu_stat); + +static void +mt7615_radio_read_phy(struct mt7615_phy *phy, struct seq_file *s) +{ + struct mt7615_dev *dev = dev_get_drvdata(s->private); + bool ext_phy = phy != &dev->phy; + + if (!phy) + return; + + seq_printf(s, "Radio %d sensitivity: ofdm=%d cck=%d\n", ext_phy, + phy->ofdm_sensitivity, phy->cck_sensitivity); + seq_printf(s, "Radio %d false CCA: ofdm=%d cck=%d\n", ext_phy, + phy->false_cca_ofdm, phy->false_cca_cck); +} + +static int +mt7615_radio_read(struct seq_file *s, void *data) +{ + struct mt7615_dev *dev = dev_get_drvdata(s->private); + + mt7615_radio_read_phy(&dev->phy, s); + mt7615_radio_read_phy(mt7615_ext_phy(dev), s); + + return 0; +} + +static int +mt7615_queues_acq(struct seq_file *s, void *data) +{ + struct mt7615_dev *dev = dev_get_drvdata(s->private); + int i; + + mt7615_mutex_acquire(dev); + + for (i = 0; i < 16; i++) { + int j, wmm_idx = i % MT7615_MAX_WMM_SETS; + int acs = i / MT7615_MAX_WMM_SETS; + u32 ctrl, val, qlen = 0; + + if (wmm_idx == 3 && is_mt7663(&dev->mt76)) + continue; + + val = mt76_rr(dev, MT_PLE_AC_QEMPTY(acs, wmm_idx)); + ctrl = BIT(31) | BIT(15) | (acs << 8); + + for (j = 0; j < 32; j++) { + if (val & BIT(j)) + continue; + + mt76_wr(dev, MT_PLE_FL_Q0_CTRL, + ctrl | (j + (wmm_idx << 5))); + qlen += mt76_get_field(dev, MT_PLE_FL_Q3_CTRL, + GENMASK(11, 0)); + } + seq_printf(s, "AC%d%d: queued=%d\n", wmm_idx, acs, qlen); + } + + mt7615_mutex_release(dev); + + return 0; +} + +static int +mt7615_queues_read(struct seq_file *s, void *data) +{ + struct mt7615_dev *dev = dev_get_drvdata(s->private); + struct { + struct mt76_queue *q; + char *queue; + } queue_map[] = { + { dev->mphy.q_tx[MT_TXQ_BE], "PDMA0" }, + { dev->mt76.q_mcu[MT_MCUQ_WM], "MCUQ" }, + { dev->mt76.q_mcu[MT_MCUQ_FWDL], "MCUFWQ" }, + }; + int i; + + for (i = 0; i < ARRAY_SIZE(queue_map); i++) { + struct mt76_queue *q = queue_map[i].q; + + seq_printf(s, + "%s: queued=%d head=%d tail=%d\n", + queue_map[i].queue, q->queued, q->head, + q->tail); + } + + return 0; +} + +static int +mt7615_rf_reg_set(void *data, u64 val) +{ + struct mt7615_dev *dev = data; + + mt7615_rf_wr(dev, dev->debugfs_rf_wf, dev->debugfs_rf_reg, val); + + return 0; +} + +static int +mt7615_rf_reg_get(void *data, u64 *val) +{ + struct mt7615_dev *dev = data; + + *val = 
mt7615_rf_rr(dev, dev->debugfs_rf_wf, dev->debugfs_rf_reg); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_rf_reg, mt7615_rf_reg_get, mt7615_rf_reg_set, + "0x%08llx\n"); + +static ssize_t +mt7615_ext_mac_addr_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct mt7615_dev *dev = file->private_data; + u32 len = 32 * ((ETH_ALEN * 3) + 4) + 1; + u8 addr[ETH_ALEN]; + char *buf; + int ofs = 0; + int i; + + buf = kzalloc(len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + for (i = 0; i < 32; i++) { + if (!(dev->muar_mask & BIT(i))) + continue; + + mt76_wr(dev, MT_WF_RMAC_MAR1, + FIELD_PREP(MT_WF_RMAC_MAR1_IDX, i * 2) | + MT_WF_RMAC_MAR1_START); + put_unaligned_le32(mt76_rr(dev, MT_WF_RMAC_MAR0), addr); + put_unaligned_le16((mt76_rr(dev, MT_WF_RMAC_MAR1) & + MT_WF_RMAC_MAR1_ADDR), addr + 4); + ofs += snprintf(buf + ofs, len - ofs, "%d=%pM\n", i, addr); + } + + ofs = simple_read_from_buffer(userbuf, count, ppos, buf, ofs); + + kfree(buf); + return ofs; +} + +static ssize_t +mt7615_ext_mac_addr_write(struct file *file, const char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct mt7615_dev *dev = file->private_data; + unsigned long idx = 0; + u8 addr[ETH_ALEN]; + char buf[32]; + char *p; + + if (count > sizeof(buf)) + return -EINVAL; + + if (copy_from_user(buf, userbuf, count)) + return -EFAULT; + + buf[sizeof(buf) - 1] = '\0'; + + p = strchr(buf, '='); + if (p) { + *p = 0; + p++; + + if (kstrtoul(buf, 0, &idx) || idx > 31) + return -EINVAL; + } else { + idx = 0; + p = buf; + } + + if (!mac_pton(p, addr)) + return -EINVAL; + + if (is_valid_ether_addr(addr)) { + dev->muar_mask |= BIT(idx); + } else { + memset(addr, 0, sizeof(addr)); + dev->muar_mask &= ~BIT(idx); + } + + mt76_rmw_field(dev, MT_WF_RMAC_MORE(0), MT_WF_RMAC_MORE_MUAR_MODE, 1); + mt76_wr(dev, MT_WF_RMAC_MAR0, get_unaligned_le32(addr)); + mt76_wr(dev, MT_WF_RMAC_MAR1, + get_unaligned_le16(addr + 4) | + FIELD_PREP(MT_WF_RMAC_MAR1_IDX, idx * 2) | + MT_WF_RMAC_MAR1_START | + MT_WF_RMAC_MAR1_WRITE); + + mt76_rmw_field(dev, MT_WF_RMAC_MORE(0), MT_WF_RMAC_MORE_MUAR_MODE, !!dev->muar_mask); + + return count; +} + +static const struct file_operations fops_ext_mac_addr = { + .open = simple_open, + .llseek = generic_file_llseek, + .read = mt7615_ext_mac_addr_read, + .write = mt7615_ext_mac_addr_write, + .owner = THIS_MODULE, +}; + +static int +mt7663s_sched_quota_read(struct seq_file *s, void *data) +{ + struct mt7615_dev *dev = dev_get_drvdata(s->private); + struct mt76_sdio *sdio = &dev->mt76.sdio; + + seq_printf(s, "pse_data_quota\t%d\n", sdio->sched.pse_data_quota); + seq_printf(s, "ple_data_quota\t%d\n", sdio->sched.ple_data_quota); + seq_printf(s, "pse_mcu_quota\t%d\n", sdio->sched.pse_mcu_quota); + seq_printf(s, "sched_deficit\t%d\n", sdio->sched.deficit); + + return 0; +} + +int mt7615_init_debugfs(struct mt7615_dev *dev) +{ + struct dentry *dir; + + dir = mt76_register_debugfs_fops(&dev->mphy, &fops_regval); + if (!dir) + return -ENOMEM; + + if (is_mt7615(&dev->mt76)) + debugfs_create_devm_seqfile(dev->mt76.dev, "xmit-queues", dir, + mt7615_queues_read); + else + debugfs_create_devm_seqfile(dev->mt76.dev, "xmit-queues", dir, + mt76_queues_read); + debugfs_create_devm_seqfile(dev->mt76.dev, "acq", dir, + mt7615_queues_acq); + debugfs_create_file("ampdu_stat", 0400, dir, dev, &mt7615_ampdu_stat_fops); + debugfs_create_file("scs", 0600, dir, dev, &fops_scs); + debugfs_create_file("dbdc", 0600, dir, dev, &fops_dbdc); + debugfs_create_file("fw_debug", 0600, dir, dev, &fops_fw_debug); + 
debugfs_create_file("runtime-pm", 0600, dir, dev, &fops_pm); + debugfs_create_file("idle-timeout", 0600, dir, dev, + &fops_pm_idle_timeout); + debugfs_create_devm_seqfile(dev->mt76.dev, "runtime_pm_stats", dir, + mt7615_pm_stats); + debugfs_create_devm_seqfile(dev->mt76.dev, "radio", dir, + mt7615_radio_read); + + if (is_mt7615(&dev->mt76)) { + debugfs_create_u32("dfs_hw_pattern", 0400, dir, + &dev->hw_pattern); + /* test pattern knobs */ + debugfs_create_u8("pattern_len", 0600, dir, + &dev->radar_pattern.n_pulses); + debugfs_create_u32("pulse_period", 0600, dir, + &dev->radar_pattern.period); + debugfs_create_u16("pulse_width", 0600, dir, + &dev->radar_pattern.width); + debugfs_create_u16("pulse_power", 0600, dir, + &dev->radar_pattern.power); + debugfs_create_file("radar_trigger", 0200, dir, dev, + &fops_radar_pattern); + } + + debugfs_create_file("reset_test", 0200, dir, dev, + &fops_reset_test); + debugfs_create_file("ext_mac_addr", 0600, dir, dev, &fops_ext_mac_addr); + + debugfs_create_u32("rf_wfidx", 0600, dir, &dev->debugfs_rf_wf); + debugfs_create_u32("rf_regidx", 0600, dir, &dev->debugfs_rf_reg); + debugfs_create_file_unsafe("rf_regval", 0600, dir, dev, + &fops_rf_reg); + if (is_mt7663(&dev->mt76)) + debugfs_create_file("chip_config", 0600, dir, dev, + &fops_config); + if (mt76_is_sdio(&dev->mt76)) + debugfs_create_devm_seqfile(dev->mt76.dev, "sched-quota", dir, + mt7663s_sched_quota_read); + + return 0; +} +EXPORT_SYMBOL_GPL(mt7615_init_debugfs); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c new file mode 100644 index 000000000..f1914431f --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c @@ -0,0 +1,315 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2019 MediaTek Inc. 
+ * + * Author: Ryder Lee + * Roy Luo + * Lorenzo Bianconi + * Felix Fietkau + */ + +#include "mt7615.h" +#include "../dma.h" +#include "mac.h" + +static int +mt7622_init_tx_queues_multi(struct mt7615_dev *dev) +{ + static const u8 wmm_queue_map[] = { + [IEEE80211_AC_BK] = MT7622_TXQ_AC0, + [IEEE80211_AC_BE] = MT7622_TXQ_AC1, + [IEEE80211_AC_VI] = MT7622_TXQ_AC2, + [IEEE80211_AC_VO] = MT7622_TXQ_AC3, + }; + int ret; + int i; + + for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) { + ret = mt76_init_tx_queue(&dev->mphy, i, wmm_queue_map[i], + MT7615_TX_RING_SIZE / 2, + MT_TX_RING_BASE, 0); + if (ret) + return ret; + } + + ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT7622_TXQ_MGMT, + MT7615_TX_MGMT_RING_SIZE, + MT_TX_RING_BASE, 0); + if (ret) + return ret; + + return mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT7622_TXQ_MCU, + MT7615_TX_MCU_RING_SIZE, MT_TX_RING_BASE); +} + +static int +mt7615_init_tx_queues(struct mt7615_dev *dev) +{ + int ret; + + ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL, MT7615_TXQ_FWDL, + MT7615_TX_FWDL_RING_SIZE, MT_TX_RING_BASE); + if (ret) + return ret; + + if (!is_mt7615(&dev->mt76)) + return mt7622_init_tx_queues_multi(dev); + + ret = mt76_connac_init_tx_queues(&dev->mphy, 0, MT7615_TX_RING_SIZE, + MT_TX_RING_BASE, 0); + if (ret) + return ret; + + return mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT7615_TXQ_MCU, + MT7615_TX_MCU_RING_SIZE, MT_TX_RING_BASE); +} + +static int mt7615_poll_tx(struct napi_struct *napi, int budget) +{ + struct mt7615_dev *dev; + + dev = container_of(napi, struct mt7615_dev, mt76.tx_napi); + if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) { + napi_complete(napi); + queue_work(dev->mt76.wq, &dev->pm.wake_work); + return 0; + } + + mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false); + if (napi_complete(napi)) + mt7615_irq_enable(dev, mt7615_tx_mcu_int_mask(dev)); + + mt76_connac_pm_unref(&dev->mphy, &dev->pm); + + return 0; +} + +static int mt7615_poll_rx(struct napi_struct *napi, int budget) +{ + struct mt7615_dev *dev; + int done; + + dev = container_of(napi->dev, struct mt7615_dev, mt76.napi_dev); + + if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) { + napi_complete(napi); + queue_work(dev->mt76.wq, &dev->pm.wake_work); + return 0; + } + done = mt76_dma_rx_poll(napi, budget); + mt76_connac_pm_unref(&dev->mphy, &dev->pm); + + return done; +} + +int mt7615_wait_pdma_busy(struct mt7615_dev *dev) +{ + struct mt76_dev *mdev = &dev->mt76; + + if (!is_mt7663(mdev)) { + u32 mask = MT_PDMA_TX_BUSY | MT_PDMA_RX_BUSY; + u32 reg = mt7615_reg_map(dev, MT_PDMA_BUSY); + + if (!mt76_poll_msec(dev, reg, mask, 0, 1000)) { + dev_err(mdev->dev, "PDMA engine busy\n"); + return -EIO; + } + + return 0; + } + + if (!mt76_poll_msec(dev, MT_PDMA_BUSY_STATUS, + MT_PDMA_TX_IDX_BUSY, 0, 1000)) { + dev_err(mdev->dev, "PDMA engine tx busy\n"); + return -EIO; + } + + if (!mt76_poll_msec(dev, MT_PSE_PG_INFO, + MT_PSE_SRC_CNT, 0, 1000)) { + dev_err(mdev->dev, "PSE engine busy\n"); + return -EIO; + } + + if (!mt76_poll_msec(dev, MT_PDMA_BUSY_STATUS, + MT_PDMA_BUSY_IDX, 0, 1000)) { + dev_err(mdev->dev, "PDMA engine busy\n"); + return -EIO; + } + + return 0; +} + +static void mt7622_dma_sched_init(struct mt7615_dev *dev) +{ + u32 reg = mt7615_reg_map(dev, MT_DMASHDL_BASE); + int i; + + mt76_rmw(dev, reg + MT_DMASHDL_PKT_MAX_SIZE, + MT_DMASHDL_PKT_MAX_SIZE_PLE | MT_DMASHDL_PKT_MAX_SIZE_PSE, + FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PLE, 1) | + FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PSE, 8)); + + for (i = 0; i <= 5; i++) + mt76_wr(dev, reg + 
MT_DMASHDL_GROUP_QUOTA(i),
+			FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x10) |
+			FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0x800));
+
+	mt76_wr(dev, reg + MT_DMASHDL_Q_MAP(0), 0x42104210);
+	mt76_wr(dev, reg + MT_DMASHDL_Q_MAP(1), 0x42104210);
+	mt76_wr(dev, reg + MT_DMASHDL_Q_MAP(2), 0x5);
+	mt76_wr(dev, reg + MT_DMASHDL_Q_MAP(3), 0);
+
+	mt76_wr(dev, reg + MT_DMASHDL_SCHED_SET0, 0x6012345f);
+	mt76_wr(dev, reg + MT_DMASHDL_SCHED_SET1, 0xedcba987);
+}
+
+static void mt7663_dma_sched_init(struct mt7615_dev *dev)
+{
+	int i;
+
+	mt76_rmw(dev, MT_DMA_SHDL(MT_DMASHDL_PKT_MAX_SIZE),
+		 MT_DMASHDL_PKT_MAX_SIZE_PLE | MT_DMASHDL_PKT_MAX_SIZE_PSE,
+		 FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PLE, 1) |
+		 FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PSE, 8));
+
+	/* enable refill control group 0, 1, 2, 4, 5 */
+	mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_REFILL), 0xffc80000);
+	/* enable group 0, 1, 2, 4, 5, 15 */
+	mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_OPTIONAL), 0x70068037);
+
+	/* each group's min quota must be larger than PLE_PKT_MAX_SIZE_NUM */
+	for (i = 0; i < 5; i++)
+		mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_GROUP_QUOTA(i)),
+			FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x40) |
+			FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0x800));
+	mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_GROUP_QUOTA(5)),
+		FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x40) |
+		FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0x40));
+	mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_GROUP_QUOTA(15)),
+		FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x20) |
+		FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0x20));
+
+	mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(0)), 0x42104210);
+	mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(1)), 0x42104210);
+	mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(2)), 0x00050005);
+	mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(3)), 0);
+	/* ALTX0 and ALTX1 QID mapping to group 5 */
+	mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_SCHED_SET0), 0x6012345f);
+	mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_SCHED_SET1), 0xedcba987);
+}
+
+void mt7615_dma_start(struct mt7615_dev *dev)
+{
+	/* start dma engine */
+	mt76_set(dev, MT_WPDMA_GLO_CFG,
+		 MT_WPDMA_GLO_CFG_TX_DMA_EN |
+		 MT_WPDMA_GLO_CFG_RX_DMA_EN |
+		 MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
+
+	if (is_mt7622(&dev->mt76))
+		mt7622_dma_sched_init(dev);
+
+	if (is_mt7663(&dev->mt76)) {
+		mt7663_dma_sched_init(dev);
+
+		mt76_wr(dev, MT_MCU2HOST_INT_ENABLE, MT7663_MCU_CMD_ERROR_MASK);
+	}
+
+}
+
+int mt7615_dma_init(struct mt7615_dev *dev)
+{
+	int rx_ring_size = MT7615_RX_RING_SIZE;
+	u32 mask;
+	int ret;
+
+	mt76_dma_attach(&dev->mt76);
+
+	mt76_wr(dev, MT_WPDMA_GLO_CFG,
+		MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE |
+		MT_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN |
+		MT_WPDMA_GLO_CFG_OMIT_TX_INFO);
+
+	mt76_rmw_field(dev, MT_WPDMA_GLO_CFG,
+		       MT_WPDMA_GLO_CFG_TX_BT_SIZE_BIT0, 0x1);
+
+	mt76_rmw_field(dev, MT_WPDMA_GLO_CFG,
+		       MT_WPDMA_GLO_CFG_TX_BT_SIZE_BIT21, 0x1);
+
+	mt76_rmw_field(dev, MT_WPDMA_GLO_CFG,
+		       MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 0x3);
+
+	mt76_rmw_field(dev, MT_WPDMA_GLO_CFG,
+		       MT_WPDMA_GLO_CFG_MULTI_DMA_EN, 0x3);
+
+	if (is_mt7615(&dev->mt76)) {
+		mt76_set(dev, MT_WPDMA_GLO_CFG,
+			 MT_WPDMA_GLO_CFG_FIRST_TOKEN_ONLY);
+
+		mt76_wr(dev, MT_WPDMA_GLO_CFG1, 0x1);
+		mt76_wr(dev, MT_WPDMA_TX_PRE_CFG, 0xf0000);
+		mt76_wr(dev, MT_WPDMA_RX_PRE_CFG, 0xf7f0000);
+		mt76_wr(dev, MT_WPDMA_ABT_CFG, 0x4000026);
+		mt76_wr(dev, MT_WPDMA_ABT_CFG1, 0x18811881);
+		mt76_set(dev, 0x7158, BIT(16));
+		mt76_clear(dev, 0x7000, BIT(23));
+	}
+
+	mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
+
+	ret = mt7615_init_tx_queues(dev);
+	if (ret)
+		return ret;
+
+	/* init rx queues */
+	ret = mt76_queue_alloc(dev,
&dev->mt76.q_rx[MT_RXQ_MCU], 1, + MT7615_RX_MCU_RING_SIZE, MT_RX_BUF_SIZE, + MT_RX_RING_BASE); + if (ret) + return ret; + + if (!is_mt7615(&dev->mt76)) + rx_ring_size /= 2; + + ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], 0, + rx_ring_size, MT_RX_BUF_SIZE, MT_RX_RING_BASE); + if (ret) + return ret; + + mt76_wr(dev, MT_DELAY_INT_CFG, 0); + + ret = mt76_init_queues(dev, mt7615_poll_rx); + if (ret < 0) + return ret; + + netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, + mt7615_poll_tx); + napi_enable(&dev->mt76.tx_napi); + + mt76_poll(dev, MT_WPDMA_GLO_CFG, + MT_WPDMA_GLO_CFG_TX_DMA_BUSY | + MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 1000); + + /* enable interrupts for TX/RX rings */ + + mask = MT_INT_RX_DONE_ALL | mt7615_tx_mcu_int_mask(dev); + if (is_mt7663(&dev->mt76)) + mask |= MT7663_INT_MCU_CMD; + else + mask |= MT_INT_MCU_CMD; + + mt7615_irq_enable(dev, mask); + + mt7615_dma_start(dev); + + return 0; +} + +void mt7615_dma_cleanup(struct mt7615_dev *dev) +{ + mt76_clear(dev, MT_WPDMA_GLO_CFG, + MT_WPDMA_GLO_CFG_TX_DMA_EN | + MT_WPDMA_GLO_CFG_RX_DMA_EN); + mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_SW_RESET); + + mt76_dma_cleanup(&dev->mt76); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c new file mode 100644 index 000000000..2092aa373 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c @@ -0,0 +1,353 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2019 MediaTek Inc. + * + * Author: Ryder Lee + * Felix Fietkau + */ + +#include +#include "mt7615.h" +#include "eeprom.h" + +static int mt7615_efuse_read(struct mt7615_dev *dev, u32 base, + u16 addr, u8 *data) +{ + u32 val; + int i; + + val = mt76_rr(dev, base + MT_EFUSE_CTRL); + val &= ~(MT_EFUSE_CTRL_AIN | MT_EFUSE_CTRL_MODE); + val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf); + val |= MT_EFUSE_CTRL_KICK; + mt76_wr(dev, base + MT_EFUSE_CTRL, val); + + if (!mt76_poll(dev, base + MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000)) + return -ETIMEDOUT; + + udelay(2); + + val = mt76_rr(dev, base + MT_EFUSE_CTRL); + if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT || + WARN_ON_ONCE(!(val & MT_EFUSE_CTRL_VALID))) { + memset(data, 0x0, 16); + return 0; + } + + for (i = 0; i < 4; i++) { + val = mt76_rr(dev, base + MT_EFUSE_RDATA(i)); + put_unaligned_le32(val, data + 4 * i); + } + + return 0; +} + +static int mt7615_efuse_init(struct mt7615_dev *dev, u32 base) +{ + int i, len = MT7615_EEPROM_SIZE; + void *buf; + u32 val; + + val = mt76_rr(dev, base + MT_EFUSE_BASE_CTRL); + if (val & MT_EFUSE_BASE_CTRL_EMPTY) + return 0; + + dev->mt76.otp.data = devm_kzalloc(dev->mt76.dev, len, GFP_KERNEL); + dev->mt76.otp.size = len; + if (!dev->mt76.otp.data) + return -ENOMEM; + + buf = dev->mt76.otp.data; + for (i = 0; i + 16 <= len; i += 16) { + int ret; + + ret = mt7615_efuse_read(dev, base, i, buf + i); + if (ret) + return ret; + } + + return 0; +} + +static int mt7615_eeprom_load(struct mt7615_dev *dev, u32 addr) +{ + int ret; + + ret = mt76_eeprom_init(&dev->mt76, MT7615_EEPROM_FULL_SIZE); + if (ret < 0) + return ret; + + return mt7615_efuse_init(dev, addr); +} + +static int mt7615_check_eeprom(struct mt76_dev *dev) +{ + u16 val = get_unaligned_le16(dev->eeprom.data); + + switch (val) { + case 0x7615: + case 0x7622: + case 0x7663: + return 0; + default: + return -EINVAL; + } +} + +static void +mt7615_eeprom_parse_hw_band_cap(struct mt7615_dev *dev) +{ + u8 val, *eeprom = dev->mt76.eeprom.data; + + if (is_mt7663(&dev->mt76)) { + /* dual band */ 
+ dev->mphy.cap.has_2ghz = true; + dev->mphy.cap.has_5ghz = true; + return; + } + + if (is_mt7622(&dev->mt76)) { + /* 2GHz only */ + dev->mphy.cap.has_2ghz = true; + return; + } + + if (is_mt7611(&dev->mt76)) { + /* 5GHz only */ + dev->mphy.cap.has_5ghz = true; + return; + } + + val = FIELD_GET(MT_EE_NIC_WIFI_CONF_BAND_SEL, + eeprom[MT_EE_WIFI_CONF]); + switch (val) { + case MT_EE_5GHZ: + dev->mphy.cap.has_5ghz = true; + break; + case MT_EE_DBDC: + dev->dbdc_support = true; + fallthrough; + case MT_EE_2GHZ: + dev->mphy.cap.has_2ghz = true; + break; + default: + dev->mphy.cap.has_2ghz = true; + dev->mphy.cap.has_5ghz = true; + break; + } +} + +static void mt7615_eeprom_parse_hw_cap(struct mt7615_dev *dev) +{ + u8 *eeprom = dev->mt76.eeprom.data; + u8 tx_mask, max_nss; + + mt7615_eeprom_parse_hw_band_cap(dev); + + if (is_mt7663(&dev->mt76)) { + max_nss = 2; + tx_mask = FIELD_GET(MT_EE_HW_CONF1_TX_MASK, + eeprom[MT7663_EE_HW_CONF1]); + } else { + u32 val; + + /* read tx-rx mask from eeprom */ + val = mt76_rr(dev, MT_TOP_STRAP_STA); + max_nss = val & MT_TOP_3NSS ? 3 : 4; + + tx_mask = FIELD_GET(MT_EE_NIC_CONF_TX_MASK, + eeprom[MT_EE_NIC_CONF_0]); + } + if (!tx_mask || tx_mask > max_nss) + tx_mask = max_nss; + + dev->chainmask = BIT(tx_mask) - 1; + dev->mphy.antenna_mask = dev->chainmask; + dev->mphy.chainmask = dev->chainmask; +} + +static int mt7663_eeprom_get_target_power_index(struct mt7615_dev *dev, + struct ieee80211_channel *chan, + u8 chain_idx) +{ + int index, group; + + if (chain_idx > 1) + return -EINVAL; + + if (chan->band == NL80211_BAND_2GHZ) + return MT7663_EE_TX0_2G_TARGET_POWER + (chain_idx << 4); + + group = mt7615_get_channel_group(chan->hw_value); + if (chain_idx == 1) + index = MT7663_EE_TX1_5G_G0_TARGET_POWER; + else + index = MT7663_EE_TX0_5G_G0_TARGET_POWER; + + return index + group * 3; +} + +int mt7615_eeprom_get_target_power_index(struct mt7615_dev *dev, + struct ieee80211_channel *chan, + u8 chain_idx) +{ + int index; + + if (is_mt7663(&dev->mt76)) + return mt7663_eeprom_get_target_power_index(dev, chan, + chain_idx); + + if (chain_idx > 3) + return -EINVAL; + + /* TSSI disabled */ + if (mt7615_ext_pa_enabled(dev, chan->band)) { + if (chan->band == NL80211_BAND_2GHZ) + return MT_EE_EXT_PA_2G_TARGET_POWER; + else + return MT_EE_EXT_PA_5G_TARGET_POWER; + } + + /* TSSI enabled */ + if (chan->band == NL80211_BAND_2GHZ) { + index = MT_EE_TX0_2G_TARGET_POWER + chain_idx * 6; + } else { + int group = mt7615_get_channel_group(chan->hw_value); + + switch (chain_idx) { + case 1: + index = MT_EE_TX1_5G_G0_TARGET_POWER; + break; + case 2: + index = MT_EE_TX2_5G_G0_TARGET_POWER; + break; + case 3: + index = MT_EE_TX3_5G_G0_TARGET_POWER; + break; + case 0: + default: + index = MT_EE_TX0_5G_G0_TARGET_POWER; + break; + } + index += 5 * group; + } + + return index; +} + +int mt7615_eeprom_get_power_delta_index(struct mt7615_dev *dev, + enum nl80211_band band) +{ + /* assume the first rate has the highest power offset */ + if (is_mt7663(&dev->mt76)) { + if (band == NL80211_BAND_2GHZ) + return MT_EE_TX0_5G_G0_TARGET_POWER; + else + return MT7663_EE_5G_RATE_POWER; + } + + if (band == NL80211_BAND_2GHZ) + return MT_EE_2G_RATE_POWER; + else + return MT_EE_5G_RATE_POWER; +} + +static void mt7615_apply_cal_free_data(struct mt7615_dev *dev) +{ + static const u16 ical[] = { + 0x53, 0x54, 0x55, 0x56, 0x57, 0x5c, 0x5d, 0x62, 0x63, 0x68, + 0x69, 0x6e, 0x6f, 0x73, 0x74, 0x78, 0x79, 0x82, 0x83, 0x87, + 0x88, 0x8c, 0x8d, 0x91, 0x92, 0x96, 0x97, 0x9b, 0x9c, 0xa0, + 0xa1, 0xaa, 0xab, 0xaf, 0xb0, 
0xb4, 0xb5, 0xb9, 0xba, 0xf4, + 0xf7, 0xff, + 0x140, 0x141, 0x145, 0x146, 0x14a, 0x14b, 0x154, 0x155, 0x159, + 0x15a, 0x15e, 0x15f, 0x163, 0x164, 0x168, 0x169, 0x16d, 0x16e, + 0x172, 0x173, 0x17c, 0x17d, 0x181, 0x182, 0x186, 0x187, 0x18b, + 0x18c + }; + static const u16 ical_nocheck[] = { + 0x110, 0x111, 0x112, 0x113, 0x114, 0x115, 0x116, 0x117, 0x118, + 0x1b5, 0x1b6, 0x1b7, 0x3ac, 0x3ad, 0x3ae, 0x3af, 0x3b0, 0x3b1, + 0x3b2 + }; + u8 *eeprom = dev->mt76.eeprom.data; + u8 *otp = dev->mt76.otp.data; + int i; + + if (!otp) + return; + + for (i = 0; i < ARRAY_SIZE(ical); i++) + if (!otp[ical[i]]) + return; + + for (i = 0; i < ARRAY_SIZE(ical); i++) + eeprom[ical[i]] = otp[ical[i]]; + + for (i = 0; i < ARRAY_SIZE(ical_nocheck); i++) + eeprom[ical_nocheck[i]] = otp[ical_nocheck[i]]; +} + +static void mt7622_apply_cal_free_data(struct mt7615_dev *dev) +{ + static const u16 ical[] = { + 0x53, 0x54, 0x55, 0x56, 0xf4, 0xf7, 0x144, 0x156, 0x15b + }; + u8 *eeprom = dev->mt76.eeprom.data; + u8 *otp = dev->mt76.otp.data; + int i; + + if (!otp) + return; + + for (i = 0; i < ARRAY_SIZE(ical); i++) { + if (!otp[ical[i]]) + continue; + + eeprom[ical[i]] = otp[ical[i]]; + } +} + +static void mt7615_cal_free_data(struct mt7615_dev *dev) +{ + struct device_node *np = dev->mt76.dev->of_node; + + if (!np || !of_property_read_bool(np, "mediatek,eeprom-merge-otp")) + return; + + switch (mt76_chip(&dev->mt76)) { + case 0x7622: + mt7622_apply_cal_free_data(dev); + break; + case 0x7615: + case 0x7611: + mt7615_apply_cal_free_data(dev); + break; + } +} + +int mt7615_eeprom_init(struct mt7615_dev *dev, u32 addr) +{ + int ret; + + ret = mt7615_eeprom_load(dev, addr); + if (ret < 0) + return ret; + + ret = mt7615_check_eeprom(&dev->mt76); + if (ret && dev->mt76.otp.data) { + memcpy(dev->mt76.eeprom.data, dev->mt76.otp.data, + MT7615_EEPROM_SIZE); + } else { + dev->flash_eeprom = true; + mt7615_cal_free_data(dev); + } + + mt7615_eeprom_parse_hw_cap(dev); + memcpy(dev->mphy.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR, + ETH_ALEN); + + mt76_eeprom_override(&dev->mphy); + + return 0; +} +EXPORT_SYMBOL_GPL(mt7615_eeprom_init); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h new file mode 100644 index 000000000..a024dee10 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: ISC */ +/* Copyright (C) 2019 MediaTek Inc. 
*/ + +#ifndef __MT7615_EEPROM_H +#define __MT7615_EEPROM_H + +#include "mt7615.h" + + +#define MT7615_EEPROM_DCOC_OFFSET MT7615_EEPROM_SIZE +#define MT7615_EEPROM_DCOC_SIZE 256 +#define MT7615_EEPROM_DCOC_COUNT 34 + +#define MT7615_EEPROM_TXDPD_OFFSET (MT7615_EEPROM_SIZE + \ + MT7615_EEPROM_DCOC_COUNT * \ + MT7615_EEPROM_DCOC_SIZE) +#define MT7615_EEPROM_TXDPD_SIZE 216 +#define MT7615_EEPROM_TXDPD_COUNT (44 + 3) + +#define MT7615_EEPROM_FULL_SIZE (MT7615_EEPROM_TXDPD_OFFSET + \ + MT7615_EEPROM_TXDPD_COUNT * \ + MT7615_EEPROM_TXDPD_SIZE) + +enum mt7615_eeprom_field { + MT_EE_CHIP_ID = 0x000, + MT_EE_VERSION = 0x002, + MT_EE_MAC_ADDR = 0x004, + MT_EE_NIC_CONF_0 = 0x034, + MT_EE_NIC_CONF_1 = 0x036, + MT_EE_WIFI_CONF = 0x03e, + MT_EE_CALDATA_FLASH = 0x052, + MT_EE_TX0_2G_TARGET_POWER = 0x058, + MT_EE_TX0_5G_G0_TARGET_POWER = 0x070, + MT7663_EE_5G_RATE_POWER = 0x089, + MT_EE_TX1_5G_G0_TARGET_POWER = 0x098, + MT_EE_2G_RATE_POWER = 0x0be, + MT_EE_5G_RATE_POWER = 0x0d5, + MT7663_EE_TX0_2G_TARGET_POWER = 0x0e3, + MT_EE_EXT_PA_2G_TARGET_POWER = 0x0f2, + MT_EE_EXT_PA_5G_TARGET_POWER = 0x0f3, + MT_EE_TX2_5G_G0_TARGET_POWER = 0x142, + MT_EE_TX3_5G_G0_TARGET_POWER = 0x16a, + MT7663_EE_HW_CONF1 = 0x1b0, + MT7663_EE_TX0_5G_G0_TARGET_POWER = 0x245, + MT7663_EE_TX1_5G_G0_TARGET_POWER = 0x2b5, + + MT7615_EE_MAX = 0x3bf, + MT7622_EE_MAX = 0x3db, + MT7663_EE_MAX = 0x400, +}; + +#define MT_EE_RATE_POWER_MASK GENMASK(5, 0) +#define MT_EE_RATE_POWER_SIGN BIT(6) +#define MT_EE_RATE_POWER_EN BIT(7) + +#define MT_EE_CALDATA_FLASH_TX_DPD BIT(0) +#define MT_EE_CALDATA_FLASH_RX_CAL BIT(1) + +#define MT_EE_NIC_CONF_TX_MASK GENMASK(7, 4) +#define MT_EE_NIC_CONF_RX_MASK GENMASK(3, 0) + +#define MT_EE_HW_CONF1_TX_MASK GENMASK(2, 0) + +#define MT_EE_NIC_CONF_TSSI_2G BIT(5) +#define MT_EE_NIC_CONF_TSSI_5G BIT(6) + +#define MT_EE_NIC_WIFI_CONF_BAND_SEL GENMASK(5, 4) +enum mt7615_eeprom_band { + MT_EE_DUAL_BAND, + MT_EE_5GHZ, + MT_EE_2GHZ, + MT_EE_DBDC, +}; + +enum mt7615_channel_group { + MT_CH_5G_JAPAN, + MT_CH_5G_UNII_1, + MT_CH_5G_UNII_2A, + MT_CH_5G_UNII_2B, + MT_CH_5G_UNII_2E_1, + MT_CH_5G_UNII_2E_2, + MT_CH_5G_UNII_2E_3, + MT_CH_5G_UNII_3, + __MT_CH_MAX +}; + +static inline enum mt7615_channel_group +mt7615_get_channel_group(int channel) +{ + if (channel >= 184 && channel <= 196) + return MT_CH_5G_JAPAN; + if (channel <= 48) + return MT_CH_5G_UNII_1; + if (channel <= 64) + return MT_CH_5G_UNII_2A; + if (channel <= 114) + return MT_CH_5G_UNII_2E_1; + if (channel <= 144) + return MT_CH_5G_UNII_2E_2; + if (channel <= 161) + return MT_CH_5G_UNII_2E_3; + return MT_CH_5G_UNII_3; +} + +static inline bool +mt7615_ext_pa_enabled(struct mt7615_dev *dev, enum nl80211_band band) +{ + u8 *eep = dev->mt76.eeprom.data; + + if (band == NL80211_BAND_5GHZ) + return !(eep[MT_EE_NIC_CONF_1 + 1] & MT_EE_NIC_CONF_TSSI_5G); + else + return !(eep[MT_EE_NIC_CONF_1 + 1] & MT_EE_NIC_CONF_TSSI_2G); +} + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c new file mode 100644 index 000000000..07a1fea94 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c @@ -0,0 +1,561 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2019 MediaTek Inc. 
+ *
+ * Author: Roy Luo
+ *	   Ryder Lee
+ *	   Felix Fietkau
+ *	   Lorenzo Bianconi
+ */
+
+#include
+#include
+#include
+#include "mt7615.h"
+#include "mac.h"
+#include "mcu.h"
+#include "eeprom.h"
+
+static ssize_t mt7615_thermal_show_temp(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct mt7615_dev *mdev = dev_get_drvdata(dev);
+	int temperature;
+
+	if (!mt7615_wait_for_mcu_init(mdev))
+		return 0;
+
+	mt7615_mutex_acquire(mdev);
+	temperature = mt7615_mcu_get_temperature(mdev);
+	mt7615_mutex_release(mdev);
+
+	if (temperature < 0)
+		return temperature;
+
+	/* display in millidegree Celsius */
+	return sprintf(buf, "%u\n", temperature * 1000);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, 0444, mt7615_thermal_show_temp,
+			  NULL, 0);
+
+static struct attribute *mt7615_hwmon_attrs[] = {
+	&sensor_dev_attr_temp1_input.dev_attr.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(mt7615_hwmon);
+
+int mt7615_thermal_init(struct mt7615_dev *dev)
+{
+	struct wiphy *wiphy = mt76_hw(dev)->wiphy;
+	struct device *hwmon;
+	const char *name;
+
+	if (!IS_REACHABLE(CONFIG_HWMON))
+		return 0;
+
+	name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7615_%s",
+			      wiphy_name(wiphy));
+	hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev, name, dev,
+						       mt7615_hwmon_groups);
+	if (IS_ERR(hwmon))
+		return PTR_ERR(hwmon);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mt7615_thermal_init);
+
+static void
+mt7615_phy_init(struct mt7615_dev *dev)
+{
+	/* disable rf low power beacon mode */
+	mt76_set(dev, MT_WF_PHY_WF2_RFCTRL0(0), MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN);
+	mt76_set(dev, MT_WF_PHY_WF2_RFCTRL0(1), MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN);
+}
+
+static void
+mt7615_init_mac_chain(struct mt7615_dev *dev, int chain)
+{
+	u32 val;
+
+	if (!chain)
+		val = MT_CFG_CCR_MAC_D0_1X_GC_EN | MT_CFG_CCR_MAC_D0_2X_GC_EN;
+	else
+		val = MT_CFG_CCR_MAC_D1_1X_GC_EN | MT_CFG_CCR_MAC_D1_2X_GC_EN;
+
+	/* enable band 0/1 clk */
+	mt76_set(dev, MT_CFG_CCR, val);
+
+	mt76_rmw(dev, MT_TMAC_TRCR(chain),
+		 MT_TMAC_TRCR_CCA_SEL | MT_TMAC_TRCR_SEC_CCA_SEL,
+		 FIELD_PREP(MT_TMAC_TRCR_CCA_SEL, 2) |
+		 FIELD_PREP(MT_TMAC_TRCR_SEC_CCA_SEL, 0));
+
+	mt76_wr(dev, MT_AGG_ACR(chain),
+		MT_AGG_ACR_PKT_TIME_EN | MT_AGG_ACR_NO_BA_AR_RULE |
+		FIELD_PREP(MT_AGG_ACR_CFEND_RATE, MT7615_CFEND_RATE_DEFAULT) |
+		FIELD_PREP(MT_AGG_ACR_BAR_RATE, MT7615_BAR_RATE_DEFAULT));
+
+	mt76_wr(dev, MT_AGG_ARUCR(chain),
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), 7) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(1), 2) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(2), 2) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(3), 2) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(4), 1) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(5), 1) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(6), 1) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(7), 1));
+
+	mt76_wr(dev, MT_AGG_ARDCR(chain),
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), MT7615_RATE_RETRY - 1) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(1), MT7615_RATE_RETRY - 1) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(2), MT7615_RATE_RETRY - 1) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(3), MT7615_RATE_RETRY - 1) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(4), MT7615_RATE_RETRY - 1) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(5), MT7615_RATE_RETRY - 1) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(6), MT7615_RATE_RETRY - 1) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(7), MT7615_RATE_RETRY - 1));
+
+	mt76_clear(dev, MT_DMA_RCFR0(chain), MT_DMA_RCFR0_MCU_RX_TDLS);
+	if (!mt7615_firmware_offload(dev)) {
+		u32 mask, set;
+
+		mask = MT_DMA_RCFR0_MCU_RX_MGMT |
+		       MT_DMA_RCFR0_MCU_RX_CTL_NON_BAR |
+		       MT_DMA_RCFR0_MCU_RX_CTL_BAR |
+		       MT_DMA_RCFR0_MCU_RX_BYPASS |
+		       MT_DMA_RCFR0_RX_DROPPED_UCAST |
MT_DMA_RCFR0_RX_DROPPED_MCAST; + set = FIELD_PREP(MT_DMA_RCFR0_RX_DROPPED_UCAST, 2) | + FIELD_PREP(MT_DMA_RCFR0_RX_DROPPED_MCAST, 2); + mt76_rmw(dev, MT_DMA_RCFR0(chain), mask, set); + } +} + +static void +mt7615_mac_init(struct mt7615_dev *dev) +{ + int i; + + mt7615_init_mac_chain(dev, 0); + + mt76_rmw_field(dev, MT_TMAC_CTCR0, + MT_TMAC_CTCR0_INS_DDLMT_REFTIME, 0x3f); + mt76_rmw_field(dev, MT_TMAC_CTCR0, + MT_TMAC_CTCR0_INS_DDLMT_DENSITY, 0x3); + mt76_rmw(dev, MT_TMAC_CTCR0, + MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN | + MT_TMAC_CTCR0_INS_DDLMT_EN, + MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN | + MT_TMAC_CTCR0_INS_DDLMT_EN); + + mt76_connac_mcu_set_rts_thresh(&dev->mt76, 0x92b, 0); + mt7615_mac_set_scs(&dev->phy, true); + + mt76_rmw(dev, MT_AGG_SCR, MT_AGG_SCR_NLNAV_MID_PTEC_DIS, + MT_AGG_SCR_NLNAV_MID_PTEC_DIS); + + mt76_wr(dev, MT_AGG_ARCR, + FIELD_PREP(MT_AGG_ARCR_RTS_RATE_THR, 2) | + MT_AGG_ARCR_RATE_DOWN_RATIO_EN | + FIELD_PREP(MT_AGG_ARCR_RATE_DOWN_RATIO, 1) | + FIELD_PREP(MT_AGG_ARCR_RATE_UP_EXTRA_TH, 4)); + + for (i = 0; i < MT7615_WTBL_SIZE; i++) + mt7615_mac_wtbl_update(dev, i, + MT_WTBL_UPDATE_ADM_COUNT_CLEAR); + + mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_EN); + mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0, MT_WF_RMAC_MIB_RXTIME_EN); + + mt76_wr(dev, MT_DMA_DCR0, + FIELD_PREP(MT_DMA_DCR0_MAX_RX_LEN, 3072) | + MT_DMA_DCR0_RX_VEC_DROP | MT_DMA_DCR0_DAMSDU_EN | + MT_DMA_DCR0_RX_HDR_TRANS_EN); + /* disable TDLS filtering */ + mt76_clear(dev, MT_WF_PFCR, MT_WF_PFCR_TDLS_EN); + mt76_set(dev, MT_WF_MIB_SCR0, MT_MIB_SCR0_AGG_CNT_RANGE_EN); + if (is_mt7663(&dev->mt76)) { + mt76_wr(dev, MT_WF_AGG(0x160), 0x5c341c02); + mt76_wr(dev, MT_WF_AGG(0x164), 0x70708040); + } else { + mt7615_init_mac_chain(dev, 1); + } + mt7615_mcu_set_rx_hdr_trans_blacklist(dev); +} + +static void +mt7615_check_offload_capability(struct mt7615_dev *dev) +{ + struct ieee80211_hw *hw = mt76_hw(dev); + struct wiphy *wiphy = hw->wiphy; + + if (mt7615_firmware_offload(dev)) { + ieee80211_hw_set(hw, SUPPORTS_PS); + ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS); + + wiphy->flags &= ~WIPHY_FLAG_4ADDR_STATION; + wiphy->max_remain_on_channel_duration = 5000; + wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR | + NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR | + WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | + NL80211_FEATURE_P2P_GO_CTWIN | + NL80211_FEATURE_P2P_GO_OPPPS; + } else { + dev->ops->hw_scan = NULL; + dev->ops->cancel_hw_scan = NULL; + dev->ops->sched_scan_start = NULL; + dev->ops->sched_scan_stop = NULL; + dev->ops->set_rekey_data = NULL; + dev->ops->remain_on_channel = NULL; + dev->ops->cancel_remain_on_channel = NULL; + + wiphy->max_sched_scan_plan_interval = 0; + wiphy->max_sched_scan_ie_len = 0; + wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; + wiphy->max_sched_scan_ssids = 0; + wiphy->max_match_sets = 0; + wiphy->max_sched_scan_reqs = 0; + } +} + +bool mt7615_wait_for_mcu_init(struct mt7615_dev *dev) +{ + flush_work(&dev->mcu_work); + + return test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state); +} +EXPORT_SYMBOL_GPL(mt7615_wait_for_mcu_init); + +static const struct ieee80211_iface_limit if_limits[] = { + { + .max = 1, + .types = BIT(NL80211_IFTYPE_ADHOC) + }, { + .max = MT7615_MAX_INTERFACES, + .types = BIT(NL80211_IFTYPE_AP) | +#ifdef CONFIG_MAC80211_MESH + BIT(NL80211_IFTYPE_MESH_POINT) | +#endif + BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_P2P_GO) | + BIT(NL80211_IFTYPE_STATION) + } +}; + +static const struct ieee80211_iface_combination if_comb_radar[] = { + { + .limits = if_limits, + .n_limits = 
ARRAY_SIZE(if_limits), + .max_interfaces = MT7615_MAX_INTERFACES, + .num_different_channels = 1, + .beacon_int_infra_match = true, + .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | + BIT(NL80211_CHAN_WIDTH_20) | + BIT(NL80211_CHAN_WIDTH_40) | + BIT(NL80211_CHAN_WIDTH_80) | + BIT(NL80211_CHAN_WIDTH_160) | + BIT(NL80211_CHAN_WIDTH_80P80), + } +}; + +static const struct ieee80211_iface_combination if_comb[] = { + { + .limits = if_limits, + .n_limits = ARRAY_SIZE(if_limits), + .max_interfaces = MT7615_MAX_INTERFACES, + .num_different_channels = 1, + .beacon_int_infra_match = true, + } +}; + +void mt7615_init_txpower(struct mt7615_dev *dev, + struct ieee80211_supported_band *sband) +{ + int i, n_chains = hweight8(dev->mphy.antenna_mask), target_chains; + int delta_idx, delta = mt76_tx_power_nss_delta(n_chains); + u8 *eep = (u8 *)dev->mt76.eeprom.data; + enum nl80211_band band = sband->band; + struct mt76_power_limits limits; + u8 rate_val; + + delta_idx = mt7615_eeprom_get_power_delta_index(dev, band); + rate_val = eep[delta_idx]; + if ((rate_val & ~MT_EE_RATE_POWER_MASK) == + (MT_EE_RATE_POWER_EN | MT_EE_RATE_POWER_SIGN)) + delta += rate_val & MT_EE_RATE_POWER_MASK; + + if (!is_mt7663(&dev->mt76) && mt7615_ext_pa_enabled(dev, band)) + target_chains = 1; + else + target_chains = n_chains; + + for (i = 0; i < sband->n_channels; i++) { + struct ieee80211_channel *chan = &sband->channels[i]; + u8 target_power = 0; + int j; + + for (j = 0; j < target_chains; j++) { + int index; + + index = mt7615_eeprom_get_target_power_index(dev, chan, j); + if (index < 0) + continue; + + target_power = max(target_power, eep[index]); + } + + target_power = mt76_get_rate_power_limits(&dev->mphy, chan, + &limits, + target_power); + target_power += delta; + target_power = DIV_ROUND_UP(target_power, 2); + chan->max_power = min_t(int, chan->max_reg_power, + target_power); + chan->orig_mpwr = target_power; + } +} +EXPORT_SYMBOL_GPL(mt7615_init_txpower); + +void mt7615_init_work(struct mt7615_dev *dev) +{ + mt7615_mcu_set_eeprom(dev); + mt7615_mac_init(dev); + mt7615_phy_init(dev); + mt7615_mcu_del_wtbl_all(dev); + mt7615_check_offload_capability(dev); +} +EXPORT_SYMBOL_GPL(mt7615_init_work); + +static void +mt7615_regd_notifier(struct wiphy *wiphy, + struct regulatory_request *request) +{ + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); + struct mt7615_dev *dev = mt7615_hw_dev(hw); + struct mt76_phy *mphy = hw->priv; + struct mt7615_phy *phy = mphy->priv; + struct cfg80211_chan_def *chandef = &mphy->chandef; + + memcpy(dev->mt76.alpha2, request->alpha2, sizeof(dev->mt76.alpha2)); + dev->mt76.region = request->dfs_region; + + mt7615_init_txpower(dev, &mphy->sband_2g.sband); + mt7615_init_txpower(dev, &mphy->sband_5g.sband); + + mt7615_mutex_acquire(dev); + + if (chandef->chan->flags & IEEE80211_CHAN_RADAR) + mt7615_dfs_init_radar_detector(phy); + + if (mt7615_firmware_offload(phy->dev)) { + mt76_connac_mcu_set_channel_domain(mphy); + mt76_connac_mcu_set_rate_txpower(mphy); + } + + mt7615_mutex_release(dev); +} + +static void +mt7615_init_wiphy(struct ieee80211_hw *hw) +{ + struct mt7615_phy *phy = mt7615_hw_phy(hw); + struct wiphy *wiphy = hw->wiphy; + + hw->queues = 4; + hw->max_rates = 3; + hw->max_report_rates = 7; + hw->max_rate_tries = 11; + hw->netdev_features = NETIF_F_RXCSUM; + + hw->radiotap_timestamp.units_pos = + IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US; + + phy->slottime = 9; + + hw->sta_data_size = sizeof(struct mt7615_sta); + hw->vif_data_size = sizeof(struct mt7615_vif); + + if 
(is_mt7663(&phy->dev->mt76)) { + wiphy->iface_combinations = if_comb; + wiphy->n_iface_combinations = ARRAY_SIZE(if_comb); + } else { + wiphy->iface_combinations = if_comb_radar; + wiphy->n_iface_combinations = ARRAY_SIZE(if_comb_radar); + } + wiphy->reg_notifier = mt7615_regd_notifier; + + wiphy->max_sched_scan_plan_interval = + MT76_CONNAC_MAX_TIME_SCHED_SCAN_INTERVAL; + wiphy->max_sched_scan_ie_len = IEEE80211_MAX_DATA_LEN; + wiphy->max_scan_ie_len = MT76_CONNAC_SCAN_IE_LEN; + wiphy->max_sched_scan_ssids = MT76_CONNAC_MAX_SCHED_SCAN_SSID; + wiphy->max_match_sets = MT76_CONNAC_MAX_SCAN_MATCH; + wiphy->max_sched_scan_reqs = 1; + wiphy->max_scan_ssids = 4; + + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SET_SCAN_DWELL); + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS); + + ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS); + ieee80211_hw_set(hw, TX_STATUS_NO_AMPDU_LEN); + ieee80211_hw_set(hw, WANT_MONITOR_VIF); + ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD); + ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW); + + if (is_mt7615(&phy->dev->mt76)) + hw->max_tx_fragments = MT_TXP_MAX_BUF_NUM; + else + hw->max_tx_fragments = MT_HW_TXP_MAX_BUF_NUM; + + phy->mt76->sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; + phy->mt76->sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; + phy->mt76->sband_5g.sband.vht_cap.cap |= + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; +} + +static void +mt7615_cap_dbdc_enable(struct mt7615_dev *dev) +{ + dev->mphy.sband_5g.sband.vht_cap.cap &= + ~(IEEE80211_VHT_CAP_SHORT_GI_160 | + IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ); + if (dev->chainmask == 0xf) + dev->mphy.antenna_mask = dev->chainmask >> 2; + else + dev->mphy.antenna_mask = dev->chainmask >> 1; + dev->mphy.chainmask = dev->mphy.antenna_mask; + dev->mphy.hw->wiphy->available_antennas_rx = dev->mphy.chainmask; + dev->mphy.hw->wiphy->available_antennas_tx = dev->mphy.chainmask; + mt76_set_stream_caps(&dev->mphy, true); +} + +static void +mt7615_cap_dbdc_disable(struct mt7615_dev *dev) +{ + dev->mphy.sband_5g.sband.vht_cap.cap |= + IEEE80211_VHT_CAP_SHORT_GI_160 | + IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ; + dev->mphy.antenna_mask = dev->chainmask; + dev->mphy.chainmask = dev->chainmask; + dev->mphy.hw->wiphy->available_antennas_rx = dev->chainmask; + dev->mphy.hw->wiphy->available_antennas_tx = dev->chainmask; + mt76_set_stream_caps(&dev->mphy, true); +} + +int mt7615_register_ext_phy(struct mt7615_dev *dev) +{ + struct mt7615_phy *phy = mt7615_ext_phy(dev); + struct mt76_phy *mphy; + int i, ret; + + if (!is_mt7615(&dev->mt76)) + return -EOPNOTSUPP; + + if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) + return -EINVAL; + + if (phy) + return 0; + + mt7615_cap_dbdc_enable(dev); + mphy = mt76_alloc_phy(&dev->mt76, sizeof(*phy), &mt7615_ops, MT_BAND1); + if (!mphy) + return -ENOMEM; + + phy = mphy->priv; + phy->dev = dev; + phy->mt76 = mphy; + mphy->chainmask = dev->chainmask & ~dev->mphy.chainmask; + mphy->antenna_mask = BIT(hweight8(mphy->chainmask)) - 1; + mt7615_init_wiphy(mphy->hw); + + INIT_DELAYED_WORK(&mphy->mac_work, mt7615_mac_work); + INIT_DELAYED_WORK(&phy->scan_work, mt7615_scan_work); + skb_queue_head_init(&phy->scan_event_list); + + INIT_WORK(&phy->roc_work, mt7615_roc_work); + timer_setup(&phy->roc_timer, mt7615_roc_timer, 0); + init_waitqueue_head(&phy->roc_wait); + + mt7615_mac_set_scs(phy, true); + + /* + * Make the secondary PHY MAC address local without overlapping with + * the usual MAC address allocation scheme on multiple virtual 
interfaces + */ + memcpy(mphy->macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR, + ETH_ALEN); + mphy->macaddr[0] |= 2; + mphy->macaddr[0] ^= BIT(7); + mt76_eeprom_override(mphy); + + /* second phy can only handle 5 GHz */ + mphy->cap.has_5ghz = true; + + /* mt7615 second phy shares the same hw queues with the primary one */ + for (i = 0; i <= MT_TXQ_PSD ; i++) + mphy->q_tx[i] = dev->mphy.q_tx[i]; + + ret = mt76_register_phy(mphy, true, mt76_rates, + ARRAY_SIZE(mt76_rates)); + if (ret) + ieee80211_free_hw(mphy->hw); + + return ret; +} +EXPORT_SYMBOL_GPL(mt7615_register_ext_phy); + +void mt7615_unregister_ext_phy(struct mt7615_dev *dev) +{ + struct mt7615_phy *phy = mt7615_ext_phy(dev); + struct mt76_phy *mphy = dev->mt76.phys[MT_BAND1]; + + if (!phy) + return; + + mt7615_cap_dbdc_disable(dev); + mt76_unregister_phy(mphy); + ieee80211_free_hw(mphy->hw); +} +EXPORT_SYMBOL_GPL(mt7615_unregister_ext_phy); + +void mt7615_init_device(struct mt7615_dev *dev) +{ + struct ieee80211_hw *hw = mt76_hw(dev); + + dev->phy.dev = dev; + dev->phy.mt76 = &dev->mt76.phy; + dev->mt76.phy.priv = &dev->phy; + dev->mt76.tx_worker.fn = mt7615_tx_worker; + + INIT_DELAYED_WORK(&dev->pm.ps_work, mt7615_pm_power_save_work); + INIT_WORK(&dev->pm.wake_work, mt7615_pm_wake_work); + spin_lock_init(&dev->pm.wake.lock); + mutex_init(&dev->pm.mutex); + init_waitqueue_head(&dev->pm.wait); + spin_lock_init(&dev->pm.txq_lock); + INIT_DELAYED_WORK(&dev->mphy.mac_work, mt7615_mac_work); + INIT_DELAYED_WORK(&dev->phy.scan_work, mt7615_scan_work); + INIT_DELAYED_WORK(&dev->coredump.work, mt7615_coredump_work); + skb_queue_head_init(&dev->phy.scan_event_list); + skb_queue_head_init(&dev->coredump.msg_list); + INIT_LIST_HEAD(&dev->sta_poll_list); + spin_lock_init(&dev->sta_poll_lock); + init_waitqueue_head(&dev->reset_wait); + init_waitqueue_head(&dev->phy.roc_wait); + + INIT_WORK(&dev->phy.roc_work, mt7615_roc_work); + timer_setup(&dev->phy.roc_timer, mt7615_roc_timer, 0); + + mt7615_init_wiphy(hw); + dev->pm.idle_timeout = MT7615_PM_TIMEOUT; + dev->pm.stats.last_wake_event = jiffies; + dev->pm.stats.last_doze_event = jiffies; + mt7615_cap_dbdc_disable(dev); + +#ifdef CONFIG_NL80211_TESTMODE + dev->mt76.test_ops = &mt7615_testmode_ops; +#endif +} +EXPORT_SYMBOL_GPL(mt7615_init_device); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c new file mode 100644 index 000000000..40c80d09d --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -0,0 +1,2363 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2019 MediaTek Inc. 
+ *
+ * Author: Ryder Lee <ryder.lee@mediatek.com>
+ * Roy Luo <royluo@google.com>
+ * Felix Fietkau <nbd@nbd.name>
+ * Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/devcoredump.h>
+#include <linux/etherdevice.h>
+#include <linux/timekeeping.h>
+#include "mt7615.h"
+#include "../trace.h"
+#include "../dma.h"
+#include "mt7615_trace.h"
+#include "mac.h"
+#include "mcu.h"
+
+#define to_rssi(field, rxv) ((FIELD_GET(field, rxv) - 220) / 2)
+
+static const struct mt7615_dfs_radar_spec etsi_radar_specs = {
+ .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
+ .radar_pattern = {
+ [5] = { 1, 0, 6, 32, 28, 0, 17, 990, 5010, 1, 1 },
+ [6] = { 1, 0, 9, 32, 28, 0, 27, 615, 5010, 1, 1 },
+ [7] = { 1, 0, 15, 32, 28, 0, 27, 240, 445, 1, 1 },
+ [8] = { 1, 0, 12, 32, 28, 0, 42, 240, 510, 1, 1 },
+ [9] = { 1, 1, 0, 0, 0, 0, 14, 2490, 3343, 0, 0, 12, 32, 28 },
+ [10] = { 1, 1, 0, 0, 0, 0, 14, 2490, 3343, 0, 0, 15, 32, 24 },
+ [11] = { 1, 1, 0, 0, 0, 0, 14, 823, 2510, 0, 0, 18, 32, 28 },
+ [12] = { 1, 1, 0, 0, 0, 0, 14, 823, 2510, 0, 0, 27, 32, 24 },
+ },
+};
+
+static const struct mt7615_dfs_radar_spec fcc_radar_specs = {
+ .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
+ .radar_pattern = {
+ [0] = { 1, 0, 9, 32, 28, 0, 13, 508, 3076, 1, 1 },
+ [1] = { 1, 0, 12, 32, 28, 0, 17, 140, 240, 1, 1 },
+ [2] = { 1, 0, 8, 32, 28, 0, 22, 190, 510, 1, 1 },
+ [3] = { 1, 0, 6, 32, 28, 0, 32, 190, 510, 1, 1 },
+ [4] = { 1, 0, 9, 255, 28, 0, 13, 323, 343, 1, 32 },
+ },
+};
+
+static const struct mt7615_dfs_radar_spec jp_radar_specs = {
+ .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
+ .radar_pattern = {
+ [0] = { 1, 0, 8, 32, 28, 0, 13, 508, 3076, 1, 1 },
+ [1] = { 1, 0, 12, 32, 28, 0, 17, 140, 240, 1, 1 },
+ [2] = { 1, 0, 8, 32, 28, 0, 22, 190, 510, 1, 1 },
+ [3] = { 1, 0, 6, 32, 28, 0, 32, 190, 510, 1, 1 },
+ [4] = { 1, 0, 9, 32, 28, 0, 13, 323, 343, 1, 32 },
+ [13] = { 1, 0, 8, 32, 28, 0, 14, 3836, 3856, 1, 1 },
+ [14] = { 1, 0, 8, 32, 28, 0, 14, 3990, 4010, 1, 1 },
+ },
+};
+
+static enum mt76_cipher_type
+mt7615_mac_get_cipher(int cipher)
+{
+ switch (cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return MT_CIPHER_WEP40;
+ case WLAN_CIPHER_SUITE_WEP104:
+ return MT_CIPHER_WEP104;
+ case WLAN_CIPHER_SUITE_TKIP:
+ return MT_CIPHER_TKIP;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ return MT_CIPHER_BIP_CMAC_128;
+ case WLAN_CIPHER_SUITE_CCMP:
+ return MT_CIPHER_AES_CCMP;
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ return MT_CIPHER_CCMP_256;
+ case WLAN_CIPHER_SUITE_GCMP:
+ return MT_CIPHER_GCMP;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ return MT_CIPHER_GCMP_256;
+ case WLAN_CIPHER_SUITE_SMS4:
+ return MT_CIPHER_WAPI;
+ default:
+ return MT_CIPHER_NONE;
+ }
+}
+
+static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev,
+ u8 idx, bool unicast)
+{
+ struct mt7615_sta *sta;
+ struct mt76_wcid *wcid;
+
+ if (idx >= MT7615_WTBL_SIZE)
+ return NULL;
+
+ wcid = rcu_dereference(dev->mt76.wcid[idx]);
+ if (unicast || !wcid)
+ return wcid;
+
+ if (!wcid->sta)
+ return NULL;
+
+ sta = container_of(wcid, struct mt7615_sta, wcid);
+ if (!sta->vif)
+ return NULL;
+
+ return &sta->vif->sta.wcid;
+}
+
+void mt7615_mac_reset_counters(struct mt7615_dev *dev)
+{
+ struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1];
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ mt76_rr(dev, MT_TX_AGG_CNT(0, i));
+ mt76_rr(dev, MT_TX_AGG_CNT(1, i));
+ }
+
+ memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats));
+ dev->mt76.phy.survey_time = ktime_get_boottime();
+ if (mphy_ext)
+ mphy_ext->survey_time = ktime_get_boottime();
+
+ /* reset airtime counters */
+ mt76_rr(dev, MT_MIB_SDR9(0));
+ mt76_rr(dev, MT_MIB_SDR9(1));
+
+ mt76_rr(dev, MT_MIB_SDR36(0));
+ mt76_rr(dev, 
MT_MIB_SDR36(1)); + + mt76_rr(dev, MT_MIB_SDR37(0)); + mt76_rr(dev, MT_MIB_SDR37(1)); + + mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR); + mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0, MT_WF_RMAC_MIB_RXTIME_CLR); +} + +void mt7615_mac_set_timing(struct mt7615_phy *phy) +{ + s16 coverage_class = phy->coverage_class; + struct mt7615_dev *dev = phy->dev; + bool ext_phy = phy != &dev->phy; + u32 val, reg_offset; + u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) | + FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48); + u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) | + FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28); + int sifs, offset; + bool is_5ghz = phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ; + + if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state)) + return; + + if (is_5ghz) + sifs = 16; + else + sifs = 10; + + if (ext_phy) { + coverage_class = max_t(s16, dev->phy.coverage_class, + coverage_class); + mt76_set(dev, MT_ARB_SCR, + MT_ARB_SCR_TX1_DISABLE | MT_ARB_SCR_RX1_DISABLE); + } else { + struct mt7615_phy *phy_ext = mt7615_ext_phy(dev); + + if (phy_ext) + coverage_class = max_t(s16, phy_ext->coverage_class, + coverage_class); + mt76_set(dev, MT_ARB_SCR, + MT_ARB_SCR_TX0_DISABLE | MT_ARB_SCR_RX0_DISABLE); + } + udelay(1); + + offset = 3 * coverage_class; + reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) | + FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset); + mt76_wr(dev, MT_TMAC_CDTR, cck + reg_offset); + mt76_wr(dev, MT_TMAC_ODTR, ofdm + reg_offset); + + mt76_wr(dev, MT_TMAC_ICR(ext_phy), + FIELD_PREP(MT_IFS_EIFS, 360) | + FIELD_PREP(MT_IFS_RIFS, 2) | + FIELD_PREP(MT_IFS_SIFS, sifs) | + FIELD_PREP(MT_IFS_SLOT, phy->slottime)); + + if (phy->slottime < 20 || is_5ghz) + val = MT7615_CFEND_RATE_DEFAULT; + else + val = MT7615_CFEND_RATE_11B; + + mt76_rmw_field(dev, MT_AGG_ACR(ext_phy), MT_AGG_ACR_CFEND_RATE, val); + if (ext_phy) + mt76_clear(dev, MT_ARB_SCR, + MT_ARB_SCR_TX1_DISABLE | MT_ARB_SCR_RX1_DISABLE); + else + mt76_clear(dev, MT_ARB_SCR, + MT_ARB_SCR_TX0_DISABLE | MT_ARB_SCR_RX0_DISABLE); + +} + +static void +mt7615_get_status_freq_info(struct mt7615_dev *dev, struct mt76_phy *mphy, + struct mt76_rx_status *status, u8 chfreq) +{ + if (!test_bit(MT76_HW_SCANNING, &mphy->state) && + !test_bit(MT76_HW_SCHED_SCANNING, &mphy->state) && + !test_bit(MT76_STATE_ROC, &mphy->state)) { + status->freq = mphy->chandef.chan->center_freq; + status->band = mphy->chandef.chan->band; + return; + } + + status->band = chfreq <= 14 ? 
NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; + status->freq = ieee80211_channel_to_frequency(chfreq, status->band); +} + +static void mt7615_mac_fill_tm_rx(struct mt7615_phy *phy, __le32 *rxv) +{ +#ifdef CONFIG_NL80211_TESTMODE + u32 rxv1 = le32_to_cpu(rxv[0]); + u32 rxv3 = le32_to_cpu(rxv[2]); + u32 rxv4 = le32_to_cpu(rxv[3]); + u32 rxv5 = le32_to_cpu(rxv[4]); + u8 cbw = FIELD_GET(MT_RXV1_FRAME_MODE, rxv1); + u8 mode = FIELD_GET(MT_RXV1_TX_MODE, rxv1); + s16 foe = FIELD_GET(MT_RXV5_FOE, rxv5); + u32 foe_const = (BIT(cbw + 1) & 0xf) * 10000; + + if (!mode) { + /* CCK */ + foe &= ~BIT(11); + foe *= 1000; + foe >>= 11; + } else { + if (foe > 2048) + foe -= 4096; + + foe = (foe * foe_const) >> 15; + } + + phy->test.last_freq_offset = foe; + phy->test.last_rcpi[0] = FIELD_GET(MT_RXV4_RCPI0, rxv4); + phy->test.last_rcpi[1] = FIELD_GET(MT_RXV4_RCPI1, rxv4); + phy->test.last_rcpi[2] = FIELD_GET(MT_RXV4_RCPI2, rxv4); + phy->test.last_rcpi[3] = FIELD_GET(MT_RXV4_RCPI3, rxv4); + phy->test.last_ib_rssi[0] = FIELD_GET(MT_RXV3_IB_RSSI, rxv3); + phy->test.last_wb_rssi[0] = FIELD_GET(MT_RXV3_WB_RSSI, rxv3); +#endif +} + +/* The HW does not translate the mac header to 802.3 for mesh point */ +static int mt7615_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap) +{ + struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; + struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap); + struct mt7615_sta *msta = (struct mt7615_sta *)status->wcid; + __le32 *rxd = (__le32 *)skb->data; + struct ieee80211_sta *sta; + struct ieee80211_vif *vif; + struct ieee80211_hdr hdr; + u16 frame_control; + + if (le32_get_bits(rxd[1], MT_RXD1_NORMAL_ADDR_TYPE) != + MT_RXD1_NORMAL_U2M) + return -EINVAL; + + if (!(le32_to_cpu(rxd[0]) & MT_RXD0_NORMAL_GROUP_4)) + return -EINVAL; + + if (!msta || !msta->vif) + return -EINVAL; + + sta = container_of((void *)msta, struct ieee80211_sta, drv_priv); + vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv); + + /* store the info from RXD and ethhdr to avoid being overridden */ + frame_control = le32_get_bits(rxd[4], MT_RXD4_FRAME_CONTROL); + hdr.frame_control = cpu_to_le16(frame_control); + hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[6], MT_RXD6_SEQ_CTRL)); + hdr.duration_id = 0; + + ether_addr_copy(hdr.addr1, vif->addr); + ether_addr_copy(hdr.addr2, sta->addr); + switch (frame_control & (IEEE80211_FCTL_TODS | + IEEE80211_FCTL_FROMDS)) { + case 0: + ether_addr_copy(hdr.addr3, vif->bss_conf.bssid); + break; + case IEEE80211_FCTL_FROMDS: + ether_addr_copy(hdr.addr3, eth_hdr->h_source); + break; + case IEEE80211_FCTL_TODS: + ether_addr_copy(hdr.addr3, eth_hdr->h_dest); + break; + case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS: + ether_addr_copy(hdr.addr3, eth_hdr->h_dest); + ether_addr_copy(hdr.addr4, eth_hdr->h_source); + break; + default: + break; + } + + skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2); + if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) || + eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX)) + ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header); + else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN) + ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header); + else + skb_pull(skb, 2); + + if (ieee80211_has_order(hdr.frame_control)) + memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[7], + IEEE80211_HT_CTL_LEN); + + if (ieee80211_is_data_qos(hdr.frame_control)) { + __le16 qos_ctrl; + + qos_ctrl = cpu_to_le16(le32_get_bits(rxd[6], MT_RXD6_QOS_CTL)); + memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl, + 
IEEE80211_QOS_CTL_LEN); + } + + if (ieee80211_has_a4(hdr.frame_control)) + memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr)); + else + memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6); + + status->flag &= ~(RX_FLAG_RADIOTAP_HE | RX_FLAG_RADIOTAP_HE_MU); + return 0; +} + +static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb) +{ + struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; + struct mt76_phy *mphy = &dev->mt76.phy; + struct mt7615_phy *phy = &dev->phy; + struct ieee80211_supported_band *sband; + struct ieee80211_hdr *hdr; + struct mt7615_phy *phy2; + __le32 *rxd = (__le32 *)skb->data; + u32 rxd0 = le32_to_cpu(rxd[0]); + u32 rxd1 = le32_to_cpu(rxd[1]); + u32 rxd2 = le32_to_cpu(rxd[2]); + u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM; + u32 csum_status = *(u32 *)skb->cb; + bool unicast, hdr_trans, remove_pad, insert_ccmp_hdr = false; + u16 hdr_gap; + int phy_idx; + int i, idx; + u8 chfreq, amsdu_info, qos_ctl = 0; + u16 seq_ctrl = 0; + __le16 fc = 0; + + memset(status, 0, sizeof(*status)); + + chfreq = FIELD_GET(MT_RXD1_NORMAL_CH_FREQ, rxd1); + + phy2 = dev->mt76.phys[MT_BAND1] ? dev->mt76.phys[MT_BAND1]->priv : NULL; + if (!phy2) + phy_idx = 0; + else if (phy2->chfreq == phy->chfreq) + phy_idx = -1; + else if (phy->chfreq == chfreq) + phy_idx = 0; + else if (phy2->chfreq == chfreq) + phy_idx = 1; + else + phy_idx = -1; + + if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR) + return -EINVAL; + + hdr_trans = rxd1 & MT_RXD1_NORMAL_HDR_TRANS; + if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_CM)) + return -EINVAL; + + /* ICV error or CCMP/BIP/WPI MIC error */ + if (rxd2 & MT_RXD2_NORMAL_ICV_ERR) + status->flag |= RX_FLAG_ONLY_MONITOR; + + unicast = (rxd1 & MT_RXD1_NORMAL_ADDR_TYPE) == MT_RXD1_NORMAL_U2M; + idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2); + status->wcid = mt7615_rx_get_wcid(dev, idx, unicast); + + if (status->wcid) { + struct mt7615_sta *msta; + + msta = container_of(status->wcid, struct mt7615_sta, wcid); + spin_lock_bh(&dev->sta_poll_lock); + if (list_empty(&msta->poll_list)) + list_add_tail(&msta->poll_list, &dev->sta_poll_list); + spin_unlock_bh(&dev->sta_poll_lock); + } + + if (mt76_is_mmio(&dev->mt76) && (rxd0 & csum_mask) == csum_mask && + !(csum_status & (BIT(0) | BIT(2) | BIT(3)))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + if (rxd2 & MT_RXD2_NORMAL_FCS_ERR) + status->flag |= RX_FLAG_FAILED_FCS_CRC; + + if (rxd2 & MT_RXD2_NORMAL_TKIP_MIC_ERR) + status->flag |= RX_FLAG_MMIC_ERROR; + + if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 && + !(rxd2 & (MT_RXD2_NORMAL_CLM | MT_RXD2_NORMAL_CM))) { + status->flag |= RX_FLAG_DECRYPTED; + status->flag |= RX_FLAG_IV_STRIPPED; + status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED; + } + + remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET; + + if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR) + return -EINVAL; + + rxd += 4; + if (rxd0 & MT_RXD0_NORMAL_GROUP_4) { + u32 v0 = le32_to_cpu(rxd[0]); + u32 v2 = le32_to_cpu(rxd[2]); + + fc = cpu_to_le16(FIELD_GET(MT_RXD4_FRAME_CONTROL, v0)); + qos_ctl = FIELD_GET(MT_RXD6_QOS_CTL, v2); + seq_ctrl = FIELD_GET(MT_RXD6_SEQ_CTRL, v2); + + rxd += 4; + if ((u8 *)rxd - skb->data >= skb->len) + return -EINVAL; + } + + if (rxd0 & MT_RXD0_NORMAL_GROUP_1) { + u8 *data = (u8 *)rxd; + + if (status->flag & RX_FLAG_DECRYPTED) { + switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) { + case MT_CIPHER_AES_CCMP: + case MT_CIPHER_CCMP_CCX: + case MT_CIPHER_CCMP_256: + insert_ccmp_hdr = + FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2); + fallthrough; + case 
MT_CIPHER_TKIP: + case MT_CIPHER_TKIP_NO_MIC: + case MT_CIPHER_GCMP: + case MT_CIPHER_GCMP_256: + status->iv[0] = data[5]; + status->iv[1] = data[4]; + status->iv[2] = data[3]; + status->iv[3] = data[2]; + status->iv[4] = data[1]; + status->iv[5] = data[0]; + break; + default: + break; + } + } + rxd += 4; + if ((u8 *)rxd - skb->data >= skb->len) + return -EINVAL; + } + + if (rxd0 & MT_RXD0_NORMAL_GROUP_2) { + status->timestamp = le32_to_cpu(rxd[0]); + status->flag |= RX_FLAG_MACTIME_START; + + if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB | + MT_RXD2_NORMAL_NON_AMPDU))) { + status->flag |= RX_FLAG_AMPDU_DETAILS; + + /* all subframes of an A-MPDU have the same timestamp */ + if (phy->rx_ampdu_ts != status->timestamp) { + if (!++phy->ampdu_ref) + phy->ampdu_ref++; + } + phy->rx_ampdu_ts = status->timestamp; + + status->ampdu_ref = phy->ampdu_ref; + } + + rxd += 2; + if ((u8 *)rxd - skb->data >= skb->len) + return -EINVAL; + } + + if (rxd0 & MT_RXD0_NORMAL_GROUP_3) { + u32 rxdg5 = le32_to_cpu(rxd[5]); + + /* + * If both PHYs are on the same channel and we don't have a WCID, + * we need to figure out which PHY this packet was received on. + * On the primary PHY, the noise value for the chains belonging to the + * second PHY will be set to the noise value of the last packet from + * that PHY. + */ + if (phy_idx < 0) { + int first_chain = ffs(phy2->mt76->chainmask) - 1; + + phy_idx = ((rxdg5 >> (first_chain * 8)) & 0xff) == 0; + } + } + + if (phy_idx == 1 && phy2) { + mphy = dev->mt76.phys[MT_BAND1]; + phy = phy2; + status->phy_idx = phy_idx; + } + + if (!mt7615_firmware_offload(dev) && chfreq != phy->chfreq) + return -EINVAL; + + mt7615_get_status_freq_info(dev, mphy, status, chfreq); + if (status->band == NL80211_BAND_5GHZ) + sband = &mphy->sband_5g.sband; + else + sband = &mphy->sband_2g.sband; + + if (!test_bit(MT76_STATE_RUNNING, &mphy->state)) + return -EINVAL; + + if (!sband->channels) + return -EINVAL; + + if (rxd0 & MT_RXD0_NORMAL_GROUP_3) { + u32 rxdg0 = le32_to_cpu(rxd[0]); + u32 rxdg1 = le32_to_cpu(rxd[1]); + u32 rxdg3 = le32_to_cpu(rxd[3]); + u8 stbc = FIELD_GET(MT_RXV1_HT_STBC, rxdg0); + bool cck = false; + + i = FIELD_GET(MT_RXV1_TX_RATE, rxdg0); + switch (FIELD_GET(MT_RXV1_TX_MODE, rxdg0)) { + case MT_PHY_TYPE_CCK: + cck = true; + fallthrough; + case MT_PHY_TYPE_OFDM: + i = mt76_get_rate(&dev->mt76, sband, i, cck); + break; + case MT_PHY_TYPE_HT_GF: + case MT_PHY_TYPE_HT: + status->encoding = RX_ENC_HT; + if (i > 31) + return -EINVAL; + break; + case MT_PHY_TYPE_VHT: + status->nss = FIELD_GET(MT_RXV2_NSTS, rxdg1) + 1; + status->encoding = RX_ENC_VHT; + break; + default: + return -EINVAL; + } + status->rate_idx = i; + + switch (FIELD_GET(MT_RXV1_FRAME_MODE, rxdg0)) { + case MT_PHY_BW_20: + break; + case MT_PHY_BW_40: + status->bw = RATE_INFO_BW_40; + break; + case MT_PHY_BW_80: + status->bw = RATE_INFO_BW_80; + break; + case MT_PHY_BW_160: + status->bw = RATE_INFO_BW_160; + break; + default: + return -EINVAL; + } + + if (rxdg0 & MT_RXV1_HT_SHORT_GI) + status->enc_flags |= RX_ENC_FLAG_SHORT_GI; + if (rxdg0 & MT_RXV1_HT_AD_CODE) + status->enc_flags |= RX_ENC_FLAG_LDPC; + + status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc; + + status->chains = mphy->antenna_mask; + status->chain_signal[0] = to_rssi(MT_RXV4_RCPI0, rxdg3); + status->chain_signal[1] = to_rssi(MT_RXV4_RCPI1, rxdg3); + status->chain_signal[2] = to_rssi(MT_RXV4_RCPI2, rxdg3); + status->chain_signal[3] = to_rssi(MT_RXV4_RCPI3, rxdg3); + + mt7615_mac_fill_tm_rx(mphy->priv, rxd); + + rxd += 6; + if ((u8 *)rxd - skb->data >= 
skb->len) + return -EINVAL; + } + + amsdu_info = FIELD_GET(MT_RXD1_NORMAL_PAYLOAD_FORMAT, rxd1); + status->amsdu = !!amsdu_info; + if (status->amsdu) { + status->first_amsdu = amsdu_info == MT_RXD1_FIRST_AMSDU_FRAME; + status->last_amsdu = amsdu_info == MT_RXD1_LAST_AMSDU_FRAME; + } + + hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad; + if (hdr_trans && ieee80211_has_morefrags(fc)) { + if (mt7615_reverse_frag0_hdr_trans(skb, hdr_gap)) + return -EINVAL; + hdr_trans = false; + } else { + int pad_start = 0; + + skb_pull(skb, hdr_gap); + if (!hdr_trans && status->amsdu) { + pad_start = ieee80211_get_hdrlen_from_skb(skb); + } else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) { + /* + * When header translation failure is indicated, + * the hardware will insert an extra 2-byte field + * containing the data length after the protocol + * type field. This happens either when the LLC-SNAP + * pattern did not match, or if a VLAN header was + * detected. + */ + pad_start = 12; + if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q) + pad_start += 4; + else + pad_start = 0; + } + + if (pad_start) { + memmove(skb->data + 2, skb->data, pad_start); + skb_pull(skb, 2); + } + } + + if (insert_ccmp_hdr && !hdr_trans) { + u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1); + + mt76_insert_ccmp_hdr(skb, key_id); + } + + if (!hdr_trans) { + hdr = (struct ieee80211_hdr *)skb->data; + fc = hdr->frame_control; + if (ieee80211_is_data_qos(fc)) { + seq_ctrl = le16_to_cpu(hdr->seq_ctrl); + qos_ctl = *ieee80211_get_qos_ctl(hdr); + } + } else { + status->flag |= RX_FLAG_8023; + } + + if (!status->wcid || !ieee80211_is_data_qos(fc)) + return 0; + + status->aggr = unicast && + !ieee80211_is_qos_nullfunc(fc); + status->qos_ctl = qos_ctl; + status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl); + + return 0; +} + +void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps) +{ +} +EXPORT_SYMBOL_GPL(mt7615_sta_ps); + +static u16 +mt7615_mac_tx_rate_val(struct mt7615_dev *dev, + struct mt76_phy *mphy, + const struct ieee80211_tx_rate *rate, + bool stbc, u8 *bw) +{ + u8 phy, nss, rate_idx; + u16 rateval = 0; + + *bw = 0; + + if (rate->flags & IEEE80211_TX_RC_VHT_MCS) { + rate_idx = ieee80211_rate_get_vht_mcs(rate); + nss = ieee80211_rate_get_vht_nss(rate); + phy = MT_PHY_TYPE_VHT; + if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) + *bw = 1; + else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH) + *bw = 2; + else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH) + *bw = 3; + } else if (rate->flags & IEEE80211_TX_RC_MCS) { + rate_idx = rate->idx; + nss = 1 + (rate->idx >> 3); + phy = MT_PHY_TYPE_HT; + if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD) + phy = MT_PHY_TYPE_HT_GF; + if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) + *bw = 1; + } else { + const struct ieee80211_rate *r; + int band = mphy->chandef.chan->band; + u16 val; + + nss = 1; + r = &mphy->hw->wiphy->bands[band]->bitrates[rate->idx]; + if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) + val = r->hw_value_short; + else + val = r->hw_value; + + phy = val >> 8; + rate_idx = val & 0xff; + } + + if (stbc && nss == 1) { + nss++; + rateval |= MT_TX_RATE_STBC; + } + + rateval |= (FIELD_PREP(MT_TX_RATE_IDX, rate_idx) | + FIELD_PREP(MT_TX_RATE_MODE, phy) | + FIELD_PREP(MT_TX_RATE_NSS, nss - 1)); + + return rateval; +} + +int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi, + struct sk_buff *skb, struct mt76_wcid *wcid, + struct ieee80211_sta *sta, int pid, + struct ieee80211_key_conf *key, + enum mt76_txq_id qid, bool beacon) +{ + 
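+ /* A rough map of the descriptor words filled below: TXD0 carries the
+  * total length and LMAC queue index, TXD1 the WCID and 802.11 header
+  * info, TXD2/TXD7 the frame type and subtype, TXD3 protection,
+  * sequence number and remaining tx count, TXD5 the packet ID used for
+  * tx status reporting, and TXD6 an optional fixed rate.
+  */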
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0, wmm_idx = 0; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_tx_rate *rate = &info->control.rates[0]; + u8 phy_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2; + bool multicast = is_multicast_ether_addr(hdr->addr1); + struct ieee80211_vif *vif = info->control.vif; + bool is_mmio = mt76_is_mmio(&dev->mt76); + u32 val, sz_txd = is_mmio ? MT_TXD_SIZE : MT_USB_TXD_SIZE; + struct mt76_phy *mphy = &dev->mphy; + __le16 fc = hdr->frame_control; + int tx_count = 8; + u16 seqno = 0; + + if (vif) { + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + + omac_idx = mvif->omac_idx; + wmm_idx = mvif->wmm_idx; + } + + if (sta) { + struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv; + + tx_count = msta->rate_count; + } + + if (phy_idx && dev->mt76.phys[MT_BAND1]) + mphy = dev->mt76.phys[MT_BAND1]; + + fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2; + fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4; + + if (beacon) { + p_fmt = MT_TX_TYPE_FW; + q_idx = phy_idx ? MT_LMAC_BCN1 : MT_LMAC_BCN0; + } else if (qid >= MT_TXQ_PSD) { + p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF; + q_idx = phy_idx ? MT_LMAC_ALTX1 : MT_LMAC_ALTX0; + } else { + p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF; + q_idx = wmm_idx * MT7615_MAX_WMM_SETS + + mt7615_lmac_mapping(dev, skb_get_queue_mapping(skb)); + } + + val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) | + FIELD_PREP(MT_TXD0_P_IDX, MT_TX_PORT_IDX_LMAC) | + FIELD_PREP(MT_TXD0_Q_IDX, q_idx); + txwi[0] = cpu_to_le32(val); + + val = MT_TXD1_LONG_FORMAT | + FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) | + FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) | + FIELD_PREP(MT_TXD1_HDR_INFO, + ieee80211_get_hdrlen_from_skb(skb) / 2) | + FIELD_PREP(MT_TXD1_TID, + skb->priority & IEEE80211_QOS_CTL_TID_MASK) | + FIELD_PREP(MT_TXD1_PKT_FMT, p_fmt) | + FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx); + txwi[1] = cpu_to_le32(val); + + val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) | + FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) | + FIELD_PREP(MT_TXD2_MULTICAST, multicast); + if (key) { + if (multicast && ieee80211_is_robust_mgmt_frame(skb) && + key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) { + val |= MT_TXD2_BIP; + txwi[3] = 0; + } else { + txwi[3] = cpu_to_le32(MT_TXD3_PROTECT_FRAME); + } + } else { + txwi[3] = 0; + } + txwi[2] = cpu_to_le32(val); + + if (!(info->flags & IEEE80211_TX_CTL_AMPDU)) + txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE); + + txwi[4] = 0; + txwi[6] = 0; + + if (rate->idx >= 0 && rate->count && + !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) { + bool stbc = info->flags & IEEE80211_TX_CTL_STBC; + u8 bw; + u16 rateval = mt7615_mac_tx_rate_val(dev, mphy, rate, stbc, + &bw); + + txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE); + + val = MT_TXD6_FIXED_BW | + FIELD_PREP(MT_TXD6_BW, bw) | + FIELD_PREP(MT_TXD6_TX_RATE, rateval); + txwi[6] |= cpu_to_le32(val); + + if (rate->flags & IEEE80211_TX_RC_SHORT_GI) + txwi[6] |= cpu_to_le32(MT_TXD6_SGI); + + if (info->flags & IEEE80211_TX_CTL_LDPC) + txwi[6] |= cpu_to_le32(MT_TXD6_LDPC); + + if (!(rate->flags & (IEEE80211_TX_RC_MCS | + IEEE80211_TX_RC_VHT_MCS))) + txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE); + + tx_count = rate->count; + } + + if (!ieee80211_is_beacon(fc)) { + struct ieee80211_hw *hw = mt76_hw(dev); + + val = MT_TXD5_TX_STATUS_HOST | FIELD_PREP(MT_TXD5_PID, pid); + if (!ieee80211_hw_check(hw, SUPPORTS_PS)) + val |= MT_TXD5_SW_POWER_MGMT; + txwi[5] = 
cpu_to_le32(val); + } else { + txwi[5] = 0; + /* use maximum tx count for beacons */ + tx_count = 0x1f; + } + + val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count); + if (info->flags & IEEE80211_TX_CTL_INJECTED) { + seqno = le16_to_cpu(hdr->seq_ctrl); + + if (ieee80211_is_back_req(hdr->frame_control)) { + struct ieee80211_bar *bar; + + bar = (struct ieee80211_bar *)skb->data; + seqno = le16_to_cpu(bar->start_seq_num); + } + + val |= MT_TXD3_SN_VALID | + FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno)); + } + + txwi[3] |= cpu_to_le32(val); + + if (info->flags & IEEE80211_TX_CTL_NO_ACK) + txwi[3] |= cpu_to_le32(MT_TXD3_NO_ACK); + + val = FIELD_PREP(MT_TXD7_TYPE, fc_type) | + FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype) | + FIELD_PREP(MT_TXD7_SPE_IDX, 0x18); + txwi[7] = cpu_to_le32(val); + if (!is_mmio) { + val = FIELD_PREP(MT_TXD8_L_TYPE, fc_type) | + FIELD_PREP(MT_TXD8_L_SUB_TYPE, fc_stype); + txwi[8] = cpu_to_le32(val); + } + + return 0; +} +EXPORT_SYMBOL_GPL(mt7615_mac_write_txwi); + +bool mt7615_mac_wtbl_update(struct mt7615_dev *dev, int idx, u32 mask) +{ + mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX, + FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask); + + return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, + 0, 5000); +} + +void mt7615_mac_sta_poll(struct mt7615_dev *dev) +{ + static const u8 ac_to_tid[4] = { + [IEEE80211_AC_BE] = 0, + [IEEE80211_AC_BK] = 1, + [IEEE80211_AC_VI] = 4, + [IEEE80211_AC_VO] = 6 + }; + static const u8 hw_queue_map[] = { + [IEEE80211_AC_BK] = 0, + [IEEE80211_AC_BE] = 1, + [IEEE80211_AC_VI] = 2, + [IEEE80211_AC_VO] = 3, + }; + struct ieee80211_sta *sta; + struct mt7615_sta *msta; + u32 addr, tx_time[4], rx_time[4]; + struct list_head sta_poll_list; + int i; + + INIT_LIST_HEAD(&sta_poll_list); + spin_lock_bh(&dev->sta_poll_lock); + list_splice_init(&dev->sta_poll_list, &sta_poll_list); + spin_unlock_bh(&dev->sta_poll_lock); + + while (!list_empty(&sta_poll_list)) { + bool clear = false; + + msta = list_first_entry(&sta_poll_list, struct mt7615_sta, + poll_list); + + spin_lock_bh(&dev->sta_poll_lock); + list_del_init(&msta->poll_list); + spin_unlock_bh(&dev->sta_poll_lock); + + addr = mt7615_mac_wtbl_addr(dev, msta->wcid.idx) + 19 * 4; + + for (i = 0; i < 4; i++, addr += 8) { + u32 tx_last = msta->airtime_ac[i]; + u32 rx_last = msta->airtime_ac[i + 4]; + + msta->airtime_ac[i] = mt76_rr(dev, addr); + msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4); + tx_time[i] = msta->airtime_ac[i] - tx_last; + rx_time[i] = msta->airtime_ac[i + 4] - rx_last; + + if ((tx_last | rx_last) & BIT(30)) + clear = true; + } + + if (clear) { + mt7615_mac_wtbl_update(dev, msta->wcid.idx, + MT_WTBL_UPDATE_ADM_COUNT_CLEAR); + memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac)); + } + + if (!msta->wcid.sta) + continue; + + sta = container_of((void *)msta, struct ieee80211_sta, + drv_priv); + for (i = 0; i < 4; i++) { + u32 tx_cur = tx_time[i]; + u32 rx_cur = rx_time[hw_queue_map[i]]; + u8 tid = ac_to_tid[i]; + + if (!tx_cur && !rx_cur) + continue; + + ieee80211_sta_register_airtime(sta, tid, tx_cur, + rx_cur); + } + } +} +EXPORT_SYMBOL_GPL(mt7615_mac_sta_poll); + +static void +mt7615_mac_update_rate_desc(struct mt7615_phy *phy, struct mt7615_sta *sta, + struct ieee80211_tx_rate *probe_rate, + struct ieee80211_tx_rate *rates, + struct mt7615_rate_desc *rd) +{ + struct mt7615_dev *dev = phy->dev; + struct mt76_phy *mphy = phy->mt76; + struct ieee80211_tx_rate *ref; + bool rateset, stbc = false; + int n_rates = sta->n_rates; + u8 bw, bw_prev; + int i, j; + + for (i = n_rates; i < 
4; i++) + rates[i] = rates[n_rates - 1]; + + rateset = !(sta->rate_set_tsf & BIT(0)); + memcpy(sta->rateset[rateset].rates, rates, + sizeof(sta->rateset[rateset].rates)); + if (probe_rate) { + sta->rateset[rateset].probe_rate = *probe_rate; + ref = &sta->rateset[rateset].probe_rate; + } else { + sta->rateset[rateset].probe_rate.idx = -1; + ref = &sta->rateset[rateset].rates[0]; + } + + rates = sta->rateset[rateset].rates; + for (i = 0; i < ARRAY_SIZE(sta->rateset[rateset].rates); i++) { + /* + * We don't support switching between short and long GI + * within the rate set. For accurate tx status reporting, we + * need to make sure that flags match. + * For improved performance, avoid duplicate entries by + * decrementing the MCS index if necessary + */ + if ((ref->flags ^ rates[i].flags) & IEEE80211_TX_RC_SHORT_GI) + rates[i].flags ^= IEEE80211_TX_RC_SHORT_GI; + + for (j = 0; j < i; j++) { + if (rates[i].idx != rates[j].idx) + continue; + if ((rates[i].flags ^ rates[j].flags) & + (IEEE80211_TX_RC_40_MHZ_WIDTH | + IEEE80211_TX_RC_80_MHZ_WIDTH | + IEEE80211_TX_RC_160_MHZ_WIDTH)) + continue; + + if (!rates[i].idx) + continue; + + rates[i].idx--; + } + } + + rd->val[0] = mt7615_mac_tx_rate_val(dev, mphy, &rates[0], stbc, &bw); + bw_prev = bw; + + if (probe_rate) { + rd->probe_val = mt7615_mac_tx_rate_val(dev, mphy, probe_rate, + stbc, &bw); + if (bw) + rd->bw_idx = 1; + else + bw_prev = 0; + } else { + rd->probe_val = rd->val[0]; + } + + rd->val[1] = mt7615_mac_tx_rate_val(dev, mphy, &rates[1], stbc, &bw); + if (bw_prev) { + rd->bw_idx = 3; + bw_prev = bw; + } + + rd->val[2] = mt7615_mac_tx_rate_val(dev, mphy, &rates[2], stbc, &bw); + if (bw_prev) { + rd->bw_idx = 5; + bw_prev = bw; + } + + rd->val[3] = mt7615_mac_tx_rate_val(dev, mphy, &rates[3], stbc, &bw); + if (bw_prev) + rd->bw_idx = 7; + + rd->rateset = rateset; + rd->bw = bw; +} + +static int +mt7615_mac_queue_rate_update(struct mt7615_phy *phy, struct mt7615_sta *sta, + struct ieee80211_tx_rate *probe_rate, + struct ieee80211_tx_rate *rates) +{ + struct mt7615_dev *dev = phy->dev; + struct mt7615_wtbl_rate_desc *wrd; + + if (work_pending(&dev->rate_work)) + return -EBUSY; + + wrd = kzalloc(sizeof(*wrd), GFP_ATOMIC); + if (!wrd) + return -ENOMEM; + + wrd->sta = sta; + mt7615_mac_update_rate_desc(phy, sta, probe_rate, rates, + &wrd->rate); + list_add_tail(&wrd->node, &dev->wrd_head); + queue_work(dev->mt76.wq, &dev->rate_work); + + return 0; +} + +u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid) +{ + u32 addr, val, val2; + u8 offset; + + addr = mt7615_mac_wtbl_addr(dev, wcid) + 11 * 4; + + offset = tid * 12; + addr += 4 * (offset / 32); + offset %= 32; + + val = mt76_rr(dev, addr); + val >>= offset; + + if (offset > 20) { + addr += 4; + val2 = mt76_rr(dev, addr); + val |= val2 << (32 - offset); + } + + return val & GENMASK(11, 0); +} + +void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta, + struct ieee80211_tx_rate *probe_rate, + struct ieee80211_tx_rate *rates) +{ + int wcid = sta->wcid.idx, n_rates = sta->n_rates; + struct mt7615_dev *dev = phy->dev; + struct mt7615_rate_desc rd; + u32 w5, w27, addr; + u16 idx = sta->vif->mt76.omac_idx; + + if (!mt76_is_mmio(&dev->mt76)) { + mt7615_mac_queue_rate_update(phy, sta, probe_rate, rates); + return; + } + + if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000)) + return; + + memset(&rd, 0, sizeof(struct mt7615_rate_desc)); + mt7615_mac_update_rate_desc(phy, sta, probe_rate, rates, &rd); + + addr = mt7615_mac_wtbl_addr(dev, wcid); + w27 = 
mt76_rr(dev, addr + 27 * 4); + w27 &= ~MT_WTBL_W27_CC_BW_SEL; + w27 |= FIELD_PREP(MT_WTBL_W27_CC_BW_SEL, rd.bw); + + w5 = mt76_rr(dev, addr + 5 * 4); + w5 &= ~(MT_WTBL_W5_BW_CAP | MT_WTBL_W5_CHANGE_BW_RATE | + MT_WTBL_W5_MPDU_OK_COUNT | + MT_WTBL_W5_MPDU_FAIL_COUNT | + MT_WTBL_W5_RATE_IDX); + w5 |= FIELD_PREP(MT_WTBL_W5_BW_CAP, rd.bw) | + FIELD_PREP(MT_WTBL_W5_CHANGE_BW_RATE, + rd.bw_idx ? rd.bw_idx - 1 : 7); + + mt76_wr(dev, MT_WTBL_RIUCR0, w5); + + mt76_wr(dev, MT_WTBL_RIUCR1, + FIELD_PREP(MT_WTBL_RIUCR1_RATE0, rd.probe_val) | + FIELD_PREP(MT_WTBL_RIUCR1_RATE1, rd.val[0]) | + FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, rd.val[1])); + + mt76_wr(dev, MT_WTBL_RIUCR2, + FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, rd.val[1] >> 8) | + FIELD_PREP(MT_WTBL_RIUCR2_RATE3, rd.val[1]) | + FIELD_PREP(MT_WTBL_RIUCR2_RATE4, rd.val[2]) | + FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, rd.val[2])); + + mt76_wr(dev, MT_WTBL_RIUCR3, + FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, rd.val[2] >> 4) | + FIELD_PREP(MT_WTBL_RIUCR3_RATE6, rd.val[3]) | + FIELD_PREP(MT_WTBL_RIUCR3_RATE7, rd.val[3])); + + mt76_wr(dev, MT_WTBL_UPDATE, + FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid) | + MT_WTBL_UPDATE_RATE_UPDATE | + MT_WTBL_UPDATE_TX_COUNT_CLEAR); + + mt76_wr(dev, addr + 27 * 4, w27); + + idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx; + addr = idx > 1 ? MT_LPON_TCR2(idx): MT_LPON_TCR0(idx); + + mt76_rmw(dev, addr, MT_LPON_TCR_MODE, MT_LPON_TCR_READ); /* TSF read */ + sta->rate_set_tsf = mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0); + sta->rate_set_tsf |= rd.rateset; + + if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET)) + mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000); + + sta->rate_count = 2 * MT7615_RATE_RETRY * n_rates; + sta->wcid.tx_info |= MT_WCID_TX_INFO_SET; + sta->rate_probe = !!probe_rate; +} +EXPORT_SYMBOL_GPL(mt7615_mac_set_rates); + +static int +mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid, + struct ieee80211_key_conf *key, + enum mt76_cipher_type cipher, u16 cipher_mask) +{ + u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx) + 30 * 4; + u8 data[32] = {}; + + if (key->keylen > sizeof(data)) + return -EINVAL; + + mt76_rr_copy(dev, addr, data, sizeof(data)); + if (cipher == MT_CIPHER_TKIP) { + /* Rx/Tx MIC keys are swapped */ + memcpy(data, key->key, 16); + memcpy(data + 16, key->key + 24, 8); + memcpy(data + 24, key->key + 16, 8); + } else { + if (cipher_mask == BIT(cipher)) + memcpy(data, key->key, key->keylen); + else if (cipher != MT_CIPHER_BIP_CMAC_128) + memcpy(data, key->key, 16); + if (cipher == MT_CIPHER_BIP_CMAC_128) + memcpy(data + 16, key->key, 16); + } + + mt76_wr_copy(dev, addr, data, sizeof(data)); + + return 0; +} + +static int +mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid, + enum mt76_cipher_type cipher, u16 cipher_mask, + int keyidx) +{ + u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx), w0, w1; + + if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000)) + return -ETIMEDOUT; + + w0 = mt76_rr(dev, addr); + w1 = mt76_rr(dev, addr + 4); + + if (cipher_mask) + w0 |= MT_WTBL_W0_RX_KEY_VALID; + else + w0 &= ~(MT_WTBL_W0_RX_KEY_VALID | MT_WTBL_W0_KEY_IDX); + if (cipher_mask & BIT(MT_CIPHER_BIP_CMAC_128)) + w0 |= MT_WTBL_W0_RX_IK_VALID; + else + w0 &= ~MT_WTBL_W0_RX_IK_VALID; + + if (cipher != MT_CIPHER_BIP_CMAC_128 || cipher_mask == BIT(cipher)) { + w0 &= ~MT_WTBL_W0_KEY_IDX; + w0 |= FIELD_PREP(MT_WTBL_W0_KEY_IDX, keyidx); + } + + mt76_wr(dev, MT_WTBL_RICR0, w0); + mt76_wr(dev, MT_WTBL_RICR1, w1); + + if (!mt7615_mac_wtbl_update(dev, wcid->idx, + 
MT_WTBL_UPDATE_RXINFO_UPDATE)) + return -ETIMEDOUT; + + return 0; +} + +static void +mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid, + enum mt76_cipher_type cipher, u16 cipher_mask) +{ + u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx); + + if (cipher == MT_CIPHER_BIP_CMAC_128 && + cipher_mask & ~BIT(MT_CIPHER_BIP_CMAC_128)) + return; + + mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE, + FIELD_PREP(MT_WTBL_W2_KEY_TYPE, cipher)); +} + +int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev, + struct mt76_wcid *wcid, + struct ieee80211_key_conf *key) +{ + enum mt76_cipher_type cipher; + u16 cipher_mask = wcid->cipher; + int err; + + cipher = mt7615_mac_get_cipher(key->cipher); + if (cipher == MT_CIPHER_NONE) + return -EOPNOTSUPP; + + cipher_mask |= BIT(cipher); + mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cipher_mask); + err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cipher_mask); + if (err < 0) + return err; + + err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, cipher_mask, + key->keyidx); + if (err < 0) + return err; + + wcid->cipher = cipher_mask; + + return 0; +} + +int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev, + struct mt76_wcid *wcid, + struct ieee80211_key_conf *key) +{ + int err; + + spin_lock_bh(&dev->mt76.lock); + err = __mt7615_mac_wtbl_set_key(dev, wcid, key); + spin_unlock_bh(&dev->mt76.lock); + + return err; +} + +static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta, + struct ieee80211_tx_info *info, __le32 *txs_data) +{ + struct ieee80211_supported_band *sband; + struct mt7615_rate_set *rs; + struct mt76_phy *mphy; + int first_idx = 0, last_idx; + int i, idx, count; + bool fixed_rate, ack_timeout; + bool ampdu, cck = false; + bool rs_idx; + u32 rate_set_tsf; + u32 final_rate, final_rate_flags, final_nss, txs; + + txs = le32_to_cpu(txs_data[1]); + ampdu = txs & MT_TXS1_AMPDU; + + txs = le32_to_cpu(txs_data[3]); + count = FIELD_GET(MT_TXS3_TX_COUNT, txs); + last_idx = FIELD_GET(MT_TXS3_LAST_TX_RATE, txs); + + txs = le32_to_cpu(txs_data[0]); + fixed_rate = txs & MT_TXS0_FIXED_RATE; + final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs); + ack_timeout = txs & MT_TXS0_ACK_TIMEOUT; + + if (!ampdu && (txs & MT_TXS0_RTS_TIMEOUT)) + return false; + + if (txs & MT_TXS0_QUEUE_TIMEOUT) + return false; + + if (!ack_timeout) + info->flags |= IEEE80211_TX_STAT_ACK; + + info->status.ampdu_len = 1; + info->status.ampdu_ack_len = !!(info->flags & + IEEE80211_TX_STAT_ACK); + + if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU)) + info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU; + + first_idx = max_t(int, 0, last_idx - (count - 1) / MT7615_RATE_RETRY); + + if (fixed_rate) { + info->status.rates[0].count = count; + i = 0; + goto out; + } + + rate_set_tsf = READ_ONCE(sta->rate_set_tsf); + rs_idx = !((u32)(le32_get_bits(txs_data[4], MT_TXS4_F0_TIMESTAMP) - + rate_set_tsf) < 1000000); + rs_idx ^= rate_set_tsf & BIT(0); + rs = &sta->rateset[rs_idx]; + + if (!first_idx && rs->probe_rate.idx >= 0) { + info->status.rates[0] = rs->probe_rate; + + spin_lock_bh(&dev->mt76.lock); + if (sta->rate_probe) { + struct mt7615_phy *phy = &dev->phy; + + if (sta->wcid.phy_idx && dev->mt76.phys[MT_BAND1]) + phy = dev->mt76.phys[MT_BAND1]->priv; + + mt7615_mac_set_rates(phy, sta, NULL, sta->rates); + } + spin_unlock_bh(&dev->mt76.lock); + } else { + info->status.rates[0] = rs->rates[first_idx / 2]; + } + info->status.rates[0].count = 0; + + for (i = 0, idx = first_idx; count && idx <= last_idx; idx++) { + struct ieee80211_tx_rate 
*cur_rate; + int cur_count; + + cur_rate = &rs->rates[idx / 2]; + cur_count = min_t(int, MT7615_RATE_RETRY, count); + count -= cur_count; + + if (idx && (cur_rate->idx != info->status.rates[i].idx || + cur_rate->flags != info->status.rates[i].flags)) { + i++; + if (i == ARRAY_SIZE(info->status.rates)) { + i--; + break; + } + + info->status.rates[i] = *cur_rate; + info->status.rates[i].count = 0; + } + + info->status.rates[i].count += cur_count; + } + +out: + final_rate_flags = info->status.rates[i].flags; + + switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) { + case MT_PHY_TYPE_CCK: + cck = true; + fallthrough; + case MT_PHY_TYPE_OFDM: + mphy = &dev->mphy; + if (sta->wcid.phy_idx && dev->mt76.phys[MT_BAND1]) + mphy = dev->mt76.phys[MT_BAND1]; + + if (mphy->chandef.chan->band == NL80211_BAND_5GHZ) + sband = &mphy->sband_5g.sband; + else + sband = &mphy->sband_2g.sband; + final_rate &= MT_TX_RATE_IDX; + final_rate = mt76_get_rate(&dev->mt76, sband, final_rate, + cck); + final_rate_flags = 0; + break; + case MT_PHY_TYPE_HT_GF: + case MT_PHY_TYPE_HT: + final_rate_flags |= IEEE80211_TX_RC_MCS; + final_rate &= MT_TX_RATE_IDX; + if (final_rate > 31) + return false; + break; + case MT_PHY_TYPE_VHT: + final_nss = FIELD_GET(MT_TX_RATE_NSS, final_rate); + + if ((final_rate & MT_TX_RATE_STBC) && final_nss) + final_nss--; + + final_rate_flags |= IEEE80211_TX_RC_VHT_MCS; + final_rate = (final_rate & MT_TX_RATE_IDX) | (final_nss << 4); + break; + default: + return false; + } + + info->status.rates[i].idx = final_rate; + info->status.rates[i].flags = final_rate_flags; + + return true; +} + +static bool mt7615_mac_add_txs_skb(struct mt7615_dev *dev, + struct mt7615_sta *sta, int pid, + __le32 *txs_data) +{ + struct mt76_dev *mdev = &dev->mt76; + struct sk_buff_head list; + struct sk_buff *skb; + + if (pid < MT_PACKET_ID_FIRST) + return false; + + trace_mac_txdone(mdev, sta->wcid.idx, pid); + + mt76_tx_status_lock(mdev, &list); + skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list); + if (skb) { + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + if (!mt7615_fill_txs(dev, sta, info, txs_data)) { + info->status.rates[0].count = 0; + info->status.rates[0].idx = -1; + } + + mt76_tx_status_skb_done(mdev, skb, &list); + } + mt76_tx_status_unlock(mdev, &list); + + return !!skb; +} + +static void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data) +{ + struct ieee80211_tx_info info = {}; + struct ieee80211_sta *sta = NULL; + struct mt7615_sta *msta = NULL; + struct mt76_wcid *wcid; + struct mt76_phy *mphy = &dev->mt76.phy; + __le32 *txs_data = data; + u8 wcidx; + u8 pid; + + pid = le32_get_bits(txs_data[0], MT_TXS0_PID); + wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID); + + if (pid == MT_PACKET_ID_NO_ACK) + return; + + if (wcidx >= MT7615_WTBL_SIZE) + return; + + rcu_read_lock(); + + wcid = rcu_dereference(dev->mt76.wcid[wcidx]); + if (!wcid) + goto out; + + msta = container_of(wcid, struct mt7615_sta, wcid); + sta = wcid_to_sta(wcid); + + spin_lock_bh(&dev->sta_poll_lock); + if (list_empty(&msta->poll_list)) + list_add_tail(&msta->poll_list, &dev->sta_poll_list); + spin_unlock_bh(&dev->sta_poll_lock); + + if (mt7615_mac_add_txs_skb(dev, msta, pid, txs_data)) + goto out; + + if (wcidx >= MT7615_WTBL_STA || !sta) + goto out; + + if (wcid->phy_idx && dev->mt76.phys[MT_BAND1]) + mphy = dev->mt76.phys[MT_BAND1]; + + if (mt7615_fill_txs(dev, msta, &info, txs_data)) { + spin_lock_bh(&dev->mt76.rx_lock); + ieee80211_tx_status_noskb(mphy->hw, sta, &info); + spin_unlock_bh(&dev->mt76.rx_lock); + } + 
+out: + rcu_read_unlock(); +} + +static void +mt7615_txwi_free(struct mt7615_dev *dev, struct mt76_txwi_cache *txwi) +{ + struct mt76_dev *mdev = &dev->mt76; + __le32 *txwi_data; + u32 val; + u8 wcid; + + mt76_connac_txp_skb_unmap(mdev, txwi); + if (!txwi->skb) + goto out; + + txwi_data = (__le32 *)mt76_get_txwi_ptr(mdev, txwi); + val = le32_to_cpu(txwi_data[1]); + wcid = FIELD_GET(MT_TXD1_WLAN_IDX, val); + mt76_tx_complete_skb(mdev, wcid, txwi->skb); + +out: + txwi->skb = NULL; + mt76_put_txwi(mdev, txwi); +} + +static void +mt7615_mac_tx_free_token(struct mt7615_dev *dev, u16 token) +{ + struct mt76_dev *mdev = &dev->mt76; + struct mt76_txwi_cache *txwi; + + trace_mac_tx_free(dev, token); + txwi = mt76_token_put(mdev, token); + if (!txwi) + return; + + mt7615_txwi_free(dev, txwi); +} + +static void mt7615_mac_tx_free(struct mt7615_dev *dev, void *data, int len) +{ + struct mt76_connac_tx_free *free = data; + void *tx_token = data + sizeof(*free); + void *end = data + len; + u8 i, count; + + mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false); + if (is_mt7615(&dev->mt76)) { + mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false); + } else { + for (i = 0; i < IEEE80211_NUM_ACS; i++) + mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false); + } + + count = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_ID_CNT); + if (is_mt7615(&dev->mt76)) { + __le16 *token = tx_token; + + if (WARN_ON_ONCE((void *)&token[count] > end)) + return; + + for (i = 0; i < count; i++) + mt7615_mac_tx_free_token(dev, le16_to_cpu(token[i])); + } else { + __le32 *token = tx_token; + + if (WARN_ON_ONCE((void *)&token[count] > end)) + return; + + for (i = 0; i < count; i++) + mt7615_mac_tx_free_token(dev, le32_to_cpu(token[i])); + } + + rcu_read_lock(); + mt7615_mac_sta_poll(dev); + rcu_read_unlock(); + + mt76_worker_schedule(&dev->mt76.tx_worker); +} + +bool mt7615_rx_check(struct mt76_dev *mdev, void *data, int len) +{ + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); + __le32 *rxd = (__le32 *)data; + __le32 *end = (__le32 *)&rxd[len / 4]; + enum rx_pkt_type type; + + type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE); + + switch (type) { + case PKT_TYPE_TXRX_NOTIFY: + mt7615_mac_tx_free(dev, data, len); + return false; + case PKT_TYPE_TXS: + for (rxd++; rxd + 7 <= end; rxd += 7) + mt7615_mac_add_txs(dev, rxd); + return false; + default: + return true; + } +} +EXPORT_SYMBOL_GPL(mt7615_rx_check); + +void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, + struct sk_buff *skb) +{ + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); + __le32 *rxd = (__le32 *)skb->data; + __le32 *end = (__le32 *)&skb->data[skb->len]; + enum rx_pkt_type type; + u16 flag; + + type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE); + flag = le32_get_bits(rxd[0], MT_RXD0_PKT_FLAG); + if (type == PKT_TYPE_RX_EVENT && flag == 0x1) + type = PKT_TYPE_NORMAL_MCU; + + switch (type) { + case PKT_TYPE_TXS: + for (rxd++; rxd + 7 <= end; rxd += 7) + mt7615_mac_add_txs(dev, rxd); + dev_kfree_skb(skb); + break; + case PKT_TYPE_TXRX_NOTIFY: + mt7615_mac_tx_free(dev, skb->data, skb->len); + dev_kfree_skb(skb); + break; + case PKT_TYPE_RX_EVENT: + mt7615_mcu_rx_event(dev, skb); + break; + case PKT_TYPE_NORMAL_MCU: + case PKT_TYPE_NORMAL: + if (!mt7615_mac_fill_rx(dev, skb)) { + mt76_rx(&dev->mt76, q, skb); + return; + } + fallthrough; + default: + dev_kfree_skb(skb); + break; + } +} +EXPORT_SYMBOL_GPL(mt7615_queue_rx_skb); + +static void +mt7615_mac_set_sensitivity(struct mt7615_phy *phy, int val, bool 
ofdm) +{ + struct mt7615_dev *dev = phy->dev; + bool ext_phy = phy != &dev->phy; + + if (is_mt7663(&dev->mt76)) { + if (ofdm) + mt76_rmw(dev, MT7663_WF_PHY_MIN_PRI_PWR(ext_phy), + MT_WF_PHY_PD_OFDM_MASK(0), + MT_WF_PHY_PD_OFDM(0, val)); + else + mt76_rmw(dev, MT7663_WF_PHY_RXTD_CCK_PD(ext_phy), + MT_WF_PHY_PD_CCK_MASK(ext_phy), + MT_WF_PHY_PD_CCK(ext_phy, val)); + return; + } + + if (ofdm) + mt76_rmw(dev, MT_WF_PHY_MIN_PRI_PWR(ext_phy), + MT_WF_PHY_PD_OFDM_MASK(ext_phy), + MT_WF_PHY_PD_OFDM(ext_phy, val)); + else + mt76_rmw(dev, MT_WF_PHY_RXTD_CCK_PD(ext_phy), + MT_WF_PHY_PD_CCK_MASK(ext_phy), + MT_WF_PHY_PD_CCK(ext_phy, val)); +} + +static void +mt7615_mac_set_default_sensitivity(struct mt7615_phy *phy) +{ + /* ofdm */ + mt7615_mac_set_sensitivity(phy, 0x13c, true); + /* cck */ + mt7615_mac_set_sensitivity(phy, 0x92, false); + + phy->ofdm_sensitivity = -98; + phy->cck_sensitivity = -110; + phy->last_cca_adj = jiffies; +} + +void mt7615_mac_set_scs(struct mt7615_phy *phy, bool enable) +{ + struct mt7615_dev *dev = phy->dev; + bool ext_phy = phy != &dev->phy; + u32 reg, mask; + + mt7615_mutex_acquire(dev); + + if (phy->scs_en == enable) + goto out; + + if (is_mt7663(&dev->mt76)) { + reg = MT7663_WF_PHY_MIN_PRI_PWR(ext_phy); + mask = MT_WF_PHY_PD_BLK(0); + } else { + reg = MT_WF_PHY_MIN_PRI_PWR(ext_phy); + mask = MT_WF_PHY_PD_BLK(ext_phy); + } + + if (enable) { + mt76_set(dev, reg, mask); + if (is_mt7622(&dev->mt76)) { + mt76_set(dev, MT_MIB_M0_MISC_CR(0), 0x7 << 8); + mt76_set(dev, MT_MIB_M0_MISC_CR(0), 0x7); + } + } else { + mt76_clear(dev, reg, mask); + } + + mt7615_mac_set_default_sensitivity(phy); + phy->scs_en = enable; + +out: + mt7615_mutex_release(dev); +} + +void mt7615_mac_enable_nf(struct mt7615_dev *dev, bool ext_phy) +{ + u32 rxtd, reg; + + if (is_mt7663(&dev->mt76)) + reg = MT7663_WF_PHY_R0_PHYMUX_5; + else + reg = MT_WF_PHY_R0_PHYMUX_5(ext_phy); + + if (ext_phy) + rxtd = MT_WF_PHY_RXTD2(10); + else + rxtd = MT_WF_PHY_RXTD(12); + + mt76_set(dev, rxtd, BIT(18) | BIT(29)); + mt76_set(dev, reg, 0x5 << 12); +} + +void mt7615_mac_cca_stats_reset(struct mt7615_phy *phy) +{ + struct mt7615_dev *dev = phy->dev; + bool ext_phy = phy != &dev->phy; + u32 reg; + + if (is_mt7663(&dev->mt76)) + reg = MT7663_WF_PHY_R0_PHYMUX_5; + else + reg = MT_WF_PHY_R0_PHYMUX_5(ext_phy); + + /* reset PD and MDRDY counters */ + mt76_clear(dev, reg, GENMASK(22, 20)); + mt76_set(dev, reg, BIT(22) | BIT(20)); +} + +static void +mt7615_mac_adjust_sensitivity(struct mt7615_phy *phy, + u32 rts_err_rate, bool ofdm) +{ + struct mt7615_dev *dev = phy->dev; + int false_cca = ofdm ? phy->false_cca_ofdm : phy->false_cca_cck; + bool ext_phy = phy != &dev->phy; + s16 def_th = ofdm ? -98 : -110; + bool update = false; + s8 *sensitivity; + int signal; + + sensitivity = ofdm ? 
&phy->ofdm_sensitivity : &phy->cck_sensitivity; + signal = mt76_get_min_avg_rssi(&dev->mt76, ext_phy); + if (!signal) { + mt7615_mac_set_default_sensitivity(phy); + return; + } + + signal = min(signal, -72); + if (false_cca > 500) { + if (rts_err_rate > MT_FRAC(40, 100)) + return; + + /* decrease coverage */ + if (*sensitivity == def_th && signal > -90) { + *sensitivity = -90; + update = true; + } else if (*sensitivity + 2 < signal) { + *sensitivity += 2; + update = true; + } + } else if ((false_cca > 0 && false_cca < 50) || + rts_err_rate > MT_FRAC(60, 100)) { + /* increase coverage */ + if (*sensitivity - 2 >= def_th) { + *sensitivity -= 2; + update = true; + } + } + + if (*sensitivity > signal) { + *sensitivity = signal; + update = true; + } + + if (update) { + u16 val = ofdm ? *sensitivity * 2 + 512 : *sensitivity + 256; + + mt7615_mac_set_sensitivity(phy, val, ofdm); + phy->last_cca_adj = jiffies; + } +} + +static void +mt7615_mac_scs_check(struct mt7615_phy *phy) +{ + struct mt7615_dev *dev = phy->dev; + struct mib_stats *mib = &phy->mib; + u32 val, rts_err_rate = 0; + u32 mdrdy_cck, mdrdy_ofdm, pd_cck, pd_ofdm; + bool ext_phy = phy != &dev->phy; + + if (!phy->scs_en) + return; + + if (is_mt7663(&dev->mt76)) + val = mt76_rr(dev, MT7663_WF_PHY_R0_PHYCTRL_STS0(ext_phy)); + else + val = mt76_rr(dev, MT_WF_PHY_R0_PHYCTRL_STS0(ext_phy)); + pd_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_PD_CCK, val); + pd_ofdm = FIELD_GET(MT_WF_PHYCTRL_STAT_PD_OFDM, val); + + if (is_mt7663(&dev->mt76)) + val = mt76_rr(dev, MT7663_WF_PHY_R0_PHYCTRL_STS5(ext_phy)); + else + val = mt76_rr(dev, MT_WF_PHY_R0_PHYCTRL_STS5(ext_phy)); + mdrdy_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_CCK, val); + mdrdy_ofdm = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_OFDM, val); + + phy->false_cca_ofdm = pd_ofdm - mdrdy_ofdm; + phy->false_cca_cck = pd_cck - mdrdy_cck; + mt7615_mac_cca_stats_reset(phy); + + if (mib->rts_cnt + mib->rts_retries_cnt) + rts_err_rate = MT_FRAC(mib->rts_retries_cnt, + mib->rts_cnt + mib->rts_retries_cnt); + + /* cck */ + mt7615_mac_adjust_sensitivity(phy, rts_err_rate, false); + /* ofdm */ + mt7615_mac_adjust_sensitivity(phy, rts_err_rate, true); + + if (time_after(jiffies, phy->last_cca_adj + 10 * HZ)) + mt7615_mac_set_default_sensitivity(phy); +} + +static u8 +mt7615_phy_get_nf(struct mt7615_dev *dev, int idx) +{ + static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 }; + u32 reg, val, sum = 0, n = 0; + int i; + + if (is_mt7663(&dev->mt76)) + reg = MT7663_WF_PHY_RXTD(20); + else + reg = idx ? MT_WF_PHY_RXTD2(17) : MT_WF_PHY_RXTD(20); + + for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) { + val = mt76_rr(dev, reg); + sum += val * nf_power[i]; + n += val; + } + + if (!n) + return 0; + + return sum / n; +} + +static void +mt7615_phy_update_channel(struct mt76_phy *mphy, int idx) +{ + struct mt7615_dev *dev = container_of(mphy->dev, struct mt7615_dev, mt76); + struct mt7615_phy *phy = mphy->priv; + struct mt76_channel_state *state; + u64 busy_time, tx_time, rx_time, obss_time; + u32 obss_reg = idx ? 
MT_WF_RMAC_MIB_TIME6 : MT_WF_RMAC_MIB_TIME5;
+ int nf;
+
+ busy_time = mt76_get_field(dev, MT_MIB_SDR9(idx),
+ MT_MIB_SDR9_BUSY_MASK);
+ tx_time = mt76_get_field(dev, MT_MIB_SDR36(idx),
+ MT_MIB_SDR36_TXTIME_MASK);
+ rx_time = mt76_get_field(dev, MT_MIB_SDR37(idx),
+ MT_MIB_SDR37_RXTIME_MASK);
+ obss_time = mt76_get_field(dev, obss_reg, MT_MIB_OBSSTIME_MASK);
+
+ nf = mt7615_phy_get_nf(dev, idx);
+ if (!phy->noise)
+ phy->noise = nf << 4;
+ else if (nf)
+ phy->noise += nf - (phy->noise >> 4);
+
+ state = mphy->chan_state;
+ state->cc_busy += busy_time;
+ state->cc_tx += tx_time;
+ state->cc_rx += rx_time + obss_time;
+ state->cc_bss_rx += rx_time;
+ state->noise = -(phy->noise >> 4);
+}
+
+static void mt7615_update_survey(struct mt7615_dev *dev)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ struct mt76_phy *mphy_ext = mdev->phys[MT_BAND1];
+ ktime_t cur_time;
+
+ /* MT7615 can only update both phys simultaneously
+ * since some registers are shared across bands.
+ */
+
+ mt7615_phy_update_channel(&mdev->phy, 0);
+ if (mphy_ext)
+ mt7615_phy_update_channel(mphy_ext, 1);
+
+ cur_time = ktime_get_boottime();
+
+ mt76_update_survey_active_time(&mdev->phy, cur_time);
+ if (mphy_ext)
+ mt76_update_survey_active_time(mphy_ext, cur_time);
+
+ /* reset obss airtime */
+ mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
+}
+
+void mt7615_update_channel(struct mt76_phy *mphy)
+{
+ struct mt7615_dev *dev = container_of(mphy->dev, struct mt7615_dev, mt76);
+
+ if (mt76_connac_pm_wake(&dev->mphy, &dev->pm))
+ return;
+
+ mt7615_update_survey(dev);
+ mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
+}
+EXPORT_SYMBOL_GPL(mt7615_update_channel);
+
+static void
+mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
+{
+ struct mt7615_dev *dev = phy->dev;
+ struct mib_stats *mib = &phy->mib;
+ bool ext_phy = phy != &dev->phy;
+ int i, aggr;
+ u32 val, val2;
+
+ mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
+ MT_MIB_SDR3_FCS_ERR_MASK);
+
+ val = mt76_get_field(dev, MT_MIB_SDR14(ext_phy),
+ MT_MIB_AMPDU_MPDU_COUNT);
+ if (val) {
+ val2 = mt76_get_field(dev, MT_MIB_SDR15(ext_phy),
+ MT_MIB_AMPDU_ACK_COUNT);
+ mib->aggr_per = 1000 * (val - val2) / val;
+ }
+
+ aggr = ext_phy ? 
ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0; + for (i = 0; i < 4; i++) { + val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i)); + mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val); + mib->ack_fail_cnt += FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, + val); + + val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i)); + mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val); + mib->rts_retries_cnt += FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, + val); + + val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i)); + dev->mt76.aggr_stats[aggr++] += val & 0xffff; + dev->mt76.aggr_stats[aggr++] += val >> 16; + } +} + +void mt7615_pm_wake_work(struct work_struct *work) +{ + struct mt7615_dev *dev; + struct mt76_phy *mphy; + + dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev, + pm.wake_work); + mphy = dev->phy.mt76; + + if (!mt7615_mcu_set_drv_ctrl(dev)) { + struct mt76_dev *mdev = &dev->mt76; + int i; + + if (mt76_is_sdio(mdev)) { + mt76_connac_pm_dequeue_skbs(mphy, &dev->pm); + mt76_worker_schedule(&mdev->sdio.txrx_worker); + } else { + local_bh_disable(); + mt76_for_each_q_rx(mdev, i) + napi_schedule(&mdev->napi[i]); + local_bh_enable(); + mt76_connac_pm_dequeue_skbs(mphy, &dev->pm); + mt76_queue_tx_cleanup(dev, mdev->q_mcu[MT_MCUQ_WM], + false); + } + + if (test_bit(MT76_STATE_RUNNING, &mphy->state)) { + unsigned long timeout; + + timeout = mt7615_get_macwork_timeout(dev); + ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work, + timeout); + } + } + + ieee80211_wake_queues(mphy->hw); + wake_up(&dev->pm.wait); +} + +void mt7615_pm_power_save_work(struct work_struct *work) +{ + struct mt7615_dev *dev; + unsigned long delta; + + dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev, + pm.ps_work.work); + + delta = dev->pm.idle_timeout; + if (test_bit(MT76_HW_SCANNING, &dev->mphy.state) || + test_bit(MT76_HW_SCHED_SCANNING, &dev->mphy.state)) + goto out; + + if (mutex_is_locked(&dev->mt76.mutex)) + /* if mt76 mutex is held we should not put the device + * to sleep since we are currently accessing device + * register map. We need to wait for the next power_save + * trigger. 
+ */ + goto out; + + if (time_is_after_jiffies(dev->pm.last_activity + delta)) { + delta = dev->pm.last_activity + delta - jiffies; + goto out; + } + + if (!mt7615_mcu_set_fw_ctrl(dev)) + return; +out: + queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta); +} + +void mt7615_mac_work(struct work_struct *work) +{ + struct mt7615_phy *phy; + struct mt76_phy *mphy; + unsigned long timeout; + + mphy = (struct mt76_phy *)container_of(work, struct mt76_phy, + mac_work.work); + phy = mphy->priv; + + mt7615_mutex_acquire(phy->dev); + + mt7615_update_survey(phy->dev); + if (++mphy->mac_work_count == 5) { + mphy->mac_work_count = 0; + + mt7615_mac_update_mib_stats(phy); + mt7615_mac_scs_check(phy); + } + + mt7615_mutex_release(phy->dev); + + mt76_tx_status_check(mphy->dev, false); + + timeout = mt7615_get_macwork_timeout(phy->dev); + ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work, timeout); +} + +void mt7615_tx_token_put(struct mt7615_dev *dev) +{ + struct mt76_txwi_cache *txwi; + int id; + + spin_lock_bh(&dev->mt76.token_lock); + idr_for_each_entry(&dev->mt76.token, txwi, id) + mt7615_txwi_free(dev, txwi); + spin_unlock_bh(&dev->mt76.token_lock); + idr_destroy(&dev->mt76.token); +} +EXPORT_SYMBOL_GPL(mt7615_tx_token_put); + +static void mt7615_dfs_stop_radar_detector(struct mt7615_phy *phy) +{ + struct mt7615_dev *dev = phy->dev; + + if (phy->rdd_state & BIT(0)) + mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 0, + MT_RX_SEL0, 0); + if (phy->rdd_state & BIT(1)) + mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 1, + MT_RX_SEL0, 0); +} + +static int mt7615_dfs_start_rdd(struct mt7615_dev *dev, int chain) +{ + int err; + + err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, chain, + MT_RX_SEL0, 0); + if (err < 0) + return err; + + return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_DET_MODE, chain, + MT_RX_SEL0, 1); +} + +static int mt7615_dfs_start_radar_detector(struct mt7615_phy *phy) +{ + struct cfg80211_chan_def *chandef = &phy->mt76->chandef; + struct mt7615_dev *dev = phy->dev; + bool ext_phy = phy != &dev->phy; + int err; + + /* start CAC */ + err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_START, ext_phy, + MT_RX_SEL0, 0); + if (err < 0) + return err; + + err = mt7615_dfs_start_rdd(dev, ext_phy); + if (err < 0) + return err; + + phy->rdd_state |= BIT(ext_phy); + + if (chandef->width == NL80211_CHAN_WIDTH_160 || + chandef->width == NL80211_CHAN_WIDTH_80P80) { + err = mt7615_dfs_start_rdd(dev, 1); + if (err < 0) + return err; + + phy->rdd_state |= BIT(1); + } + + return 0; +} + +static int +mt7615_dfs_init_radar_specs(struct mt7615_phy *phy) +{ + const struct mt7615_dfs_radar_spec *radar_specs; + struct mt7615_dev *dev = phy->dev; + int err, i, lpn = 500; + + switch (dev->mt76.region) { + case NL80211_DFS_FCC: + radar_specs = &fcc_radar_specs; + lpn = 8; + break; + case NL80211_DFS_ETSI: + radar_specs = &etsi_radar_specs; + break; + case NL80211_DFS_JP: + radar_specs = &jp_radar_specs; + break; + default: + return -EINVAL; + } + + /* avoid FCC radar detection in non-FCC region */ + err = mt7615_mcu_set_fcc5_lpn(dev, lpn); + if (err < 0) + return err; + + for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) { + err = mt7615_mcu_set_radar_th(dev, i, + &radar_specs->radar_pattern[i]); + if (err < 0) + return err; + } + + return mt7615_mcu_set_pulse_th(dev, &radar_specs->pulse_th); +} + +int mt7615_dfs_init_radar_detector(struct mt7615_phy *phy) +{ + struct cfg80211_chan_def *chandef = &phy->mt76->chandef; + struct mt7615_dev *dev = phy->dev; + bool ext_phy = phy != &dev->phy; + 
enum mt76_dfs_state dfs_state, prev_state; + int err; + + if (is_mt7663(&dev->mt76)) + return 0; + + prev_state = phy->mt76->dfs_state; + dfs_state = mt76_phy_dfs_state(phy->mt76); + if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) && + dfs_state < MT_DFS_STATE_CAC) + dfs_state = MT_DFS_STATE_ACTIVE; + + if (prev_state == dfs_state) + return 0; + + if (dfs_state == MT_DFS_STATE_DISABLED) + goto stop; + + if (prev_state <= MT_DFS_STATE_DISABLED) { + err = mt7615_dfs_init_radar_specs(phy); + if (err < 0) + return err; + + err = mt7615_dfs_start_radar_detector(phy); + if (err < 0) + return err; + + phy->mt76->dfs_state = MT_DFS_STATE_CAC; + } + + if (dfs_state == MT_DFS_STATE_CAC) + return 0; + + err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_END, + ext_phy, MT_RX_SEL0, 0); + if (err < 0) { + phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN; + return err; + } + + phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE; + return 0; + +stop: + err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_NORMAL_START, ext_phy, + MT_RX_SEL0, 0); + if (err < 0) + return err; + + mt7615_dfs_stop_radar_detector(phy); + phy->mt76->dfs_state = MT_DFS_STATE_DISABLED; + + return 0; +} + +int mt7615_mac_set_beacon_filter(struct mt7615_phy *phy, + struct ieee80211_vif *vif, + bool enable) +{ + struct mt7615_dev *dev = phy->dev; + bool ext_phy = phy != &dev->phy; + int err; + + if (!mt7615_firmware_offload(dev)) + return -EOPNOTSUPP; + + switch (vif->type) { + case NL80211_IFTYPE_MONITOR: + return 0; + case NL80211_IFTYPE_MESH_POINT: + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_AP: + if (enable) + phy->n_beacon_vif++; + else + phy->n_beacon_vif--; + fallthrough; + default: + break; + } + + err = mt7615_mcu_set_bss_pm(dev, vif, !phy->n_beacon_vif); + if (err) + return err; + + if (phy->n_beacon_vif) { + vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER; + mt76_clear(dev, MT_WF_RFCR(ext_phy), + MT_WF_RFCR_DROP_OTHER_BEACON); + } else { + vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER; + mt76_set(dev, MT_WF_RFCR(ext_phy), + MT_WF_RFCR_DROP_OTHER_BEACON); + } + + return 0; +} + +void mt7615_coredump_work(struct work_struct *work) +{ + struct mt7615_dev *dev; + char *dump, *data; + + dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev, + coredump.work.work); + + if (time_is_after_jiffies(dev->coredump.last_activity + + 4 * MT76_CONNAC_COREDUMP_TIMEOUT)) { + queue_delayed_work(dev->mt76.wq, &dev->coredump.work, + MT76_CONNAC_COREDUMP_TIMEOUT); + return; + } + + dump = vzalloc(MT76_CONNAC_COREDUMP_SZ); + data = dump; + + while (true) { + struct sk_buff *skb; + + spin_lock_bh(&dev->mt76.lock); + skb = __skb_dequeue(&dev->coredump.msg_list); + spin_unlock_bh(&dev->mt76.lock); + + if (!skb) + break; + + skb_pull(skb, sizeof(struct mt7615_mcu_rxd)); + if (!dump || data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) { + dev_kfree_skb(skb); + continue; + } + + memcpy(data, skb->data, skb->len); + data += skb->len; + + dev_kfree_skb(skb); + } + + if (dump) + dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ, + GFP_KERNEL); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h new file mode 100644 index 000000000..880c9f74a --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h @@ -0,0 +1,337 @@ +/* SPDX-License-Identifier: ISC */ +/* Copyright (C) 2019 MediaTek Inc. 
*/ + +#ifndef __MT7615_MAC_H +#define __MT7615_MAC_H + +#define MT_CT_PARSE_LEN 72 +#define MT_CT_DMA_BUF_NUM 2 + +#define MT_RXD0_LENGTH GENMASK(15, 0) +#define MT_RXD0_PKT_FLAG GENMASK(19, 16) +#define MT_RXD0_PKT_TYPE GENMASK(31, 29) + +#define MT_RXD0_NORMAL_ETH_TYPE_OFS GENMASK(22, 16) +#define MT_RXD0_NORMAL_IP_SUM BIT(23) +#define MT_RXD0_NORMAL_UDP_TCP_SUM BIT(24) +#define MT_RXD0_NORMAL_GROUP_1 BIT(25) +#define MT_RXD0_NORMAL_GROUP_2 BIT(26) +#define MT_RXD0_NORMAL_GROUP_3 BIT(27) +#define MT_RXD0_NORMAL_GROUP_4 BIT(28) + +enum rx_pkt_type { + PKT_TYPE_TXS, + PKT_TYPE_TXRXV, + PKT_TYPE_NORMAL, + PKT_TYPE_RX_DUP_RFB, + PKT_TYPE_RX_TMR, + PKT_TYPE_RETRIEVE, + PKT_TYPE_TXRX_NOTIFY, + PKT_TYPE_RX_EVENT, + PKT_TYPE_NORMAL_MCU, +}; + +#define MT_RXD1_NORMAL_BSSID GENMASK(31, 26) +#define MT_RXD1_NORMAL_PAYLOAD_FORMAT GENMASK(25, 24) +#define MT_RXD1_FIRST_AMSDU_FRAME GENMASK(1, 0) +#define MT_RXD1_MID_AMSDU_FRAME BIT(1) +#define MT_RXD1_LAST_AMSDU_FRAME BIT(0) +#define MT_RXD1_NORMAL_HDR_TRANS BIT(23) +#define MT_RXD1_NORMAL_HDR_OFFSET BIT(22) +#define MT_RXD1_NORMAL_MAC_HDR_LEN GENMASK(21, 16) +#define MT_RXD1_NORMAL_CH_FREQ GENMASK(15, 8) +#define MT_RXD1_NORMAL_KEY_ID GENMASK(7, 6) +#define MT_RXD1_NORMAL_BEACON_UC BIT(5) +#define MT_RXD1_NORMAL_BEACON_MC BIT(4) +#define MT_RXD1_NORMAL_BF_REPORT BIT(3) +#define MT_RXD1_NORMAL_ADDR_TYPE GENMASK(2, 1) +#define MT_RXD1_NORMAL_BCAST GENMASK(2, 1) +#define MT_RXD1_NORMAL_MCAST BIT(2) +#define MT_RXD1_NORMAL_U2M BIT(1) +#define MT_RXD1_NORMAL_HTC_VLD BIT(0) + +#define MT_RXD2_NORMAL_NON_AMPDU BIT(31) +#define MT_RXD2_NORMAL_NON_AMPDU_SUB BIT(30) +#define MT_RXD2_NORMAL_NDATA BIT(29) +#define MT_RXD2_NORMAL_NULL_FRAME BIT(28) +#define MT_RXD2_NORMAL_FRAG BIT(27) +#define MT_RXD2_NORMAL_INT_FRAME BIT(26) +#define MT_RXD2_NORMAL_HDR_TRANS_ERROR BIT(25) +#define MT_RXD2_NORMAL_MAX_LEN_ERROR BIT(24) +#define MT_RXD2_NORMAL_AMSDU_ERR BIT(23) +#define MT_RXD2_NORMAL_LEN_MISMATCH BIT(22) +#define MT_RXD2_NORMAL_TKIP_MIC_ERR BIT(21) +#define MT_RXD2_NORMAL_ICV_ERR BIT(20) +#define MT_RXD2_NORMAL_CLM BIT(19) +#define MT_RXD2_NORMAL_CM BIT(18) +#define MT_RXD2_NORMAL_FCS_ERR BIT(17) +#define MT_RXD2_NORMAL_SW_BIT BIT(16) +#define MT_RXD2_NORMAL_SEC_MODE GENMASK(15, 12) +#define MT_RXD2_NORMAL_TID GENMASK(11, 8) +#define MT_RXD2_NORMAL_WLAN_IDX GENMASK(7, 0) + +#define MT_RXD3_NORMAL_PF_STS GENMASK(31, 30) +#define MT_RXD3_NORMAL_PF_MODE BIT(29) +#define MT_RXD3_NORMAL_CLS_BITMAP GENMASK(28, 19) +#define MT_RXD3_NORMAL_WOL GENMASK(18, 14) +#define MT_RXD3_NORMAL_MAGIC_PKT BIT(13) +#define MT_RXD3_NORMAL_OFLD GENMASK(12, 11) +#define MT_RXD3_NORMAL_CLS BIT(10) +#define MT_RXD3_NORMAL_PATTERN_DROP BIT(9) +#define MT_RXD3_NORMAL_TSF_COMPARE_LOSS BIT(8) +#define MT_RXD3_NORMAL_RXV_SEQ GENMASK(7, 0) + +#define MT_RXD4_FRAME_CONTROL GENMASK(15, 0) + +#define MT_RXD6_SEQ_CTRL GENMASK(15, 0) +#define MT_RXD6_QOS_CTL GENMASK(31, 16) + +#define MT_RXD7_HT_CONTROL GENMASK(31, 0) + +#define MT_RXV1_ACID_DET_H BIT(31) +#define MT_RXV1_ACID_DET_L BIT(30) +#define MT_RXV1_VHTA2_B8_B3 GENMASK(29, 24) +#define MT_RXV1_NUM_RX GENMASK(23, 22) +#define MT_RXV1_HT_NO_SOUND BIT(21) +#define MT_RXV1_HT_SMOOTH BIT(20) +#define MT_RXV1_HT_SHORT_GI BIT(19) +#define MT_RXV1_HT_AGGR BIT(18) +#define MT_RXV1_VHTA1_B22 BIT(17) +#define MT_RXV1_FRAME_MODE GENMASK(16, 15) +#define MT_RXV1_TX_MODE GENMASK(14, 12) +#define MT_RXV1_HT_EXT_LTF GENMASK(11, 10) +#define MT_RXV1_HT_AD_CODE BIT(9) +#define MT_RXV1_HT_STBC GENMASK(8, 7) +#define MT_RXV1_TX_RATE GENMASK(6, 0) + +#define 
MT_RXV2_SEL_ANT BIT(31) +#define MT_RXV2_VALID_BIT BIT(30) +#define MT_RXV2_NSTS GENMASK(29, 27) +#define MT_RXV2_GROUP_ID GENMASK(26, 21) +#define MT_RXV2_LENGTH GENMASK(20, 0) + +#define MT_RXV3_WB_RSSI GENMASK(31, 24) +#define MT_RXV3_IB_RSSI GENMASK(23, 16) + +#define MT_RXV4_RCPI3 GENMASK(31, 24) +#define MT_RXV4_RCPI2 GENMASK(23, 16) +#define MT_RXV4_RCPI1 GENMASK(15, 8) +#define MT_RXV4_RCPI0 GENMASK(7, 0) + +#define MT_RXV5_FOE GENMASK(11, 0) + +#define MT_RXV6_NF3 GENMASK(31, 24) +#define MT_RXV6_NF2 GENMASK(23, 16) +#define MT_RXV6_NF1 GENMASK(15, 8) +#define MT_RXV6_NF0 GENMASK(7, 0) + +enum tx_header_format { + MT_HDR_FORMAT_802_3, + MT_HDR_FORMAT_CMD, + MT_HDR_FORMAT_802_11, + MT_HDR_FORMAT_802_11_EXT, +}; + +enum tx_pkt_type { + MT_TX_TYPE_CT, + MT_TX_TYPE_SF, + MT_TX_TYPE_CMD, + MT_TX_TYPE_FW, +}; + +enum tx_port_idx { + MT_TX_PORT_IDX_LMAC, + MT_TX_PORT_IDX_MCU +}; + +enum tx_mcu_port_q_idx { + MT_TX_MCU_PORT_RX_Q0 = 0, + MT_TX_MCU_PORT_RX_Q1, + MT_TX_MCU_PORT_RX_Q2, + MT_TX_MCU_PORT_RX_Q3, + MT_TX_MCU_PORT_RX_FWDL = 0x1e +}; + +enum tx_phy_bandwidth { + MT_PHY_BW_20, + MT_PHY_BW_40, + MT_PHY_BW_80, + MT_PHY_BW_160, +}; + +#define MT_CT_INFO_APPLY_TXD BIT(0) +#define MT_CT_INFO_COPY_HOST_TXD_ALL BIT(1) +#define MT_CT_INFO_MGMT_FRAME BIT(2) +#define MT_CT_INFO_NONE_CIPHER_FRAME BIT(3) +#define MT_CT_INFO_HSR2_TX BIT(4) + +#define MT_TXD0_P_IDX BIT(31) +#define MT_TXD0_Q_IDX GENMASK(30, 26) +#define MT_TXD0_UDP_TCP_SUM BIT(24) +#define MT_TXD0_IP_SUM BIT(23) +#define MT_TXD0_ETH_TYPE_OFFSET GENMASK(22, 16) +#define MT_TXD0_TX_BYTES GENMASK(15, 0) + +#define MT_TXD1_OWN_MAC GENMASK(31, 26) +#define MT_TXD1_PKT_FMT GENMASK(25, 24) +#define MT_TXD1_TID GENMASK(23, 21) +#define MT_TXD1_AMSDU BIT(20) +#define MT_TXD1_UNXV BIT(19) +#define MT_TXD1_HDR_PAD GENMASK(18, 17) +#define MT_TXD1_TXD_LEN BIT(16) +#define MT_TXD1_LONG_FORMAT BIT(15) +#define MT_TXD1_HDR_FORMAT GENMASK(14, 13) +#define MT_TXD1_HDR_INFO GENMASK(12, 8) +#define MT_TXD1_WLAN_IDX GENMASK(7, 0) + +#define MT_TXD2_FIX_RATE BIT(31) +#define MT_TXD2_TIMING_MEASURE BIT(30) +#define MT_TXD2_BA_DISABLE BIT(29) +#define MT_TXD2_POWER_OFFSET GENMASK(28, 24) +#define MT_TXD2_MAX_TX_TIME GENMASK(23, 16) +#define MT_TXD2_FRAG GENMASK(15, 14) +#define MT_TXD2_HTC_VLD BIT(13) +#define MT_TXD2_DURATION BIT(12) +#define MT_TXD2_BIP BIT(11) +#define MT_TXD2_MULTICAST BIT(10) +#define MT_TXD2_RTS BIT(9) +#define MT_TXD2_SOUNDING BIT(8) +#define MT_TXD2_NDPA BIT(7) +#define MT_TXD2_NDP BIT(6) +#define MT_TXD2_FRAME_TYPE GENMASK(5, 4) +#define MT_TXD2_SUB_TYPE GENMASK(3, 0) + +#define MT_TXD3_SN_VALID BIT(31) +#define MT_TXD3_PN_VALID BIT(30) +#define MT_TXD3_SEQ GENMASK(27, 16) +#define MT_TXD3_REM_TX_COUNT GENMASK(15, 11) +#define MT_TXD3_TX_COUNT GENMASK(10, 6) +#define MT_TXD3_PROTECT_FRAME BIT(1) +#define MT_TXD3_NO_ACK BIT(0) + +#define MT_TXD4_PN_LOW GENMASK(31, 0) + +#define MT_TXD5_PN_HIGH GENMASK(31, 16) +#define MT_TXD5_SW_POWER_MGMT BIT(13) +#define MT_TXD5_DA_SELECT BIT(11) +#define MT_TXD5_TX_STATUS_HOST BIT(10) +#define MT_TXD5_TX_STATUS_MCU BIT(9) +#define MT_TXD5_TX_STATUS_FMT BIT(8) +#define MT_TXD5_PID GENMASK(7, 0) + +#define MT_TXD6_FIXED_RATE BIT(31) +#define MT_TXD6_SGI BIT(30) +#define MT_TXD6_LDPC BIT(29) +#define MT_TXD6_TX_BF BIT(28) +#define MT_TXD6_TX_RATE GENMASK(27, 16) +#define MT_TXD6_ANT_ID GENMASK(15, 4) +#define MT_TXD6_DYN_BW BIT(3) +#define MT_TXD6_FIXED_BW BIT(2) +#define MT_TXD6_BW GENMASK(1, 0) + +/* MT7663 DW7 HW-AMSDU */ +#define MT_TXD7_HW_AMSDU_CAP BIT(30) +#define MT_TXD7_TYPE 
GENMASK(21, 20) +#define MT_TXD7_SUB_TYPE GENMASK(19, 16) +#define MT_TXD7_SPE_IDX GENMASK(15, 11) +#define MT_TXD7_SPE_IDX_SLE BIT(10) + +#define MT_TXD8_L_TYPE GENMASK(5, 4) +#define MT_TXD8_L_SUB_TYPE GENMASK(3, 0) + +#define MT_TX_RATE_STBC BIT(11) +#define MT_TX_RATE_NSS GENMASK(10, 9) +#define MT_TX_RATE_MODE GENMASK(8, 6) +#define MT_TX_RATE_IDX GENMASK(5, 0) + +#define MT_TX_FREE_MSDU_ID_CNT GENMASK(6, 0) + +#define MT_TXS0_PID GENMASK(31, 24) +#define MT_TXS0_BA_ERROR BIT(22) +#define MT_TXS0_PS_FLAG BIT(21) +#define MT_TXS0_TXOP_TIMEOUT BIT(20) +#define MT_TXS0_BIP_ERROR BIT(19) + +#define MT_TXS0_QUEUE_TIMEOUT BIT(18) +#define MT_TXS0_RTS_TIMEOUT BIT(17) +#define MT_TXS0_ACK_TIMEOUT BIT(16) +#define MT_TXS0_ACK_ERROR_MASK GENMASK(18, 16) + +#define MT_TXS0_TX_STATUS_HOST BIT(15) +#define MT_TXS0_TX_STATUS_MCU BIT(14) +#define MT_TXS0_TXS_FORMAT BIT(13) +#define MT_TXS0_FIXED_RATE BIT(12) +#define MT_TXS0_TX_RATE GENMASK(11, 0) + +#define MT_TXS1_ANT_ID GENMASK(31, 20) +#define MT_TXS1_RESP_RATE GENMASK(19, 16) +#define MT_TXS1_BW GENMASK(15, 14) +#define MT_TXS1_I_TXBF BIT(13) +#define MT_TXS1_E_TXBF BIT(12) +#define MT_TXS1_TID GENMASK(11, 9) +#define MT_TXS1_AMPDU BIT(8) +#define MT_TXS1_ACKED_MPDU BIT(7) +#define MT_TXS1_TX_POWER_DBM GENMASK(6, 0) + +#define MT_TXS2_WCID GENMASK(31, 24) +#define MT_TXS2_RXV_SEQNO GENMASK(23, 16) +#define MT_TXS2_TX_DELAY GENMASK(15, 0) + +#define MT_TXS3_LAST_TX_RATE GENMASK(31, 29) +#define MT_TXS3_TX_COUNT GENMASK(28, 24) +#define MT_TXS3_F1_TSSI1 GENMASK(23, 12) +#define MT_TXS3_F1_TSSI0 GENMASK(11, 0) +#define MT_TXS3_F0_SEQNO GENMASK(11, 0) + +#define MT_TXS4_F0_TIMESTAMP GENMASK(31, 0) +#define MT_TXS4_F1_TSSI3 GENMASK(23, 12) +#define MT_TXS4_F1_TSSI2 GENMASK(11, 0) + +#define MT_TXS5_F0_FRONT_TIME GENMASK(24, 0) +#define MT_TXS5_F1_NOISE_2 GENMASK(23, 16) +#define MT_TXS5_F1_NOISE_1 GENMASK(15, 8) +#define MT_TXS5_F1_NOISE_0 GENMASK(7, 0) + +#define MT_TXS6_F1_RCPI_3 GENMASK(31, 24) +#define MT_TXS6_F1_RCPI_2 GENMASK(23, 16) +#define MT_TXS6_F1_RCPI_1 GENMASK(15, 8) +#define MT_TXS6_F1_RCPI_0 GENMASK(7, 0) + +struct mt7615_dfs_pulse { + u32 max_width; /* us */ + int max_pwr; /* dbm */ + int min_pwr; /* dbm */ + u32 min_stgr_pri; /* us */ + u32 max_stgr_pri; /* us */ + u32 min_cr_pri; /* us */ + u32 max_cr_pri; /* us */ +}; + +struct mt7615_dfs_pattern { + u8 enb; + u8 stgr; + u8 min_crpn; + u8 max_crpn; + u8 min_crpr; + u8 min_pw; + u8 max_pw; + u32 min_pri; + u32 max_pri; + u8 min_crbn; + u8 max_crbn; + u8 min_stgpn; + u8 max_stgpn; + u8 min_stgpr; +}; + +struct mt7615_dfs_radar_spec { + struct mt7615_dfs_pulse pulse_th; + struct mt7615_dfs_pattern radar_pattern[16]; +}; + +static inline u32 mt7615_mac_wtbl_addr(struct mt7615_dev *dev, int wcid) +{ + return MT_WTBL_BASE(dev) + wcid * MT_WTBL_ENTRY_SIZE; +} + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c new file mode 100644 index 000000000..a3b0d4e91 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -0,0 +1,1346 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2019 MediaTek Inc. 
+ * + * Author: Roy Luo + * Ryder Lee + * Felix Fietkau + * Lorenzo Bianconi + */ + +#include +#include +#include "mt7615.h" +#include "mcu.h" + +static bool mt7615_dev_running(struct mt7615_dev *dev) +{ + struct mt7615_phy *phy; + + if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) + return true; + + phy = mt7615_ext_phy(dev); + + return phy && test_bit(MT76_STATE_RUNNING, &phy->mt76->state); +} + +static int mt7615_start(struct ieee80211_hw *hw) +{ + struct mt7615_dev *dev = mt7615_hw_dev(hw); + struct mt7615_phy *phy = mt7615_hw_phy(hw); + unsigned long timeout; + bool running; + int ret; + + if (!mt7615_wait_for_mcu_init(dev)) + return -EIO; + + mt7615_mutex_acquire(dev); + + running = mt7615_dev_running(dev); + + if (!running) { + ret = mt7615_mcu_set_pm(dev, 0, 0); + if (ret) + goto out; + + ret = mt76_connac_mcu_set_mac_enable(&dev->mt76, 0, true, false); + if (ret) + goto out; + + mt7615_mac_enable_nf(dev, 0); + } + + if (phy != &dev->phy) { + ret = mt7615_mcu_set_pm(dev, 1, 0); + if (ret) + goto out; + + ret = mt76_connac_mcu_set_mac_enable(&dev->mt76, 1, true, false); + if (ret) + goto out; + + mt7615_mac_enable_nf(dev, 1); + } + + if (mt7615_firmware_offload(dev)) { + ret = mt76_connac_mcu_set_channel_domain(phy->mt76); + if (ret) + goto out; + + ret = mt76_connac_mcu_set_rate_txpower(phy->mt76); + if (ret) + goto out; + } + + ret = mt7615_mcu_set_chan_info(phy, MCU_EXT_CMD(SET_RX_PATH)); + if (ret) + goto out; + + set_bit(MT76_STATE_RUNNING, &phy->mt76->state); + + timeout = mt7615_get_macwork_timeout(dev); + ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work, timeout); + + if (!running) + mt7615_mac_reset_counters(dev); + +out: + mt7615_mutex_release(dev); + + return ret; +} + +static void mt7615_stop(struct ieee80211_hw *hw) +{ + struct mt7615_dev *dev = mt7615_hw_dev(hw); + struct mt7615_phy *phy = mt7615_hw_phy(hw); + + cancel_delayed_work_sync(&phy->mt76->mac_work); + del_timer_sync(&phy->roc_timer); + cancel_work_sync(&phy->roc_work); + + cancel_delayed_work_sync(&dev->pm.ps_work); + cancel_work_sync(&dev->pm.wake_work); + + mt76_connac_free_pending_tx_skbs(&dev->pm, NULL); + + mt7615_mutex_acquire(dev); + + mt76_testmode_reset(phy->mt76, true); + + clear_bit(MT76_STATE_RUNNING, &phy->mt76->state); + cancel_delayed_work_sync(&phy->scan_work); + + if (phy != &dev->phy) { + mt7615_mcu_set_pm(dev, 1, 1); + mt76_connac_mcu_set_mac_enable(&dev->mt76, 1, false, false); + } + + if (!mt7615_dev_running(dev)) { + mt7615_mcu_set_pm(dev, 0, 1); + mt76_connac_mcu_set_mac_enable(&dev->mt76, 0, false, false); + } + + mt7615_mutex_release(dev); +} + +static inline int get_free_idx(u32 mask, u8 start, u8 end) +{ + return ffs(~mask & GENMASK(end, start)); +} + +static int get_omac_idx(enum nl80211_iftype type, u64 mask) +{ + int i; + + switch (type) { + case NL80211_IFTYPE_STATION: + /* prefer hw bssid slot 1-3 */ + i = get_free_idx(mask, HW_BSSID_1, HW_BSSID_3); + if (i) + return i - 1; + + /* next, try to find a free repeater entry for the sta */ + i = get_free_idx(mask >> REPEATER_BSSID_START, 0, + REPEATER_BSSID_MAX - REPEATER_BSSID_START); + if (i) + return i + 32 - 1; + + i = get_free_idx(mask, EXT_BSSID_1, EXT_BSSID_MAX); + if (i) + return i - 1; + + if (~mask & BIT(HW_BSSID_0)) + return HW_BSSID_0; + + break; + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_MESH_POINT: + case NL80211_IFTYPE_MONITOR: + case NL80211_IFTYPE_AP: + /* ap uses hw bssid 0 and ext bssid */ + if (~mask & BIT(HW_BSSID_0)) + return HW_BSSID_0; + + i = get_free_idx(mask, EXT_BSSID_1, EXT_BSSID_MAX); + 
if (i) + return i - 1; + + break; + default: + WARN_ON(1); + break; + } + + return -1; +} + +static int mt7615_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; + struct mt7615_dev *dev = mt7615_hw_dev(hw); + struct mt7615_phy *phy = mt7615_hw_phy(hw); + struct mt76_txq *mtxq; + bool ext_phy = phy != &dev->phy; + int idx, ret = 0; + + mt7615_mutex_acquire(dev); + + mt76_testmode_reset(phy->mt76, true); + + if (vif->type == NL80211_IFTYPE_MONITOR && + is_zero_ether_addr(vif->addr)) + phy->monitor_vif = vif; + + mvif->mt76.idx = __ffs64(~dev->mt76.vif_mask); + if (mvif->mt76.idx >= MT7615_MAX_INTERFACES) { + ret = -ENOSPC; + goto out; + } + + idx = get_omac_idx(vif->type, dev->omac_mask); + if (idx < 0) { + ret = -ENOSPC; + goto out; + } + mvif->mt76.omac_idx = idx; + + mvif->mt76.band_idx = ext_phy; + mvif->mt76.wmm_idx = vif->type != NL80211_IFTYPE_AP; + if (ext_phy) + mvif->mt76.wmm_idx += 2; + + dev->mt76.vif_mask |= BIT_ULL(mvif->mt76.idx); + dev->omac_mask |= BIT_ULL(mvif->mt76.omac_idx); + phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx); + + ret = mt7615_mcu_set_dbdc(dev); + if (ret) + goto out; + + idx = MT7615_WTBL_RESERVED - mvif->mt76.idx; + + INIT_LIST_HEAD(&mvif->sta.poll_list); + mvif->sta.wcid.idx = idx; + mvif->sta.wcid.phy_idx = mvif->mt76.band_idx; + mvif->sta.wcid.hw_key_idx = -1; + mt76_packet_id_init(&mvif->sta.wcid); + + mt7615_mac_wtbl_update(dev, idx, + MT_WTBL_UPDATE_ADM_COUNT_CLEAR); + + rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid); + if (vif->txq) { + mtxq = (struct mt76_txq *)vif->txq->drv_priv; + mtxq->wcid = idx; + } + + ret = mt7615_mcu_add_dev_info(phy, vif, true); +out: + mt7615_mutex_release(dev); + + return ret; +} + +static void mt7615_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; + struct mt7615_sta *msta = &mvif->sta; + struct mt7615_dev *dev = mt7615_hw_dev(hw); + struct mt7615_phy *phy = mt7615_hw_phy(hw); + int idx = msta->wcid.idx; + + mt7615_mutex_acquire(dev); + + mt7615_mcu_add_bss_info(phy, vif, NULL, false); + mt7615_mcu_sta_add(phy, vif, NULL, false); + + mt76_testmode_reset(phy->mt76, true); + if (vif == phy->monitor_vif) + phy->monitor_vif = NULL; + + mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid); + + mt7615_mcu_add_dev_info(phy, vif, false); + + rcu_assign_pointer(dev->mt76.wcid[idx], NULL); + + dev->mt76.vif_mask &= ~BIT_ULL(mvif->mt76.idx); + dev->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx); + phy->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx); + + mt7615_mutex_release(dev); + + spin_lock_bh(&dev->sta_poll_lock); + if (!list_empty(&msta->poll_list)) + list_del_init(&msta->poll_list); + spin_unlock_bh(&dev->sta_poll_lock); + + mt76_packet_id_flush(&dev->mt76, &mvif->sta.wcid); +} + +int mt7615_set_channel(struct mt7615_phy *phy) +{ + struct mt7615_dev *dev = phy->dev; + bool ext_phy = phy != &dev->phy; + int ret; + + cancel_delayed_work_sync(&phy->mt76->mac_work); + + mt7615_mutex_acquire(dev); + + set_bit(MT76_RESET, &phy->mt76->state); + + mt76_set_channel(phy->mt76); + + if (is_mt7615(&dev->mt76) && dev->flash_eeprom) { + ret = mt7615_mcu_apply_rx_dcoc(phy); + if (ret) + goto out; + + ret = mt7615_mcu_apply_tx_dpd(phy); + if (ret) + goto out; + } + + ret = mt7615_mcu_set_chan_info(phy, MCU_EXT_CMD(CHANNEL_SWITCH)); + if (ret) + goto out; + + mt7615_mac_set_timing(phy); + ret = mt7615_dfs_init_radar_detector(phy); + if (ret) + goto out; + + 
mt7615_mac_cca_stats_reset(phy); + ret = mt7615_mcu_set_sku_en(phy, true); + if (ret) + goto out; + + mt7615_mac_reset_counters(dev); + phy->noise = 0; + phy->chfreq = mt76_rr(dev, MT_CHFREQ(ext_phy)); + +out: + clear_bit(MT76_RESET, &phy->mt76->state); + + mt7615_mutex_release(dev); + + mt76_worker_schedule(&dev->mt76.tx_worker); + if (!mt76_testmode_enabled(phy->mt76)) { + unsigned long timeout = mt7615_get_macwork_timeout(dev); + + ieee80211_queue_delayed_work(phy->mt76->hw, + &phy->mt76->mac_work, timeout); + } + + return ret; +} + +static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct mt7615_dev *dev = mt7615_hw_dev(hw); + struct mt7615_phy *phy = mt7615_hw_phy(hw); + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; + struct mt7615_sta *msta = sta ? (struct mt7615_sta *)sta->drv_priv : + &mvif->sta; + struct mt76_wcid *wcid = &msta->wcid; + int idx = key->keyidx, err = 0; + u8 *wcid_keyidx = &wcid->hw_key_idx; + + /* The hardware does not support per-STA RX GTK, fallback + * to software mode for these. + */ + if ((vif->type == NL80211_IFTYPE_ADHOC || + vif->type == NL80211_IFTYPE_MESH_POINT) && + (key->cipher == WLAN_CIPHER_SUITE_TKIP || + key->cipher == WLAN_CIPHER_SUITE_CCMP) && + !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) + return -EOPNOTSUPP; + + /* fall back to sw encryption for unsupported ciphers */ + switch (key->cipher) { + case WLAN_CIPHER_SUITE_AES_CMAC: + wcid_keyidx = &wcid->hw_key_idx2; + key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE; + break; + case WLAN_CIPHER_SUITE_TKIP: + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_CCMP_256: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + case WLAN_CIPHER_SUITE_SMS4: + break; + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + default: + return -EOPNOTSUPP; + } + + mt7615_mutex_acquire(dev); + + if (cmd == SET_KEY && !sta && !mvif->mt76.cipher) { + mvif->mt76.cipher = mt76_connac_mcu_get_cipher(key->cipher); + mt7615_mcu_add_bss_info(phy, vif, NULL, true); + } + + if (cmd == SET_KEY) + *wcid_keyidx = idx; + else { + if (idx == *wcid_keyidx) + *wcid_keyidx = -1; + goto out; + } + + mt76_wcid_key_setup(&dev->mt76, wcid, key); + if (mt76_is_mmio(&dev->mt76)) + err = mt7615_mac_wtbl_set_key(dev, wcid, key); + else + err = __mt7615_mac_wtbl_set_key(dev, wcid, key); + +out: + mt7615_mutex_release(dev); + + return err; +} + +static int mt7615_set_sar_specs(struct ieee80211_hw *hw, + const struct cfg80211_sar_specs *sar) +{ + struct mt7615_phy *phy = mt7615_hw_phy(hw); + int err; + + if (!cfg80211_chandef_valid(&phy->mt76->chandef)) + return -EINVAL; + + err = mt76_init_sar_power(hw, sar); + if (err) + return err; + + if (mt7615_firmware_offload(phy->dev)) + return mt76_connac_mcu_set_rate_txpower(phy->mt76); + + ieee80211_stop_queues(hw); + err = mt7615_set_channel(phy); + ieee80211_wake_queues(hw); + + return err; +} + +static int mt7615_config(struct ieee80211_hw *hw, u32 changed) +{ + struct mt7615_dev *dev = mt7615_hw_dev(hw); + struct mt7615_phy *phy = mt7615_hw_phy(hw); + bool band = phy != &dev->phy; + int ret = 0; + + if (changed & (IEEE80211_CONF_CHANGE_CHANNEL | + IEEE80211_CONF_CHANGE_POWER)) { +#ifdef CONFIG_NL80211_TESTMODE + if (phy->mt76->test.state != MT76_TM_STATE_OFF) { + mt7615_mutex_acquire(dev); + mt76_testmode_reset(phy->mt76, false); + mt7615_mutex_release(dev); + } +#endif + ieee80211_stop_queues(hw); + ret = mt7615_set_channel(phy); + 
ieee80211_wake_queues(hw); + } + + mt7615_mutex_acquire(dev); + + if (changed & IEEE80211_CONF_CHANGE_MONITOR) { + mt76_testmode_reset(phy->mt76, true); + + if (!(hw->conf.flags & IEEE80211_CONF_MONITOR)) + phy->rxfilter |= MT_WF_RFCR_DROP_OTHER_UC; + else + phy->rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC; + + mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter); + } + + mt7615_mutex_release(dev); + + return ret; +} + +static int +mt7615_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + unsigned int link_id, u16 queue, + const struct ieee80211_tx_queue_params *params) +{ + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + struct mt7615_dev *dev = mt7615_hw_dev(hw); + int err; + + mt7615_mutex_acquire(dev); + + queue = mt7615_lmac_mapping(dev, queue); + queue += mvif->wmm_idx * MT7615_MAX_WMM_SETS; + err = mt7615_mcu_set_wmm(dev, queue, params); + + mt7615_mutex_release(dev); + + return err; +} + +static void mt7615_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast) +{ + struct mt7615_dev *dev = mt7615_hw_dev(hw); + struct mt7615_phy *phy = mt7615_hw_phy(hw); + bool band = phy != &dev->phy; + + u32 ctl_flags = MT_WF_RFCR1_DROP_ACK | + MT_WF_RFCR1_DROP_BF_POLL | + MT_WF_RFCR1_DROP_BA | + MT_WF_RFCR1_DROP_CFEND | + MT_WF_RFCR1_DROP_CFACK; + u32 flags = 0; + + mt7615_mutex_acquire(dev); + +#define MT76_FILTER(_flag, _hw) do { \ + flags |= *total_flags & FIF_##_flag; \ + phy->rxfilter &= ~(_hw); \ + if (!mt76_testmode_enabled(phy->mt76)) \ + phy->rxfilter |= !(flags & FIF_##_flag) * (_hw);\ + } while (0) + + phy->rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS | + MT_WF_RFCR_DROP_FRAME_REPORT | + MT_WF_RFCR_DROP_PROBEREQ | + MT_WF_RFCR_DROP_MCAST_FILTERED | + MT_WF_RFCR_DROP_MCAST | + MT_WF_RFCR_DROP_BCAST | + MT_WF_RFCR_DROP_DUPLICATE | + MT_WF_RFCR_DROP_A2_BSSID | + MT_WF_RFCR_DROP_UNWANTED_CTL | + MT_WF_RFCR_DROP_STBC_MULTI); + + if (phy->n_beacon_vif || !mt7615_firmware_offload(dev)) + phy->rxfilter &= ~MT_WF_RFCR_DROP_OTHER_BEACON; + + MT76_FILTER(OTHER_BSS, MT_WF_RFCR_DROP_OTHER_TIM | + MT_WF_RFCR_DROP_A3_MAC | + MT_WF_RFCR_DROP_A3_BSSID); + + MT76_FILTER(FCSFAIL, MT_WF_RFCR_DROP_FCSFAIL); + + MT76_FILTER(CONTROL, MT_WF_RFCR_DROP_CTS | + MT_WF_RFCR_DROP_RTS | + MT_WF_RFCR_DROP_CTL_RSV | + MT_WF_RFCR_DROP_NDPA); + + *total_flags = flags; + mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter); + + if (*total_flags & FIF_CONTROL) + mt76_clear(dev, MT_WF_RFCR1(band), ctl_flags); + else + mt76_set(dev, MT_WF_RFCR1(band), ctl_flags); + + mt7615_mutex_release(dev); +} + +static void mt7615_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, + u64 changed) +{ + struct mt7615_dev *dev = mt7615_hw_dev(hw); + struct mt7615_phy *phy = mt7615_hw_phy(hw); + + mt7615_mutex_acquire(dev); + + if (changed & BSS_CHANGED_ERP_SLOT) { + int slottime = info->use_short_slot ? 
9 : 20; + + if (slottime != phy->slottime) { + phy->slottime = slottime; + mt7615_mac_set_timing(phy); + } + } + + if (changed & BSS_CHANGED_BEACON_ENABLED && info->enable_beacon) { + mt7615_mcu_add_bss_info(phy, vif, NULL, true); + mt7615_mcu_sta_add(phy, vif, NULL, true); + + if (mt7615_firmware_offload(dev) && vif->p2p) + mt76_connac_mcu_set_p2p_oppps(hw, vif); + } + + if (changed & (BSS_CHANGED_BEACON | + BSS_CHANGED_BEACON_ENABLED)) + mt7615_mcu_add_beacon(dev, hw, vif, info->enable_beacon); + + if (changed & BSS_CHANGED_PS) + mt76_connac_mcu_set_vif_ps(&dev->mt76, vif); + + if ((changed & BSS_CHANGED_ARP_FILTER) && + mt7615_firmware_offload(dev)) { + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; + + mt76_connac_mcu_update_arp_filter(&dev->mt76, &mvif->mt76, + info); + } + + if (changed & BSS_CHANGED_ASSOC) + mt7615_mac_set_beacon_filter(phy, vif, vif->cfg.assoc); + + mt7615_mutex_release(dev); +} + +static void +mt7615_channel_switch_beacon(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_chan_def *chandef) +{ + struct mt7615_dev *dev = mt7615_hw_dev(hw); + + mt7615_mutex_acquire(dev); + mt7615_mcu_add_beacon(dev, hw, vif, true); + mt7615_mutex_release(dev); +} + +int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); + struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv; + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; + struct mt7615_phy *phy; + int idx, err; + + idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7615_WTBL_STA - 1); + if (idx < 0) + return -ENOSPC; + + INIT_LIST_HEAD(&msta->poll_list); + msta->vif = mvif; + msta->wcid.sta = 1; + msta->wcid.idx = idx; + msta->wcid.phy_idx = mvif->mt76.band_idx; + + phy = mvif->mt76.band_idx ? mt7615_ext_phy(dev) : &dev->phy; + err = mt76_connac_pm_wake(phy->mt76, &dev->pm); + if (err) + return err; + + if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) { + err = mt7615_mcu_add_bss_info(phy, vif, sta, true); + if (err) + return err; + } + + mt7615_mac_wtbl_update(dev, idx, + MT_WTBL_UPDATE_ADM_COUNT_CLEAR); + err = mt7615_mcu_sta_add(&dev->phy, vif, sta, true); + if (err) + return err; + + mt76_connac_power_save_sched(phy->mt76, &dev->pm); + + return err; +} +EXPORT_SYMBOL_GPL(mt7615_mac_sta_add); + +void mt7615_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); + struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv; + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; + struct mt7615_phy *phy; + + mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid); + + phy = mvif->mt76.band_idx ? 
mt7615_ext_phy(dev) : &dev->phy; + mt76_connac_pm_wake(phy->mt76, &dev->pm); + + mt7615_mcu_sta_add(&dev->phy, vif, sta, false); + mt7615_mac_wtbl_update(dev, msta->wcid.idx, + MT_WTBL_UPDATE_ADM_COUNT_CLEAR); + if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) + mt7615_mcu_add_bss_info(phy, vif, sta, false); + + spin_lock_bh(&dev->sta_poll_lock); + if (!list_empty(&msta->poll_list)) + list_del_init(&msta->poll_list); + spin_unlock_bh(&dev->sta_poll_lock); + + mt76_connac_power_save_sched(phy->mt76, &dev->pm); +} +EXPORT_SYMBOL_GPL(mt7615_mac_sta_remove); + +static void mt7615_sta_rate_tbl_update(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt7615_dev *dev = mt7615_hw_dev(hw); + struct mt7615_phy *phy = mt7615_hw_phy(hw); + struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv; + struct ieee80211_sta_rates *sta_rates = rcu_dereference(sta->rates); + int i; + + if (!sta_rates) + return; + + spin_lock_bh(&dev->mt76.lock); + for (i = 0; i < ARRAY_SIZE(msta->rates); i++) { + msta->rates[i].idx = sta_rates->rate[i].idx; + msta->rates[i].count = sta_rates->rate[i].count; + msta->rates[i].flags = sta_rates->rate[i].flags; + + if (msta->rates[i].idx < 0 || !msta->rates[i].count) + break; + } + msta->n_rates = i; + if (mt76_connac_pm_ref(phy->mt76, &dev->pm)) { + mt7615_mac_set_rates(phy, msta, NULL, msta->rates); + mt76_connac_pm_unref(phy->mt76, &dev->pm); + } + spin_unlock_bh(&dev->mt76.lock); +} + +void mt7615_tx_worker(struct mt76_worker *w) +{ + struct mt7615_dev *dev = container_of(w, struct mt7615_dev, + mt76.tx_worker); + + if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) { + queue_work(dev->mt76.wq, &dev->pm.wake_work); + return; + } + + mt76_tx_worker_run(&dev->mt76); + mt76_connac_pm_unref(&dev->mphy, &dev->pm); +} + +static void mt7615_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, + struct sk_buff *skb) +{ + struct mt7615_dev *dev = mt7615_hw_dev(hw); + struct mt76_phy *mphy = hw->priv; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_vif *vif = info->control.vif; + struct mt76_wcid *wcid = &dev->mt76.global_wcid; + struct mt7615_sta *msta = NULL; + int qid; + + if (control->sta) { + msta = (struct mt7615_sta *)control->sta->drv_priv; + wcid = &msta->wcid; + } + + if (vif && !control->sta) { + struct mt7615_vif *mvif; + + mvif = (struct mt7615_vif *)vif->drv_priv; + msta = &mvif->sta; + wcid = &msta->wcid; + } + + if (mt76_connac_pm_ref(mphy, &dev->pm)) { + mt76_tx(mphy, control->sta, wcid, skb); + mt76_connac_pm_unref(mphy, &dev->pm); + return; + } + + qid = skb_get_queue_mapping(skb); + if (qid >= MT_TXQ_PSD) { + qid = IEEE80211_AC_BE; + skb_set_queue_mapping(skb, qid); + } + + mt76_connac_pm_queue_skb(hw, &dev->pm, wcid, skb); +} + +static int mt7615_set_rts_threshold(struct ieee80211_hw *hw, u32 val) +{ + struct mt7615_dev *dev = mt7615_hw_dev(hw); + struct mt7615_phy *phy = mt7615_hw_phy(hw); + int err, band = phy != &dev->phy; + + mt7615_mutex_acquire(dev); + err = mt76_connac_mcu_set_rts_thresh(&dev->mt76, val, band); + mt7615_mutex_release(dev); + + return err; +} + +static int +mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_ampdu_params *params) +{ + enum ieee80211_ampdu_mlme_action action = params->action; + struct mt7615_dev *dev = mt7615_hw_dev(hw); + struct ieee80211_sta *sta = params->sta; + struct ieee80211_txq *txq = sta->txq[params->tid]; + struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv; + u16 tid = 
params->tid; + u16 ssn = params->ssn; + struct mt76_txq *mtxq; + int ret = 0; + + if (!txq) + return -EINVAL; + + mtxq = (struct mt76_txq *)txq->drv_priv; + + mt7615_mutex_acquire(dev); + + switch (action) { + case IEEE80211_AMPDU_RX_START: + mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn, + params->buf_size); + ret = mt7615_mcu_add_rx_ba(dev, params, true); + break; + case IEEE80211_AMPDU_RX_STOP: + mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid); + ret = mt7615_mcu_add_rx_ba(dev, params, false); + break; + case IEEE80211_AMPDU_TX_OPERATIONAL: + mtxq->aggr = true; + mtxq->send_bar = false; + ret = mt7615_mcu_add_tx_ba(dev, params, true); + ssn = mt7615_mac_get_sta_tid_sn(dev, msta->wcid.idx, tid); + ieee80211_send_bar(vif, sta->addr, tid, + IEEE80211_SN_TO_SEQ(ssn)); + break; + case IEEE80211_AMPDU_TX_STOP_FLUSH: + case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: + mtxq->aggr = false; + ret = mt7615_mcu_add_tx_ba(dev, params, false); + break; + case IEEE80211_AMPDU_TX_START: + ssn = mt7615_mac_get_sta_tid_sn(dev, msta->wcid.idx, tid); + params->ssn = ssn; + ret = IEEE80211_AMPDU_TX_START_IMMEDIATE; + break; + case IEEE80211_AMPDU_TX_STOP_CONT: + mtxq->aggr = false; + ret = mt7615_mcu_add_tx_ba(dev, params, false); + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); + break; + } + mt7615_mutex_release(dev); + + return ret; +} + +static int +mt7615_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NOTEXIST, + IEEE80211_STA_NONE); +} + +static int +mt7615_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NONE, + IEEE80211_STA_NOTEXIST); +} + +static int +mt7615_get_stats(struct ieee80211_hw *hw, + struct ieee80211_low_level_stats *stats) +{ + struct mt7615_phy *phy = mt7615_hw_phy(hw); + struct mib_stats *mib = &phy->mib; + + mt7615_mutex_acquire(phy->dev); + + stats->dot11RTSSuccessCount = mib->rts_cnt; + stats->dot11RTSFailureCount = mib->rts_retries_cnt; + stats->dot11FCSErrorCount = mib->fcs_err_cnt; + stats->dot11ACKFailureCount = mib->ack_fail_cnt; + + mt7615_mutex_release(phy->dev); + + return 0; +} + +static u64 +mt7615_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; + struct mt7615_dev *dev = mt7615_hw_dev(hw); + union { + u64 t64; + u32 t32[2]; + } tsf; + u16 idx = mvif->mt76.omac_idx; + u32 reg; + + idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx; + reg = idx > 1 ? MT_LPON_TCR2(idx): MT_LPON_TCR0(idx); + + mt7615_mutex_acquire(dev); + + /* TSF read */ + mt76_rmw(dev, reg, MT_LPON_TCR_MODE, MT_LPON_TCR_READ); + tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0); + tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1); + + mt7615_mutex_release(dev); + + return tsf.t64; +} + +static void +mt7615_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u64 timestamp) +{ + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; + struct mt7615_dev *dev = mt7615_hw_dev(hw); + union { + u64 t64; + u32 t32[2]; + } tsf = { .t64 = timestamp, }; + u16 idx = mvif->mt76.omac_idx; + u32 reg; + + idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx; + reg = idx > 1 ? 
MT_LPON_TCR2(idx): MT_LPON_TCR0(idx); + + mt7615_mutex_acquire(dev); + + mt76_wr(dev, MT_LPON_UTTR0, tsf.t32[0]); + mt76_wr(dev, MT_LPON_UTTR1, tsf.t32[1]); + /* TSF software overwrite */ + mt76_rmw(dev, reg, MT_LPON_TCR_MODE, MT_LPON_TCR_WRITE); + + mt7615_mutex_release(dev); +} + +static void +mt7615_offset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + s64 timestamp) +{ + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; + struct mt7615_dev *dev = mt7615_hw_dev(hw); + union { + u64 t64; + u32 t32[2]; + } tsf = { .t64 = timestamp, }; + u16 idx = mvif->mt76.omac_idx; + u32 reg; + + idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx; + reg = idx > 1 ? MT_LPON_TCR2(idx): MT_LPON_TCR0(idx); + + mt7615_mutex_acquire(dev); + + mt76_wr(dev, MT_LPON_UTTR0, tsf.t32[0]); + mt76_wr(dev, MT_LPON_UTTR1, tsf.t32[1]); + /* TSF software adjust*/ + mt76_rmw(dev, reg, MT_LPON_TCR_MODE, MT_LPON_TCR_ADJUST); + + mt7615_mutex_release(dev); +} + +static void +mt7615_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class) +{ + struct mt7615_phy *phy = mt7615_hw_phy(hw); + struct mt7615_dev *dev = phy->dev; + + mt7615_mutex_acquire(dev); + phy->coverage_class = max_t(s16, coverage_class, 0); + mt7615_mac_set_timing(phy); + mt7615_mutex_release(dev); +} + +static int +mt7615_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) +{ + struct mt7615_dev *dev = mt7615_hw_dev(hw); + struct mt7615_phy *phy = mt7615_hw_phy(hw); + int max_nss = hweight8(hw->wiphy->available_antennas_tx); + bool ext_phy = phy != &dev->phy; + + if (!tx_ant || tx_ant != rx_ant || ffs(tx_ant) > max_nss) + return -EINVAL; + + if ((BIT(hweight8(tx_ant)) - 1) != tx_ant) + tx_ant = BIT(ffs(tx_ant) - 1) - 1; + + mt7615_mutex_acquire(dev); + + phy->mt76->antenna_mask = tx_ant; + if (ext_phy) { + if (dev->chainmask == 0xf) + tx_ant <<= 2; + else + tx_ant <<= 1; + } + phy->mt76->chainmask = tx_ant; + + mt76_set_stream_caps(phy->mt76, true); + + mt7615_mutex_release(dev); + + return 0; +} + +static void mt7615_roc_iter(void *priv, u8 *mac, + struct ieee80211_vif *vif) +{ + struct mt7615_phy *phy = priv; + + mt7615_mcu_set_roc(phy, vif, NULL, 0); +} + +void mt7615_roc_work(struct work_struct *work) +{ + struct mt7615_phy *phy; + + phy = (struct mt7615_phy *)container_of(work, struct mt7615_phy, + roc_work); + + if (!test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state)) + return; + + mt7615_mutex_acquire(phy->dev); + ieee80211_iterate_active_interfaces(phy->mt76->hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7615_roc_iter, phy); + mt7615_mutex_release(phy->dev); + ieee80211_remain_on_channel_expired(phy->mt76->hw); +} + +void mt7615_roc_timer(struct timer_list *timer) +{ + struct mt7615_phy *phy = from_timer(phy, timer, roc_timer); + + ieee80211_queue_work(phy->mt76->hw, &phy->roc_work); +} + +void mt7615_scan_work(struct work_struct *work) +{ + struct mt7615_phy *phy; + + phy = (struct mt7615_phy *)container_of(work, struct mt7615_phy, + scan_work.work); + + while (true) { + struct mt7615_mcu_rxd *rxd; + struct sk_buff *skb; + + spin_lock_bh(&phy->dev->mt76.lock); + skb = __skb_dequeue(&phy->scan_event_list); + spin_unlock_bh(&phy->dev->mt76.lock); + + if (!skb) + break; + + rxd = (struct mt7615_mcu_rxd *)skb->data; + if (rxd->eid == MCU_EVENT_SCHED_SCAN_DONE) { + ieee80211_sched_scan_results(phy->mt76->hw); + } else if (test_and_clear_bit(MT76_HW_SCANNING, + &phy->mt76->state)) { + struct cfg80211_scan_info info = { + .aborted = false, + }; + + ieee80211_scan_completed(phy->mt76->hw, &info); + } + dev_kfree_skb(skb); + } 
+} + +static int +mt7615_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_scan_request *req) +{ + struct mt7615_dev *dev = mt7615_hw_dev(hw); + struct mt76_phy *mphy = hw->priv; + int err; + + /* fall-back to sw-scan */ + if (!mt7615_firmware_offload(dev)) + return 1; + + mt7615_mutex_acquire(dev); + err = mt76_connac_mcu_hw_scan(mphy, vif, req); + mt7615_mutex_release(dev); + + return err; +} + +static void +mt7615_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct mt7615_dev *dev = mt7615_hw_dev(hw); + struct mt76_phy *mphy = hw->priv; + + mt7615_mutex_acquire(dev); + mt76_connac_mcu_cancel_hw_scan(mphy, vif); + mt7615_mutex_release(dev); +} + +static int +mt7615_start_sched_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct cfg80211_sched_scan_request *req, + struct ieee80211_scan_ies *ies) +{ + struct mt7615_dev *dev = mt7615_hw_dev(hw); + struct mt76_phy *mphy = hw->priv; + int err; + + if (!mt7615_firmware_offload(dev)) + return -EOPNOTSUPP; + + mt7615_mutex_acquire(dev); + + err = mt76_connac_mcu_sched_scan_req(mphy, vif, req); + if (err < 0) + goto out; + + err = mt76_connac_mcu_sched_scan_enable(mphy, vif, true); +out: + mt7615_mutex_release(dev); + + return err; +} + +static int +mt7615_stop_sched_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct mt7615_dev *dev = mt7615_hw_dev(hw); + struct mt76_phy *mphy = hw->priv; + int err; + + if (!mt7615_firmware_offload(dev)) + return -EOPNOTSUPP; + + mt7615_mutex_acquire(dev); + err = mt76_connac_mcu_sched_scan_enable(mphy, vif, false); + mt7615_mutex_release(dev); + + return err; +} + +static int mt7615_remain_on_channel(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_channel *chan, + int duration, + enum ieee80211_roc_type type) +{ + struct mt7615_phy *phy = mt7615_hw_phy(hw); + int err; + + if (test_and_set_bit(MT76_STATE_ROC, &phy->mt76->state)) + return 0; + + mt7615_mutex_acquire(phy->dev); + + err = mt7615_mcu_set_roc(phy, vif, chan, duration); + if (err < 0) { + clear_bit(MT76_STATE_ROC, &phy->mt76->state); + goto out; + } + + if (!wait_event_timeout(phy->roc_wait, phy->roc_grant, HZ)) { + mt7615_mcu_set_roc(phy, vif, NULL, 0); + clear_bit(MT76_STATE_ROC, &phy->mt76->state); + err = -ETIMEDOUT; + } + +out: + mt7615_mutex_release(phy->dev); + + return err; +} + +static int mt7615_cancel_remain_on_channel(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct mt7615_phy *phy = mt7615_hw_phy(hw); + int err; + + if (!test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state)) + return 0; + + del_timer_sync(&phy->roc_timer); + cancel_work_sync(&phy->roc_work); + + mt7615_mutex_acquire(phy->dev); + err = mt7615_mcu_set_roc(phy, vif, NULL, 0); + mt7615_mutex_release(phy->dev); + + return err; +} + +static void mt7615_sta_set_decap_offload(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + bool enabled) +{ + struct mt7615_dev *dev = mt7615_hw_dev(hw); + struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv; + + mt7615_mutex_acquire(dev); + + if (enabled) + set_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags); + else + clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags); + + mt7615_mcu_set_sta_decap_offload(dev, vif, sta); + + mt7615_mutex_release(dev); +} + +#ifdef CONFIG_PM +static int mt7615_suspend(struct ieee80211_hw *hw, + struct cfg80211_wowlan *wowlan) +{ + struct mt7615_phy *phy = mt7615_hw_phy(hw); + struct mt7615_dev *dev = mt7615_hw_dev(hw); + int err = 0; + + 
cancel_delayed_work_sync(&dev->pm.ps_work); + mt76_connac_free_pending_tx_skbs(&dev->pm, NULL); + + mt7615_mutex_acquire(dev); + + clear_bit(MT76_STATE_RUNNING, &phy->mt76->state); + cancel_delayed_work_sync(&phy->scan_work); + cancel_delayed_work_sync(&phy->mt76->mac_work); + + set_bit(MT76_STATE_SUSPEND, &phy->mt76->state); + ieee80211_iterate_active_interfaces(hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt76_connac_mcu_set_suspend_iter, + phy->mt76); + + if (!mt7615_dev_running(dev)) + err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true); + + mt7615_mutex_release(dev); + + return err; +} + +static int mt7615_resume(struct ieee80211_hw *hw) +{ + struct mt7615_phy *phy = mt7615_hw_phy(hw); + struct mt7615_dev *dev = mt7615_hw_dev(hw); + unsigned long timeout; + bool running; + + mt7615_mutex_acquire(dev); + + running = mt7615_dev_running(dev); + set_bit(MT76_STATE_RUNNING, &phy->mt76->state); + + if (!running) { + int err; + + err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false); + if (err < 0) { + mt7615_mutex_release(dev); + return err; + } + } + + clear_bit(MT76_STATE_SUSPEND, &phy->mt76->state); + ieee80211_iterate_active_interfaces(hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt76_connac_mcu_set_suspend_iter, + phy->mt76); + + timeout = mt7615_get_macwork_timeout(dev); + ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work, timeout); + + mt7615_mutex_release(dev); + + return 0; +} + +static void mt7615_set_wakeup(struct ieee80211_hw *hw, bool enabled) +{ + struct mt7615_dev *dev = mt7615_hw_dev(hw); + struct mt76_dev *mdev = &dev->mt76; + + device_set_wakeup_enable(mdev->dev, enabled); +} + +static void mt7615_set_rekey_data(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_gtk_rekey_data *data) +{ + struct mt7615_dev *dev = mt7615_hw_dev(hw); + + mt7615_mutex_acquire(dev); + mt76_connac_mcu_update_gtk_rekey(hw, vif, data); + mt7615_mutex_release(dev); +} +#endif /* CONFIG_PM */ + +const struct ieee80211_ops mt7615_ops = { + .tx = mt7615_tx, + .start = mt7615_start, + .stop = mt7615_stop, + .add_interface = mt7615_add_interface, + .remove_interface = mt7615_remove_interface, + .config = mt7615_config, + .conf_tx = mt7615_conf_tx, + .configure_filter = mt7615_configure_filter, + .bss_info_changed = mt7615_bss_info_changed, + .sta_add = mt7615_sta_add, + .sta_remove = mt7615_sta_remove, + .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove, + .set_key = mt7615_set_key, + .sta_set_decap_offload = mt7615_sta_set_decap_offload, + .ampdu_action = mt7615_ampdu_action, + .set_rts_threshold = mt7615_set_rts_threshold, + .wake_tx_queue = mt76_wake_tx_queue, + .sta_rate_tbl_update = mt7615_sta_rate_tbl_update, + .sw_scan_start = mt76_sw_scan, + .sw_scan_complete = mt76_sw_scan_complete, + .release_buffered_frames = mt76_release_buffered_frames, + .get_txpower = mt76_get_txpower, + .channel_switch_beacon = mt7615_channel_switch_beacon, + .get_stats = mt7615_get_stats, + .get_tsf = mt7615_get_tsf, + .set_tsf = mt7615_set_tsf, + .offset_tsf = mt7615_offset_tsf, + .get_survey = mt76_get_survey, + .get_antenna = mt76_get_antenna, + .set_antenna = mt7615_set_antenna, + .set_coverage_class = mt7615_set_coverage_class, + .hw_scan = mt7615_hw_scan, + .cancel_hw_scan = mt7615_cancel_hw_scan, + .sched_scan_start = mt7615_start_sched_scan, + .sched_scan_stop = mt7615_stop_sched_scan, + .remain_on_channel = mt7615_remain_on_channel, + .cancel_remain_on_channel = mt7615_cancel_remain_on_channel, + CFG80211_TESTMODE_CMD(mt76_testmode_cmd) + CFG80211_TESTMODE_DUMP(mt76_testmode_dump) +#ifdef 
CONFIG_PM + .suspend = mt7615_suspend, + .resume = mt7615_resume, + .set_wakeup = mt7615_set_wakeup, + .set_rekey_data = mt7615_set_rekey_data, +#endif /* CONFIG_PM */ + .set_sar_specs = mt7615_set_sar_specs, +}; +EXPORT_SYMBOL_GPL(mt7615_ops); + +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c new file mode 100644 index 000000000..3dac76e6d --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c @@ -0,0 +1,2570 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2019 MediaTek Inc. + * + * Author: Roy Luo + * Ryder Lee + */ + +#include +#include "mt7615.h" +#include "mcu.h" +#include "mac.h" +#include "eeprom.h" + +static bool prefer_offload_fw = true; +module_param(prefer_offload_fw, bool, 0644); +MODULE_PARM_DESC(prefer_offload_fw, + "Prefer client mode offload firmware (MT7663)"); + +struct mt7615_patch_hdr { + char build_date[16]; + char platform[4]; + __be32 hw_sw_ver; + __be32 patch_ver; + __be16 checksum; +} __packed; + +struct mt7615_fw_trailer { + __le32 addr; + u8 chip_id; + u8 feature_set; + u8 eco_code; + char fw_ver[10]; + char build_date[15]; + __le32 len; +} __packed; + +#define FW_V3_COMMON_TAILER_SIZE 36 +#define FW_V3_REGION_TAILER_SIZE 40 +#define FW_START_OVERRIDE BIT(0) +#define FW_START_DLYCAL BIT(1) +#define FW_START_WORKING_PDA_CR4 BIT(2) + +struct mt7663_fw_buf { + __le32 crc; + __le32 d_img_size; + __le32 block_size; + u8 rsv[4]; + __le32 img_dest_addr; + __le32 img_size; + u8 feature_set; +}; + +#define MT7615_PATCH_ADDRESS 0x80000 +#define MT7622_PATCH_ADDRESS 0x9c000 +#define MT7663_PATCH_ADDRESS 0xdc000 + +#define N9_REGION_NUM 2 +#define CR4_REGION_NUM 1 + +#define IMG_CRC_LEN 4 + +void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb, + int cmd, int *wait_seq) +{ + int txd_len, mcu_cmd = FIELD_GET(__MCU_CMD_FIELD_ID, cmd); + struct mt7615_uni_txd *uni_txd; + struct mt7615_mcu_txd *mcu_txd; + u8 seq, q_idx, pkt_fmt; + __le32 *txd; + u32 val; + + /* TODO: make dynamic based on msg type */ + dev->mt76.mcu.timeout = 20 * HZ; + + seq = ++dev->mt76.mcu.msg_seq & 0xf; + if (!seq) + seq = ++dev->mt76.mcu.msg_seq & 0xf; + if (wait_seq) + *wait_seq = seq; + + txd_len = cmd & __MCU_CMD_FIELD_UNI ? 
sizeof(*uni_txd) : sizeof(*mcu_txd); + txd = (__le32 *)skb_push(skb, txd_len); + + if (cmd != MCU_CMD(FW_SCATTER)) { + q_idx = MT_TX_MCU_PORT_RX_Q0; + pkt_fmt = MT_TX_TYPE_CMD; + } else { + q_idx = MT_TX_MCU_PORT_RX_FWDL; + pkt_fmt = MT_TX_TYPE_FW; + } + + val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len) | + FIELD_PREP(MT_TXD0_P_IDX, MT_TX_PORT_IDX_MCU) | + FIELD_PREP(MT_TXD0_Q_IDX, q_idx); + txd[0] = cpu_to_le32(val); + + val = MT_TXD1_LONG_FORMAT | + FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_CMD) | + FIELD_PREP(MT_TXD1_PKT_FMT, pkt_fmt); + txd[1] = cpu_to_le32(val); + + if (cmd & __MCU_CMD_FIELD_UNI) { + uni_txd = (struct mt7615_uni_txd *)txd; + uni_txd->len = cpu_to_le16(skb->len - sizeof(uni_txd->txd)); + uni_txd->option = MCU_CMD_UNI_EXT_ACK; + uni_txd->cid = cpu_to_le16(mcu_cmd); + uni_txd->s2d_index = MCU_S2D_H2N; + uni_txd->pkt_type = MCU_PKT_ID; + uni_txd->seq = seq; + + return; + } + + mcu_txd = (struct mt7615_mcu_txd *)txd; + mcu_txd->len = cpu_to_le16(skb->len - sizeof(mcu_txd->txd)); + mcu_txd->pq_id = cpu_to_le16(MCU_PQ_ID(MT_TX_PORT_IDX_MCU, q_idx)); + mcu_txd->s2d_index = MCU_S2D_H2N; + mcu_txd->pkt_type = MCU_PKT_ID; + mcu_txd->seq = seq; + mcu_txd->cid = mcu_cmd; + mcu_txd->ext_cid = FIELD_GET(__MCU_CMD_FIELD_EXT_ID, cmd); + + if (mcu_txd->ext_cid || (cmd & __MCU_CMD_FIELD_CE)) { + if (cmd & __MCU_CMD_FIELD_QUERY) + mcu_txd->set_query = MCU_Q_QUERY; + else + mcu_txd->set_query = MCU_Q_SET; + mcu_txd->ext_cid_ack = !!mcu_txd->ext_cid; + } else { + mcu_txd->set_query = MCU_Q_NA; + } +} +EXPORT_SYMBOL_GPL(mt7615_mcu_fill_msg); + +int mt7615_mcu_parse_response(struct mt76_dev *mdev, int cmd, + struct sk_buff *skb, int seq) +{ + struct mt7615_mcu_rxd *rxd; + int ret = 0; + + if (!skb) { + dev_err(mdev->dev, "Message %08x (seq %d) timeout\n", + cmd, seq); + return -ETIMEDOUT; + } + + rxd = (struct mt7615_mcu_rxd *)skb->data; + if (seq != rxd->seq) + return -EAGAIN; + + if (cmd == MCU_CMD(PATCH_SEM_CONTROL)) { + skb_pull(skb, sizeof(*rxd) - 4); + ret = *skb->data; + } else if (cmd == MCU_EXT_CMD(THERMAL_CTRL)) { + skb_pull(skb, sizeof(*rxd)); + ret = le32_to_cpu(*(__le32 *)skb->data); + } else if (cmd == MCU_EXT_QUERY(RF_REG_ACCESS)) { + skb_pull(skb, sizeof(*rxd)); + ret = le32_to_cpu(*(__le32 *)&skb->data[8]); + } else if (cmd == MCU_UNI_CMD(DEV_INFO_UPDATE) || + cmd == MCU_UNI_CMD(BSS_INFO_UPDATE) || + cmd == MCU_UNI_CMD(STA_REC_UPDATE) || + cmd == MCU_UNI_CMD(HIF_CTRL) || + cmd == MCU_UNI_CMD(OFFLOAD) || + cmd == MCU_UNI_CMD(SUSPEND)) { + struct mt7615_mcu_uni_event *event; + + skb_pull(skb, sizeof(*rxd)); + event = (struct mt7615_mcu_uni_event *)skb->data; + ret = le32_to_cpu(event->status); + } else if (cmd == MCU_CE_QUERY(REG_READ)) { + struct mt7615_mcu_reg_event *event; + + skb_pull(skb, sizeof(*rxd)); + event = (struct mt7615_mcu_reg_event *)skb->data; + ret = (int)le32_to_cpu(event->val); + } + + return ret; +} +EXPORT_SYMBOL_GPL(mt7615_mcu_parse_response); + +static int +mt7615_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, + int cmd, int *seq) +{ + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); + enum mt76_mcuq_id qid; + + mt7615_mcu_fill_msg(dev, skb, cmd, seq); + if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state)) + qid = MT_MCUQ_WM; + else + qid = MT_MCUQ_FWDL; + + return mt76_tx_queue_skb_raw(dev, dev->mt76.q_mcu[qid], skb, 0); +} + +u32 mt7615_rf_rr(struct mt7615_dev *dev, u32 wf, u32 reg) +{ + struct { + __le32 wifi_stream; + __le32 address; + __le32 data; + } req = { + .wifi_stream = cpu_to_le32(wf), + .address = 
cpu_to_le32(reg), + }; + + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_QUERY(RF_REG_ACCESS), + &req, sizeof(req), true); +} + +int mt7615_rf_wr(struct mt7615_dev *dev, u32 wf, u32 reg, u32 val) +{ + struct { + __le32 wifi_stream; + __le32 address; + __le32 data; + } req = { + .wifi_stream = cpu_to_le32(wf), + .address = cpu_to_le32(reg), + .data = cpu_to_le32(val), + }; + + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RF_REG_ACCESS), + &req, sizeof(req), false); +} + +void mt7622_trigger_hif_int(struct mt7615_dev *dev, bool en) +{ + if (!is_mt7622(&dev->mt76)) + return; + + regmap_update_bits(dev->infracfg, MT_INFRACFG_MISC, + MT_INFRACFG_MISC_AP2CONN_WAKE, + !en * MT_INFRACFG_MISC_AP2CONN_WAKE); +} +EXPORT_SYMBOL_GPL(mt7622_trigger_hif_int); + +static int mt7615_mcu_drv_pmctrl(struct mt7615_dev *dev) +{ + struct mt76_phy *mphy = &dev->mt76.phy; + struct mt76_connac_pm *pm = &dev->pm; + struct mt76_dev *mdev = &dev->mt76; + u32 addr; + int err; + + if (is_mt7663(mdev)) { + /* Clear firmware own via N9 eint */ + mt76_wr(dev, MT_PCIE_DOORBELL_PUSH, MT_CFG_LPCR_HOST_DRV_OWN); + mt76_poll(dev, MT_CONN_ON_MISC, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000); + + addr = MT_CONN_HIF_ON_LPCTL; + } else { + addr = MT_CFG_LPCR_HOST; + } + + mt76_wr(dev, addr, MT_CFG_LPCR_HOST_DRV_OWN); + + mt7622_trigger_hif_int(dev, true); + + err = !mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000); + + mt7622_trigger_hif_int(dev, false); + + if (err) { + dev_err(mdev->dev, "driver own failed\n"); + return -ETIMEDOUT; + } + + clear_bit(MT76_STATE_PM, &mphy->state); + + pm->stats.last_wake_event = jiffies; + pm->stats.doze_time += pm->stats.last_wake_event - + pm->stats.last_doze_event; + + return 0; +} + +static int mt7615_mcu_lp_drv_pmctrl(struct mt7615_dev *dev) +{ + struct mt76_phy *mphy = &dev->mt76.phy; + struct mt76_connac_pm *pm = &dev->pm; + int i, err = 0; + + mutex_lock(&pm->mutex); + + if (!test_bit(MT76_STATE_PM, &mphy->state)) + goto out; + + for (i = 0; i < MT7615_DRV_OWN_RETRY_COUNT; i++) { + mt76_wr(dev, MT_PCIE_DOORBELL_PUSH, MT_CFG_LPCR_HOST_DRV_OWN); + if (mt76_poll_msec(dev, MT_CONN_HIF_ON_LPCTL, + MT_CFG_LPCR_HOST_FW_OWN, 0, 50)) + break; + } + + if (i == MT7615_DRV_OWN_RETRY_COUNT) { + dev_err(dev->mt76.dev, "driver own failed\n"); + err = -EIO; + goto out; + } + clear_bit(MT76_STATE_PM, &mphy->state); + + pm->stats.last_wake_event = jiffies; + pm->stats.doze_time += pm->stats.last_wake_event - + pm->stats.last_doze_event; +out: + mutex_unlock(&pm->mutex); + + return err; +} + +static int mt7615_mcu_fw_pmctrl(struct mt7615_dev *dev) +{ + struct mt76_phy *mphy = &dev->mt76.phy; + struct mt76_connac_pm *pm = &dev->pm; + int err = 0; + u32 addr; + + mutex_lock(&pm->mutex); + + if (mt76_connac_skip_fw_pmctrl(mphy, pm)) + goto out; + + mt7622_trigger_hif_int(dev, true); + + addr = is_mt7663(&dev->mt76) ? 
MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST; + mt76_wr(dev, addr, MT_CFG_LPCR_HOST_FW_OWN); + + if (is_mt7622(&dev->mt76) && + !mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, + MT_CFG_LPCR_HOST_FW_OWN, 3000)) { + dev_err(dev->mt76.dev, "Timeout for firmware own\n"); + clear_bit(MT76_STATE_PM, &mphy->state); + err = -EIO; + } + + mt7622_trigger_hif_int(dev, false); + if (!err) { + pm->stats.last_doze_event = jiffies; + pm->stats.awake_time += pm->stats.last_doze_event - + pm->stats.last_wake_event; + } +out: + mutex_unlock(&pm->mutex); + + return err; +} + +static void +mt7615_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif) +{ + if (vif->bss_conf.csa_active) + ieee80211_csa_finish(vif); +} + +static void +mt7615_mcu_rx_csa_notify(struct mt7615_dev *dev, struct sk_buff *skb) +{ + struct mt7615_phy *ext_phy = mt7615_ext_phy(dev); + struct mt76_phy *mphy = &dev->mt76.phy; + struct mt7615_mcu_csa_notify *c; + + c = (struct mt7615_mcu_csa_notify *)skb->data; + + if (c->omac_idx > EXT_BSSID_MAX) + return; + + if (ext_phy && ext_phy->omac_mask & BIT_ULL(c->omac_idx)) + mphy = dev->mt76.phys[MT_BAND1]; + + ieee80211_iterate_active_interfaces_atomic(mphy->hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7615_mcu_csa_finish, mphy->hw); +} + +static void +mt7615_mcu_rx_radar_detected(struct mt7615_dev *dev, struct sk_buff *skb) +{ + struct mt76_phy *mphy = &dev->mt76.phy; + struct mt7615_mcu_rdd_report *r; + + r = (struct mt7615_mcu_rdd_report *)skb->data; + + if (!dev->radar_pattern.n_pulses && !r->long_detected && + !r->constant_prf_detected && !r->staggered_prf_detected) + return; + + if (r->band_idx && dev->mt76.phys[MT_BAND1]) + mphy = dev->mt76.phys[MT_BAND1]; + + if (mt76_phy_dfs_state(mphy) < MT_DFS_STATE_CAC) + return; + + ieee80211_radar_detected(mphy->hw); + dev->hw_pattern++; +} + +static void +mt7615_mcu_rx_log_message(struct mt7615_dev *dev, struct sk_buff *skb) +{ + struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data; + const char *data = (char *)&rxd[1]; + const char *type; + + switch (rxd->s2d_index) { + case 0: + type = "N9"; + break; + case 2: + type = "CR4"; + break; + default: + type = "unknown"; + break; + } + + wiphy_info(mt76_hw(dev)->wiphy, "%s: %.*s", type, + (int)(skb->len - sizeof(*rxd)), data); +} + +static void +mt7615_mcu_rx_ext_event(struct mt7615_dev *dev, struct sk_buff *skb) +{ + struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data; + + switch (rxd->ext_eid) { + case MCU_EXT_EVENT_RDD_REPORT: + mt7615_mcu_rx_radar_detected(dev, skb); + break; + case MCU_EXT_EVENT_CSA_NOTIFY: + mt7615_mcu_rx_csa_notify(dev, skb); + break; + case MCU_EXT_EVENT_FW_LOG_2_HOST: + mt7615_mcu_rx_log_message(dev, skb); + break; + default: + break; + } +} + +static void +mt7615_mcu_scan_event(struct mt7615_dev *dev, struct sk_buff *skb) +{ + u8 *seq_num = skb->data + sizeof(struct mt7615_mcu_rxd); + struct mt7615_phy *phy; + struct mt76_phy *mphy; + + if (*seq_num & BIT(7) && dev->mt76.phys[MT_BAND1]) + mphy = dev->mt76.phys[MT_BAND1]; + else + mphy = &dev->mt76.phy; + + phy = (struct mt7615_phy *)mphy->priv; + + spin_lock_bh(&dev->mt76.lock); + __skb_queue_tail(&phy->scan_event_list, skb); + spin_unlock_bh(&dev->mt76.lock); + + ieee80211_queue_delayed_work(mphy->hw, &phy->scan_work, + MT7615_HW_SCAN_TIMEOUT); +} + +static void +mt7615_mcu_roc_event(struct mt7615_dev *dev, struct sk_buff *skb) +{ + struct mt7615_roc_tlv *event; + struct mt7615_phy *phy; + struct mt76_phy *mphy; + int duration; + + skb_pull(skb, sizeof(struct mt7615_mcu_rxd)); + event = (struct 
mt7615_roc_tlv *)skb->data; + + if (event->dbdc_band && dev->mt76.phys[MT_BAND1]) + mphy = dev->mt76.phys[MT_BAND1]; + else + mphy = &dev->mt76.phy; + + ieee80211_ready_on_channel(mphy->hw); + + phy = (struct mt7615_phy *)mphy->priv; + phy->roc_grant = true; + wake_up(&phy->roc_wait); + + duration = le32_to_cpu(event->max_interval); + mod_timer(&phy->roc_timer, + round_jiffies_up(jiffies + msecs_to_jiffies(duration))); +} + +static void +mt7615_mcu_beacon_loss_event(struct mt7615_dev *dev, struct sk_buff *skb) +{ + struct mt76_connac_beacon_loss_event *event; + struct mt76_phy *mphy; + u8 band_idx = 0; /* DBDC support */ + + skb_pull(skb, sizeof(struct mt7615_mcu_rxd)); + event = (struct mt76_connac_beacon_loss_event *)skb->data; + if (band_idx && dev->mt76.phys[MT_BAND1]) + mphy = dev->mt76.phys[MT_BAND1]; + else + mphy = &dev->mt76.phy; + + ieee80211_iterate_active_interfaces_atomic(mphy->hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt76_connac_mcu_beacon_loss_iter, + event); +} + +static void +mt7615_mcu_bss_event(struct mt7615_dev *dev, struct sk_buff *skb) +{ + struct mt76_connac_mcu_bss_event *event; + struct mt76_phy *mphy; + u8 band_idx = 0; /* DBDC support */ + + skb_pull(skb, sizeof(struct mt7615_mcu_rxd)); + event = (struct mt76_connac_mcu_bss_event *)skb->data; + + if (band_idx && dev->mt76.phys[MT_BAND1]) + mphy = dev->mt76.phys[MT_BAND1]; + else + mphy = &dev->mt76.phy; + + if (event->is_absent) + ieee80211_stop_queues(mphy->hw); + else + ieee80211_wake_queues(mphy->hw); +} + +static void +mt7615_mcu_rx_unsolicited_event(struct mt7615_dev *dev, struct sk_buff *skb) +{ + struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data; + + switch (rxd->eid) { + case MCU_EVENT_EXT: + mt7615_mcu_rx_ext_event(dev, skb); + break; + case MCU_EVENT_BSS_BEACON_LOSS: + mt7615_mcu_beacon_loss_event(dev, skb); + break; + case MCU_EVENT_ROC: + mt7615_mcu_roc_event(dev, skb); + break; + case MCU_EVENT_SCHED_SCAN_DONE: + case MCU_EVENT_SCAN_DONE: + mt7615_mcu_scan_event(dev, skb); + return; + case MCU_EVENT_BSS_ABSENCE: + mt7615_mcu_bss_event(dev, skb); + break; + case MCU_EVENT_COREDUMP: + mt76_connac_mcu_coredump_event(&dev->mt76, skb, + &dev->coredump); + return; + default: + break; + } + dev_kfree_skb(skb); +} + +void mt7615_mcu_rx_event(struct mt7615_dev *dev, struct sk_buff *skb) +{ + struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data; + + if (rxd->ext_eid == MCU_EXT_EVENT_THERMAL_PROTECT || + rxd->ext_eid == MCU_EXT_EVENT_FW_LOG_2_HOST || + rxd->ext_eid == MCU_EXT_EVENT_ASSERT_DUMP || + rxd->ext_eid == MCU_EXT_EVENT_PS_SYNC || + rxd->eid == MCU_EVENT_BSS_BEACON_LOSS || + rxd->eid == MCU_EVENT_SCHED_SCAN_DONE || + rxd->eid == MCU_EVENT_BSS_ABSENCE || + rxd->eid == MCU_EVENT_SCAN_DONE || + rxd->eid == MCU_EVENT_COREDUMP || + rxd->eid == MCU_EVENT_ROC || + !rxd->seq) + mt7615_mcu_rx_unsolicited_event(dev, skb); + else + mt76_mcu_rx_event(&dev->mt76, skb); +} + +static int +mt7615_mcu_muar_config(struct mt7615_dev *dev, struct ieee80211_vif *vif, + bool bssid, bool enable) +{ + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; + u32 idx = mvif->mt76.omac_idx - REPEATER_BSSID_START; + u32 mask = dev->omac_mask >> 32 & ~BIT(idx); + const u8 *addr = vif->addr; + struct { + u8 mode; + u8 force_clear; + u8 clear_bitmap[8]; + u8 entry_count; + u8 write; + + u8 index; + u8 bssid; + u8 addr[ETH_ALEN]; + } __packed req = { + .mode = !!mask || enable, + .entry_count = 1, + .write = 1, + + .index = idx * 2 + bssid, + }; + + if (bssid) + addr = vif->bss_conf.bssid; + + if 
(enable) + ether_addr_copy(req.addr, addr); + + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MUAR_UPDATE), + &req, sizeof(req), true); +} + +static int +mt7615_mcu_add_dev(struct mt7615_phy *phy, struct ieee80211_vif *vif, + bool enable) +{ + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; + struct mt7615_dev *dev = phy->dev; + struct { + struct req_hdr { + u8 omac_idx; + u8 band_idx; + __le16 tlv_num; + u8 is_tlv_append; + u8 rsv[3]; + } __packed hdr; + struct req_tlv { + __le16 tag; + __le16 len; + u8 active; + u8 band_idx; + u8 omac_addr[ETH_ALEN]; + } __packed tlv; + } data = { + .hdr = { + .omac_idx = mvif->mt76.omac_idx, + .band_idx = mvif->mt76.band_idx, + .tlv_num = cpu_to_le16(1), + .is_tlv_append = 1, + }, + .tlv = { + .tag = cpu_to_le16(DEV_INFO_ACTIVE), + .len = cpu_to_le16(sizeof(struct req_tlv)), + .active = enable, + .band_idx = mvif->mt76.band_idx, + }, + }; + + if (mvif->mt76.omac_idx >= REPEATER_BSSID_START) + return mt7615_mcu_muar_config(dev, vif, false, enable); + + memcpy(data.tlv.omac_addr, vif->addr, ETH_ALEN); + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(DEV_INFO_UPDATE), + &data, sizeof(data), true); +} + +static int +mt7615_mcu_add_beacon_offload(struct mt7615_dev *dev, + struct ieee80211_hw *hw, + struct ieee80211_vif *vif, bool enable) +{ + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; + struct mt76_wcid *wcid = &dev->mt76.global_wcid; + struct ieee80211_mutable_offsets offs; + struct ieee80211_tx_info *info; + struct req { + u8 omac_idx; + u8 enable; + u8 wlan_idx; + u8 band_idx; + u8 pkt_type; + u8 need_pre_tbtt_int; + __le16 csa_ie_pos; + __le16 pkt_len; + __le16 tim_ie_pos; + u8 pkt[512]; + u8 csa_cnt; + /* bss color change */ + u8 bcc_cnt; + __le16 bcc_ie_pos; + } __packed req = { + .omac_idx = mvif->mt76.omac_idx, + .enable = enable, + .wlan_idx = wcid->idx, + .band_idx = mvif->mt76.band_idx, + }; + struct sk_buff *skb; + + if (!enable) + goto out; + + skb = ieee80211_beacon_get_template(hw, vif, &offs, 0); + if (!skb) + return -EINVAL; + + if (skb->len > 512 - MT_TXD_SIZE) { + dev_err(dev->mt76.dev, "Bcn size limit exceed\n"); + dev_kfree_skb(skb); + return -EINVAL; + } + + info = IEEE80211_SKB_CB(skb); + info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, mvif->mt76.band_idx); + + mt7615_mac_write_txwi(dev, (__le32 *)(req.pkt), skb, wcid, NULL, + 0, NULL, 0, true); + memcpy(req.pkt + MT_TXD_SIZE, skb->data, skb->len); + req.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len); + req.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + offs.tim_offset); + if (offs.cntdwn_counter_offs[0]) { + u16 csa_offs; + + csa_offs = MT_TXD_SIZE + offs.cntdwn_counter_offs[0] - 4; + req.csa_ie_pos = cpu_to_le16(csa_offs); + req.csa_cnt = skb->data[offs.cntdwn_counter_offs[0]]; + } + dev_kfree_skb(skb); + +out: + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(BCN_OFFLOAD), &req, + sizeof(req), true); +} + +static int +mt7615_mcu_ctrl_pm_state(struct mt7615_dev *dev, int band, int state) +{ + return mt76_connac_mcu_set_pm(&dev->mt76, band, state); +} + +static int +mt7615_mcu_add_bss(struct mt7615_phy *phy, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, bool enable) +{ + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; + struct mt7615_dev *dev = phy->dev; + struct sk_buff *skb; + + if (mvif->mt76.omac_idx >= REPEATER_BSSID_START) + mt7615_mcu_muar_config(dev, vif, true, enable); + + skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, NULL); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + if (enable) + 
mt76_connac_mcu_bss_omac_tlv(skb, vif); + + mt76_connac_mcu_bss_basic_tlv(skb, vif, sta, phy->mt76, + mvif->sta.wcid.idx, enable); + + if (enable && mvif->mt76.omac_idx >= EXT_BSSID_START && + mvif->mt76.omac_idx < REPEATER_BSSID_START) + mt76_connac_mcu_bss_ext_tlv(skb, &mvif->mt76); + + return mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_EXT_CMD(BSS_INFO_UPDATE), true); +} + +static int +mt7615_mcu_wtbl_tx_ba(struct mt7615_dev *dev, + struct ieee80211_ampdu_params *params, + bool enable) +{ + struct mt7615_sta *msta = (struct mt7615_sta *)params->sta->drv_priv; + struct mt7615_vif *mvif = msta->vif; + struct wtbl_req_hdr *wtbl_hdr; + struct sk_buff *skb = NULL; + int err; + + wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid, + WTBL_SET, NULL, &skb); + if (IS_ERR(wtbl_hdr)) + return PTR_ERR(wtbl_hdr); + + mt76_connac_mcu_wtbl_ba_tlv(&dev->mt76, skb, params, enable, true, + NULL, wtbl_hdr); + + err = mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_EXT_CMD(WTBL_UPDATE), true); + if (err < 0) + return err; + + skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, + &msta->wcid); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + mt76_connac_mcu_sta_ba_tlv(skb, params, enable, true); + + return mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_EXT_CMD(STA_REC_UPDATE), true); +} + +static int +mt7615_mcu_wtbl_rx_ba(struct mt7615_dev *dev, + struct ieee80211_ampdu_params *params, + bool enable) +{ + struct mt7615_sta *msta = (struct mt7615_sta *)params->sta->drv_priv; + struct mt7615_vif *mvif = msta->vif; + struct wtbl_req_hdr *wtbl_hdr; + struct sk_buff *skb; + int err; + + skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, + &msta->wcid); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + mt76_connac_mcu_sta_ba_tlv(skb, params, enable, false); + + err = mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_EXT_CMD(STA_REC_UPDATE), true); + if (err < 0 || !enable) + return err; + + skb = NULL; + wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid, + WTBL_SET, NULL, &skb); + if (IS_ERR(wtbl_hdr)) + return PTR_ERR(wtbl_hdr); + + mt76_connac_mcu_wtbl_ba_tlv(&dev->mt76, skb, params, enable, false, + NULL, wtbl_hdr); + + return mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_EXT_CMD(WTBL_UPDATE), true); +} + +static int +mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, bool enable) +{ + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; + struct sk_buff *skb, *sskb, *wskb = NULL; + struct mt7615_dev *dev = phy->dev; + struct wtbl_req_hdr *wtbl_hdr; + struct mt7615_sta *msta; + bool new_entry = true; + int cmd, err; + + msta = sta ? 
(struct mt7615_sta *)sta->drv_priv : &mvif->sta; + + sskb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, + &msta->wcid); + if (IS_ERR(sskb)) + return PTR_ERR(sskb); + + if (!sta) { + if (mvif->sta_added) + new_entry = false; + else + mvif->sta_added = true; + } + mt76_connac_mcu_sta_basic_tlv(sskb, vif, sta, enable, new_entry); + if (enable && sta) + mt76_connac_mcu_sta_tlv(phy->mt76, sskb, sta, vif, 0, + MT76_STA_INFO_STATE_ASSOC); + + wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid, + WTBL_RESET_AND_SET, NULL, + &wskb); + if (IS_ERR(wtbl_hdr)) + return PTR_ERR(wtbl_hdr); + + if (enable) { + mt76_connac_mcu_wtbl_generic_tlv(&dev->mt76, wskb, vif, sta, + NULL, wtbl_hdr); + if (sta) + mt76_connac_mcu_wtbl_ht_tlv(&dev->mt76, wskb, sta, + NULL, wtbl_hdr, true, true); + mt76_connac_mcu_wtbl_hdr_trans_tlv(wskb, vif, &msta->wcid, + NULL, wtbl_hdr); + } + + cmd = enable ? MCU_EXT_CMD(WTBL_UPDATE) : MCU_EXT_CMD(STA_REC_UPDATE); + skb = enable ? wskb : sskb; + + err = mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, true); + if (err < 0) { + skb = enable ? sskb : wskb; + dev_kfree_skb(skb); + + return err; + } + + cmd = enable ? MCU_EXT_CMD(STA_REC_UPDATE) : MCU_EXT_CMD(WTBL_UPDATE); + skb = enable ? sskb : wskb; + + return mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, true); +} + +static int +mt7615_mcu_wtbl_update_hdr_trans(struct mt7615_dev *dev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + return mt76_connac_mcu_wtbl_update_hdr_trans(&dev->mt76, vif, sta); +} + +static const struct mt7615_mcu_ops wtbl_update_ops = { + .add_beacon_offload = mt7615_mcu_add_beacon_offload, + .set_pm_state = mt7615_mcu_ctrl_pm_state, + .add_dev_info = mt7615_mcu_add_dev, + .add_bss_info = mt7615_mcu_add_bss, + .add_tx_ba = mt7615_mcu_wtbl_tx_ba, + .add_rx_ba = mt7615_mcu_wtbl_rx_ba, + .sta_add = mt7615_mcu_wtbl_sta_add, + .set_drv_ctrl = mt7615_mcu_drv_pmctrl, + .set_fw_ctrl = mt7615_mcu_fw_pmctrl, + .set_sta_decap_offload = mt7615_mcu_wtbl_update_hdr_trans, +}; + +static int +mt7615_mcu_sta_ba(struct mt7615_dev *dev, + struct ieee80211_ampdu_params *params, + bool enable, bool tx) +{ + struct mt7615_sta *msta = (struct mt7615_sta *)params->sta->drv_priv; + struct mt7615_vif *mvif = msta->vif; + struct wtbl_req_hdr *wtbl_hdr; + struct tlv *sta_wtbl; + struct sk_buff *skb; + + skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, + &msta->wcid); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + mt76_connac_mcu_sta_ba_tlv(skb, params, enable, tx); + + sta_wtbl = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv)); + + wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid, + WTBL_SET, sta_wtbl, &skb); + if (IS_ERR(wtbl_hdr)) + return PTR_ERR(wtbl_hdr); + + mt76_connac_mcu_wtbl_ba_tlv(&dev->mt76, skb, params, enable, tx, + sta_wtbl, wtbl_hdr); + + return mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_EXT_CMD(STA_REC_UPDATE), true); +} + +static int +mt7615_mcu_sta_tx_ba(struct mt7615_dev *dev, + struct ieee80211_ampdu_params *params, + bool enable) +{ + return mt7615_mcu_sta_ba(dev, params, enable, true); +} + +static int +mt7615_mcu_sta_rx_ba(struct mt7615_dev *dev, + struct ieee80211_ampdu_params *params, + bool enable) +{ + return mt7615_mcu_sta_ba(dev, params, enable, false); +} + +static int +__mt7615_mcu_add_sta(struct mt76_phy *phy, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, bool enable, int cmd, + bool offload_fw) +{ + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; + struct mt76_sta_cmd_info info = { + .sta = sta, + .vif 
= vif, + .offload_fw = offload_fw, + .enable = enable, + .newly = true, + .cmd = cmd, + }; + + info.wcid = sta ? (struct mt76_wcid *)sta->drv_priv : &mvif->sta.wcid; + return mt76_connac_mcu_sta_cmd(phy, &info); +} + +static int +mt7615_mcu_add_sta(struct mt7615_phy *phy, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, bool enable) +{ + return __mt7615_mcu_add_sta(phy->mt76, vif, sta, enable, + MCU_EXT_CMD(STA_REC_UPDATE), false); +} + +static int +mt7615_mcu_sta_update_hdr_trans(struct mt7615_dev *dev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv; + + return mt76_connac_mcu_sta_update_hdr_trans(&dev->mt76, + vif, &msta->wcid, + MCU_EXT_CMD(STA_REC_UPDATE)); +} + +static const struct mt7615_mcu_ops sta_update_ops = { + .add_beacon_offload = mt7615_mcu_add_beacon_offload, + .set_pm_state = mt7615_mcu_ctrl_pm_state, + .add_dev_info = mt7615_mcu_add_dev, + .add_bss_info = mt7615_mcu_add_bss, + .add_tx_ba = mt7615_mcu_sta_tx_ba, + .add_rx_ba = mt7615_mcu_sta_rx_ba, + .sta_add = mt7615_mcu_add_sta, + .set_drv_ctrl = mt7615_mcu_drv_pmctrl, + .set_fw_ctrl = mt7615_mcu_fw_pmctrl, + .set_sta_decap_offload = mt7615_mcu_sta_update_hdr_trans, +}; + +static int +mt7615_mcu_uni_ctrl_pm_state(struct mt7615_dev *dev, int band, int state) +{ + return 0; +} + +static int +mt7615_mcu_uni_add_beacon_offload(struct mt7615_dev *dev, + struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + bool enable) +{ + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; + struct mt76_wcid *wcid = &dev->mt76.global_wcid; + struct ieee80211_mutable_offsets offs; + struct { + struct req_hdr { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct bcn_content_tlv { + __le16 tag; + __le16 len; + __le16 tim_ie_pos; + __le16 csa_ie_pos; + __le16 bcc_ie_pos; + /* 0: disable beacon offload + * 1: enable beacon offload + * 2: update probe respond offload + */ + u8 enable; + /* 0: legacy format (TXD + payload) + * 1: only cap field IE + */ + u8 type; + __le16 pkt_len; + u8 pkt[512]; + } __packed beacon_tlv; + } req = { + .hdr = { + .bss_idx = mvif->mt76.idx, + }, + .beacon_tlv = { + .tag = cpu_to_le16(UNI_BSS_INFO_BCN_CONTENT), + .len = cpu_to_le16(sizeof(struct bcn_content_tlv)), + .enable = enable, + }, + }; + struct sk_buff *skb; + + if (!enable) + goto out; + + skb = ieee80211_beacon_get_template(mt76_hw(dev), vif, &offs, 0); + if (!skb) + return -EINVAL; + + if (skb->len > 512 - MT_TXD_SIZE) { + dev_err(dev->mt76.dev, "beacon size limit exceed\n"); + dev_kfree_skb(skb); + return -EINVAL; + } + + mt7615_mac_write_txwi(dev, (__le32 *)(req.beacon_tlv.pkt), skb, + wcid, NULL, 0, NULL, 0, true); + memcpy(req.beacon_tlv.pkt + MT_TXD_SIZE, skb->data, skb->len); + req.beacon_tlv.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len); + req.beacon_tlv.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + offs.tim_offset); + + if (offs.cntdwn_counter_offs[0]) { + u16 csa_offs; + + csa_offs = MT_TXD_SIZE + offs.cntdwn_counter_offs[0] - 4; + req.beacon_tlv.csa_ie_pos = cpu_to_le16(csa_offs); + } + dev_kfree_skb(skb); + +out: + return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE), + &req, sizeof(req), true); +} + +static int +mt7615_mcu_uni_add_dev(struct mt7615_phy *phy, struct ieee80211_vif *vif, + bool enable) +{ + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; + + return mt76_connac_mcu_uni_add_dev(phy->mt76, vif, &mvif->sta.wcid, + enable); +} + +static int +mt7615_mcu_uni_add_bss(struct mt7615_phy *phy, struct ieee80211_vif *vif, + 
struct ieee80211_sta *sta, bool enable) +{ + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; + + return mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid, + enable); +} + +static inline int +mt7615_mcu_uni_add_sta(struct mt7615_phy *phy, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, bool enable) +{ + return __mt7615_mcu_add_sta(phy->mt76, vif, sta, enable, + MCU_UNI_CMD(STA_REC_UPDATE), true); +} + +static int +mt7615_mcu_uni_tx_ba(struct mt7615_dev *dev, + struct ieee80211_ampdu_params *params, + bool enable) +{ + struct mt7615_sta *sta = (struct mt7615_sta *)params->sta->drv_priv; + + return mt76_connac_mcu_sta_ba(&dev->mt76, &sta->vif->mt76, params, + MCU_UNI_CMD(STA_REC_UPDATE), enable, + true); +} + +static int +mt7615_mcu_uni_rx_ba(struct mt7615_dev *dev, + struct ieee80211_ampdu_params *params, + bool enable) +{ + struct mt7615_sta *msta = (struct mt7615_sta *)params->sta->drv_priv; + struct mt7615_vif *mvif = msta->vif; + struct wtbl_req_hdr *wtbl_hdr; + struct tlv *sta_wtbl; + struct sk_buff *skb; + int err; + + skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, + &msta->wcid); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + mt76_connac_mcu_sta_ba_tlv(skb, params, enable, false); + + err = mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_UNI_CMD(STA_REC_UPDATE), true); + if (err < 0 || !enable) + return err; + + skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, + &msta->wcid); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + sta_wtbl = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL, + sizeof(struct tlv)); + + wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid, + WTBL_SET, sta_wtbl, &skb); + if (IS_ERR(wtbl_hdr)) + return PTR_ERR(wtbl_hdr); + + mt76_connac_mcu_wtbl_ba_tlv(&dev->mt76, skb, params, enable, false, + sta_wtbl, wtbl_hdr); + + return mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_UNI_CMD(STA_REC_UPDATE), true); +} + +static int +mt7615_mcu_sta_uni_update_hdr_trans(struct mt7615_dev *dev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv; + + return mt76_connac_mcu_sta_update_hdr_trans(&dev->mt76, + vif, &msta->wcid, + MCU_UNI_CMD(STA_REC_UPDATE)); +} + +static const struct mt7615_mcu_ops uni_update_ops = { + .add_beacon_offload = mt7615_mcu_uni_add_beacon_offload, + .set_pm_state = mt7615_mcu_uni_ctrl_pm_state, + .add_dev_info = mt7615_mcu_uni_add_dev, + .add_bss_info = mt7615_mcu_uni_add_bss, + .add_tx_ba = mt7615_mcu_uni_tx_ba, + .add_rx_ba = mt7615_mcu_uni_rx_ba, + .sta_add = mt7615_mcu_uni_add_sta, + .set_drv_ctrl = mt7615_mcu_lp_drv_pmctrl, + .set_fw_ctrl = mt7615_mcu_fw_pmctrl, + .set_sta_decap_offload = mt7615_mcu_sta_uni_update_hdr_trans, +}; + +int mt7615_mcu_restart(struct mt76_dev *dev) +{ + return mt76_mcu_send_msg(dev, MCU_CMD(RESTART_DL_REQ), NULL, 0, true); +} +EXPORT_SYMBOL_GPL(mt7615_mcu_restart); + +static int mt7615_load_patch(struct mt7615_dev *dev, u32 addr, const char *name) +{ + const struct mt7615_patch_hdr *hdr; + const struct firmware *fw = NULL; + int len, ret, sem; + + ret = firmware_request_nowarn(&fw, name, dev->mt76.dev); + if (ret) + return ret; + + if (!fw || !fw->data || fw->size < sizeof(*hdr)) { + dev_err(dev->mt76.dev, "Invalid firmware\n"); + ret = -EINVAL; + goto release_fw; + } + + sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, true); + switch (sem) { + case PATCH_IS_DL: + goto release_fw; + case PATCH_NOT_DL_SEM_SUCCESS: + break; + default: + dev_err(dev->mt76.dev, "Failed to get patch semaphore\n"); + 
ret = -EAGAIN; + goto release_fw; + } + + hdr = (const struct mt7615_patch_hdr *)(fw->data); + + dev_info(dev->mt76.dev, "HW/SW Version: 0x%x, Build Time: %.16s\n", + be32_to_cpu(hdr->hw_sw_ver), hdr->build_date); + + len = fw->size - sizeof(*hdr); + + ret = mt76_connac_mcu_init_download(&dev->mt76, addr, len, + DL_MODE_NEED_RSP); + if (ret) { + dev_err(dev->mt76.dev, "Download request failed\n"); + goto out; + } + + ret = mt76_mcu_send_firmware(&dev->mt76, MCU_CMD(FW_SCATTER), + fw->data + sizeof(*hdr), len); + if (ret) { + dev_err(dev->mt76.dev, "Failed to send firmware to device\n"); + goto out; + } + + ret = mt76_connac_mcu_start_patch(&dev->mt76); + if (ret) + dev_err(dev->mt76.dev, "Failed to start patch\n"); + +out: + sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, false); + switch (sem) { + case PATCH_REL_SEM_SUCCESS: + break; + default: + ret = -EAGAIN; + dev_err(dev->mt76.dev, "Failed to release patch semaphore\n"); + break; + } + +release_fw: + release_firmware(fw); + + return ret; +} + +static int +mt7615_mcu_send_ram_firmware(struct mt7615_dev *dev, + const struct mt7615_fw_trailer *hdr, + const u8 *data, bool is_cr4) +{ + int n_region = is_cr4 ? CR4_REGION_NUM : N9_REGION_NUM; + int err, i, offset = 0; + u32 len, addr, mode; + + for (i = 0; i < n_region; i++) { + mode = mt76_connac_mcu_gen_dl_mode(&dev->mt76, + hdr[i].feature_set, is_cr4); + len = le32_to_cpu(hdr[i].len) + IMG_CRC_LEN; + addr = le32_to_cpu(hdr[i].addr); + + err = mt76_connac_mcu_init_download(&dev->mt76, addr, len, + mode); + if (err) { + dev_err(dev->mt76.dev, "Download request failed\n"); + return err; + } + + err = mt76_mcu_send_firmware(&dev->mt76, MCU_CMD(FW_SCATTER), + data + offset, len); + if (err) { + dev_err(dev->mt76.dev, "Failed to send firmware to device\n"); + return err; + } + + offset += len; + } + + return 0; +} + +static int mt7615_load_n9(struct mt7615_dev *dev, const char *name) +{ + const struct mt7615_fw_trailer *hdr; + const struct firmware *fw; + int ret; + + ret = request_firmware(&fw, name, dev->mt76.dev); + if (ret) + return ret; + + if (!fw || !fw->data || fw->size < N9_REGION_NUM * sizeof(*hdr)) { + dev_err(dev->mt76.dev, "Invalid firmware\n"); + ret = -EINVAL; + goto out; + } + + hdr = (const struct mt7615_fw_trailer *)(fw->data + fw->size - + N9_REGION_NUM * sizeof(*hdr)); + + dev_info(dev->mt76.dev, "N9 Firmware Version: %.10s, Build Time: %.15s\n", + hdr->fw_ver, hdr->build_date); + + ret = mt7615_mcu_send_ram_firmware(dev, hdr, fw->data, false); + if (ret) + goto out; + + ret = mt76_connac_mcu_start_firmware(&dev->mt76, + le32_to_cpu(hdr->addr), + FW_START_OVERRIDE); + if (ret) { + dev_err(dev->mt76.dev, "Failed to start N9 firmware\n"); + goto out; + } + + snprintf(dev->mt76.hw->wiphy->fw_version, + sizeof(dev->mt76.hw->wiphy->fw_version), + "%.10s-%.15s", hdr->fw_ver, hdr->build_date); + + if (!is_mt7615(&dev->mt76)) { + dev->fw_ver = MT7615_FIRMWARE_V2; + dev->mcu_ops = &sta_update_ops; + } else { + dev->fw_ver = MT7615_FIRMWARE_V1; + dev->mcu_ops = &wtbl_update_ops; + } + +out: + release_firmware(fw); + return ret; +} + +static int mt7615_load_cr4(struct mt7615_dev *dev, const char *name) +{ + const struct mt7615_fw_trailer *hdr; + const struct firmware *fw; + int ret; + + ret = request_firmware(&fw, name, dev->mt76.dev); + if (ret) + return ret; + + if (!fw || !fw->data || fw->size < CR4_REGION_NUM * sizeof(*hdr)) { + dev_err(dev->mt76.dev, "Invalid firmware\n"); + ret = -EINVAL; + goto out; + } + + hdr = (const struct mt7615_fw_trailer *)(fw->data + fw->size - + 
CR4_REGION_NUM * sizeof(*hdr)); + + dev_info(dev->mt76.dev, "CR4 Firmware Version: %.10s, Build Time: %.15s\n", + hdr->fw_ver, hdr->build_date); + + ret = mt7615_mcu_send_ram_firmware(dev, hdr, fw->data, true); + if (ret) + goto out; + + ret = mt76_connac_mcu_start_firmware(&dev->mt76, 0, + FW_START_WORKING_PDA_CR4); + if (ret) { + dev_err(dev->mt76.dev, "Failed to start CR4 firmware\n"); + goto out; + } + +out: + release_firmware(fw); + + return ret; +} + +static int mt7615_load_ram(struct mt7615_dev *dev) +{ + int ret; + + ret = mt7615_load_n9(dev, MT7615_FIRMWARE_N9); + if (ret) + return ret; + + return mt7615_load_cr4(dev, MT7615_FIRMWARE_CR4); +} + +static int mt7615_load_firmware(struct mt7615_dev *dev) +{ + int ret; + u32 val; + + val = mt76_get_field(dev, MT_TOP_MISC2, MT_TOP_MISC2_FW_STATE); + + if (val != FW_STATE_FW_DOWNLOAD) { + dev_err(dev->mt76.dev, "Firmware is not ready for download\n"); + return -EIO; + } + + ret = mt7615_load_patch(dev, MT7615_PATCH_ADDRESS, MT7615_ROM_PATCH); + if (ret) + return ret; + + ret = mt7615_load_ram(dev); + if (ret) + return ret; + + if (!mt76_poll_msec(dev, MT_TOP_MISC2, MT_TOP_MISC2_FW_STATE, + FIELD_PREP(MT_TOP_MISC2_FW_STATE, + FW_STATE_RDY), 500)) { + dev_err(dev->mt76.dev, "Timeout for initializing firmware\n"); + return -EIO; + } + + return 0; +} + +static int mt7622_load_firmware(struct mt7615_dev *dev) +{ + int ret; + u32 val; + + mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_BYPASS_TX_SCH); + + val = mt76_get_field(dev, MT_TOP_OFF_RSV, MT_TOP_OFF_RSV_FW_STATE); + if (val != FW_STATE_FW_DOWNLOAD) { + dev_err(dev->mt76.dev, "Firmware is not ready for download\n"); + return -EIO; + } + + ret = mt7615_load_patch(dev, MT7622_PATCH_ADDRESS, MT7622_ROM_PATCH); + if (ret) + return ret; + + ret = mt7615_load_n9(dev, MT7622_FIRMWARE_N9); + if (ret) + return ret; + + if (!mt76_poll_msec(dev, MT_TOP_OFF_RSV, MT_TOP_OFF_RSV_FW_STATE, + FIELD_PREP(MT_TOP_OFF_RSV_FW_STATE, + FW_STATE_NORMAL_TRX), 1500)) { + dev_err(dev->mt76.dev, "Timeout for initializing firmware\n"); + return -EIO; + } + + mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_BYPASS_TX_SCH); + + return 0; +} + +int mt7615_mcu_fw_log_2_host(struct mt7615_dev *dev, u8 ctrl) +{ + struct { + u8 ctrl_val; + u8 pad[3]; + } data = { + .ctrl_val = ctrl + }; + + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(FW_LOG_2_HOST), + &data, sizeof(data), true); +} + +static int mt7615_mcu_cal_cache_apply(struct mt7615_dev *dev) +{ + struct { + bool cache_enable; + u8 pad[3]; + } data = { + .cache_enable = true + }; + + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(CAL_CACHE), &data, + sizeof(data), false); +} + +static int mt7663_load_n9(struct mt7615_dev *dev, const char *name) +{ + u32 offset = 0, override_addr = 0, flag = FW_START_DLYCAL; + const struct mt76_connac2_fw_trailer *hdr; + const struct mt7663_fw_buf *buf; + const struct firmware *fw; + const u8 *base_addr; + int i, ret; + + ret = request_firmware(&fw, name, dev->mt76.dev); + if (ret) + return ret; + + if (!fw || !fw->data || fw->size < FW_V3_COMMON_TAILER_SIZE) { + dev_err(dev->mt76.dev, "Invalid firmware\n"); + ret = -EINVAL; + goto out; + } + + hdr = (const void *)(fw->data + fw->size - FW_V3_COMMON_TAILER_SIZE); + dev_info(dev->mt76.dev, "N9 Firmware Version: %.10s, Build Time: %.15s\n", + hdr->fw_ver, hdr->build_date); + dev_info(dev->mt76.dev, "Region number: 0x%x\n", hdr->n_region); + + base_addr = fw->data + fw->size - FW_V3_COMMON_TAILER_SIZE; + for (i = 0; i < hdr->n_region; i++) { + u32 shift = (hdr->n_region - i) * 
FW_V3_REGION_TAILER_SIZE; + u32 len, addr, mode; + + dev_info(dev->mt76.dev, "Parsing tailer Region: %d\n", i); + + buf = (const struct mt7663_fw_buf *)(base_addr - shift); + mode = mt76_connac_mcu_gen_dl_mode(&dev->mt76, + buf->feature_set, false); + addr = le32_to_cpu(buf->img_dest_addr); + len = le32_to_cpu(buf->img_size); + + ret = mt76_connac_mcu_init_download(&dev->mt76, addr, len, + mode); + if (ret) { + dev_err(dev->mt76.dev, "Download request failed\n"); + goto out; + } + + ret = mt76_mcu_send_firmware(&dev->mt76, MCU_CMD(FW_SCATTER), + fw->data + offset, len); + if (ret) { + dev_err(dev->mt76.dev, "Failed to send firmware\n"); + goto out; + } + + offset += le32_to_cpu(buf->img_size); + if (buf->feature_set & DL_MODE_VALID_RAM_ENTRY) { + override_addr = le32_to_cpu(buf->img_dest_addr); + dev_info(dev->mt76.dev, "Region %d, override_addr = 0x%08x\n", + i, override_addr); + } + } + + if (override_addr) + flag |= FW_START_OVERRIDE; + + dev_info(dev->mt76.dev, "override_addr = 0x%08x, option = %d\n", + override_addr, flag); + + ret = mt76_connac_mcu_start_firmware(&dev->mt76, override_addr, flag); + if (ret) { + dev_err(dev->mt76.dev, "Failed to start N9 firmware\n"); + goto out; + } + + snprintf(dev->mt76.hw->wiphy->fw_version, + sizeof(dev->mt76.hw->wiphy->fw_version), + "%.10s-%.15s", hdr->fw_ver, hdr->build_date); + +out: + release_firmware(fw); + + return ret; +} + +static int +mt7663_load_rom_patch(struct mt7615_dev *dev, const char **n9_firmware) +{ + const char *selected_rom, *secondary_rom = MT7663_ROM_PATCH; + const char *primary_rom = MT7663_OFFLOAD_ROM_PATCH; + int ret; + + if (!prefer_offload_fw) { + secondary_rom = MT7663_OFFLOAD_ROM_PATCH; + primary_rom = MT7663_ROM_PATCH; + } + selected_rom = primary_rom; + + ret = mt7615_load_patch(dev, MT7663_PATCH_ADDRESS, primary_rom); + if (ret) { + dev_info(dev->mt76.dev, "%s not found, switching to %s", + primary_rom, secondary_rom); + ret = mt7615_load_patch(dev, MT7663_PATCH_ADDRESS, + secondary_rom); + if (ret) { + dev_err(dev->mt76.dev, "failed to load %s", + secondary_rom); + return ret; + } + selected_rom = secondary_rom; + } + + if (!strcmp(selected_rom, MT7663_OFFLOAD_ROM_PATCH)) { + *n9_firmware = MT7663_OFFLOAD_FIRMWARE_N9; + dev->fw_ver = MT7615_FIRMWARE_V3; + dev->mcu_ops = &uni_update_ops; + } else { + *n9_firmware = MT7663_FIRMWARE_N9; + dev->fw_ver = MT7615_FIRMWARE_V2; + dev->mcu_ops = &sta_update_ops; + } + + return 0; +} + +int __mt7663_load_firmware(struct mt7615_dev *dev) +{ + const char *n9_firmware; + int ret; + + ret = mt76_get_field(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY); + if (ret) { + dev_dbg(dev->mt76.dev, "Firmware is already download\n"); + return -EIO; + } + + ret = mt7663_load_rom_patch(dev, &n9_firmware); + if (ret) + return ret; + + ret = mt7663_load_n9(dev, n9_firmware); + if (ret) + return ret; + + if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY, + MT_TOP_MISC2_FW_N9_RDY, 1500)) { + ret = mt76_get_field(dev, MT_CONN_ON_MISC, + MT7663_TOP_MISC2_FW_STATE); + dev_err(dev->mt76.dev, "Timeout for initializing firmware\n"); + return -EIO; + } + +#ifdef CONFIG_PM + if (mt7615_firmware_offload(dev)) + dev->mt76.hw->wiphy->wowlan = &mt76_connac_wowlan_support; +#endif /* CONFIG_PM */ + + dev_dbg(dev->mt76.dev, "Firmware init done\n"); + + return 0; +} +EXPORT_SYMBOL_GPL(__mt7663_load_firmware); + +static int mt7663_load_firmware(struct mt7615_dev *dev) +{ + int ret; + + mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_BYPASS_TX_SCH); + + ret = __mt7663_load_firmware(dev); + if 
(ret) + return ret; + + mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_BYPASS_TX_SCH); + + return 0; +} + +int mt7615_mcu_init(struct mt7615_dev *dev) +{ + static const struct mt76_mcu_ops mt7615_mcu_ops = { + .headroom = sizeof(struct mt7615_mcu_txd), + .mcu_skb_send_msg = mt7615_mcu_send_message, + .mcu_parse_response = mt7615_mcu_parse_response, + .mcu_restart = mt7615_mcu_restart, + }; + int ret; + + dev->mt76.mcu_ops = &mt7615_mcu_ops, + + ret = mt7615_mcu_drv_pmctrl(dev); + if (ret) + return ret; + + switch (mt76_chip(&dev->mt76)) { + case 0x7622: + ret = mt7622_load_firmware(dev); + break; + case 0x7663: + ret = mt7663_load_firmware(dev); + break; + default: + ret = mt7615_load_firmware(dev); + break; + } + if (ret) + return ret; + + mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_FWDL], false); + dev_dbg(dev->mt76.dev, "Firmware init done\n"); + set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state); + + if (dev->dbdc_support) { + ret = mt7615_mcu_cal_cache_apply(dev); + if (ret) + return ret; + } + + return mt7615_mcu_fw_log_2_host(dev, 0); +} +EXPORT_SYMBOL_GPL(mt7615_mcu_init); + +void mt7615_mcu_exit(struct mt7615_dev *dev) +{ + __mt76_mcu_restart(&dev->mt76); + mt7615_mcu_set_fw_ctrl(dev); + skb_queue_purge(&dev->mt76.mcu.res_q); +} +EXPORT_SYMBOL_GPL(mt7615_mcu_exit); + +int mt7615_mcu_set_eeprom(struct mt7615_dev *dev) +{ + struct { + u8 buffer_mode; + u8 content_format; + __le16 len; + } __packed req_hdr = { + .buffer_mode = 1, + }; + u8 *eep = (u8 *)dev->mt76.eeprom.data; + struct sk_buff *skb; + int eep_len, offset; + + switch (mt76_chip(&dev->mt76)) { + case 0x7622: + eep_len = MT7622_EE_MAX - MT_EE_NIC_CONF_0; + offset = MT_EE_NIC_CONF_0; + break; + case 0x7663: + eep_len = MT7663_EE_MAX - MT_EE_CHIP_ID; + req_hdr.content_format = 1; + offset = MT_EE_CHIP_ID; + break; + default: + eep_len = MT7615_EE_MAX - MT_EE_NIC_CONF_0; + offset = MT_EE_NIC_CONF_0; + break; + } + + req_hdr.len = cpu_to_le16(eep_len); + + skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, sizeof(req_hdr) + eep_len); + if (!skb) + return -ENOMEM; + + skb_put_data(skb, &req_hdr, sizeof(req_hdr)); + skb_put_data(skb, eep + offset, eep_len); + + return mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_EXT_CMD(EFUSE_BUFFER_MODE), true); +} + +int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue, + const struct ieee80211_tx_queue_params *params) +{ +#define WMM_AIFS_SET BIT(0) +#define WMM_CW_MIN_SET BIT(1) +#define WMM_CW_MAX_SET BIT(2) +#define WMM_TXOP_SET BIT(3) +#define WMM_PARAM_SET (WMM_AIFS_SET | WMM_CW_MIN_SET | \ + WMM_CW_MAX_SET | WMM_TXOP_SET) + struct req_data { + u8 number; + u8 rsv[3]; + u8 queue; + u8 valid; + u8 aifs; + u8 cw_min; + __le16 cw_max; + __le16 txop; + } __packed req = { + .number = 1, + .queue = queue, + .valid = WMM_PARAM_SET, + .aifs = params->aifs, + .cw_min = 5, + .cw_max = cpu_to_le16(10), + .txop = cpu_to_le16(params->txop), + }; + + if (params->cw_min) + req.cw_min = fls(params->cw_min); + if (params->cw_max) + req.cw_max = cpu_to_le16(fls(params->cw_max)); + + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EDCA_UPDATE), + &req, sizeof(req), true); +} + +int mt7615_mcu_set_dbdc(struct mt7615_dev *dev) +{ + struct mt7615_phy *ext_phy = mt7615_ext_phy(dev); + struct dbdc_entry { + u8 type; + u8 index; + u8 band; + u8 _rsv; + }; + struct { + u8 enable; + u8 num; + u8 _rsv[2]; + struct dbdc_entry entry[64]; + } req = { + .enable = !!ext_phy, + }; + int i; + + if (!ext_phy) + goto out; + +#define ADD_DBDC_ENTRY(_type, _idx, _band) \ + do { \ + req.entry[req.num].type = _type; \ + 
req.entry[req.num].index = _idx; \ + req.entry[req.num++].band = _band; \ + } while (0) + + for (i = 0; i < 4; i++) { + bool band = !!(ext_phy->omac_mask & BIT_ULL(i)); + + ADD_DBDC_ENTRY(DBDC_TYPE_BSS, i, band); + } + + for (i = 0; i < 14; i++) { + bool band = !!(ext_phy->omac_mask & BIT_ULL(0x11 + i)); + + ADD_DBDC_ENTRY(DBDC_TYPE_MBSS, i, band); + } + + ADD_DBDC_ENTRY(DBDC_TYPE_MU, 0, 1); + + for (i = 0; i < 3; i++) + ADD_DBDC_ENTRY(DBDC_TYPE_BF, i, 1); + + ADD_DBDC_ENTRY(DBDC_TYPE_WMM, 0, 0); + ADD_DBDC_ENTRY(DBDC_TYPE_WMM, 1, 0); + ADD_DBDC_ENTRY(DBDC_TYPE_WMM, 2, 1); + ADD_DBDC_ENTRY(DBDC_TYPE_WMM, 3, 1); + + ADD_DBDC_ENTRY(DBDC_TYPE_MGMT, 0, 0); + ADD_DBDC_ENTRY(DBDC_TYPE_MGMT, 1, 1); + +out: + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(DBDC_CTRL), &req, + sizeof(req), true); +} + +int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev) +{ + struct wtbl_req_hdr req = { + .operation = WTBL_RESET_ALL, + }; + + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(WTBL_UPDATE), + &req, sizeof(req), true); +} + +int mt7615_mcu_set_fcc5_lpn(struct mt7615_dev *dev, int val) +{ + struct { + __le16 tag; + __le16 min_lpn; + } req = { + .tag = cpu_to_le16(0x1), + .min_lpn = cpu_to_le16(val), + }; + + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_RADAR_TH), + &req, sizeof(req), true); +} + +int mt7615_mcu_set_pulse_th(struct mt7615_dev *dev, + const struct mt7615_dfs_pulse *pulse) +{ + struct { + __le16 tag; + __le32 max_width; /* us */ + __le32 max_pwr; /* dbm */ + __le32 min_pwr; /* dbm */ + __le32 min_stgr_pri; /* us */ + __le32 max_stgr_pri; /* us */ + __le32 min_cr_pri; /* us */ + __le32 max_cr_pri; /* us */ + } req = { + .tag = cpu_to_le16(0x3), +#define __req_field(field) .field = cpu_to_le32(pulse->field) + __req_field(max_width), + __req_field(max_pwr), + __req_field(min_pwr), + __req_field(min_stgr_pri), + __req_field(max_stgr_pri), + __req_field(min_cr_pri), + __req_field(max_cr_pri), +#undef __req_field + }; + + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_RADAR_TH), + &req, sizeof(req), true); +} + +int mt7615_mcu_set_radar_th(struct mt7615_dev *dev, int index, + const struct mt7615_dfs_pattern *pattern) +{ + struct { + __le16 tag; + __le16 radar_type; + u8 enb; + u8 stgr; + u8 min_crpn; + u8 max_crpn; + u8 min_crpr; + u8 min_pw; + u8 max_pw; + __le32 min_pri; + __le32 max_pri; + u8 min_crbn; + u8 max_crbn; + u8 min_stgpn; + u8 max_stgpn; + u8 min_stgpr; + } req = { + .tag = cpu_to_le16(0x2), + .radar_type = cpu_to_le16(index), +#define __req_field_u8(field) .field = pattern->field +#define __req_field_u32(field) .field = cpu_to_le32(pattern->field) + __req_field_u8(enb), + __req_field_u8(stgr), + __req_field_u8(min_crpn), + __req_field_u8(max_crpn), + __req_field_u8(min_crpr), + __req_field_u8(min_pw), + __req_field_u8(max_pw), + __req_field_u32(min_pri), + __req_field_u32(max_pri), + __req_field_u8(min_crbn), + __req_field_u8(max_crbn), + __req_field_u8(min_stgpn), + __req_field_u8(max_stgpn), + __req_field_u8(min_stgpr), +#undef __req_field_u8 +#undef __req_field_u32 + }; + + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_RADAR_TH), + &req, sizeof(req), true); +} + +int mt7615_mcu_rdd_send_pattern(struct mt7615_dev *dev) +{ + struct { + u8 pulse_num; + u8 rsv[3]; + struct { + __le32 start_time; + __le16 width; + __le16 power; + } pattern[32]; + } req = { + .pulse_num = dev->radar_pattern.n_pulses, + }; + u32 start_time = ktime_to_ms(ktime_get_boottime()); + int i; + + if (dev->radar_pattern.n_pulses > ARRAY_SIZE(req.pattern)) + return -EINVAL; + + /* TODO: add 
some noise here */ + for (i = 0; i < dev->radar_pattern.n_pulses; i++) { + u32 ts = start_time + i * dev->radar_pattern.period; + + req.pattern[i].width = cpu_to_le16(dev->radar_pattern.width); + req.pattern[i].power = cpu_to_le16(dev->radar_pattern.power); + req.pattern[i].start_time = cpu_to_le32(ts); + } + + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_RDD_PATTERN), + &req, sizeof(req), false); +} + +static void mt7615_mcu_set_txpower_sku(struct mt7615_phy *phy, u8 *sku) +{ + struct mt76_phy *mphy = phy->mt76; + struct ieee80211_hw *hw = mphy->hw; + struct mt76_power_limits limits; + s8 *limits_array = (s8 *)&limits; + int n_chains = hweight8(mphy->antenna_mask); + int tx_power = hw->conf.power_level * 2; + int i; + static const u8 sku_mapping[] = { +#define SKU_FIELD(_type, _field) \ + [MT_SKU_##_type] = offsetof(struct mt76_power_limits, _field) + SKU_FIELD(CCK_1_2, cck[0]), + SKU_FIELD(CCK_55_11, cck[2]), + SKU_FIELD(OFDM_6_9, ofdm[0]), + SKU_FIELD(OFDM_12_18, ofdm[2]), + SKU_FIELD(OFDM_24_36, ofdm[4]), + SKU_FIELD(OFDM_48, ofdm[6]), + SKU_FIELD(OFDM_54, ofdm[7]), + SKU_FIELD(HT20_0_8, mcs[0][0]), + SKU_FIELD(HT20_32, ofdm[0]), + SKU_FIELD(HT20_1_2_9_10, mcs[0][1]), + SKU_FIELD(HT20_3_4_11_12, mcs[0][3]), + SKU_FIELD(HT20_5_13, mcs[0][5]), + SKU_FIELD(HT20_6_14, mcs[0][6]), + SKU_FIELD(HT20_7_15, mcs[0][7]), + SKU_FIELD(HT40_0_8, mcs[1][0]), + SKU_FIELD(HT40_32, ofdm[0]), + SKU_FIELD(HT40_1_2_9_10, mcs[1][1]), + SKU_FIELD(HT40_3_4_11_12, mcs[1][3]), + SKU_FIELD(HT40_5_13, mcs[1][5]), + SKU_FIELD(HT40_6_14, mcs[1][6]), + SKU_FIELD(HT40_7_15, mcs[1][7]), + SKU_FIELD(VHT20_0, mcs[0][0]), + SKU_FIELD(VHT20_1_2, mcs[0][1]), + SKU_FIELD(VHT20_3_4, mcs[0][3]), + SKU_FIELD(VHT20_5_6, mcs[0][5]), + SKU_FIELD(VHT20_7, mcs[0][7]), + SKU_FIELD(VHT20_8, mcs[0][8]), + SKU_FIELD(VHT20_9, mcs[0][9]), + SKU_FIELD(VHT40_0, mcs[1][0]), + SKU_FIELD(VHT40_1_2, mcs[1][1]), + SKU_FIELD(VHT40_3_4, mcs[1][3]), + SKU_FIELD(VHT40_5_6, mcs[1][5]), + SKU_FIELD(VHT40_7, mcs[1][7]), + SKU_FIELD(VHT40_8, mcs[1][8]), + SKU_FIELD(VHT40_9, mcs[1][9]), + SKU_FIELD(VHT80_0, mcs[2][0]), + SKU_FIELD(VHT80_1_2, mcs[2][1]), + SKU_FIELD(VHT80_3_4, mcs[2][3]), + SKU_FIELD(VHT80_5_6, mcs[2][5]), + SKU_FIELD(VHT80_7, mcs[2][7]), + SKU_FIELD(VHT80_8, mcs[2][8]), + SKU_FIELD(VHT80_9, mcs[2][9]), + SKU_FIELD(VHT160_0, mcs[3][0]), + SKU_FIELD(VHT160_1_2, mcs[3][1]), + SKU_FIELD(VHT160_3_4, mcs[3][3]), + SKU_FIELD(VHT160_5_6, mcs[3][5]), + SKU_FIELD(VHT160_7, mcs[3][7]), + SKU_FIELD(VHT160_8, mcs[3][8]), + SKU_FIELD(VHT160_9, mcs[3][9]), +#undef SKU_FIELD + }; + + tx_power = mt76_get_sar_power(mphy, mphy->chandef.chan, tx_power); + tx_power -= mt76_tx_power_nss_delta(n_chains); + tx_power = mt76_get_rate_power_limits(mphy, mphy->chandef.chan, + &limits, tx_power); + mphy->txpower_cur = tx_power; + + if (is_mt7663(mphy->dev)) { + memset(sku, tx_power, MT_SKU_4SS_DELTA + 1); + return; + } + + for (i = 0; i < MT_SKU_1SS_DELTA; i++) + sku[i] = limits_array[sku_mapping[i]]; + + for (i = 0; i < 4; i++) { + int delta = 0; + + if (i < n_chains - 1) + delta = mt76_tx_power_nss_delta(n_chains) - + mt76_tx_power_nss_delta(i + 1); + sku[MT_SKU_1SS_DELTA + i] = delta; + } +} + +static u8 mt7615_mcu_chan_bw(struct cfg80211_chan_def *chandef) +{ + static const u8 width_to_bw[] = { + [NL80211_CHAN_WIDTH_40] = CMD_CBW_40MHZ, + [NL80211_CHAN_WIDTH_80] = CMD_CBW_80MHZ, + [NL80211_CHAN_WIDTH_80P80] = CMD_CBW_8080MHZ, + [NL80211_CHAN_WIDTH_160] = CMD_CBW_160MHZ, + [NL80211_CHAN_WIDTH_5] = CMD_CBW_5MHZ, + [NL80211_CHAN_WIDTH_10] = CMD_CBW_10MHZ, 
+ [NL80211_CHAN_WIDTH_20] = CMD_CBW_20MHZ, + [NL80211_CHAN_WIDTH_20_NOHT] = CMD_CBW_20MHZ, + }; + + if (chandef->width >= ARRAY_SIZE(width_to_bw)) + return 0; + + return width_to_bw[chandef->width]; +} + +int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd) +{ + struct mt7615_dev *dev = phy->dev; + struct cfg80211_chan_def *chandef = &phy->mt76->chandef; + int freq1 = chandef->center_freq1, freq2 = chandef->center_freq2; + struct { + u8 control_chan; + u8 center_chan; + u8 bw; + u8 tx_streams; + u8 rx_streams_mask; + u8 switch_reason; + u8 band_idx; + /* for 80+80 only */ + u8 center_chan2; + __le16 cac_case; + u8 channel_band; + u8 rsv0; + __le32 outband_freq; + u8 txpower_drop; + u8 rsv1[3]; + u8 txpower_sku[53]; + u8 rsv2[3]; + } req = { + .control_chan = chandef->chan->hw_value, + .center_chan = ieee80211_frequency_to_channel(freq1), + .tx_streams = hweight8(phy->mt76->antenna_mask), + .rx_streams_mask = phy->mt76->chainmask, + .center_chan2 = ieee80211_frequency_to_channel(freq2), + }; + + if (cmd == MCU_EXT_CMD(SET_RX_PATH) || + dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR) + req.switch_reason = CH_SWITCH_NORMAL; + else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) + req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD; + else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef, + NL80211_IFTYPE_AP)) + req.switch_reason = CH_SWITCH_DFS; + else + req.switch_reason = CH_SWITCH_NORMAL; + + req.band_idx = phy != &dev->phy; + req.bw = mt7615_mcu_chan_bw(chandef); + + if (mt76_testmode_enabled(phy->mt76)) + memset(req.txpower_sku, 0x3f, 49); + else + mt7615_mcu_set_txpower_sku(phy, req.txpower_sku); + + return mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), true); +} + +int mt7615_mcu_get_temperature(struct mt7615_dev *dev) +{ + struct { + u8 action; + u8 rsv[3]; + } req = {}; + + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_CTRL), + &req, sizeof(req), true); +} + +int mt7615_mcu_set_test_param(struct mt7615_dev *dev, u8 param, bool test_mode, + u32 val) +{ + struct { + u8 test_mode_en; + u8 param_idx; + u8 _rsv[2]; + + __le32 value; + + u8 pad[8]; + } req = { + .test_mode_en = test_mode, + .param_idx = param, + .value = cpu_to_le32(val), + }; + + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), + &req, sizeof(req), false); +} + +int mt7615_mcu_set_sku_en(struct mt7615_phy *phy, bool enable) +{ + struct mt7615_dev *dev = phy->dev; + struct { + u8 format_id; + u8 sku_enable; + u8 band_idx; + u8 rsv; + } req = { + .format_id = 0, + .band_idx = phy != &dev->phy, + .sku_enable = enable, + }; + + return mt76_mcu_send_msg(&dev->mt76, + MCU_EXT_CMD(TX_POWER_FEATURE_CTRL), + &req, sizeof(req), true); +} + +static int mt7615_find_freq_idx(const u16 *freqs, int n_freqs, u16 cur) +{ + int i; + + for (i = 0; i < n_freqs; i++) + if (cur == freqs[i]) + return i; + + return -1; +} + +static int mt7615_dcoc_freq_idx(u16 freq, u8 bw) +{ + static const u16 freq_list[] = { + 4980, 5805, 5905, 5190, + 5230, 5270, 5310, 5350, + 5390, 5430, 5470, 5510, + 5550, 5590, 5630, 5670, + 5710, 5755, 5795, 5835, + 5875, 5210, 5290, 5370, + 5450, 5530, 5610, 5690, + 5775, 5855 + }; + static const u16 freq_bw40[] = { + 5190, 5230, 5270, 5310, + 5350, 5390, 5430, 5470, + 5510, 5550, 5590, 5630, + 5670, 5710, 5755, 5795, + 5835, 5875 + }; + int offset_2g = ARRAY_SIZE(freq_list); + int idx; + + if (freq < 4000) { + if (freq < 2427) + return offset_2g; + if (freq < 2442) + return offset_2g + 1; + if (freq < 2457) + return offset_2g + 2; + + return offset_2g + 3; + } + + 
switch (bw) { + case NL80211_CHAN_WIDTH_80: + case NL80211_CHAN_WIDTH_80P80: + case NL80211_CHAN_WIDTH_160: + break; + default: + idx = mt7615_find_freq_idx(freq_bw40, ARRAY_SIZE(freq_bw40), + freq + 10); + if (idx >= 0) { + freq = freq_bw40[idx]; + break; + } + + idx = mt7615_find_freq_idx(freq_bw40, ARRAY_SIZE(freq_bw40), + freq - 10); + if (idx >= 0) { + freq = freq_bw40[idx]; + break; + } + fallthrough; + case NL80211_CHAN_WIDTH_40: + idx = mt7615_find_freq_idx(freq_bw40, ARRAY_SIZE(freq_bw40), + freq); + if (idx >= 0) + break; + + return -1; + + } + + return mt7615_find_freq_idx(freq_list, ARRAY_SIZE(freq_list), freq); +} + +int mt7615_mcu_apply_rx_dcoc(struct mt7615_phy *phy) +{ + struct mt7615_dev *dev = phy->dev; + struct cfg80211_chan_def *chandef = &phy->mt76->chandef; + int freq2 = chandef->center_freq2; + int ret; + struct { + u8 direction; + u8 runtime_calibration; + u8 _rsv[2]; + + __le16 center_freq; + u8 bw; + u8 band; + u8 is_freq2; + u8 success; + u8 dbdc_en; + + u8 _rsv2; + + struct { + __le32 sx0_i_lna[4]; + __le32 sx0_q_lna[4]; + + __le32 sx2_i_lna[4]; + __le32 sx2_q_lna[4]; + } dcoc_data[4]; + } req = { + .direction = 1, + + .bw = mt7615_mcu_chan_bw(chandef), + .band = chandef->center_freq1 > 4000, + .dbdc_en = !!dev->mt76.phys[MT_BAND1], + }; + u16 center_freq = chandef->center_freq1; + int freq_idx; + u8 *eep = dev->mt76.eeprom.data; + + if (!(eep[MT_EE_CALDATA_FLASH] & MT_EE_CALDATA_FLASH_RX_CAL)) + return 0; + + if (chandef->width == NL80211_CHAN_WIDTH_160) { + freq2 = center_freq + 40; + center_freq -= 40; + } + +again: + req.runtime_calibration = 1; + freq_idx = mt7615_dcoc_freq_idx(center_freq, chandef->width); + if (freq_idx < 0) + goto out; + + memcpy(req.dcoc_data, eep + MT7615_EEPROM_DCOC_OFFSET + + freq_idx * MT7615_EEPROM_DCOC_SIZE, + sizeof(req.dcoc_data)); + req.runtime_calibration = 0; + +out: + req.center_freq = cpu_to_le16(center_freq); + ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RXDCOC_CAL), &req, + sizeof(req), true); + + if ((chandef->width == NL80211_CHAN_WIDTH_80P80 || + chandef->width == NL80211_CHAN_WIDTH_160) && !req.is_freq2) { + req.is_freq2 = true; + center_freq = freq2; + goto again; + } + + return ret; +} + +static int mt7615_dpd_freq_idx(u16 freq, u8 bw) +{ + static const u16 freq_list[] = { + 4920, 4940, 4960, 4980, + 5040, 5060, 5080, 5180, + 5200, 5220, 5240, 5260, + 5280, 5300, 5320, 5340, + 5360, 5380, 5400, 5420, + 5440, 5460, 5480, 5500, + 5520, 5540, 5560, 5580, + 5600, 5620, 5640, 5660, + 5680, 5700, 5720, 5745, + 5765, 5785, 5805, 5825, + 5845, 5865, 5885, 5905 + }; + int offset_2g = ARRAY_SIZE(freq_list); + int idx; + + if (freq < 4000) { + if (freq < 2432) + return offset_2g; + if (freq < 2457) + return offset_2g + 1; + + return offset_2g + 2; + } + + if (bw != NL80211_CHAN_WIDTH_20) { + idx = mt7615_find_freq_idx(freq_list, ARRAY_SIZE(freq_list), + freq + 10); + if (idx >= 0) + return idx; + + idx = mt7615_find_freq_idx(freq_list, ARRAY_SIZE(freq_list), + freq - 10); + if (idx >= 0) + return idx; + } + + return mt7615_find_freq_idx(freq_list, ARRAY_SIZE(freq_list), freq); +} + + +int mt7615_mcu_apply_tx_dpd(struct mt7615_phy *phy) +{ + struct mt7615_dev *dev = phy->dev; + struct cfg80211_chan_def *chandef = &phy->mt76->chandef; + int freq2 = chandef->center_freq2; + int ret; + struct { + u8 direction; + u8 runtime_calibration; + u8 _rsv[2]; + + __le16 center_freq; + u8 bw; + u8 band; + u8 is_freq2; + u8 success; + u8 dbdc_en; + + u8 _rsv2; + + struct { + struct { + u32 dpd_g0; + u8 data[32]; + } wf0, wf1; + + struct { 
+ u32 dpd_g0_prim; + u32 dpd_g0_sec; + u8 data_prim[32]; + u8 data_sec[32]; + } wf2, wf3; + } dpd_data; + } req = { + .direction = 1, + + .bw = mt7615_mcu_chan_bw(chandef), + .band = chandef->center_freq1 > 4000, + .dbdc_en = !!dev->mt76.phys[MT_BAND1], + }; + u16 center_freq = chandef->center_freq1; + int freq_idx; + u8 *eep = dev->mt76.eeprom.data; + + if (!(eep[MT_EE_CALDATA_FLASH] & MT_EE_CALDATA_FLASH_TX_DPD)) + return 0; + + if (chandef->width == NL80211_CHAN_WIDTH_160) { + freq2 = center_freq + 40; + center_freq -= 40; + } + +again: + req.runtime_calibration = 1; + freq_idx = mt7615_dpd_freq_idx(center_freq, chandef->width); + if (freq_idx < 0) + goto out; + + memcpy(&req.dpd_data, eep + MT7615_EEPROM_TXDPD_OFFSET + + freq_idx * MT7615_EEPROM_TXDPD_SIZE, + sizeof(req.dpd_data)); + req.runtime_calibration = 0; + +out: + req.center_freq = cpu_to_le16(center_freq); + ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXDPD_CAL), + &req, sizeof(req), true); + + if ((chandef->width == NL80211_CHAN_WIDTH_80P80 || + chandef->width == NL80211_CHAN_WIDTH_160) && !req.is_freq2) { + req.is_freq2 = true; + center_freq = freq2; + goto again; + } + + return ret; +} + +int mt7615_mcu_set_rx_hdr_trans_blacklist(struct mt7615_dev *dev) +{ + struct { + u8 operation; + u8 count; + u8 _rsv[2]; + u8 index; + u8 enable; + __le16 etype; + } req = { + .operation = 1, + .count = 1, + .enable = 1, + .etype = cpu_to_le16(ETH_P_PAE), + }; + + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RX_HDR_TRANS), + &req, sizeof(req), false); +} + +int mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif, + bool enable) +{ + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; + struct { + u8 bss_idx; + u8 dtim_period; + __le16 aid; + __le16 bcn_interval; + __le16 atim_window; + u8 uapsd; + u8 bmc_delivered_ac; + u8 bmc_triggered_ac; + u8 pad; + } req = { + .bss_idx = mvif->mt76.idx, + .aid = cpu_to_le16(vif->cfg.aid), + .dtim_period = vif->bss_conf.dtim_period, + .bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int), + }; + struct { + u8 bss_idx; + u8 pad[3]; + } req_hdr = { + .bss_idx = mvif->mt76.idx, + }; + int err; + + if (vif->type != NL80211_IFTYPE_STATION) + return 0; + + err = mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_BSS_ABORT), + &req_hdr, sizeof(req_hdr), false); + if (err < 0 || !enable) + return err; + + return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_BSS_CONNECTED), + &req, sizeof(req), false); +} + +int mt7615_mcu_set_roc(struct mt7615_phy *phy, struct ieee80211_vif *vif, + struct ieee80211_channel *chan, int duration) +{ + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; + struct mt7615_dev *dev = phy->dev; + struct mt7615_roc_tlv req = { + .bss_idx = mvif->mt76.idx, + .active = !chan, + .max_interval = cpu_to_le32(duration), + .primary_chan = chan ? chan->hw_value : 0, + .band = chan ? chan->band : 0, + .req_type = 2, + }; + + phy->roc_grant = false; + + return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_ROC), + &req, sizeof(req), false); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h new file mode 100644 index 000000000..615956acc --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h @@ -0,0 +1,254 @@ +/* SPDX-License-Identifier: ISC */ +/* Copyright (C) 2019 MediaTek Inc. 
*/ + +#ifndef __MT7615_MCU_H +#define __MT7615_MCU_H + +#include "../mt76_connac_mcu.h" + +struct mt7615_mcu_txd { + __le32 txd[8]; + + __le16 len; + __le16 pq_id; + + u8 cid; + u8 pkt_type; + u8 set_query; /* FW don't care */ + u8 seq; + + u8 uc_d2b0_rev; + u8 ext_cid; + u8 s2d_index; + u8 ext_cid_ack; + + u32 reserved[5]; +} __packed __aligned(4); + +/** + * struct mt7615_uni_txd - mcu command descriptor for firmware v3 + * @txd: hardware descriptor + * @len: total length not including txd + * @cid: command identifier + * @pkt_type: must be 0xa0 (cmd packet by long format) + * @frag_n: fragment number + * @seq: sequence number + * @checksum: 0 mean there is no checksum + * @s2d_index: index for command source and destination + * Definition | value | note + * CMD_S2D_IDX_H2N | 0x00 | command from HOST to WM + * CMD_S2D_IDX_C2N | 0x01 | command from WA to WM + * CMD_S2D_IDX_H2C | 0x02 | command from HOST to WA + * CMD_S2D_IDX_H2N_AND_H2C | 0x03 | command from HOST to WA and WM + * + * @option: command option + * BIT[0]: UNI_CMD_OPT_BIT_ACK + * set to 1 to request a fw reply + * if UNI_CMD_OPT_BIT_0_ACK is set and UNI_CMD_OPT_BIT_2_SET_QUERY + * is set, mcu firmware will send response event EID = 0x01 + * (UNI_EVENT_ID_CMD_RESULT) to the host. + * BIT[1]: UNI_CMD_OPT_BIT_UNI_CMD + * 0: original command + * 1: unified command + * BIT[2]: UNI_CMD_OPT_BIT_SET_QUERY + * 0: QUERY command + * 1: SET command + */ +struct mt7615_uni_txd { + __le32 txd[8]; + + /* DW1 */ + __le16 len; + __le16 cid; + + /* DW2 */ + u8 reserved; + u8 pkt_type; + u8 frag_n; + u8 seq; + + /* DW3 */ + __le16 checksum; + u8 s2d_index; + u8 option; + + /* DW4 */ + u8 reserved2[4]; +} __packed __aligned(4); + +enum { + MT_SKU_CCK_1_2 = 0, + MT_SKU_CCK_55_11, + MT_SKU_OFDM_6_9, + MT_SKU_OFDM_12_18, + MT_SKU_OFDM_24_36, + MT_SKU_OFDM_48, + MT_SKU_OFDM_54, + MT_SKU_HT20_0_8, + MT_SKU_HT20_32, + MT_SKU_HT20_1_2_9_10, + MT_SKU_HT20_3_4_11_12, + MT_SKU_HT20_5_13, + MT_SKU_HT20_6_14, + MT_SKU_HT20_7_15, + MT_SKU_HT40_0_8, + MT_SKU_HT40_32, + MT_SKU_HT40_1_2_9_10, + MT_SKU_HT40_3_4_11_12, + MT_SKU_HT40_5_13, + MT_SKU_HT40_6_14, + MT_SKU_HT40_7_15, + MT_SKU_VHT20_0, + MT_SKU_VHT20_1_2, + MT_SKU_VHT20_3_4, + MT_SKU_VHT20_5_6, + MT_SKU_VHT20_7, + MT_SKU_VHT20_8, + MT_SKU_VHT20_9, + MT_SKU_VHT40_0, + MT_SKU_VHT40_1_2, + MT_SKU_VHT40_3_4, + MT_SKU_VHT40_5_6, + MT_SKU_VHT40_7, + MT_SKU_VHT40_8, + MT_SKU_VHT40_9, + MT_SKU_VHT80_0, + MT_SKU_VHT80_1_2, + MT_SKU_VHT80_3_4, + MT_SKU_VHT80_5_6, + MT_SKU_VHT80_7, + MT_SKU_VHT80_8, + MT_SKU_VHT80_9, + MT_SKU_VHT160_0, + MT_SKU_VHT160_1_2, + MT_SKU_VHT160_3_4, + MT_SKU_VHT160_5_6, + MT_SKU_VHT160_7, + MT_SKU_VHT160_8, + MT_SKU_VHT160_9, + MT_SKU_1SS_DELTA, + MT_SKU_2SS_DELTA, + MT_SKU_3SS_DELTA, + MT_SKU_4SS_DELTA, +}; + +struct mt7615_mcu_rxd { + __le32 rxd[4]; + + __le16 len; + __le16 pkt_type_id; + + u8 eid; + u8 seq; + __le16 __rsv; + + u8 ext_eid; + u8 __rsv1[2]; + u8 s2d_index; +}; + +struct mt7615_mcu_csa_notify { + struct mt7615_mcu_rxd rxd; + + u8 omac_idx; + u8 csa_count; + u8 rsv[2]; +} __packed; + +struct mt7615_mcu_rdd_report { + struct mt7615_mcu_rxd rxd; + + u8 band_idx; + u8 long_detected; + u8 constant_prf_detected; + u8 staggered_prf_detected; + u8 radar_type_idx; + u8 periodic_pulse_num; + u8 long_pulse_num; + u8 hw_pulse_num; + + u8 out_lpn; + u8 out_spn; + u8 out_crpn; + u8 out_crpw; + u8 out_crbn; + u8 out_stgpn; + u8 out_stgpw; + + u8 _rsv[2]; + + __le32 out_pri_const; + __le32 out_pri_stg[3]; + + struct { + __le32 start; + __le16 pulse_width; + __le16 pulse_power; + } 
long_pulse[32]; + + struct { + __le32 start; + __le16 pulse_width; + __le16 pulse_power; + } periodic_pulse[32]; + + struct { + __le32 start; + __le16 pulse_width; + __le16 pulse_power; + u8 sc_pass; + u8 sw_reset; + } hw_pulse[32]; +}; + +enum { + MCU_ATE_SET_FREQ_OFFSET = 0xa, + MCU_ATE_SET_TX_POWER_CONTROL = 0x15, +}; + +struct mt7615_mcu_uni_event { + u8 cid; + u8 pad[3]; + __le32 status; /* 0: success, others: fail */ +} __packed; + +struct mt7615_mcu_reg_event { + __le32 reg; + __le32 val; +} __packed; + +struct mt7615_roc_tlv { + u8 bss_idx; + u8 token; + u8 active; + u8 primary_chan; + u8 sco; + u8 band; + u8 width; /* To support 80/160MHz bandwidth */ + u8 freq_seg1; /* To support 80/160MHz bandwidth */ + u8 freq_seg2; /* To support 80/160MHz bandwidth */ + u8 req_type; + u8 dbdc_band; + u8 rsv0; + __le32 max_interval; /* ms */ + u8 rsv1[8]; +} __packed; + +enum { + FW_STATE_PWR_ON = 1, + FW_STATE_N9_RDY = 2, +}; + +enum { + DBDC_TYPE_WMM, + DBDC_TYPE_MGMT, + DBDC_TYPE_BSS, + DBDC_TYPE_MBSS, + DBDC_TYPE_REPEATER, + DBDC_TYPE_MU, + DBDC_TYPE_BF, + DBDC_TYPE_PTA, + __DBDC_TYPE_MAX, +}; + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c new file mode 100644 index 000000000..a784f9d9e --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c @@ -0,0 +1,292 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2020 MediaTek Inc. */ + +#include +#include +#include +#include + +#include "mt7615.h" +#include "regs.h" +#include "mac.h" +#include "../trace.h" + +const u32 mt7615e_reg_map[] = { + [MT_TOP_CFG_BASE] = 0x01000, + [MT_HW_BASE] = 0x01000, + [MT_PCIE_REMAP_2] = 0x02504, + [MT_ARB_BASE] = 0x20c00, + [MT_HIF_BASE] = 0x04000, + [MT_CSR_BASE] = 0x07000, + [MT_PLE_BASE] = 0x08000, + [MT_PSE_BASE] = 0x0c000, + [MT_CFG_BASE] = 0x20200, + [MT_AGG_BASE] = 0x20a00, + [MT_TMAC_BASE] = 0x21000, + [MT_RMAC_BASE] = 0x21200, + [MT_DMA_BASE] = 0x21800, + [MT_PF_BASE] = 0x22000, + [MT_WTBL_BASE_ON] = 0x23000, + [MT_WTBL_BASE_OFF] = 0x23400, + [MT_LPON_BASE] = 0x24200, + [MT_MIB_BASE] = 0x24800, + [MT_WTBL_BASE_ADDR] = 0x30000, + [MT_PCIE_REMAP_BASE2] = 0x80000, + [MT_TOP_MISC_BASE] = 0xc0000, + [MT_EFUSE_ADDR_BASE] = 0x81070000, +}; + +const u32 mt7663e_reg_map[] = { + [MT_TOP_CFG_BASE] = 0x01000, + [MT_HW_BASE] = 0x02000, + [MT_DMA_SHDL_BASE] = 0x06000, + [MT_PCIE_REMAP_2] = 0x0700c, + [MT_ARB_BASE] = 0x20c00, + [MT_HIF_BASE] = 0x04000, + [MT_CSR_BASE] = 0x07000, + [MT_PLE_BASE] = 0x08000, + [MT_PSE_BASE] = 0x0c000, + [MT_PP_BASE] = 0x0e000, + [MT_CFG_BASE] = 0x20000, + [MT_AGG_BASE] = 0x22000, + [MT_TMAC_BASE] = 0x24000, + [MT_RMAC_BASE] = 0x25000, + [MT_DMA_BASE] = 0x27000, + [MT_PF_BASE] = 0x28000, + [MT_WTBL_BASE_ON] = 0x29000, + [MT_WTBL_BASE_OFF] = 0x29800, + [MT_LPON_BASE] = 0x2b000, + [MT_MIB_BASE] = 0x2d000, + [MT_WTBL_BASE_ADDR] = 0x30000, + [MT_PCIE_REMAP_BASE2] = 0x90000, + [MT_TOP_MISC_BASE] = 0xc0000, + [MT_EFUSE_ADDR_BASE] = 0x78011000, +}; + +u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr) +{ + u32 base, offset; + + if (is_mt7663(&dev->mt76)) { + base = addr & MT7663_MCU_PCIE_REMAP_2_BASE; + offset = addr & MT7663_MCU_PCIE_REMAP_2_OFFSET; + } else { + base = addr & MT_MCU_PCIE_REMAP_2_BASE; + offset = addr & MT_MCU_PCIE_REMAP_2_OFFSET; + } + mt76_wr(dev, MT_MCU_PCIE_REMAP_2, base); + + return MT_PCIE_REMAP_BASE_2 + offset; +} + +static void +mt7615_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q) +{ + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); + + 
mt7615_irq_enable(dev, MT_INT_RX_DONE(q)); +} + +static irqreturn_t mt7615_irq_handler(int irq, void *dev_instance) +{ + struct mt7615_dev *dev = dev_instance; + + mt76_wr(dev, MT_INT_MASK_CSR, 0); + + if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state)) + return IRQ_NONE; + + tasklet_schedule(&dev->irq_tasklet); + + return IRQ_HANDLED; +} + +static void mt7615_irq_tasklet(struct tasklet_struct *t) +{ + struct mt7615_dev *dev = from_tasklet(dev, t, irq_tasklet); + u32 intr, mask = 0, tx_mcu_mask = mt7615_tx_mcu_int_mask(dev); + u32 mcu_int; + + mt76_wr(dev, MT_INT_MASK_CSR, 0); + + intr = mt76_rr(dev, MT_INT_SOURCE_CSR); + intr &= dev->mt76.mmio.irqmask; + mt76_wr(dev, MT_INT_SOURCE_CSR, intr); + + trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask); + + mask |= intr & MT_INT_RX_DONE_ALL; + if (intr & tx_mcu_mask) + mask |= tx_mcu_mask; + mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0); + + if (intr & tx_mcu_mask) + napi_schedule(&dev->mt76.tx_napi); + + if (intr & MT_INT_RX_DONE(0)) + napi_schedule(&dev->mt76.napi[0]); + + if (intr & MT_INT_RX_DONE(1)) + napi_schedule(&dev->mt76.napi[1]); + + if (!(intr & (MT_INT_MCU_CMD | MT7663_INT_MCU_CMD))) + return; + + if (is_mt7663(&dev->mt76)) { + mcu_int = mt76_rr(dev, MT_MCU2HOST_INT_STATUS); + mcu_int &= MT7663_MCU_CMD_ERROR_MASK; + mt76_wr(dev, MT_MCU2HOST_INT_STATUS, mcu_int); + } else { + mcu_int = mt76_rr(dev, MT_MCU_CMD); + mcu_int &= MT_MCU_CMD_ERROR_MASK; + } + + if (!mcu_int) + return; + + dev->reset_state = mcu_int; + queue_work(dev->mt76.wq, &dev->reset_work); + wake_up(&dev->reset_wait); +} + +static u32 __mt7615_reg_addr(struct mt7615_dev *dev, u32 addr) +{ + if (addr < 0x100000) + return addr; + + return mt7615_reg_map(dev, addr); +} + +static u32 mt7615_rr(struct mt76_dev *mdev, u32 offset) +{ + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); + u32 addr = __mt7615_reg_addr(dev, offset); + + return dev->bus_ops->rr(mdev, addr); +} + +static void mt7615_wr(struct mt76_dev *mdev, u32 offset, u32 val) +{ + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); + u32 addr = __mt7615_reg_addr(dev, offset); + + dev->bus_ops->wr(mdev, addr, val); +} + +static u32 mt7615_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val) +{ + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); + u32 addr = __mt7615_reg_addr(dev, offset); + + return dev->bus_ops->rmw(mdev, addr, mask, val); +} + +int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base, + int irq, const u32 *map) +{ + static const struct mt76_driver_ops drv_ops = { + /* txwi_size = txd size + txp size */ + .txwi_size = MT_TXD_SIZE + sizeof(struct mt76_connac_txp_common), + .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ, + .survey_flags = SURVEY_INFO_TIME_TX | + SURVEY_INFO_TIME_RX | + SURVEY_INFO_TIME_BSS_RX, + .token_size = MT7615_TOKEN_SIZE, + .tx_prepare_skb = mt7615_tx_prepare_skb, + .tx_complete_skb = mt76_connac_tx_complete_skb, + .rx_check = mt7615_rx_check, + .rx_skb = mt7615_queue_rx_skb, + .rx_poll_complete = mt7615_rx_poll_complete, + .sta_ps = mt7615_sta_ps, + .sta_add = mt7615_mac_sta_add, + .sta_remove = mt7615_mac_sta_remove, + .update_survey = mt7615_update_channel, + }; + struct mt76_bus_ops *bus_ops; + struct ieee80211_ops *ops; + struct mt7615_dev *dev; + struct mt76_dev *mdev; + int ret; + + ops = devm_kmemdup(pdev, &mt7615_ops, sizeof(mt7615_ops), GFP_KERNEL); + if (!ops) + return -ENOMEM; + + mdev = mt76_alloc_device(pdev, sizeof(*dev), ops, &drv_ops); + if (!mdev) + return 
-ENOMEM; + + dev = container_of(mdev, struct mt7615_dev, mt76); + mt76_mmio_init(&dev->mt76, mem_base); + tasklet_setup(&dev->irq_tasklet, mt7615_irq_tasklet); + + dev->reg_map = map; + dev->ops = ops; + mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) | + (mt76_rr(dev, MT_HW_REV) & 0xff); + dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev); + + dev->bus_ops = dev->mt76.bus; + bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops), + GFP_KERNEL); + if (!bus_ops) { + ret = -ENOMEM; + goto err_free_dev; + } + + bus_ops->rr = mt7615_rr; + bus_ops->wr = mt7615_wr; + bus_ops->rmw = mt7615_rmw; + dev->mt76.bus = bus_ops; + + mt76_wr(dev, MT_INT_MASK_CSR, 0); + + ret = devm_request_irq(mdev->dev, irq, mt7615_irq_handler, + IRQF_SHARED, KBUILD_MODNAME, dev); + if (ret) + goto err_free_dev; + + if (is_mt7663(mdev)) + mt76_wr(dev, MT_PCIE_IRQ_ENABLE, 1); + + ret = mt7615_register_device(dev); + if (ret) + goto err_free_irq; + + return 0; + +err_free_irq: + devm_free_irq(pdev, irq, dev); +err_free_dev: + mt76_free_device(&dev->mt76); + + return ret; +} + +static int __init mt7615_init(void) +{ + int ret; + + ret = pci_register_driver(&mt7615_pci_driver); + if (ret) + return ret; + + if (IS_ENABLED(CONFIG_MT7622_WMAC)) { + ret = platform_driver_register(&mt7622_wmac_driver); + if (ret) + pci_unregister_driver(&mt7615_pci_driver); + } + + return ret; +} + +static void __exit mt7615_exit(void) +{ + if (IS_ENABLED(CONFIG_MT7622_WMAC)) + platform_driver_unregister(&mt7622_wmac_driver); + pci_unregister_driver(&mt7615_pci_driver); +} + +module_init(mt7615_init); +module_exit(mt7615_exit); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h new file mode 100644 index 000000000..c0e62082b --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h @@ -0,0 +1,563 @@ +/* SPDX-License-Identifier: ISC */ +/* Copyright (C) 2019 MediaTek Inc. 
*/ + +#ifndef __MT7615_H +#define __MT7615_H + +#include +#include +#include +#include +#include "../mt76_connac_mcu.h" +#include "regs.h" + +#define MT7615_MAX_INTERFACES 16 +#define MT7615_MAX_WMM_SETS 4 +#define MT7663_WTBL_SIZE 32 +#define MT7615_WTBL_SIZE 128 +#define MT7615_WTBL_RESERVED (mt7615_wtbl_size(dev) - 1) +#define MT7615_WTBL_STA (MT7615_WTBL_RESERVED - \ + MT7615_MAX_INTERFACES) + +#define MT7615_PM_TIMEOUT (HZ / 12) +#define MT7615_HW_SCAN_TIMEOUT (HZ / 10) +#define MT7615_RESET_TIMEOUT (30 * HZ) +#define MT7615_RATE_RETRY 2 + +#define MT7615_TX_RING_SIZE 1024 +#define MT7615_TX_MGMT_RING_SIZE 128 +#define MT7615_TX_MCU_RING_SIZE 128 +#define MT7615_TX_FWDL_RING_SIZE 128 + +#define MT7615_RX_RING_SIZE 1024 +#define MT7615_RX_MCU_RING_SIZE 512 + +#define MT7615_DRV_OWN_RETRY_COUNT 10 + +#define MT7615_FIRMWARE_CR4 "mediatek/mt7615_cr4.bin" +#define MT7615_FIRMWARE_N9 "mediatek/mt7615_n9.bin" +#define MT7615_ROM_PATCH "mediatek/mt7615_rom_patch.bin" + +#define MT7622_FIRMWARE_N9 "mediatek/mt7622_n9.bin" +#define MT7622_ROM_PATCH "mediatek/mt7622_rom_patch.bin" + +#define MT7615_FIRMWARE_V1 1 +#define MT7615_FIRMWARE_V2 2 +#define MT7615_FIRMWARE_V3 3 + +#define MT7663_OFFLOAD_ROM_PATCH "mediatek/mt7663pr2h.bin" +#define MT7663_OFFLOAD_FIRMWARE_N9 "mediatek/mt7663_n9_v3.bin" +#define MT7663_ROM_PATCH "mediatek/mt7663pr2h_rebb.bin" +#define MT7663_FIRMWARE_N9 "mediatek/mt7663_n9_rebb.bin" + +#define MT7615_EEPROM_SIZE 1024 +#define MT7615_TOKEN_SIZE 4096 + +#define MT_FRAC_SCALE 12 +#define MT_FRAC(val, div) (((val) << MT_FRAC_SCALE) / (div)) + +#define MT_CHFREQ_VALID BIT(7) +#define MT_CHFREQ_DBDC_IDX BIT(6) +#define MT_CHFREQ_SEQ GENMASK(5, 0) + +#define MT7615_BAR_RATE_DEFAULT 0x4b /* OFDM 6M */ +#define MT7615_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */ +#define MT7615_CFEND_RATE_11B 0x03 /* 11B LP, 11M */ + +struct mt7615_vif; +struct mt7615_sta; +struct mt7615_dfs_pulse; +struct mt7615_dfs_pattern; +enum mt7615_cipher_type; + +enum mt7615_hw_txq_id { + MT7615_TXQ_MAIN, + MT7615_TXQ_EXT, + MT7615_TXQ_MCU, + MT7615_TXQ_FWDL, +}; + +enum mt7622_hw_txq_id { + MT7622_TXQ_AC0, + MT7622_TXQ_AC1, + MT7622_TXQ_AC2, + MT7622_TXQ_FWDL = MT7615_TXQ_FWDL, + MT7622_TXQ_AC3, + MT7622_TXQ_MGMT, + MT7622_TXQ_MCU = 15, +}; + +struct mt7615_rate_set { + struct ieee80211_tx_rate probe_rate; + struct ieee80211_tx_rate rates[4]; +}; + +struct mt7615_rate_desc { + bool rateset; + u16 probe_val; + u16 val[4]; + u8 bw_idx; + u8 bw; +}; + +struct mt7615_wtbl_rate_desc { + struct list_head node; + + struct mt7615_rate_desc rate; + struct mt7615_sta *sta; +}; + +struct mt7663s_intr { + u32 isr; + struct { + u32 wtqcr[8]; + } tx; + struct { + u16 num[2]; + u16 len[2][16]; + } rx; + u32 rec_mb[2]; +} __packed; + +struct mt7615_sta { + struct mt76_wcid wcid; /* must be first */ + + struct mt7615_vif *vif; + + struct list_head poll_list; + u32 airtime_ac[8]; + + struct ieee80211_tx_rate rates[4]; + + struct mt7615_rate_set rateset[2]; + u32 rate_set_tsf; + + u8 rate_count; + u8 n_rates; + + u8 rate_probe; +}; + +struct mt7615_vif { + struct mt76_vif mt76; /* must be first */ + struct mt7615_sta sta; + bool sta_added; +}; + +struct mib_stats { + u32 ack_fail_cnt; + u32 fcs_err_cnt; + u32 rts_cnt; + u32 rts_retries_cnt; + u32 ba_miss_cnt; + unsigned long aggr_per; +}; + +struct mt7615_phy { + struct mt76_phy *mt76; + struct mt7615_dev *dev; + + struct ieee80211_vif *monitor_vif; + + u8 n_beacon_vif; + + u32 rxfilter; + u64 omac_mask; + + u16 noise; + + bool scs_en; + + unsigned long last_cca_adj; + int 
false_cca_ofdm, false_cca_cck; + s8 ofdm_sensitivity; + s8 cck_sensitivity; + + s16 coverage_class; + u8 slottime; + + u8 chfreq; + u8 rdd_state; + + u32 rx_ampdu_ts; + u32 ampdu_ref; + + struct mib_stats mib; + + struct sk_buff_head scan_event_list; + struct delayed_work scan_work; + + struct work_struct roc_work; + struct timer_list roc_timer; + wait_queue_head_t roc_wait; + bool roc_grant; + +#ifdef CONFIG_NL80211_TESTMODE + struct { + u32 *reg_backup; + + s16 last_freq_offset; + u8 last_rcpi[4]; + s8 last_ib_rssi[4]; + s8 last_wb_rssi[4]; + } test; +#endif +}; + +#define mt7615_mcu_add_tx_ba(dev, ...) (dev)->mcu_ops->add_tx_ba((dev), __VA_ARGS__) +#define mt7615_mcu_add_rx_ba(dev, ...) (dev)->mcu_ops->add_rx_ba((dev), __VA_ARGS__) +#define mt7615_mcu_sta_add(phy, ...) ((phy)->dev)->mcu_ops->sta_add((phy), __VA_ARGS__) +#define mt7615_mcu_add_dev_info(phy, ...) ((phy)->dev)->mcu_ops->add_dev_info((phy), __VA_ARGS__) +#define mt7615_mcu_add_bss_info(phy, ...) ((phy)->dev)->mcu_ops->add_bss_info((phy), __VA_ARGS__) +#define mt7615_mcu_add_beacon(dev, ...) (dev)->mcu_ops->add_beacon_offload((dev), __VA_ARGS__) +#define mt7615_mcu_set_pm(dev, ...) (dev)->mcu_ops->set_pm_state((dev), __VA_ARGS__) +#define mt7615_mcu_set_drv_ctrl(dev) (dev)->mcu_ops->set_drv_ctrl((dev)) +#define mt7615_mcu_set_fw_ctrl(dev) (dev)->mcu_ops->set_fw_ctrl((dev)) +#define mt7615_mcu_set_sta_decap_offload(dev, ...) (dev)->mcu_ops->set_sta_decap_offload((dev), __VA_ARGS__) +struct mt7615_mcu_ops { + int (*add_tx_ba)(struct mt7615_dev *dev, + struct ieee80211_ampdu_params *params, + bool enable); + int (*add_rx_ba)(struct mt7615_dev *dev, + struct ieee80211_ampdu_params *params, + bool enable); + int (*sta_add)(struct mt7615_phy *phy, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, bool enable); + int (*add_dev_info)(struct mt7615_phy *phy, struct ieee80211_vif *vif, + bool enable); + int (*add_bss_info)(struct mt7615_phy *phy, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, bool enable); + int (*add_beacon_offload)(struct mt7615_dev *dev, + struct ieee80211_hw *hw, + struct ieee80211_vif *vif, bool enable); + int (*set_pm_state)(struct mt7615_dev *dev, int band, int state); + int (*set_drv_ctrl)(struct mt7615_dev *dev); + int (*set_fw_ctrl)(struct mt7615_dev *dev); + int (*set_sta_decap_offload)(struct mt7615_dev *dev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +}; + +struct mt7615_dev { + union { /* must be first */ + struct mt76_dev mt76; + struct mt76_phy mphy; + }; + + const struct mt76_bus_ops *bus_ops; + struct tasklet_struct irq_tasklet; + + struct mt7615_phy phy; + u64 omac_mask; + + u16 chainmask; + + struct ieee80211_ops *ops; + const struct mt7615_mcu_ops *mcu_ops; + struct regmap *infracfg; + const u32 *reg_map; + + struct work_struct mcu_work; + + struct work_struct reset_work; + wait_queue_head_t reset_wait; + u32 reset_state; + + struct list_head sta_poll_list; + spinlock_t sta_poll_lock; + + struct { + u8 n_pulses; + u32 period; + u16 width; + s16 power; + } radar_pattern; + u32 hw_pattern; + + bool fw_debug; + bool flash_eeprom; + bool dbdc_support; + + u8 fw_ver; + + struct work_struct rate_work; + struct list_head wrd_head; + + u32 debugfs_rf_wf; + u32 debugfs_rf_reg; + + u32 muar_mask; + + struct mt76_connac_pm pm; + struct mt76_connac_coredump coredump; +}; + +enum tx_pkt_queue_idx { + MT_LMAC_AC00, + MT_LMAC_AC01, + MT_LMAC_AC02, + MT_LMAC_AC03, + MT_LMAC_ALTX0 = 0x10, + MT_LMAC_BMC0, + MT_LMAC_BCN0, + MT_LMAC_PSMP0, + MT_LMAC_ALTX1, + MT_LMAC_BMC1, + 
MT_LMAC_BCN1, + MT_LMAC_PSMP1, +}; + +enum { + MT_RX_SEL0, + MT_RX_SEL1, +}; + +enum mt7615_rdd_cmd { + RDD_STOP, + RDD_START, + RDD_DET_MODE, + RDD_DET_STOP, + RDD_CAC_START, + RDD_CAC_END, + RDD_NORMAL_START, + RDD_DISABLE_DFS_CAL, + RDD_PULSE_DBG, + RDD_READ_PULSE, + RDD_RESUME_BF, +}; + +static inline struct mt7615_phy * +mt7615_hw_phy(struct ieee80211_hw *hw) +{ + struct mt76_phy *phy = hw->priv; + + return phy->priv; +} + +static inline struct mt7615_dev * +mt7615_hw_dev(struct ieee80211_hw *hw) +{ + struct mt76_phy *phy = hw->priv; + + return container_of(phy->dev, struct mt7615_dev, mt76); +} + +static inline struct mt7615_phy * +mt7615_ext_phy(struct mt7615_dev *dev) +{ + struct mt76_phy *phy = dev->mt76.phys[MT_BAND1]; + + if (!phy) + return NULL; + + return phy->priv; +} + +extern struct ieee80211_rate mt7615_rates[12]; +extern const struct ieee80211_ops mt7615_ops; +extern const u32 mt7615e_reg_map[__MT_BASE_MAX]; +extern const u32 mt7663e_reg_map[__MT_BASE_MAX]; +extern const u32 mt7663_usb_sdio_reg_map[__MT_BASE_MAX]; +extern struct pci_driver mt7615_pci_driver; +extern struct platform_driver mt7622_wmac_driver; +extern const struct mt76_testmode_ops mt7615_testmode_ops; + +#ifdef CONFIG_MT7622_WMAC +int mt7622_wmac_init(struct mt7615_dev *dev); +#else +static inline int mt7622_wmac_init(struct mt7615_dev *dev) +{ + return 0; +} +#endif + +int mt7615_thermal_init(struct mt7615_dev *dev); +int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base, + int irq, const u32 *map); +u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr); + +void mt7615_init_device(struct mt7615_dev *dev); +int mt7615_register_device(struct mt7615_dev *dev); +void mt7615_unregister_device(struct mt7615_dev *dev); +int mt7615_register_ext_phy(struct mt7615_dev *dev); +void mt7615_unregister_ext_phy(struct mt7615_dev *dev); +int mt7615_eeprom_init(struct mt7615_dev *dev, u32 addr); +int mt7615_eeprom_get_target_power_index(struct mt7615_dev *dev, + struct ieee80211_channel *chan, + u8 chain_idx); +int mt7615_eeprom_get_power_delta_index(struct mt7615_dev *dev, + enum nl80211_band band); +int mt7615_wait_pdma_busy(struct mt7615_dev *dev); +int mt7615_dma_init(struct mt7615_dev *dev); +void mt7615_dma_start(struct mt7615_dev *dev); +void mt7615_dma_cleanup(struct mt7615_dev *dev); +int mt7615_mcu_init(struct mt7615_dev *dev); +bool mt7615_wait_for_mcu_init(struct mt7615_dev *dev); +void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta, + struct ieee80211_tx_rate *probe_rate, + struct ieee80211_tx_rate *rates); +void mt7615_pm_wake_work(struct work_struct *work); +void mt7615_pm_power_save_work(struct work_struct *work); +int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev); +int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd); +int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue, + const struct ieee80211_tx_queue_params *params); +void mt7615_mcu_rx_event(struct mt7615_dev *dev, struct sk_buff *skb); +int mt7615_mcu_rdd_send_pattern(struct mt7615_dev *dev); +int mt7615_mcu_fw_log_2_host(struct mt7615_dev *dev, u8 ctrl); + +static inline void mt7615_irq_enable(struct mt7615_dev *dev, u32 mask) +{ + mt76_set_irq_mask(&dev->mt76, 0, 0, mask); + + tasklet_schedule(&dev->irq_tasklet); +} + +static inline bool mt7615_firmware_offload(struct mt7615_dev *dev) +{ + return dev->fw_ver > MT7615_FIRMWARE_V2; +} + +static inline u16 mt7615_wtbl_size(struct mt7615_dev *dev) +{ + if (is_mt7663(&dev->mt76) && mt7615_firmware_offload(dev)) + return MT7663_WTBL_SIZE; + else + return 
MT7615_WTBL_SIZE; +} + +#define mt7615_mutex_acquire(dev) \ + mt76_connac_mutex_acquire(&(dev)->mt76, &(dev)->pm) +#define mt7615_mutex_release(dev) \ + mt76_connac_mutex_release(&(dev)->mt76, &(dev)->pm) + +static inline u8 mt7615_lmac_mapping(struct mt7615_dev *dev, u8 ac) +{ + static const u8 lmac_queue_map[] = { + [IEEE80211_AC_BK] = MT_LMAC_AC00, + [IEEE80211_AC_BE] = MT_LMAC_AC01, + [IEEE80211_AC_VI] = MT_LMAC_AC02, + [IEEE80211_AC_VO] = MT_LMAC_AC03, + }; + + if (WARN_ON_ONCE(ac >= ARRAY_SIZE(lmac_queue_map))) + return MT_LMAC_AC01; /* BE */ + + return lmac_queue_map[ac]; +} + +static inline u32 mt7615_tx_mcu_int_mask(struct mt7615_dev *dev) +{ + return MT_INT_TX_DONE(dev->mt76.q_mcu[MT_MCUQ_WM]->hw_idx); +} + +static inline unsigned long +mt7615_get_macwork_timeout(struct mt7615_dev *dev) +{ + return dev->pm.enable ? HZ / 3 : HZ / 10; +} + +void mt7615_dma_reset(struct mt7615_dev *dev); +void mt7615_scan_work(struct work_struct *work); +void mt7615_roc_work(struct work_struct *work); +void mt7615_roc_timer(struct timer_list *timer); +void mt7615_init_txpower(struct mt7615_dev *dev, + struct ieee80211_supported_band *sband); +int mt7615_set_channel(struct mt7615_phy *phy); +void mt7615_init_work(struct mt7615_dev *dev); + +int mt7615_mcu_restart(struct mt76_dev *dev); +void mt7615_update_channel(struct mt76_phy *mphy); +bool mt7615_mac_wtbl_update(struct mt7615_dev *dev, int idx, u32 mask); +void mt7615_mac_reset_counters(struct mt7615_dev *dev); +void mt7615_mac_cca_stats_reset(struct mt7615_phy *phy); +void mt7615_mac_set_scs(struct mt7615_phy *phy, bool enable); +void mt7615_mac_enable_nf(struct mt7615_dev *dev, bool ext_phy); +void mt7615_mac_sta_poll(struct mt7615_dev *dev); +int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi, + struct sk_buff *skb, struct mt76_wcid *wcid, + struct ieee80211_sta *sta, int pid, + struct ieee80211_key_conf *key, + enum mt76_txq_id qid, bool beacon); +void mt7615_mac_set_timing(struct mt7615_phy *phy); +int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev, + struct mt76_wcid *wcid, + struct ieee80211_key_conf *key); +int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev, struct mt76_wcid *wcid, + struct ieee80211_key_conf *key); +void mt7615_mac_reset_work(struct work_struct *work); +u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid); + +int mt7615_mcu_parse_response(struct mt76_dev *mdev, int cmd, + struct sk_buff *skb, int seq); +u32 mt7615_rf_rr(struct mt7615_dev *dev, u32 wf, u32 reg); +int mt7615_rf_wr(struct mt7615_dev *dev, u32 wf, u32 reg, u32 val); +int mt7615_mcu_set_dbdc(struct mt7615_dev *dev); +int mt7615_mcu_set_eeprom(struct mt7615_dev *dev); +int mt7615_mcu_get_temperature(struct mt7615_dev *dev); +int mt7615_mcu_set_tx_power(struct mt7615_phy *phy); +void mt7615_mcu_exit(struct mt7615_dev *dev); +void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb, + int cmd, int *wait_seq); + +int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, + enum mt76_txq_id qid, struct mt76_wcid *wcid, + struct ieee80211_sta *sta, + struct mt76_tx_info *tx_info); + +void mt7615_tx_worker(struct mt76_worker *w); +void mt7615_tx_token_put(struct mt7615_dev *dev); +bool mt7615_rx_check(struct mt76_dev *mdev, void *data, int len); +void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, + struct sk_buff *skb); +void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps); +int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); 
+void mt7615_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +void mt7615_mac_work(struct work_struct *work); +int mt7615_mcu_set_rx_hdr_trans_blacklist(struct mt7615_dev *dev); +int mt7615_mcu_set_fcc5_lpn(struct mt7615_dev *dev, int val); +int mt7615_mcu_set_pulse_th(struct mt7615_dev *dev, + const struct mt7615_dfs_pulse *pulse); +int mt7615_mcu_set_radar_th(struct mt7615_dev *dev, int index, + const struct mt7615_dfs_pattern *pattern); +int mt7615_mcu_set_test_param(struct mt7615_dev *dev, u8 param, bool test_mode, + u32 val); +int mt7615_mcu_set_sku_en(struct mt7615_phy *phy, bool enable); +int mt7615_mcu_apply_rx_dcoc(struct mt7615_phy *phy); +int mt7615_mcu_apply_tx_dpd(struct mt7615_phy *phy); +int mt7615_dfs_init_radar_detector(struct mt7615_phy *phy); + +int mt7615_mcu_set_roc(struct mt7615_phy *phy, struct ieee80211_vif *vif, + struct ieee80211_channel *chan, int duration); + +int mt7615_init_debugfs(struct mt7615_dev *dev); +int mt7615_mcu_wait_response(struct mt7615_dev *dev, int cmd, int seq); + +int mt7615_mac_set_beacon_filter(struct mt7615_phy *phy, + struct ieee80211_vif *vif, + bool enable); +int mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif, + bool enable); +int __mt7663_load_firmware(struct mt7615_dev *dev); +void mt7615_coredump_work(struct work_struct *work); + +void mt7622_trigger_hif_int(struct mt7615_dev *dev, bool en); + +/* usb */ +int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, + enum mt76_txq_id qid, struct mt76_wcid *wcid, + struct ieee80211_sta *sta, + struct mt76_tx_info *tx_info); +bool mt7663_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update); +void mt7663_usb_sdio_tx_complete_skb(struct mt76_dev *mdev, + struct mt76_queue_entry *e); +int mt7663_usb_sdio_register_device(struct mt7615_dev *dev); +int mt7663u_mcu_init(struct mt7615_dev *dev); +int mt7663u_mcu_power_on(struct mt7615_dev *dev); + +/* sdio */ +int mt7663s_mcu_init(struct mt7615_dev *dev); + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615_trace.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615_trace.h new file mode 100644 index 000000000..d3eb49d83 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615_trace.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (C) 2019 Lorenzo Bianconi + */ + +#if !defined(__MT7615_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define __MT7615_TRACE_H + +#include +#include "mt7615.h" + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM mt7615 + +#define MAXNAME 32 +#define DEV_ENTRY __array(char, wiphy_name, 32) +#define DEV_ASSIGN strlcpy(__entry->wiphy_name, \ + wiphy_name(mt76_hw(dev)->wiphy), MAXNAME) +#define DEV_PR_FMT "%s" +#define DEV_PR_ARG __entry->wiphy_name + +#define TOKEN_ENTRY __field(u16, token) +#define TOKEN_ASSIGN __entry->token = token +#define TOKEN_PR_FMT " %d" +#define TOKEN_PR_ARG __entry->token + +DECLARE_EVENT_CLASS(dev_token, + TP_PROTO(struct mt7615_dev *dev, u16 token), + TP_ARGS(dev, token), + TP_STRUCT__entry( + DEV_ENTRY + TOKEN_ENTRY + ), + TP_fast_assign( + DEV_ASSIGN; + TOKEN_ASSIGN; + ), + TP_printk( + DEV_PR_FMT TOKEN_PR_FMT, + DEV_PR_ARG, TOKEN_PR_ARG + ) +); + +DEFINE_EVENT(dev_token, mac_tx_free, + TP_PROTO(struct mt7615_dev *dev, u16 token), + TP_ARGS(dev, token) +); + +#endif + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . 
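+/* trace.c defines CREATE_TRACE_POINTS and includes this header from the local directory */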
+#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE mt7615_trace + +#include diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c new file mode 100644 index 000000000..b80824894 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c @@ -0,0 +1,202 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2019 MediaTek Inc. + * + * Author: Ryder Lee + * Felix Fietkau + */ + +#include +#include +#include + +#include "mt7615.h" +#include "mcu.h" + +static const struct pci_device_id mt7615_pci_device_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7615) }, + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7663) }, + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7611) }, + { }, +}; + +static int mt7615_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + const u32 *map; + int ret; + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev)); + if (ret) + return ret; + + pci_set_master(pdev); + + ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); + if (ret < 0) + return ret; + + ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (ret) + goto error; + + mt76_pci_disable_aspm(pdev); + + map = id->device == 0x7663 ? mt7663e_reg_map : mt7615e_reg_map; + ret = mt7615_mmio_probe(&pdev->dev, pcim_iomap_table(pdev)[0], + pdev->irq, map); + if (ret) + goto error; + + return 0; +error: + pci_free_irq_vectors(pdev); + + return ret; +} + +static void mt7615_pci_remove(struct pci_dev *pdev) +{ + struct mt76_dev *mdev = pci_get_drvdata(pdev); + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); + + mt7615_unregister_device(dev); + devm_free_irq(&pdev->dev, pdev->irq, dev); + pci_free_irq_vectors(pdev); +} + +#ifdef CONFIG_PM +static int mt7615_pci_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct mt76_dev *mdev = pci_get_drvdata(pdev); + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); + bool hif_suspend; + int i, err; + + err = mt76_connac_pm_wake(&dev->mphy, &dev->pm); + if (err < 0) + return err; + + hif_suspend = !test_bit(MT76_STATE_SUSPEND, &dev->mphy.state) && + mt7615_firmware_offload(dev); + if (hif_suspend) { + err = mt76_connac_mcu_set_hif_suspend(mdev, true); + if (err) + return err; + } + + napi_disable(&mdev->tx_napi); + mt76_worker_disable(&mdev->tx_worker); + + mt76_for_each_q_rx(mdev, i) { + napi_disable(&mdev->napi[i]); + } + tasklet_kill(&dev->irq_tasklet); + + mt7615_dma_reset(dev); + + err = mt7615_wait_pdma_busy(dev); + if (err) + goto restore; + + if (is_mt7663(mdev)) { + mt76_set(dev, MT_PDMA_SLP_PROT, MT_PDMA_AXI_SLPPROT_ENABLE); + if (!mt76_poll_msec(dev, MT_PDMA_SLP_PROT, + MT_PDMA_AXI_SLPPROT_RDY, + MT_PDMA_AXI_SLPPROT_RDY, 1000)) { + dev_err(mdev->dev, "PDMA sleep protection failed\n"); + err = -EIO; + goto restore; + } + } + + pci_enable_wake(pdev, pci_choose_state(pdev, state), true); + pci_save_state(pdev); + err = pci_set_power_state(pdev, pci_choose_state(pdev, state)); + if (err) + goto restore; + + err = mt7615_mcu_set_fw_ctrl(dev); + if (err) + goto restore; + + return 0; + +restore: + mt76_for_each_q_rx(mdev, i) { + napi_enable(&mdev->napi[i]); + } + napi_enable(&mdev->tx_napi); + if (hif_suspend) + mt76_connac_mcu_set_hif_suspend(mdev, false); + + return err; +} + +static int mt7615_pci_resume(struct pci_dev *pdev) +{ + struct mt76_dev *mdev = pci_get_drvdata(pdev); + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); + bool pdma_reset; + int i, err; + + 
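+ /* reclaim driver ownership (driver own) so the chip is awake before PCI state is restored */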
err = mt7615_mcu_set_drv_ctrl(dev); + if (err < 0) + return err; + + err = pci_set_power_state(pdev, PCI_D0); + if (err) + return err; + + pci_restore_state(pdev); + + if (is_mt7663(&dev->mt76)) { + mt76_clear(dev, MT_PDMA_SLP_PROT, MT_PDMA_AXI_SLPPROT_ENABLE); + mt76_wr(dev, MT_PCIE_IRQ_ENABLE, 1); + } + + pdma_reset = !mt76_rr(dev, MT_WPDMA_TX_RING0_CTRL0) && + !mt76_rr(dev, MT_WPDMA_TX_RING0_CTRL1); + if (pdma_reset) + dev_err(mdev->dev, "PDMA engine must be reinitialized\n"); + + mt76_worker_enable(&mdev->tx_worker); + local_bh_disable(); + mt76_for_each_q_rx(mdev, i) { + napi_enable(&mdev->napi[i]); + napi_schedule(&mdev->napi[i]); + } + napi_enable(&mdev->tx_napi); + napi_schedule(&mdev->tx_napi); + local_bh_enable(); + + if (!test_bit(MT76_STATE_SUSPEND, &dev->mphy.state) && + mt7615_firmware_offload(dev)) + err = mt76_connac_mcu_set_hif_suspend(mdev, false); + + return err; +} +#endif /* CONFIG_PM */ + +struct pci_driver mt7615_pci_driver = { + .name = KBUILD_MODNAME, + .id_table = mt7615_pci_device_table, + .probe = mt7615_pci_probe, + .remove = mt7615_pci_remove, +#ifdef CONFIG_PM + .suspend = mt7615_pci_suspend, + .resume = mt7615_pci_resume, +#endif /* CONFIG_PM */ +}; + +MODULE_DEVICE_TABLE(pci, mt7615_pci_device_table); +MODULE_FIRMWARE(MT7615_FIRMWARE_CR4); +MODULE_FIRMWARE(MT7615_FIRMWARE_N9); +MODULE_FIRMWARE(MT7615_ROM_PATCH); +MODULE_FIRMWARE(MT7663_OFFLOAD_FIRMWARE_N9); +MODULE_FIRMWARE(MT7663_OFFLOAD_ROM_PATCH); +MODULE_FIRMWARE(MT7663_FIRMWARE_N9); +MODULE_FIRMWARE(MT7663_ROM_PATCH); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c new file mode 100644 index 000000000..87b4aa52e --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c @@ -0,0 +1,186 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2019 MediaTek Inc. 
+ * + * Author: Roy Luo + * Ryder Lee + * Felix Fietkau + * Lorenzo Bianconi + */ + +#include +#include "mt7615.h" +#include "mac.h" +#include "eeprom.h" + +static void mt7615_pci_init_work(struct work_struct *work) +{ + struct mt7615_dev *dev = container_of(work, struct mt7615_dev, + mcu_work); + int i, ret; + + ret = mt7615_mcu_init(dev); + for (i = 0; (ret == -EAGAIN) && (i < 10); i++) { + msleep(200); + ret = mt7615_mcu_init(dev); + } + + if (ret) + return; + + mt7615_init_work(dev); +} + +static int mt7615_init_hardware(struct mt7615_dev *dev) +{ + u32 addr = mt7615_reg_map(dev, MT_EFUSE_BASE); + int ret, idx; + + mt76_wr(dev, MT_INT_SOURCE_CSR, ~0); + + INIT_WORK(&dev->mcu_work, mt7615_pci_init_work); + ret = mt7615_eeprom_init(dev, addr); + if (ret < 0) + return ret; + + if (is_mt7663(&dev->mt76)) { + /* Reset RGU */ + mt76_clear(dev, MT_MCU_CIRQ_IRQ_SEL(4), BIT(1)); + mt76_set(dev, MT_MCU_CIRQ_IRQ_SEL(4), BIT(1)); + } + + ret = mt7615_dma_init(dev); + if (ret) + return ret; + + set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state); + + /* Beacon and mgmt frames should occupy wcid 0 */ + idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7615_WTBL_STA - 1); + if (idx) + return -ENOSPC; + + dev->mt76.global_wcid.idx = idx; + dev->mt76.global_wcid.hw_key_idx = -1; + rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid); + + return 0; +} + +static void +mt7615_led_set_config(struct led_classdev *led_cdev, + u8 delay_on, u8 delay_off) +{ + struct mt7615_dev *dev; + struct mt76_dev *mt76; + u32 val, addr; + + mt76 = container_of(led_cdev, struct mt76_dev, led_cdev); + dev = container_of(mt76, struct mt7615_dev, mt76); + + if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) + return; + + val = FIELD_PREP(MT_LED_STATUS_DURATION, 0xffff) | + FIELD_PREP(MT_LED_STATUS_OFF, delay_off) | + FIELD_PREP(MT_LED_STATUS_ON, delay_on); + + addr = mt7615_reg_map(dev, MT_LED_STATUS_0(mt76->led_pin)); + mt76_wr(dev, addr, val); + addr = mt7615_reg_map(dev, MT_LED_STATUS_1(mt76->led_pin)); + mt76_wr(dev, addr, val); + + val = MT_LED_CTRL_REPLAY(mt76->led_pin) | + MT_LED_CTRL_KICK(mt76->led_pin); + if (mt76->led_al) + val |= MT_LED_CTRL_POLARITY(mt76->led_pin); + addr = mt7615_reg_map(dev, MT_LED_CTRL); + mt76_wr(dev, addr, val); + + mt76_connac_pm_unref(&dev->mphy, &dev->pm); +} + +static int +mt7615_led_set_blink(struct led_classdev *led_cdev, + unsigned long *delay_on, + unsigned long *delay_off) +{ + u8 delta_on, delta_off; + + delta_off = max_t(u8, *delay_off / 10, 1); + delta_on = max_t(u8, *delay_on / 10, 1); + + mt7615_led_set_config(led_cdev, delta_on, delta_off); + + return 0; +} + +static void +mt7615_led_set_brightness(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + if (!brightness) + mt7615_led_set_config(led_cdev, 0, 0xff); + else + mt7615_led_set_config(led_cdev, 0xff, 0); +} + +int mt7615_register_device(struct mt7615_dev *dev) +{ + int ret; + + mt7615_init_device(dev); + INIT_WORK(&dev->reset_work, mt7615_mac_reset_work); + + /* init led callbacks */ + if (IS_ENABLED(CONFIG_MT76_LEDS)) { + dev->mt76.led_cdev.brightness_set = mt7615_led_set_brightness; + dev->mt76.led_cdev.blink_set = mt7615_led_set_blink; + } + + ret = mt7622_wmac_init(dev); + if (ret) + return ret; + + ret = mt7615_init_hardware(dev); + if (ret) + return ret; + + ret = mt76_register_device(&dev->mt76, true, mt76_rates, + ARRAY_SIZE(mt76_rates)); + if (ret) + return ret; + + ret = mt7615_thermal_init(dev); + if (ret) + return ret; + + ieee80211_queue_work(mt76_hw(dev), &dev->mcu_work); + 
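+ /* populate per-band tx power limits from the EEPROM data */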
mt7615_init_txpower(dev, &dev->mphy.sband_2g.sband); + mt7615_init_txpower(dev, &dev->mphy.sband_5g.sband); + + if (dev->dbdc_support) { + ret = mt7615_register_ext_phy(dev); + if (ret) + return ret; + } + + return mt7615_init_debugfs(dev); +} + +void mt7615_unregister_device(struct mt7615_dev *dev) +{ + bool mcu_running; + + mcu_running = mt7615_wait_for_mcu_init(dev); + + mt7615_unregister_ext_phy(dev); + mt76_unregister_device(&dev->mt76); + if (mcu_running) + mt7615_mcu_exit(dev); + + mt7615_tx_token_put(dev); + mt7615_dma_cleanup(dev); + tasklet_disable(&dev->irq_tasklet); + + mt76_free_device(&dev->mt76); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c new file mode 100644 index 000000000..0019890fd --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c @@ -0,0 +1,293 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2020 MediaTek Inc. + * + * Author: Ryder Lee + * Roy Luo + * Felix Fietkau + * Lorenzo Bianconi + */ + +#include +#include + +#include "mt7615.h" +#include "../dma.h" +#include "mac.h" + +static void +mt7615_write_fw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info, + void *txp_ptr, u32 id) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb); + struct ieee80211_key_conf *key = info->control.hw_key; + struct ieee80211_vif *vif = info->control.vif; + struct mt76_connac_fw_txp *txp = txp_ptr; + u8 *rept_wds_wcid = (u8 *)&txp->rept_wds_wcid; + int nbuf = tx_info->nbuf - 1; + int i; + + for (i = 0; i < nbuf; i++) { + txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr); + txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len); + } + txp->nbuf = nbuf; + + /* pass partial skb header to fw */ + tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp); + tx_info->buf[1].len = MT_CT_PARSE_LEN; + tx_info->buf[1].skip_unmap = true; + tx_info->nbuf = MT_CT_DMA_BUF_NUM; + + txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD); + + if (!key) + txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME); + + if (ieee80211_is_mgmt(hdr->frame_control)) + txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME); + + if (vif) { + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + + txp->bss_idx = mvif->idx; + } + + txp->token = cpu_to_le16(id); + *rept_wds_wcid = 0xff; +} + +int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, + enum mt76_txq_id qid, struct mt76_wcid *wcid, + struct ieee80211_sta *sta, + struct mt76_tx_info *tx_info) +{ + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb); + struct ieee80211_key_conf *key = info->control.hw_key; + int pid, id; + u8 *txwi = (u8 *)txwi_ptr; + struct mt76_txwi_cache *t; + struct mt7615_sta *msta; + void *txp; + + msta = wcid ? 
container_of(wcid, struct mt7615_sta, wcid) : NULL; + if (!wcid) + wcid = &dev->mt76.global_wcid; + + if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && msta) { + struct mt7615_phy *phy = &dev->phy; + u8 phy_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2; + + if (phy_idx && mdev->phys[MT_BAND1]) + phy = mdev->phys[MT_BAND1]->priv; + + spin_lock_bh(&dev->mt76.lock); + mt7615_mac_set_rates(phy, msta, &info->control.rates[0], + msta->rates); + spin_unlock_bh(&dev->mt76.lock); + } + + t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size); + t->skb = tx_info->skb; + + id = mt76_token_get(mdev, &t); + if (id < 0) + return id; + + pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb); + mt7615_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, sta, + pid, key, qid, false); + + txp = txwi + MT_TXD_SIZE; + memset(txp, 0, sizeof(struct mt76_connac_txp_common)); + if (is_mt7615(&dev->mt76)) + mt7615_write_fw_txp(dev, tx_info, txp, id); + else + mt76_connac_write_hw_txp(mdev, tx_info, txp, id); + + tx_info->skb = DMA_DUMMY_DATA; + + return 0; +} + +void mt7615_dma_reset(struct mt7615_dev *dev) +{ + int i; + + mt76_clear(dev, MT_WPDMA_GLO_CFG, + MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN | + MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE); + + usleep_range(1000, 2000); + + for (i = 0; i < __MT_TXQ_MAX; i++) + mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true); + + for (i = 0; i < __MT_MCUQ_MAX; i++) + mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true); + + mt76_for_each_q_rx(&dev->mt76, i) + mt76_queue_rx_reset(dev, i); + + mt76_tx_status_check(&dev->mt76, true); + + mt7615_dma_start(dev); +} +EXPORT_SYMBOL_GPL(mt7615_dma_reset); + +static void +mt7615_hif_int_event_trigger(struct mt7615_dev *dev, u8 event) +{ + u32 reg = MT_MCU_INT_EVENT; + + if (is_mt7663(&dev->mt76)) + reg = MT7663_MCU_INT_EVENT; + + mt76_wr(dev, reg, event); + + mt7622_trigger_hif_int(dev, true); + mt7622_trigger_hif_int(dev, false); +} + +static bool +mt7615_wait_reset_state(struct mt7615_dev *dev, u32 state) +{ + bool ret; + + ret = wait_event_timeout(dev->reset_wait, + (READ_ONCE(dev->reset_state) & state), + MT7615_RESET_TIMEOUT); + WARN(!ret, "Timeout waiting for MCU reset state %x\n", state); + return ret; +} + +static void +mt7615_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif) +{ + struct ieee80211_hw *hw = priv; + struct mt7615_dev *dev = mt7615_hw_dev(hw); + + switch (vif->type) { + case NL80211_IFTYPE_MESH_POINT: + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_AP: + mt7615_mcu_add_beacon(dev, hw, vif, + vif->bss_conf.enable_beacon); + break; + default: + break; + } +} + +static void +mt7615_update_beacons(struct mt7615_dev *dev) +{ + struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1]; + + ieee80211_iterate_active_interfaces(dev->mt76.hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7615_update_vif_beacon, dev->mt76.hw); + + if (!mphy_ext) + return; + + ieee80211_iterate_active_interfaces(mphy_ext->hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7615_update_vif_beacon, mphy_ext->hw); +} + +void mt7615_mac_reset_work(struct work_struct *work) +{ + struct mt7615_phy *phy2; + struct mt76_phy *ext_phy; + struct mt7615_dev *dev; + unsigned long timeout; + int i; + + dev = container_of(work, struct mt7615_dev, reset_work); + ext_phy = dev->mt76.phys[MT_BAND1]; + phy2 = ext_phy ? 
ext_phy->priv : NULL; + + if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_PDMA)) + return; + + ieee80211_stop_queues(mt76_hw(dev)); + if (ext_phy) + ieee80211_stop_queues(ext_phy->hw); + + set_bit(MT76_RESET, &dev->mphy.state); + set_bit(MT76_MCU_RESET, &dev->mphy.state); + wake_up(&dev->mt76.mcu.wait); + cancel_delayed_work_sync(&dev->mphy.mac_work); + del_timer_sync(&dev->phy.roc_timer); + cancel_work_sync(&dev->phy.roc_work); + if (phy2) { + set_bit(MT76_RESET, &phy2->mt76->state); + cancel_delayed_work_sync(&phy2->mt76->mac_work); + del_timer_sync(&phy2->roc_timer); + cancel_work_sync(&phy2->roc_work); + } + + /* lock/unlock all queues to ensure that no tx is pending */ + mt76_txq_schedule_all(&dev->mphy); + if (ext_phy) + mt76_txq_schedule_all(ext_phy); + + mt76_worker_disable(&dev->mt76.tx_worker); + mt76_for_each_q_rx(&dev->mt76, i) + napi_disable(&dev->mt76.napi[i]); + napi_disable(&dev->mt76.tx_napi); + + mt7615_mutex_acquire(dev); + + mt7615_hif_int_event_trigger(dev, MT_MCU_INT_EVENT_PDMA_STOPPED); + + if (mt7615_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) { + mt7615_dma_reset(dev); + + mt7615_tx_token_put(dev); + idr_init(&dev->mt76.token); + + mt76_wr(dev, MT_WPDMA_MEM_RNG_ERR, 0); + + mt7615_hif_int_event_trigger(dev, MT_MCU_INT_EVENT_PDMA_INIT); + mt7615_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE); + } + + clear_bit(MT76_MCU_RESET, &dev->mphy.state); + clear_bit(MT76_RESET, &dev->mphy.state); + if (phy2) + clear_bit(MT76_RESET, &phy2->mt76->state); + + mt76_worker_enable(&dev->mt76.tx_worker); + + local_bh_disable(); + napi_enable(&dev->mt76.tx_napi); + napi_schedule(&dev->mt76.tx_napi); + + mt76_for_each_q_rx(&dev->mt76, i) { + napi_enable(&dev->mt76.napi[i]); + napi_schedule(&dev->mt76.napi[i]); + } + local_bh_enable(); + + ieee80211_wake_queues(mt76_hw(dev)); + if (ext_phy) + ieee80211_wake_queues(ext_phy->hw); + + mt7615_hif_int_event_trigger(dev, MT_MCU_INT_EVENT_RESET_DONE); + mt7615_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE); + + mt7615_update_beacons(dev); + + mt7615_mutex_release(dev); + + timeout = mt7615_get_macwork_timeout(dev); + ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work, + timeout); + if (phy2) + ieee80211_queue_delayed_work(ext_phy->hw, + &phy2->mt76->mac_work, timeout); + +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h new file mode 100644 index 000000000..6712ad9fa --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h @@ -0,0 +1,609 @@ +/* SPDX-License-Identifier: ISC */ +/* Copyright (C) 2019 MediaTek Inc. 
*/ + +#ifndef __MT7615_REGS_H +#define __MT7615_REGS_H + +enum mt7615_reg_base { + MT_TOP_CFG_BASE, + MT_HW_BASE, + MT_DMA_SHDL_BASE, + MT_PCIE_REMAP_2, + MT_ARB_BASE, + MT_HIF_BASE, + MT_CSR_BASE, + MT_PLE_BASE, + MT_PSE_BASE, + MT_CFG_BASE, + MT_AGG_BASE, + MT_TMAC_BASE, + MT_RMAC_BASE, + MT_DMA_BASE, + MT_PF_BASE, + MT_WTBL_BASE_ON, + MT_WTBL_BASE_OFF, + MT_LPON_BASE, + MT_MIB_BASE, + MT_WTBL_BASE_ADDR, + MT_PCIE_REMAP_BASE2, + MT_TOP_MISC_BASE, + MT_EFUSE_ADDR_BASE, + MT_PP_BASE, + __MT_BASE_MAX, +}; + +#define MT_HW_INFO_BASE ((dev)->reg_map[MT_HW_BASE]) +#define MT_HW_INFO(ofs) (MT_HW_INFO_BASE + (ofs)) +#define MT_HW_REV MT_HW_INFO(0x000) +#define MT_HW_CHIPID MT_HW_INFO(0x008) +#define MT_TOP_STRAP_STA MT_HW_INFO(0x010) +#define MT_TOP_3NSS BIT(24) + +#define MT_TOP_OFF_RSV 0x1128 +#define MT_TOP_OFF_RSV_FW_STATE GENMASK(18, 16) + +#define MT_TOP_MISC2 ((dev)->reg_map[MT_TOP_CFG_BASE] + 0x134) +#define MT_TOP_MISC2_FW_STATE GENMASK(2, 0) + +#define MT7663_TOP_MISC2_FW_STATE GENMASK(3, 1) +#define MT_TOP_MISC2_FW_PWR_ON BIT(1) + +#define MT_MCU_BASE 0x2000 +#define MT_MCU(ofs) (MT_MCU_BASE + (ofs)) + +#define MT_MCU_PCIE_REMAP_1 MT_MCU(0x500) +#define MT_MCU_PCIE_REMAP_1_OFFSET GENMASK(17, 0) +#define MT_MCU_PCIE_REMAP_1_BASE GENMASK(31, 18) +#define MT_PCIE_REMAP_BASE_1 0x40000 + +#define MT_MCU_PCIE_REMAP_2 ((dev)->reg_map[MT_PCIE_REMAP_2]) +#define MT_MCU_PCIE_REMAP_2_OFFSET GENMASK(18, 0) +#define MT_MCU_PCIE_REMAP_2_BASE GENMASK(31, 19) +#define MT_PCIE_REMAP_BASE_2 ((dev)->reg_map[MT_PCIE_REMAP_BASE2]) + +#define MT_MCU_CIRQ_BASE 0xc0000 +#define MT_MCU_CIRQ(ofs) (MT_MCU_CIRQ_BASE + (ofs)) + +#define MT_MCU_CIRQ_IRQ_SEL(n) MT_MCU_CIRQ((n) << 2) + +#define MT_HIF(ofs) ((dev)->reg_map[MT_HIF_BASE] + (ofs)) +#define MT_HIF_RST MT_HIF(0x100) +#define MT_HIF_LOGIC_RST_N BIT(4) + +#define MT_PDMA_SLP_PROT MT_HIF(0x154) +#define MT_PDMA_AXI_SLPPROT_ENABLE BIT(0) +#define MT_PDMA_AXI_SLPPROT_RDY BIT(16) + +#define MT_PDMA_BUSY_STATUS MT_HIF(0x168) +#define MT_PDMA_TX_IDX_BUSY BIT(2) +#define MT_PDMA_BUSY_IDX BIT(31) + +#define MT_WPDMA_TX_RING0_CTRL0 MT_HIF(0x300) +#define MT_WPDMA_TX_RING0_CTRL1 MT_HIF(0x304) + +#define MT7663_MCU_PCIE_REMAP_2_OFFSET GENMASK(15, 0) +#define MT7663_MCU_PCIE_REMAP_2_BASE GENMASK(31, 16) + +#define MT_HIF2_BASE 0xf0000 +#define MT_HIF2(ofs) (MT_HIF2_BASE + (ofs)) +#define MT_PCIE_IRQ_ENABLE MT_HIF2(0x188) +#define MT_PCIE_DOORBELL_PUSH MT_HIF2(0x1484) + +#define MT_CFG_LPCR_HOST MT_HIF(0x1f0) +#define MT_CFG_LPCR_HOST_FW_OWN BIT(0) +#define MT_CFG_LPCR_HOST_DRV_OWN BIT(1) + +#define MT_MCU2HOST_INT_STATUS MT_HIF(0x1f0) +#define MT_MCU2HOST_INT_ENABLE MT_HIF(0x1f4) + +#define MT7663_MCU_INT_EVENT MT_HIF(0x108) +#define MT_MCU_INT_EVENT MT_HIF(0x1f8) +#define MT_MCU_INT_EVENT_PDMA_STOPPED BIT(0) +#define MT_MCU_INT_EVENT_PDMA_INIT BIT(1) +#define MT_MCU_INT_EVENT_SER_TRIGGER BIT(2) +#define MT_MCU_INT_EVENT_RESET_DONE BIT(3) + +#define MT_INT_SOURCE_CSR MT_HIF(0x200) +#define MT_INT_MASK_CSR MT_HIF(0x204) +#define MT_DELAY_INT_CFG MT_HIF(0x210) + +#define MT_INT_RX_DONE(_n) BIT(_n) +#define MT_INT_RX_DONE_ALL GENMASK(1, 0) +#define MT_INT_TX_DONE_ALL GENMASK(19, 4) +#define MT_INT_TX_DONE(_n) BIT((_n) + 4) +#define MT7663_INT_MCU_CMD BIT(29) +#define MT_INT_MCU_CMD BIT(30) + +#define MT_WPDMA_GLO_CFG MT_HIF(0x208) +#define MT_WPDMA_GLO_CFG_TX_DMA_EN BIT(0) +#define MT_WPDMA_GLO_CFG_TX_DMA_BUSY BIT(1) +#define MT_WPDMA_GLO_CFG_RX_DMA_EN BIT(2) +#define MT_WPDMA_GLO_CFG_RX_DMA_BUSY BIT(3) +#define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE GENMASK(5, 4) +#define 
MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE BIT(6) +#define MT_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7) +#define MT_WPDMA_GLO_CFG_TX_BT_SIZE_BIT0 BIT(9) +#define MT_WPDMA_GLO_CFG_BYPASS_TX_SCH BIT(9) /* MT7622 */ +#define MT_WPDMA_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10) +#define MT_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12) +#define MT_WPDMA_GLO_CFG_TX_BT_SIZE_BIT21 GENMASK(23, 22) +#define MT_WPDMA_GLO_CFG_SW_RESET BIT(24) +#define MT_WPDMA_GLO_CFG_FIRST_TOKEN_ONLY BIT(26) +#define MT_WPDMA_GLO_CFG_OMIT_TX_INFO BIT(28) + +#define MT_WPDMA_RST_IDX MT_HIF(0x20c) + +#define MT_WPDMA_MEM_RNG_ERR MT_HIF(0x224) + +#define MT_MCU_CMD MT_HIF(0x234) +#define MT_MCU_CMD_CLEAR_FW_OWN BIT(0) +#define MT_MCU_CMD_STOP_PDMA_FW_RELOAD BIT(1) +#define MT_MCU_CMD_STOP_PDMA BIT(2) +#define MT_MCU_CMD_RESET_DONE BIT(3) +#define MT_MCU_CMD_RECOVERY_DONE BIT(4) +#define MT_MCU_CMD_NORMAL_STATE BIT(5) +#define MT_MCU_CMD_LMAC_ERROR BIT(24) +#define MT_MCU_CMD_PSE_ERROR BIT(25) +#define MT_MCU_CMD_PLE_ERROR BIT(26) +#define MT_MCU_CMD_PDMA_ERROR BIT(27) +#define MT_MCU_CMD_PCIE_ERROR BIT(28) +#define MT_MCU_CMD_ERROR_MASK (GENMASK(5, 1) | GENMASK(28, 24)) +#define MT7663_MCU_CMD_ERROR_MASK GENMASK(5, 2) + +#define MT_TX_RING_BASE MT_HIF(0x300) +#define MT_RX_RING_BASE MT_HIF(0x400) + +#define MT_WPDMA_GLO_CFG1 MT_HIF(0x500) +#define MT_WPDMA_TX_PRE_CFG MT_HIF(0x510) +#define MT_WPDMA_RX_PRE_CFG MT_HIF(0x520) +#define MT_WPDMA_ABT_CFG MT_HIF(0x530) +#define MT_WPDMA_ABT_CFG1 MT_HIF(0x534) + +#define MT_CSR(ofs) ((dev)->reg_map[MT_CSR_BASE] + (ofs)) +#define MT_CONN_HIF_ON_LPCTL MT_CSR(0x000) + +#define MT_PLE(ofs) ((dev)->reg_map[MT_PLE_BASE] + (ofs)) + +#define MT_PLE_PG_HIF0_GROUP MT_PLE(0x110) +#define MT_HIF0_MIN_QUOTA GENMASK(11, 0) +#define MT_PLE_FL_Q0_CTRL MT_PLE(0x1b0) +#define MT_PLE_FL_Q1_CTRL MT_PLE(0x1b4) +#define MT_PLE_FL_Q2_CTRL MT_PLE(0x1b8) +#define MT_PLE_FL_Q3_CTRL MT_PLE(0x1bc) + +#define MT_PLE_AC_QEMPTY(ac, n) MT_PLE(0x300 + 0x10 * (ac) + \ + ((n) << 2)) + +#define MT_PSE(ofs) ((dev)->reg_map[MT_PSE_BASE] + (ofs)) +#define MT_PSE_PG_HIF0_GROUP MT_PSE(0x110) +#define MT_HIF0_MIN_QUOTA GENMASK(11, 0) +#define MT_PSE_PG_HIF1_GROUP MT_PSE(0x118) +#define MT_HIF1_MIN_QUOTA GENMASK(11, 0) +#define MT_PSE_QUEUE_EMPTY MT_PSE(0x0b4) +#define MT_HIF_0_EMPTY_MASK BIT(16) +#define MT_HIF_1_EMPTY_MASK BIT(17) +#define MT_HIF_ALL_EMPTY_MASK GENMASK(17, 16) +#define MT_PSE_PG_INFO MT_PSE(0x194) +#define MT_PSE_SRC_CNT GENMASK(27, 16) + +#define MT_PP(ofs) ((dev)->reg_map[MT_PP_BASE] + (ofs)) +#define MT_PP_TXDWCNT MT_PP(0x0) +#define MT_PP_TXDWCNT_TX0_ADD_DW_CNT GENMASK(7, 0) +#define MT_PP_TXDWCNT_TX1_ADD_DW_CNT GENMASK(15, 8) + +#define MT_WF_PHY_BASE 0x82070000 +#define MT_WF_PHY(ofs) (MT_WF_PHY_BASE + (ofs)) + +#define MT_WF_PHY_WF2_RFCTRL0(n) MT_WF_PHY(0x1900 + (n) * 0x400) +#define MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN BIT(9) + +#define MT_WF_PHY_R0_PHYMUX_5(_phy) MT_WF_PHY(0x0614 + ((_phy) << 9)) +#define MT7663_WF_PHY_R0_PHYMUX_5 MT_WF_PHY(0x0414) + +#define MT_WF_PHY_R0_PHYCTRL_STS0(_phy) MT_WF_PHY(0x020c + ((_phy) << 9)) +#define MT_WF_PHYCTRL_STAT_PD_OFDM GENMASK(31, 16) +#define MT_WF_PHYCTRL_STAT_PD_CCK GENMASK(15, 0) + +#define MT7663_WF_PHY_R0_PHYCTRL_STS0(_phy) MT_WF_PHY(0x0210 + ((_phy) << 12)) + +#define MT_WF_PHY_R0_PHYCTRL_STS5(_phy) MT_WF_PHY(0x0220 + ((_phy) << 9)) +#define MT_WF_PHYCTRL_STAT_MDRDY_OFDM GENMASK(31, 16) +#define MT_WF_PHYCTRL_STAT_MDRDY_CCK GENMASK(15, 0) + +#define MT7663_WF_PHY_R0_PHYCTRL_STS5(_phy) MT_WF_PHY(0x0224 + ((_phy) << 12)) + +#define MT_WF_PHY_MIN_PRI_PWR(_phy) MT_WF_PHY((_phy) ? 
0x084 : 0x229c) +#define MT_WF_PHY_PD_OFDM_MASK(_phy) ((_phy) ? GENMASK(24, 16) : \ + GENMASK(28, 20)) +#define MT_WF_PHY_PD_OFDM(_phy, v) ((v) << ((_phy) ? 16 : 20)) +#define MT_WF_PHY_PD_BLK(_phy) ((_phy) ? BIT(25) : BIT(19)) + +#define MT7663_WF_PHY_MIN_PRI_PWR(_phy) MT_WF_PHY((_phy) ? 0x2aec : 0x22f0) + +#define MT_WF_PHY_RXTD_BASE MT_WF_PHY(0x2200) +#define MT_WF_PHY_RXTD(_n) (MT_WF_PHY_RXTD_BASE + ((_n) << 2)) + +#define MT7663_WF_PHY_RXTD(_n) (MT_WF_PHY(0x25b0) + ((_n) << 2)) + +#define MT_WF_PHY_RXTD_CCK_PD(_phy) MT_WF_PHY((_phy) ? 0x2314 : 0x2310) +#define MT_WF_PHY_PD_CCK_MASK(_phy) (_phy) ? GENMASK(31, 24) : \ + GENMASK(8, 1) +#define MT_WF_PHY_PD_CCK(_phy, v) ((v) << ((_phy) ? 24 : 1)) + +#define MT7663_WF_PHY_RXTD_CCK_PD(_phy) MT_WF_PHY((_phy) ? 0x2350 : 0x234c) + +#define MT_WF_PHY_RXTD2_BASE MT_WF_PHY(0x2a00) +#define MT_WF_PHY_RXTD2(_n) (MT_WF_PHY_RXTD2_BASE + ((_n) << 2)) + +#define MT_WF_PHY_RFINTF3_0(_n) MT_WF_PHY(0x1100 + (_n) * 0x400) +#define MT_WF_PHY_RFINTF3_0_ANT GENMASK(7, 4) + +#define MT_WF_CFG_BASE ((dev)->reg_map[MT_CFG_BASE]) +#define MT_WF_CFG(ofs) (MT_WF_CFG_BASE + (ofs)) + +#define MT_CFG_CCR MT_WF_CFG(0x000) +#define MT_CFG_CCR_MAC_D1_1X_GC_EN BIT(24) +#define MT_CFG_CCR_MAC_D0_1X_GC_EN BIT(25) +#define MT_CFG_CCR_MAC_D1_2X_GC_EN BIT(30) +#define MT_CFG_CCR_MAC_D0_2X_GC_EN BIT(31) + +#define MT_WF_AGG_BASE ((dev)->reg_map[MT_AGG_BASE]) +#define MT_WF_AGG(ofs) (MT_WF_AGG_BASE + (ofs)) + +#define MT_AGG_ARCR MT_WF_AGG(0x010) +#define MT_AGG_ARCR_INIT_RATE1 BIT(0) +#define MT_AGG_ARCR_RTS_RATE_THR GENMASK(12, 8) +#define MT_AGG_ARCR_RATE_DOWN_RATIO GENMASK(17, 16) +#define MT_AGG_ARCR_RATE_DOWN_RATIO_EN BIT(19) +#define MT_AGG_ARCR_RATE_UP_EXTRA_TH GENMASK(22, 20) + +#define MT_AGG_ARUCR(_band) MT_WF_AGG(0x018 + (_band) * 0x100) +#define MT_AGG_ARDCR(_band) MT_WF_AGG(0x01c + (_band) * 0x100) +#define MT_AGG_ARxCR_LIMIT_SHIFT(_n) (4 * (_n)) +#define MT_AGG_ARxCR_LIMIT(_n) GENMASK(2 + \ + MT_AGG_ARxCR_LIMIT_SHIFT(_n), \ + MT_AGG_ARxCR_LIMIT_SHIFT(_n)) + +#define MT_AGG_ASRCR0 MT_WF_AGG(0x060) +#define MT_AGG_ASRCR1 MT_WF_AGG(0x064) +#define MT_AGG_ASRCR_RANGE(val, n) (((val) >> ((n) << 3)) & GENMASK(5, 0)) + +#define MT_AGG_ACR(_band) MT_WF_AGG(0x070 + (_band) * 0x100) +#define MT_AGG_ACR_NO_BA_RULE BIT(0) +#define MT_AGG_ACR_NO_BA_AR_RULE BIT(1) +#define MT_AGG_ACR_PKT_TIME_EN BIT(2) +#define MT_AGG_ACR_CFEND_RATE GENMASK(15, 4) +#define MT_AGG_ACR_BAR_RATE GENMASK(31, 20) + +#define MT_AGG_SCR MT_WF_AGG(0x0fc) +#define MT_AGG_SCR_NLNAV_MID_PTEC_DIS BIT(3) + +#define MT_WF_ARB_BASE ((dev)->reg_map[MT_ARB_BASE]) +#define MT_WF_ARB(ofs) (MT_WF_ARB_BASE + (ofs)) + +#define MT_ARB_RQCR MT_WF_ARB(0x070) +#define MT_ARB_RQCR_RX_START BIT(0) +#define MT_ARB_RQCR_RXV_START BIT(4) +#define MT_ARB_RQCR_RXV_R_EN BIT(7) +#define MT_ARB_RQCR_RXV_T_EN BIT(8) +#define MT_ARB_RQCR_BAND_SHIFT 16 + +#define MT_ARB_SCR MT_WF_ARB(0x080) +#define MT_ARB_SCR_TX0_DISABLE BIT(8) +#define MT_ARB_SCR_RX0_DISABLE BIT(9) +#define MT_ARB_SCR_TX1_DISABLE BIT(10) +#define MT_ARB_SCR_RX1_DISABLE BIT(11) + +#define MT_WF_TMAC_BASE ((dev)->reg_map[MT_TMAC_BASE]) +#define MT_WF_TMAC(ofs) (MT_WF_TMAC_BASE + (ofs)) + +#define MT_TMAC_CDTR MT_WF_TMAC(0x090) +#define MT_TMAC_ODTR MT_WF_TMAC(0x094) +#define MT_TIMEOUT_VAL_PLCP GENMASK(15, 0) +#define MT_TIMEOUT_VAL_CCA GENMASK(31, 16) + +#define MT_TMAC_TRCR(_band) MT_WF_TMAC((_band) ? 0x070 : 0x09c) +#define MT_TMAC_TRCR_CCA_SEL GENMASK(31, 30) +#define MT_TMAC_TRCR_SEC_CCA_SEL GENMASK(29, 28) + +#define MT_TMAC_ICR(_band) MT_WF_TMAC((_band) ? 
0x074 : 0x0a4) +#define MT_IFS_EIFS GENMASK(8, 0) +#define MT_IFS_RIFS GENMASK(14, 10) +#define MT_IFS_SIFS GENMASK(22, 16) +#define MT_IFS_SLOT GENMASK(30, 24) + +#define MT_TMAC_CTCR0 MT_WF_TMAC(0x0f4) +#define MT_TMAC_CTCR0_INS_DDLMT_REFTIME GENMASK(5, 0) +#define MT_TMAC_CTCR0_INS_DDLMT_DENSITY GENMASK(15, 12) +#define MT_TMAC_CTCR0_INS_DDLMT_EN BIT(17) +#define MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN BIT(18) + +#define MT_WF_RMAC_BASE ((dev)->reg_map[MT_RMAC_BASE]) +#define MT_WF_RMAC(ofs) (MT_WF_RMAC_BASE + (ofs)) + +#define MT_WF_RFCR(_band) MT_WF_RMAC((_band) ? 0x100 : 0x000) +#define MT_WF_RFCR_DROP_STBC_MULTI BIT(0) +#define MT_WF_RFCR_DROP_FCSFAIL BIT(1) +#define MT_WF_RFCR_DROP_VERSION BIT(3) +#define MT_WF_RFCR_DROP_PROBEREQ BIT(4) +#define MT_WF_RFCR_DROP_MCAST BIT(5) +#define MT_WF_RFCR_DROP_BCAST BIT(6) +#define MT_WF_RFCR_DROP_MCAST_FILTERED BIT(7) +#define MT_WF_RFCR_DROP_A3_MAC BIT(8) +#define MT_WF_RFCR_DROP_A3_BSSID BIT(9) +#define MT_WF_RFCR_DROP_A2_BSSID BIT(10) +#define MT_WF_RFCR_DROP_OTHER_BEACON BIT(11) +#define MT_WF_RFCR_DROP_FRAME_REPORT BIT(12) +#define MT_WF_RFCR_DROP_CTL_RSV BIT(13) +#define MT_WF_RFCR_DROP_CTS BIT(14) +#define MT_WF_RFCR_DROP_RTS BIT(15) +#define MT_WF_RFCR_DROP_DUPLICATE BIT(16) +#define MT_WF_RFCR_DROP_OTHER_BSS BIT(17) +#define MT_WF_RFCR_DROP_OTHER_UC BIT(18) +#define MT_WF_RFCR_DROP_OTHER_TIM BIT(19) +#define MT_WF_RFCR_DROP_NDPA BIT(20) +#define MT_WF_RFCR_DROP_UNWANTED_CTL BIT(21) + +#define MT_WF_RMAC_MORE(_band) MT_WF_RMAC((_band) ? 0x124 : 0x024) +#define MT_WF_RMAC_MORE_MUAR_MODE GENMASK(31, 30) + +#define MT_WF_RFCR1(_band) MT_WF_RMAC((_band) ? 0x104 : 0x004) +#define MT_WF_RFCR1_DROP_ACK BIT(4) +#define MT_WF_RFCR1_DROP_BF_POLL BIT(5) +#define MT_WF_RFCR1_DROP_BA BIT(6) +#define MT_WF_RFCR1_DROP_CFEND BIT(7) +#define MT_WF_RFCR1_DROP_CFACK BIT(8) + +#define MT_CHFREQ(_band) MT_WF_RMAC((_band) ? 
0x130 : 0x030) + +#define MT_WF_RMAC_MAR0 MT_WF_RMAC(0x025c) +#define MT_WF_RMAC_MAR1 MT_WF_RMAC(0x0260) +#define MT_WF_RMAC_MAR1_ADDR GENMASK(15, 0) +#define MT_WF_RMAC_MAR1_START BIT(16) +#define MT_WF_RMAC_MAR1_WRITE BIT(17) +#define MT_WF_RMAC_MAR1_IDX GENMASK(29, 24) +#define MT_WF_RMAC_MAR1_GROUP GENMASK(31, 30) + +#define MT_WF_RMAC_MIB_TIME0 MT_WF_RMAC(0x03c4) +#define MT_WF_RMAC_MIB_RXTIME_CLR BIT(31) +#define MT_WF_RMAC_MIB_RXTIME_EN BIT(30) + +#define MT_WF_RMAC_MIB_AIRTIME0 MT_WF_RMAC(0x0380) + +#define MT_WF_RMAC_MIB_TIME5 MT_WF_RMAC(0x03d8) +#define MT_WF_RMAC_MIB_TIME6 MT_WF_RMAC(0x03dc) +#define MT_MIB_OBSSTIME_MASK GENMASK(23, 0) + +#define MT_WF_DMA_BASE ((dev)->reg_map[MT_DMA_BASE]) +#define MT_WF_DMA(ofs) (MT_WF_DMA_BASE + (ofs)) + +#define MT_DMA_DCR0 MT_WF_DMA(0x000) +#define MT_DMA_DCR0_MAX_RX_LEN GENMASK(15, 2) +#define MT_DMA_DCR0_DAMSDU_EN BIT(16) +#define MT_DMA_DCR0_RX_VEC_DROP BIT(17) +#define MT_DMA_DCR0_RX_HDR_TRANS_EN BIT(19) + +#define MT_DMA_RCFR0(_band) MT_WF_DMA(0x070 + (_band) * 0x40) +#define MT_DMA_RCFR0_MCU_RX_MGMT BIT(2) +#define MT_DMA_RCFR0_MCU_RX_CTL_NON_BAR BIT(3) +#define MT_DMA_RCFR0_MCU_RX_CTL_BAR BIT(4) +#define MT_DMA_RCFR0_MCU_RX_TDLS BIT(19) +#define MT_DMA_RCFR0_MCU_RX_BYPASS BIT(21) +#define MT_DMA_RCFR0_RX_DROPPED_UCAST GENMASK(25, 24) +#define MT_DMA_RCFR0_RX_DROPPED_MCAST GENMASK(27, 26) + +#define MT_WF_PF_BASE ((dev)->reg_map[MT_PF_BASE]) +#define MT_WF_PF(ofs) (MT_WF_PF_BASE + (ofs)) + +#define MT_WF_PFCR MT_WF_PF(0x000) +#define MT_WF_PFCR_TDLS_EN BIT(9) + +#define MT_WTBL_BASE(dev) ((dev)->reg_map[MT_WTBL_BASE_ADDR]) +#define MT_WTBL_ENTRY_SIZE 256 + +#define MT_WTBL_OFF_BASE ((dev)->reg_map[MT_WTBL_BASE_OFF]) +#define MT_WTBL_OFF(n) (MT_WTBL_OFF_BASE + (n)) + +#define MT_WTBL_W0_KEY_IDX GENMASK(24, 23) +#define MT_WTBL_W0_RX_KEY_VALID BIT(26) +#define MT_WTBL_W0_RX_IK_VALID BIT(27) + +#define MT_WTBL_W2_KEY_TYPE GENMASK(7, 4) + +#define MT_WTBL_UPDATE MT_WTBL_OFF(0x030) +#define MT_WTBL_UPDATE_WLAN_IDX GENMASK(7, 0) +#define MT_WTBL_UPDATE_RXINFO_UPDATE BIT(11) +#define MT_WTBL_UPDATE_ADM_COUNT_CLEAR BIT(12) +#define MT_WTBL_UPDATE_RATE_UPDATE BIT(13) +#define MT_WTBL_UPDATE_TX_COUNT_CLEAR BIT(14) +#define MT_WTBL_UPDATE_BUSY BIT(31) + +#define MT_TOP_MISC(ofs) ((dev)->reg_map[MT_TOP_MISC_BASE] + (ofs)) +#define MT_CONN_ON_MISC MT_TOP_MISC(0x1140) +#define MT_TOP_MISC2_FW_N9_RDY BIT(2) + +#define MT_WTBL_ON_BASE ((dev)->reg_map[MT_WTBL_BASE_ON]) +#define MT_WTBL_ON(_n) (MT_WTBL_ON_BASE + (_n)) + +#define MT_WTBL_RICR0 MT_WTBL_ON(0x010) +#define MT_WTBL_RICR1 MT_WTBL_ON(0x014) + +#define MT_WTBL_RIUCR0 MT_WTBL_ON(0x020) + +#define MT_WTBL_RIUCR1 MT_WTBL_ON(0x024) +#define MT_WTBL_RIUCR1_RATE0 GENMASK(11, 0) +#define MT_WTBL_RIUCR1_RATE1 GENMASK(23, 12) +#define MT_WTBL_RIUCR1_RATE2_LO GENMASK(31, 24) + +#define MT_WTBL_RIUCR2 MT_WTBL_ON(0x028) +#define MT_WTBL_RIUCR2_RATE2_HI GENMASK(3, 0) +#define MT_WTBL_RIUCR2_RATE3 GENMASK(15, 4) +#define MT_WTBL_RIUCR2_RATE4 GENMASK(27, 16) +#define MT_WTBL_RIUCR2_RATE5_LO GENMASK(31, 28) + +#define MT_WTBL_RIUCR3 MT_WTBL_ON(0x02c) +#define MT_WTBL_RIUCR3_RATE5_HI GENMASK(7, 0) +#define MT_WTBL_RIUCR3_RATE6 GENMASK(19, 8) +#define MT_WTBL_RIUCR3_RATE7 GENMASK(31, 20) + +#define MT_WTBL_W5_CHANGE_BW_RATE GENMASK(7, 5) +#define MT_WTBL_W5_SHORT_GI_20 BIT(8) +#define MT_WTBL_W5_SHORT_GI_40 BIT(9) +#define MT_WTBL_W5_SHORT_GI_80 BIT(10) +#define MT_WTBL_W5_SHORT_GI_160 BIT(11) +#define MT_WTBL_W5_BW_CAP GENMASK(13, 12) +#define MT_WTBL_W5_MPDU_FAIL_COUNT GENMASK(25, 23) +#define 
MT_WTBL_W5_MPDU_OK_COUNT GENMASK(28, 26) +#define MT_WTBL_W5_RATE_IDX GENMASK(31, 29) + +#define MT_WTBL_W27_CC_BW_SEL GENMASK(6, 5) + +#define MT_LPON(_n) ((dev)->reg_map[MT_LPON_BASE] + (_n)) + +#define MT_LPON_TCR0(_n) MT_LPON(0x010 + ((_n) * 4)) +#define MT_LPON_TCR2(_n) MT_LPON(0x0f8 + ((_n) - 2) * 4) +#define MT_LPON_TCR_MODE GENMASK(1, 0) +#define MT_LPON_TCR_READ GENMASK(1, 0) +#define MT_LPON_TCR_WRITE BIT(0) +#define MT_LPON_TCR_ADJUST BIT(1) + +#define MT_LPON_UTTR0 MT_LPON(0x018) +#define MT_LPON_UTTR1 MT_LPON(0x01c) + +#define MT_WF_MIB_BASE (dev->reg_map[MT_MIB_BASE]) +#define MT_WF_MIB(_band, ofs) (MT_WF_MIB_BASE + (ofs) + (_band) * 0x200) + +#define MT_WF_MIB_SCR0 MT_WF_MIB(0, 0) +#define MT_MIB_SCR0_AGG_CNT_RANGE_EN BIT(21) + +#define MT_MIB_M0_MISC_CR(_band) MT_WF_MIB(_band, 0x00c) + +#define MT_MIB_SDR3(_band) MT_WF_MIB(_band, 0x014) +#define MT_MIB_SDR3_FCS_ERR_MASK GENMASK(15, 0) + +#define MT_MIB_SDR9(_band) MT_WF_MIB(_band, 0x02c) +#define MT_MIB_SDR9_BUSY_MASK GENMASK(23, 0) + +#define MT_MIB_SDR14(_band) MT_WF_MIB(_band, 0x040) +#define MT_MIB_AMPDU_MPDU_COUNT GENMASK(23, 0) + +#define MT_MIB_SDR15(_band) MT_WF_MIB(_band, 0x044) +#define MT_MIB_AMPDU_ACK_COUNT GENMASK(23, 0) + +#define MT_MIB_SDR16(_band) MT_WF_MIB(_band, 0x048) +#define MT_MIB_SDR16_BUSY_MASK GENMASK(23, 0) + +#define MT_MIB_SDR36(_band) MT_WF_MIB(_band, 0x098) +#define MT_MIB_SDR36_TXTIME_MASK GENMASK(23, 0) +#define MT_MIB_SDR37(_band) MT_WF_MIB(_band, 0x09c) +#define MT_MIB_SDR37_RXTIME_MASK GENMASK(23, 0) + +#define MT_MIB_MB_SDR0(_band, n) MT_WF_MIB(_band, 0x100 + ((n) << 4)) +#define MT_MIB_RTS_RETRIES_COUNT_MASK GENMASK(31, 16) +#define MT_MIB_RTS_COUNT_MASK GENMASK(15, 0) + +#define MT_MIB_MB_SDR1(_band, n) MT_WF_MIB(_band, 0x104 + ((n) << 4)) +#define MT_MIB_BA_MISS_COUNT_MASK GENMASK(15, 0) +#define MT_MIB_ACK_FAIL_COUNT_MASK GENMASK(31, 16) + +#define MT_MIB_ARNG(n) MT_WF_MIB(0, 0x4b8 + ((n) << 2)) + +#define MT_TX_AGG_CNT(_band, n) MT_WF_MIB(_band, 0xa8 + ((n) << 2)) + +#define MT_DMA_SHDL(ofs) (dev->reg_map[MT_DMA_SHDL_BASE] + (ofs)) + +#define MT_DMASHDL_BASE 0x5000a000 +#define MT_DMASHDL_OPTIONAL 0x008 +#define MT_DMASHDL_PAGE 0x00c + +#define MT_DMASHDL_REFILL 0x010 + +#define MT_DMASHDL_PKT_MAX_SIZE 0x01c +#define MT_DMASHDL_PKT_MAX_SIZE_PLE GENMASK(11, 0) +#define MT_DMASHDL_PKT_MAX_SIZE_PSE GENMASK(27, 16) + +#define MT_DMASHDL_GROUP_QUOTA(_n) (0x020 + ((_n) << 2)) +#define MT_DMASHDL_GROUP_QUOTA_MIN GENMASK(11, 0) +#define MT_DMASHDL_GROUP_QUOTA_MAX GENMASK(27, 16) + +#define MT_DMASHDL_SCHED_SET0 0x0b0 +#define MT_DMASHDL_SCHED_SET1 0x0b4 + +#define MT_DMASHDL_Q_MAP(_n) (0x0d0 + ((_n) << 2)) +#define MT_DMASHDL_Q_MAP_MASK GENMASK(3, 0) +#define MT_DMASHDL_Q_MAP_SHIFT(_n) (4 * ((_n) % 8)) + +#define MT_LED_BASE_PHYS 0x80024000 +#define MT_LED_PHYS(_n) (MT_LED_BASE_PHYS + (_n)) + +#define MT_LED_CTRL MT_LED_PHYS(0x00) + +#define MT_LED_CTRL_REPLAY(_n) BIT(0 + (8 * (_n))) +#define MT_LED_CTRL_POLARITY(_n) BIT(1 + (8 * (_n))) +#define MT_LED_CTRL_TX_BLINK_MODE(_n) BIT(2 + (8 * (_n))) +#define MT_LED_CTRL_TX_MANUAL_BLINK(_n) BIT(3 + (8 * (_n))) +#define MT_LED_CTRL_TX_OVER_BLINK(_n) BIT(5 + (8 * (_n))) +#define MT_LED_CTRL_KICK(_n) BIT(7 + (8 * (_n))) + +#define MT_LED_STATUS_0(_n) MT_LED_PHYS(0x10 + ((_n) * 8)) +#define MT_LED_STATUS_1(_n) MT_LED_PHYS(0x14 + ((_n) * 8)) +#define MT_LED_STATUS_OFF GENMASK(31, 24) +#define MT_LED_STATUS_ON GENMASK(23, 16) +#define MT_LED_STATUS_DURATION GENMASK(15, 0) + +#define MT_PDMA_BUSY 0x82000504 +#define MT_PDMA_TX_BUSY BIT(0) +#define 
MT_PDMA_RX_BUSY BIT(1) + +#define MT_EFUSE_BASE ((dev)->reg_map[MT_EFUSE_ADDR_BASE]) +#define MT_EFUSE_BASE_CTRL 0x000 +#define MT_EFUSE_BASE_CTRL_EMPTY BIT(30) + +#define MT_EFUSE_CTRL 0x008 +#define MT_EFUSE_CTRL_AOUT GENMASK(5, 0) +#define MT_EFUSE_CTRL_MODE GENMASK(7, 6) +#define MT_EFUSE_CTRL_LDO_OFF_TIME GENMASK(13, 8) +#define MT_EFUSE_CTRL_LDO_ON_TIME GENMASK(15, 14) +#define MT_EFUSE_CTRL_AIN GENMASK(25, 16) +#define MT_EFUSE_CTRL_VALID BIT(29) +#define MT_EFUSE_CTRL_KICK BIT(30) +#define MT_EFUSE_CTRL_SEL BIT(31) + +#define MT_EFUSE_WDATA(_i) (0x010 + ((_i) * 4)) +#define MT_EFUSE_RDATA(_i) (0x030 + ((_i) * 4)) + +/* INFRACFG host register range on MT7622 */ +#define MT_INFRACFG_MISC 0x700 +#define MT_INFRACFG_MISC_AP2CONN_WAKE BIT(1) + +#define MT_UMAC_BASE 0x7c000000 +#define MT_UMAC(ofs) (MT_UMAC_BASE + (ofs)) +#define MT_UDMA_TX_QSEL MT_UMAC(0x008) +#define MT_FW_DL_EN BIT(3) + +#define MT_UDMA_WLCFG_1 MT_UMAC(0x00c) +#define MT_WL_RX_AGG_PKT_LMT GENMASK(7, 0) +#define MT_WL_TX_TMOUT_LMT GENMASK(27, 8) + +#define MT_UDMA_WLCFG_0 MT_UMAC(0x18) +#define MT_WL_RX_AGG_TO GENMASK(7, 0) +#define MT_WL_RX_AGG_LMT GENMASK(15, 8) +#define MT_WL_TX_TMOUT_FUNC_EN BIT(16) +#define MT_WL_TX_DPH_CHK_EN BIT(17) +#define MT_WL_RX_MPSZ_PAD0 BIT(18) +#define MT_WL_RX_FLUSH BIT(19) +#define MT_TICK_1US_EN BIT(20) +#define MT_WL_RX_AGG_EN BIT(21) +#define MT_WL_RX_EN BIT(22) +#define MT_WL_TX_EN BIT(23) +#define MT_WL_RX_BUSY BIT(30) +#define MT_WL_TX_BUSY BIT(31) + +#define MT_MCU_PTA_BASE 0x81060000 +#define MT_MCU_PTA(_n) (MT_MCU_PTA_BASE + (_n)) + +#define MT_ANT_SWITCH_CON(_n) MT_MCU_PTA(0x0c8 + ((_n) - 1) * 4) +#define MT_ANT_SWITCH_CON_MODE(_n) (GENMASK(4, 0) << (_n * 8)) +#define MT_ANT_SWITCH_CON_MODE1(_n) (GENMASK(3, 0) << (_n * 8)) + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c new file mode 100644 index 000000000..d742b2242 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c @@ -0,0 +1,257 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2020 MediaTek Inc. 
+ * + * Author: Felix Fietkau &lt;nbd@nbd.name&gt; + * Lorenzo Bianconi &lt;lorenzo@kernel.org&gt; + * Sean Wang &lt;sean.wang@mediatek.com&gt; + */ + +#include &lt;linux/kernel.h&gt; +#include &lt;linux/iopoll.h&gt; +#include &lt;linux/module.h&gt; + +#include &lt;linux/mmc/host.h&gt; +#include &lt;linux/mmc/sdio_ids.h&gt; +#include &lt;linux/mmc/sdio_func.h&gt; + +#include "../sdio.h" +#include "mt7615.h" +#include "mac.h" +#include "mcu.h" +
+static const struct sdio_device_id mt7663s_table[] = { + { SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, 0x7603) }, + { } /* Terminating entry */ +}; +
+static void mt7663s_txrx_worker(struct mt76_worker *w) +{ + struct mt76_sdio *sdio = container_of(w, struct mt76_sdio, + txrx_worker); + struct mt76_dev *mdev = container_of(sdio, struct mt76_dev, sdio); + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); + + if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) { + queue_work(mdev->wq, &dev->pm.wake_work); + return; + } + mt76s_txrx_worker(sdio); + mt76_connac_pm_unref(&dev->mphy, &dev->pm); +} +
+static void mt7663s_init_work(struct work_struct *work) +{ + struct mt7615_dev *dev; + + dev = container_of(work, struct mt7615_dev, mcu_work); + if (mt7663s_mcu_init(dev)) + return; + + mt7615_init_work(dev); +} +
+static int mt7663s_parse_intr(struct mt76_dev *dev, struct mt76s_intr *intr) +{ + struct mt76_sdio *sdio = &dev->sdio; + struct mt7663s_intr *irq_data = sdio->intr_data; + int i, err; + + sdio_claim_host(sdio->func); + err = sdio_readsb(sdio->func, irq_data, MCR_WHISR, sizeof(*irq_data)); + sdio_release_host(sdio->func); + + if (err) + return err; + + intr->isr = irq_data->isr; + intr->rec_mb = irq_data->rec_mb; + intr->tx.wtqcr = irq_data->tx.wtqcr; + intr->rx.num = irq_data->rx.num; + for (i = 0; i < 2 ; i++) + intr->rx.len[i] = irq_data->rx.len[i]; + + return 0; +} +
+static int mt7663s_probe(struct sdio_func *func, + const struct sdio_device_id *id) +{ + static const struct mt76_driver_ops drv_ops = { + .txwi_size = MT_USB_TXD_SIZE, + .drv_flags = MT_DRV_RX_DMA_HDR, + .tx_prepare_skb = mt7663_usb_sdio_tx_prepare_skb, + .tx_complete_skb = mt7663_usb_sdio_tx_complete_skb, + .tx_status_data = mt7663_usb_sdio_tx_status_data, + .rx_skb = mt7615_queue_rx_skb, + .rx_check = mt7615_rx_check, + .sta_ps = mt7615_sta_ps, + .sta_add = mt7615_mac_sta_add, + .sta_remove = mt7615_mac_sta_remove, + .update_survey = mt7615_update_channel, + }; + static const struct mt76_bus_ops mt7663s_ops = { + .rr = mt76s_rr, + .rmw = mt76s_rmw, + .wr = mt76s_wr, + .write_copy = mt76s_write_copy, + .read_copy = mt76s_read_copy, + .wr_rp = mt76s_wr_rp, + .rd_rp = mt76s_rd_rp, + .type = MT76_BUS_SDIO, + }; + struct ieee80211_ops *ops; + struct mt7615_dev *dev; + struct mt76_dev *mdev; + int ret; + + ops = devm_kmemdup(&func->dev, &mt7615_ops, sizeof(mt7615_ops), + GFP_KERNEL); + if (!ops) + return -ENOMEM; + + mdev = mt76_alloc_device(&func->dev, sizeof(*dev), ops, &drv_ops); + if (!mdev) + return -ENOMEM; + + dev = container_of(mdev, struct mt7615_dev, mt76); + + INIT_WORK(&dev->mcu_work, mt7663s_init_work); + dev->reg_map = mt7663_usb_sdio_reg_map; + dev->ops = ops; + sdio_set_drvdata(func, dev); + + ret = mt76s_init(mdev, func, &mt7663s_ops); + if (ret < 0) + goto error; + + ret = mt76s_hw_init(mdev, func, MT76_CONNAC_SDIO); + if (ret) + goto error; + + mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) | + (mt76_rr(dev, MT_HW_REV) & 0xff); + dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev); + + mdev->sdio.parse_irq = mt7663s_parse_intr; + mdev->sdio.intr_data = devm_kmalloc(mdev->dev, + sizeof(struct mt7663s_intr), + GFP_KERNEL); + if (!mdev->sdio.intr_data) { + ret = -ENOMEM; + goto error; + } + + ret = mt76s_alloc_rx_queue(mdev, MT_RXQ_MAIN); + if (ret) + goto error; + + ret =
mt76s_alloc_tx(mdev); + if (ret) + goto error; + + ret = mt76_worker_setup(mt76_hw(dev), &mdev->sdio.txrx_worker, + mt7663s_txrx_worker, "sdio-txrx"); + if (ret) + goto error; + + sched_set_fifo_low(mdev->sdio.txrx_worker.task); + + ret = mt7663_usb_sdio_register_device(dev); + if (ret) + goto error; + + return 0; + +error: + mt76s_deinit(&dev->mt76); + mt76_free_device(&dev->mt76); + + return ret; +} + +static void mt7663s_remove(struct sdio_func *func) +{ + struct mt7615_dev *dev = sdio_get_drvdata(func); + + if (!test_and_clear_bit(MT76_STATE_INITIALIZED, &dev->mphy.state)) + return; + + ieee80211_unregister_hw(dev->mt76.hw); + mt76s_deinit(&dev->mt76); + mt76_free_device(&dev->mt76); +} + +static int mt7663s_suspend(struct device *dev) +{ + struct sdio_func *func = dev_to_sdio_func(dev); + struct mt7615_dev *mdev = sdio_get_drvdata(func); + int err; + + if (!test_bit(MT76_STATE_SUSPEND, &mdev->mphy.state) && + mt7615_firmware_offload(mdev)) { + int err; + + err = mt76_connac_mcu_set_hif_suspend(&mdev->mt76, true); + if (err < 0) + return err; + } + + sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); + + err = mt7615_mcu_set_fw_ctrl(mdev); + if (err) + return err; + + mt76_worker_disable(&mdev->mt76.sdio.txrx_worker); + mt76_worker_disable(&mdev->mt76.sdio.status_worker); + mt76_worker_disable(&mdev->mt76.sdio.net_worker); + mt76_worker_disable(&mdev->mt76.sdio.stat_worker); + + clear_bit(MT76_READING_STATS, &mdev->mphy.state); + + mt76_tx_status_check(&mdev->mt76, true); + + return 0; +} + +static int mt7663s_resume(struct device *dev) +{ + struct sdio_func *func = dev_to_sdio_func(dev); + struct mt7615_dev *mdev = sdio_get_drvdata(func); + int err; + + mt76_worker_enable(&mdev->mt76.sdio.txrx_worker); + mt76_worker_enable(&mdev->mt76.sdio.status_worker); + mt76_worker_enable(&mdev->mt76.sdio.net_worker); + + err = mt7615_mcu_set_drv_ctrl(mdev); + if (err) + return err; + + if (!test_bit(MT76_STATE_SUSPEND, &mdev->mphy.state) && + mt7615_firmware_offload(mdev)) + err = mt76_connac_mcu_set_hif_suspend(&mdev->mt76, false); + + return err; +} + +MODULE_DEVICE_TABLE(sdio, mt7663s_table); +MODULE_FIRMWARE(MT7663_OFFLOAD_FIRMWARE_N9); +MODULE_FIRMWARE(MT7663_OFFLOAD_ROM_PATCH); +MODULE_FIRMWARE(MT7663_FIRMWARE_N9); +MODULE_FIRMWARE(MT7663_ROM_PATCH); + +static DEFINE_SIMPLE_DEV_PM_OPS(mt7663s_pm_ops, mt7663s_suspend, mt7663s_resume); + +static struct sdio_driver mt7663s_driver = { + .name = KBUILD_MODNAME, + .probe = mt7663s_probe, + .remove = mt7663s_remove, + .id_table = mt7663s_table, + .drv.pm = pm_sleep_ptr(&mt7663s_pm_ops), +}; +module_sdio_driver(mt7663s_driver); + +MODULE_AUTHOR("Sean Wang "); +MODULE_AUTHOR("Lorenzo Bianconi "); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c new file mode 100644 index 000000000..dc9a2f0b4 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c @@ -0,0 +1,181 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2020 MediaTek Inc. 
+ * + * Author: Felix Fietkau &lt;nbd@nbd.name&gt; + * Lorenzo Bianconi &lt;lorenzo@kernel.org&gt; + * Sean Wang &lt;sean.wang@mediatek.com&gt; + */ +#include &lt;linux/kernel.h&gt; +#include &lt;linux/mmc/sdio_func.h&gt; +#include &lt;linux/module.h&gt; +#include &lt;linux/iopoll.h&gt; + +#include "../sdio.h" +#include "mt7615.h" +#include "mac.h" +#include "mcu.h" +#include "regs.h" +
+static int mt7663s_mcu_init_sched(struct mt7615_dev *dev) +{ + struct mt76_sdio *sdio = &dev->mt76.sdio; + u32 txdwcnt; + + sdio->sched.pse_data_quota = mt76_get_field(dev, MT_PSE_PG_HIF0_GROUP, + MT_HIF0_MIN_QUOTA); + sdio->sched.pse_mcu_quota = mt76_get_field(dev, MT_PSE_PG_HIF1_GROUP, + MT_HIF1_MIN_QUOTA); + sdio->sched.ple_data_quota = mt76_get_field(dev, MT_PLE_PG_HIF0_GROUP, + MT_HIF0_MIN_QUOTA); + sdio->sched.pse_page_size = MT_PSE_PAGE_SZ; + txdwcnt = mt76_get_field(dev, MT_PP_TXDWCNT, + MT_PP_TXDWCNT_TX1_ADD_DW_CNT); + sdio->sched.deficit = txdwcnt << 2; + + return 0; +} +
+static int +mt7663s_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, + int cmd, int *seq) +{ + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); + int ret; + + mt7615_mcu_fill_msg(dev, skb, cmd, seq); + ret = mt76_tx_queue_skb_raw(dev, mdev->q_mcu[MT_MCUQ_WM], skb, 0); + if (ret) + return ret; + + mt76_queue_kick(dev, mdev->q_mcu[MT_MCUQ_WM]); + + return ret; +} +
+static int __mt7663s_mcu_drv_pmctrl(struct mt7615_dev *dev) +{ + struct sdio_func *func = dev->mt76.sdio.func; + struct mt76_phy *mphy = &dev->mt76.phy; + struct mt76_connac_pm *pm = &dev->pm; + u32 status; + int ret; + + sdio_claim_host(func); + + sdio_writel(func, WHLPCR_FW_OWN_REQ_CLR, MCR_WHLPCR, NULL); + + ret = readx_poll_timeout(mt76s_read_pcr, &dev->mt76, status, + status & WHLPCR_IS_DRIVER_OWN, 2000, 1000000); + if (ret < 0) { + dev_err(dev->mt76.dev, "Cannot get ownership from device"); + } else { + clear_bit(MT76_STATE_PM, &mphy->state); + + pm->stats.last_wake_event = jiffies; + pm->stats.doze_time += pm->stats.last_wake_event - + pm->stats.last_doze_event; + } + sdio_release_host(func); + + return ret; +} +
+static int mt7663s_mcu_drv_pmctrl(struct mt7615_dev *dev) +{ + struct mt76_phy *mphy = &dev->mt76.phy; + int ret = 0; + + mutex_lock(&dev->pm.mutex); + + if (test_bit(MT76_STATE_PM, &mphy->state)) + ret = __mt7663s_mcu_drv_pmctrl(dev); + + mutex_unlock(&dev->pm.mutex); + + return ret; +} +
+static int mt7663s_mcu_fw_pmctrl(struct mt7615_dev *dev) +{ + struct sdio_func *func = dev->mt76.sdio.func; + struct mt76_phy *mphy = &dev->mt76.phy; + struct mt76_connac_pm *pm = &dev->pm; + int ret = 0; + u32 status; + + mutex_lock(&pm->mutex); + + if (mt76_connac_skip_fw_pmctrl(mphy, pm)) + goto out; + + sdio_claim_host(func); + + sdio_writel(func, WHLPCR_FW_OWN_REQ_SET, MCR_WHLPCR, NULL); + + ret = readx_poll_timeout(mt76s_read_pcr, &dev->mt76, status, + !(status & WHLPCR_IS_DRIVER_OWN), 2000, 1000000); + if (ret < 0) { + dev_err(dev->mt76.dev, "Cannot set ownership to device"); + clear_bit(MT76_STATE_PM, &mphy->state); + } else { + pm->stats.last_doze_event = jiffies; + pm->stats.awake_time += pm->stats.last_doze_event - + pm->stats.last_wake_event; + } + + sdio_release_host(func); +out: + mutex_unlock(&pm->mutex); + + return ret; +} +
+int mt7663s_mcu_init(struct mt7615_dev *dev) +{ + static const struct mt76_mcu_ops mt7663s_mcu_ops = { + .headroom = sizeof(struct mt7615_mcu_txd), + .tailroom = MT_USB_TAIL_SIZE, + .mcu_skb_send_msg = mt7663s_mcu_send_message, + .mcu_parse_response = mt7615_mcu_parse_response, + .mcu_restart = mt7615_mcu_restart, + .mcu_rr = mt76_connac_mcu_reg_rr, + .mcu_wr = mt76_connac_mcu_reg_wr, + }; + struct mt7615_mcu_ops *mcu_ops; + int ret; + + ret =
__mt7663s_mcu_drv_pmctrl(dev); + if (ret) + return ret; + + dev->mt76.mcu_ops = &mt7663s_mcu_ops, + + ret = mt76_get_field(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY); + if (ret) { + mt7615_mcu_restart(&dev->mt76); + if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, + MT_TOP_MISC2_FW_N9_RDY, 0, 500)) + return -EIO; + } + + ret = __mt7663_load_firmware(dev); + if (ret) + return ret; + + mcu_ops = devm_kmemdup(dev->mt76.dev, dev->mcu_ops, sizeof(*mcu_ops), + GFP_KERNEL); + if (!mcu_ops) + return -ENOMEM; + + mcu_ops->set_drv_ctrl = mt7663s_mcu_drv_pmctrl; + mcu_ops->set_fw_ctrl = mt7663s_mcu_fw_pmctrl; + dev->mcu_ops = mcu_ops; + + ret = mt7663s_mcu_init_sched(dev); + if (ret) + return ret; + + set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state); + + return 0; +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/soc.c b/drivers/net/wireless/mediatek/mt76/mt7615/soc.c new file mode 100644 index 000000000..f13d1b418 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7615/soc.c @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2019 MediaTek Inc. + * + * Author: Ryder Lee + * Felix Fietkau + */ + +#include +#include +#include +#include +#include +#include +#include "mt7615.h" + +int mt7622_wmac_init(struct mt7615_dev *dev) +{ + struct device_node *np = dev->mt76.dev->of_node; + + if (!is_mt7622(&dev->mt76)) + return 0; + + dev->infracfg = syscon_regmap_lookup_by_phandle(np, "mediatek,infracfg"); + if (IS_ERR(dev->infracfg)) { + dev_err(dev->mt76.dev, "Cannot find infracfg controller\n"); + return PTR_ERR(dev->infracfg); + } + + return 0; +} + +static int mt7622_wmac_probe(struct platform_device *pdev) +{ + void __iomem *mem_base; + int irq; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + mem_base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); + if (IS_ERR(mem_base)) + return PTR_ERR(mem_base); + + return mt7615_mmio_probe(&pdev->dev, mem_base, irq, mt7615e_reg_map); +} + +static int mt7622_wmac_remove(struct platform_device *pdev) +{ + struct mt7615_dev *dev = platform_get_drvdata(pdev); + + mt7615_unregister_device(dev); + + return 0; +} + +static const struct of_device_id mt7622_wmac_of_match[] = { + { .compatible = "mediatek,mt7622-wmac" }, + {}, +}; + +struct platform_driver mt7622_wmac_driver = { + .driver = { + .name = "mt7622-wmac", + .of_match_table = mt7622_wmac_of_match, + }, + .probe = mt7622_wmac_probe, + .remove = mt7622_wmac_remove, +}; + +MODULE_FIRMWARE(MT7622_FIRMWARE_N9); +MODULE_FIRMWARE(MT7622_ROM_PATCH); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c new file mode 100644 index 000000000..a3d1cfa72 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c @@ -0,0 +1,376 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2020 Felix Fietkau */ + +#include "mt7615.h" +#include "eeprom.h" +#include "mcu.h" + +enum { + TM_CHANGED_TXPOWER_CTRL, + TM_CHANGED_TXPOWER, + TM_CHANGED_FREQ_OFFSET, + + /* must be last */ + NUM_TM_CHANGED +}; + + +static const u8 tm_change_map[] = { + [TM_CHANGED_TXPOWER_CTRL] = MT76_TM_ATTR_TX_POWER_CONTROL, + [TM_CHANGED_TXPOWER] = MT76_TM_ATTR_TX_POWER, + [TM_CHANGED_FREQ_OFFSET] = MT76_TM_ATTR_FREQ_OFFSET, +}; + +static const u32 reg_backup_list[] = { + MT_WF_PHY_RFINTF3_0(0), + MT_WF_PHY_RFINTF3_0(1), + MT_WF_PHY_RFINTF3_0(2), + MT_WF_PHY_RFINTF3_0(3), + MT_ANT_SWITCH_CON(2), + MT_ANT_SWITCH_CON(3), + MT_ANT_SWITCH_CON(4), + MT_ANT_SWITCH_CON(6), + MT_ANT_SWITCH_CON(7), + MT_ANT_SWITCH_CON(8), +}; + +static 
const struct { + u16 wf; + u16 reg; +} rf_backup_list[] = { + { 0, 0x48 }, + { 1, 0x48 }, + { 2, 0x48 }, + { 3, 0x48 }, +}; + +static int +mt7615_tm_set_tx_power(struct mt7615_phy *phy) +{ + struct mt7615_dev *dev = phy->dev; + struct mt76_phy *mphy = phy->mt76; + int i, ret, n_chains = hweight8(mphy->antenna_mask); + struct cfg80211_chan_def *chandef = &mphy->chandef; + int freq = chandef->center_freq1, len, target_chains; + u8 *data, *eep = (u8 *)dev->mt76.eeprom.data; + enum nl80211_band band = chandef->chan->band; + struct sk_buff *skb; + struct { + u8 center_chan; + u8 dbdc_idx; + u8 band; + u8 rsv; + } __packed req_hdr = { + .center_chan = ieee80211_frequency_to_channel(freq), + .band = band, + .dbdc_idx = phy != &dev->phy, + }; + u8 *tx_power = NULL; + + if (mphy->test.state != MT76_TM_STATE_OFF) + tx_power = mphy->test.tx_power; + + len = MT7615_EE_MAX - MT_EE_NIC_CONF_0; + skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, sizeof(req_hdr) + len); + if (!skb) + return -ENOMEM; + + skb_put_data(skb, &req_hdr, sizeof(req_hdr)); + data = skb_put_data(skb, eep + MT_EE_NIC_CONF_0, len); + + target_chains = mt7615_ext_pa_enabled(dev, band) ? 1 : n_chains; + for (i = 0; i < target_chains; i++) { + ret = mt7615_eeprom_get_target_power_index(dev, chandef->chan, i); + if (ret < 0) { + dev_kfree_skb(skb); + return -EINVAL; + } + + if (tx_power && tx_power[i]) + data[ret - MT_EE_NIC_CONF_0] = tx_power[i]; + } + + return mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_EXT_CMD(SET_TX_POWER_CTRL), false); +} + +static void +mt7615_tm_reg_backup_restore(struct mt7615_phy *phy) +{ + struct mt7615_dev *dev = phy->dev; + u32 *b = phy->test.reg_backup; + int n_regs = ARRAY_SIZE(reg_backup_list); + int n_rf_regs = ARRAY_SIZE(rf_backup_list); + int i; + + if (phy->mt76->test.state == MT76_TM_STATE_OFF) { + for (i = 0; i < n_regs; i++) + mt76_wr(dev, reg_backup_list[i], b[i]); + + for (i = 0; i < n_rf_regs; i++) + mt7615_rf_wr(dev, rf_backup_list[i].wf, + rf_backup_list[i].reg, b[n_regs + i]); + return; + } + + if (b) + return; + + b = devm_kzalloc(dev->mt76.dev, 4 * (n_regs + n_rf_regs), + GFP_KERNEL); + if (!b) + return; + + phy->test.reg_backup = b; + for (i = 0; i < n_regs; i++) + b[i] = mt76_rr(dev, reg_backup_list[i]); + for (i = 0; i < n_rf_regs; i++) + b[n_regs + i] = mt7615_rf_rr(dev, rf_backup_list[i].wf, + rf_backup_list[i].reg); +} + +static void +mt7615_tm_init(struct mt7615_phy *phy) +{ + struct mt7615_dev *dev = phy->dev; + unsigned int total_flags = ~0; + + if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state)) + return; + + mt7615_mcu_set_sku_en(phy, phy->mt76->test.state == MT76_TM_STATE_OFF); + + mutex_unlock(&dev->mt76.mutex); + mt7615_set_channel(phy); + mt7615_ops.configure_filter(phy->mt76->hw, 0, &total_flags, 0); + mutex_lock(&dev->mt76.mutex); + + mt7615_tm_reg_backup_restore(phy); +} + +static void +mt7615_tm_set_rx_enable(struct mt7615_dev *dev, bool en) +{ + u32 rqcr_mask = (MT_ARB_RQCR_RX_START | + MT_ARB_RQCR_RXV_START | + MT_ARB_RQCR_RXV_R_EN | + MT_ARB_RQCR_RXV_T_EN) * + (BIT(0) | BIT(MT_ARB_RQCR_BAND_SHIFT)); + + if (en) { + mt76_clear(dev, MT_ARB_SCR, + MT_ARB_SCR_RX0_DISABLE | MT_ARB_SCR_RX1_DISABLE); + mt76_set(dev, MT_ARB_RQCR, rqcr_mask); + } else { + mt76_set(dev, MT_ARB_SCR, + MT_ARB_SCR_RX0_DISABLE | MT_ARB_SCR_RX1_DISABLE); + mt76_clear(dev, MT_ARB_RQCR, rqcr_mask); + } +} + +static void +mt7615_tm_set_tx_antenna(struct mt7615_phy *phy, bool en) +{ + struct mt7615_dev *dev = phy->dev; + struct mt76_testmode_data *td = &phy->mt76->test; + u8 mask = td->tx_antenna_mask; + int 
i; + + if (!mask) + return; + + if (!en) + mask = phy->mt76->chainmask; + + for (i = 0; i < 4; i++) { + mt76_rmw_field(dev, MT_WF_PHY_RFINTF3_0(i), + MT_WF_PHY_RFINTF3_0_ANT, + (mask & BIT(i)) ? 0 : 0xa); + } + + /* 2.4 GHz band */ + mt76_rmw_field(dev, MT_ANT_SWITCH_CON(3), MT_ANT_SWITCH_CON_MODE(0), + (mask & BIT(0)) ? 0x8 : 0x1b); + mt76_rmw_field(dev, MT_ANT_SWITCH_CON(4), MT_ANT_SWITCH_CON_MODE(2), + (mask & BIT(1)) ? 0xe : 0x1b); + mt76_rmw_field(dev, MT_ANT_SWITCH_CON(6), MT_ANT_SWITCH_CON_MODE1(0), + (mask & BIT(2)) ? 0x0 : 0xf); + mt76_rmw_field(dev, MT_ANT_SWITCH_CON(7), MT_ANT_SWITCH_CON_MODE1(2), + (mask & BIT(3)) ? 0x6 : 0xf); + + /* 5 GHz band */ + mt76_rmw_field(dev, MT_ANT_SWITCH_CON(4), MT_ANT_SWITCH_CON_MODE(1), + (mask & BIT(0)) ? 0xd : 0x1b); + mt76_rmw_field(dev, MT_ANT_SWITCH_CON(2), MT_ANT_SWITCH_CON_MODE(3), + (mask & BIT(1)) ? 0x13 : 0x1b); + mt76_rmw_field(dev, MT_ANT_SWITCH_CON(7), MT_ANT_SWITCH_CON_MODE1(1), + (mask & BIT(2)) ? 0x5 : 0xf); + mt76_rmw_field(dev, MT_ANT_SWITCH_CON(8), MT_ANT_SWITCH_CON_MODE1(3), + (mask & BIT(3)) ? 0xb : 0xf); + + for (i = 0; i < 4; i++) { + u32 val; + + val = mt7615_rf_rr(dev, i, 0x48); + val &= ~(0x3ff << 20); + if (mask & BIT(i)) + val |= 3 << 20; + else + val |= (2 << 28) | (2 << 26) | (8 << 20); + mt7615_rf_wr(dev, i, 0x48, val); + } +} + +static void +mt7615_tm_set_tx_frames(struct mt7615_phy *phy, bool en) +{ + struct mt7615_dev *dev = phy->dev; + struct ieee80211_tx_info *info; + struct sk_buff *skb = phy->mt76->test.tx_skb; + + mt7615_mcu_set_chan_info(phy, MCU_EXT_CMD(SET_RX_PATH)); + mt7615_tm_set_tx_antenna(phy, en); + mt7615_tm_set_rx_enable(dev, !en); + if (!en || !skb) + return; + + info = IEEE80211_SKB_CB(skb); + info->control.vif = phy->monitor_vif; +} + +static void +mt7615_tm_update_params(struct mt7615_phy *phy, u32 changed) +{ + struct mt7615_dev *dev = phy->dev; + struct mt76_testmode_data *td = &phy->mt76->test; + bool en = phy->mt76->test.state != MT76_TM_STATE_OFF; + + if (changed & BIT(TM_CHANGED_TXPOWER_CTRL)) + mt7615_mcu_set_test_param(dev, MCU_ATE_SET_TX_POWER_CONTROL, + en, en && td->tx_power_control); + if (changed & BIT(TM_CHANGED_FREQ_OFFSET)) + mt7615_mcu_set_test_param(dev, MCU_ATE_SET_FREQ_OFFSET, + en, en ? 
td->freq_offset : 0); + if (changed & BIT(TM_CHANGED_TXPOWER)) + mt7615_tm_set_tx_power(phy); +} + +static int +mt7615_tm_set_state(struct mt76_phy *mphy, enum mt76_testmode_state state) +{ + struct mt7615_phy *phy = mphy->priv; + struct mt76_testmode_data *td = &mphy->test; + enum mt76_testmode_state prev_state = td->state; + + mphy->test.state = state; + + if (prev_state == MT76_TM_STATE_TX_FRAMES) + mt7615_tm_set_tx_frames(phy, false); + else if (state == MT76_TM_STATE_TX_FRAMES) + mt7615_tm_set_tx_frames(phy, true); + + if (state <= MT76_TM_STATE_IDLE) + mt7615_tm_init(phy); + + if ((state == MT76_TM_STATE_IDLE && + prev_state == MT76_TM_STATE_OFF) || + (state == MT76_TM_STATE_OFF && + prev_state == MT76_TM_STATE_IDLE)) { + u32 changed = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(tm_change_map); i++) { + u16 cur = tm_change_map[i]; + + if (td->param_set[cur / 32] & BIT(cur % 32)) + changed |= BIT(i); + } + + mt7615_tm_update_params(phy, changed); + } + + return 0; +} + +static int +mt7615_tm_set_params(struct mt76_phy *mphy, struct nlattr **tb, + enum mt76_testmode_state new_state) +{ + struct mt76_testmode_data *td = &mphy->test; + struct mt7615_phy *phy = mphy->priv; + u32 changed = 0; + int i; + + BUILD_BUG_ON(NUM_TM_CHANGED >= 32); + + if (new_state == MT76_TM_STATE_OFF || + td->state == MT76_TM_STATE_OFF) + return 0; + + if (td->tx_antenna_mask & ~mphy->chainmask) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(tm_change_map); i++) { + if (tb[tm_change_map[i]]) + changed |= BIT(i); + } + + mt7615_tm_update_params(phy, changed); + + return 0; +} + +static int +mt7615_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg) +{ + struct mt7615_phy *phy = mphy->priv; + void *rx, *rssi; + int i; + + rx = nla_nest_start(msg, MT76_TM_STATS_ATTR_LAST_RX); + if (!rx) + return -ENOMEM; + + if (nla_put_s32(msg, MT76_TM_RX_ATTR_FREQ_OFFSET, phy->test.last_freq_offset)) + return -ENOMEM; + + rssi = nla_nest_start(msg, MT76_TM_RX_ATTR_RCPI); + if (!rssi) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(phy->test.last_rcpi); i++) + if (nla_put_u8(msg, i, phy->test.last_rcpi[i])) + return -ENOMEM; + + nla_nest_end(msg, rssi); + + rssi = nla_nest_start(msg, MT76_TM_RX_ATTR_IB_RSSI); + if (!rssi) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(phy->test.last_ib_rssi); i++) + if (nla_put_s8(msg, i, phy->test.last_ib_rssi[i])) + return -ENOMEM; + + nla_nest_end(msg, rssi); + + rssi = nla_nest_start(msg, MT76_TM_RX_ATTR_WB_RSSI); + if (!rssi) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(phy->test.last_wb_rssi); i++) + if (nla_put_s8(msg, i, phy->test.last_wb_rssi[i])) + return -ENOMEM; + + nla_nest_end(msg, rssi); + + nla_nest_end(msg, rx); + + return 0; +} + +const struct mt76_testmode_ops mt7615_testmode_ops = { + .set_state = mt7615_tm_set_state, + .set_params = mt7615_tm_set_params, + .dump_stats = mt7615_tm_dump_stats, +}; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/trace.c b/drivers/net/wireless/mediatek/mt76/mt7615/trace.c new file mode 100644 index 000000000..6c02d5aff --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7615/trace.c @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (C) 2019 Lorenzo Bianconi + */ + +#include + +#ifndef __CHECKER__ +#define CREATE_TRACE_POINTS +#include "mt7615_trace.h" + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c new file mode 100644 index 000000000..f2d651d7a --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c @@ -0,0 +1,285 @@ 
+// SPDX-License-Identifier: ISC +/* Copyright (C) 2019 MediaTek Inc. + * + * Author: Felix Fietkau &lt;nbd@nbd.name&gt; + * Lorenzo Bianconi &lt;lorenzo@kernel.org&gt; + * Sean Wang &lt;sean.wang@mediatek.com&gt; + */ + +#include &lt;linux/kernel.h&gt; +#include &lt;linux/module.h&gt; +#include &lt;linux/usb.h&gt; + +#include "mt7615.h" +#include "mac.h" +#include "mcu.h" +#include "regs.h" +
+static const struct usb_device_id mt7615_device_table[] = { + { USB_DEVICE_AND_INTERFACE_INFO(0x0e8d, 0x7663, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x043e, 0x310c, 0xff, 0xff, 0xff) }, + { }, +}; +
+static u32 mt7663u_rr(struct mt76_dev *dev, u32 addr) +{ + u32 ret; + + mutex_lock(&dev->usb.usb_ctrl_mtx); + ret = ___mt76u_rr(dev, MT_VEND_READ_EXT, + USB_DIR_IN | USB_TYPE_VENDOR, addr); + mutex_unlock(&dev->usb.usb_ctrl_mtx); + + return ret; +} +
+static void mt7663u_wr(struct mt76_dev *dev, u32 addr, u32 val) +{ + mutex_lock(&dev->usb.usb_ctrl_mtx); + ___mt76u_wr(dev, MT_VEND_WRITE_EXT, + USB_DIR_OUT | USB_TYPE_VENDOR, addr, val); + mutex_unlock(&dev->usb.usb_ctrl_mtx); +} +
+static u32 mt7663u_rmw(struct mt76_dev *dev, u32 addr, + u32 mask, u32 val) +{ + mutex_lock(&dev->usb.usb_ctrl_mtx); + val |= ___mt76u_rr(dev, MT_VEND_READ_EXT, + USB_DIR_IN | USB_TYPE_VENDOR, addr) & ~mask; + ___mt76u_wr(dev, MT_VEND_WRITE_EXT, + USB_DIR_OUT | USB_TYPE_VENDOR, addr, val); + mutex_unlock(&dev->usb.usb_ctrl_mtx); + + return val; +} +
+static void mt7663u_copy(struct mt76_dev *dev, u32 offset, + const void *data, int len) +{ + struct mt76_usb *usb = &dev->usb; + int ret, i = 0, batch_len; + const u8 *val = data; + + len = round_up(len, 4); + + mutex_lock(&usb->usb_ctrl_mtx); + while (i < len) { + batch_len = min_t(int, usb->data_len, len - i); + memcpy(usb->data, val + i, batch_len); + ret = __mt76u_vendor_request(dev, MT_VEND_WRITE_EXT, + USB_DIR_OUT | USB_TYPE_VENDOR, + (offset + i) >> 16, offset + i, + usb->data, batch_len); + if (ret < 0) + break; + + i += batch_len; + } + mutex_unlock(&usb->usb_ctrl_mtx); +} +
+static void mt7663u_stop(struct ieee80211_hw *hw) +{ + struct mt7615_phy *phy = mt7615_hw_phy(hw); + struct mt7615_dev *dev = hw->priv; + + clear_bit(MT76_STATE_RUNNING, &dev->mphy.state); + del_timer_sync(&phy->roc_timer); + cancel_work_sync(&phy->roc_work); + cancel_delayed_work_sync(&phy->scan_work); + cancel_delayed_work_sync(&phy->mt76->mac_work); + mt76u_stop_tx(&dev->mt76); +} +
+static void mt7663u_cleanup(struct mt7615_dev *dev) +{ + clear_bit(MT76_STATE_INITIALIZED, &dev->mphy.state); + mt76u_queues_deinit(&dev->mt76); +} +
+static void mt7663u_init_work(struct work_struct *work) +{ + struct mt7615_dev *dev; + + dev = container_of(work, struct mt7615_dev, mcu_work); + if (mt7663u_mcu_init(dev)) + return; + + mt7615_init_work(dev); +} +
+static int mt7663u_probe(struct usb_interface *usb_intf, + const struct usb_device_id *id) +{ + static const struct mt76_driver_ops drv_ops = { + .txwi_size = MT_USB_TXD_SIZE, + .drv_flags = MT_DRV_RX_DMA_HDR | MT_DRV_HW_MGMT_TXQ, + .tx_prepare_skb = mt7663_usb_sdio_tx_prepare_skb, + .tx_complete_skb = mt7663_usb_sdio_tx_complete_skb, + .tx_status_data = mt7663_usb_sdio_tx_status_data, + .rx_skb = mt7615_queue_rx_skb, + .rx_check = mt7615_rx_check, + .sta_ps = mt7615_sta_ps, + .sta_add = mt7615_mac_sta_add, + .sta_remove = mt7615_mac_sta_remove, + .update_survey = mt7615_update_channel, + }; + static struct mt76_bus_ops bus_ops = { + .rr = mt7663u_rr, + .wr = mt7663u_wr, + .rmw = mt7663u_rmw, + .read_copy = mt76u_read_copy, + .write_copy = mt7663u_copy, + .type = MT76_BUS_USB, + }; + struct usb_device *udev = interface_to_usbdev(usb_intf); + struct ieee80211_ops *ops; + struct
mt7615_dev *dev; + struct mt76_dev *mdev; + int ret; + + ops = devm_kmemdup(&usb_intf->dev, &mt7615_ops, sizeof(mt7615_ops), + GFP_KERNEL); + if (!ops) + return -ENOMEM; + + ops->stop = mt7663u_stop; + + mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), ops, &drv_ops); + if (!mdev) + return -ENOMEM; + + dev = container_of(mdev, struct mt7615_dev, mt76); + udev = usb_get_dev(udev); + usb_reset_device(udev); + + usb_set_intfdata(usb_intf, dev); + + INIT_WORK(&dev->mcu_work, mt7663u_init_work); + dev->reg_map = mt7663_usb_sdio_reg_map; + dev->ops = ops; + ret = __mt76u_init(mdev, usb_intf, &bus_ops); + if (ret < 0) + goto error; + + mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) | + (mt76_rr(dev, MT_HW_REV) & 0xff); + dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev); + + if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_PWR_ON, + FW_STATE_PWR_ON << 1, 500)) { + ret = mt7663u_mcu_power_on(dev); + if (ret) + goto error; + } else { + set_bit(MT76_STATE_POWER_OFF, &dev->mphy.state); + } + + ret = mt76u_alloc_mcu_queue(&dev->mt76); + if (ret) + goto error; + + ret = mt76u_alloc_queues(&dev->mt76); + if (ret) + goto error; + + ret = mt7663_usb_sdio_register_device(dev); + if (ret) + goto error; + + return 0; + +error: + mt76u_queues_deinit(&dev->mt76); + usb_set_intfdata(usb_intf, NULL); + usb_put_dev(interface_to_usbdev(usb_intf)); + + mt76_free_device(&dev->mt76); + + return ret; +} + +static void mt7663u_disconnect(struct usb_interface *usb_intf) +{ + struct mt7615_dev *dev = usb_get_intfdata(usb_intf); + + if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state)) + return; + + ieee80211_unregister_hw(dev->mt76.hw); + mt7663u_cleanup(dev); + + usb_set_intfdata(usb_intf, NULL); + usb_put_dev(interface_to_usbdev(usb_intf)); + + mt76_free_device(&dev->mt76); +} + +#ifdef CONFIG_PM +static int mt7663u_suspend(struct usb_interface *intf, pm_message_t state) +{ + struct mt7615_dev *dev = usb_get_intfdata(intf); + + if (!test_bit(MT76_STATE_SUSPEND, &dev->mphy.state) && + mt7615_firmware_offload(dev)) { + int err; + + err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true); + if (err < 0) + return err; + } + + mt76u_stop_rx(&dev->mt76); + mt76u_stop_tx(&dev->mt76); + + return 0; +} + +static int mt7663u_resume(struct usb_interface *intf) +{ + struct mt7615_dev *dev = usb_get_intfdata(intf); + int err; + + err = mt76u_vendor_request(&dev->mt76, MT_VEND_FEATURE_SET, + USB_DIR_OUT | USB_TYPE_VENDOR, + 0x5, 0x0, NULL, 0); + if (err) + return err; + + err = mt76u_resume_rx(&dev->mt76); + if (err < 0) + return err; + + if (!test_bit(MT76_STATE_SUSPEND, &dev->mphy.state) && + mt7615_firmware_offload(dev)) + err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false); + + return err; +} +#endif /* CONFIG_PM */ + +MODULE_DEVICE_TABLE(usb, mt7615_device_table); +MODULE_FIRMWARE(MT7663_OFFLOAD_FIRMWARE_N9); +MODULE_FIRMWARE(MT7663_OFFLOAD_ROM_PATCH); +MODULE_FIRMWARE(MT7663_FIRMWARE_N9); +MODULE_FIRMWARE(MT7663_ROM_PATCH); + +static struct usb_driver mt7663u_driver = { + .name = KBUILD_MODNAME, + .id_table = mt7615_device_table, + .probe = mt7663u_probe, + .disconnect = mt7663u_disconnect, +#ifdef CONFIG_PM + .suspend = mt7663u_suspend, + .resume = mt7663u_resume, + .reset_resume = mt7663u_resume, +#endif /* CONFIG_PM */ + .soft_unbind = 1, + .disable_hub_initiated_lpm = 1, +}; +module_usb_driver(mt7663u_driver); + +MODULE_AUTHOR("Sean Wang "); +MODULE_AUTHOR("Lorenzo Bianconi "); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c 
b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c new file mode 100644 index 000000000..98bf2f6ae --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2019 MediaTek Inc. + * + * Author: Felix Fietkau &lt;nbd@nbd.name&gt; + * Lorenzo Bianconi &lt;lorenzo@kernel.org&gt; + * Sean Wang &lt;sean.wang@mediatek.com&gt; + */ +#include &lt;linux/kernel.h&gt; +#include &lt;linux/module.h&gt; + +#include "mt7615.h" +#include "mac.h" +#include "mcu.h" +#include "regs.h" +
+static int +mt7663u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, + int cmd, int *seq) +{ + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); + int ret, ep, len, pad; + + mt7615_mcu_fill_msg(dev, skb, cmd, seq); + if (cmd != MCU_CMD(FW_SCATTER)) + ep = MT_EP_OUT_INBAND_CMD; + else + ep = MT_EP_OUT_AC_BE; + + len = skb->len; + put_unaligned_le32(len, skb_push(skb, sizeof(len))); + pad = round_up(skb->len, 4) + 4 - skb->len; + ret = mt76_skb_adjust_pad(skb, pad); + if (ret < 0) + goto out; + + ret = mt76u_bulk_msg(&dev->mt76, skb->data, skb->len, NULL, + 1000, ep); +
+out: + dev_kfree_skb(skb); + + return ret; +} +
+int mt7663u_mcu_power_on(struct mt7615_dev *dev) +{ + int ret; + + ret = mt76u_vendor_request(&dev->mt76, MT_VEND_POWER_ON, + USB_DIR_OUT | USB_TYPE_VENDOR, + 0x0, 0x1, NULL, 0); + if (ret) + return ret; + + if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, + MT_TOP_MISC2_FW_PWR_ON, + FW_STATE_PWR_ON << 1, 500)) { + dev_err(dev->mt76.dev, "Timeout for power on\n"); + ret = -EIO; + } + + return 0; +} +
+int mt7663u_mcu_init(struct mt7615_dev *dev) +{ + static const struct mt76_mcu_ops mt7663u_mcu_ops = { + .headroom = MT_USB_HDR_SIZE + sizeof(struct mt7615_mcu_txd), + .tailroom = MT_USB_TAIL_SIZE, + .mcu_skb_send_msg = mt7663u_mcu_send_message, + .mcu_parse_response = mt7615_mcu_parse_response, + .mcu_restart = mt7615_mcu_restart, + }; + int ret; + + dev->mt76.mcu_ops = &mt7663u_mcu_ops, + + mt76_set(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN); + if (test_and_clear_bit(MT76_STATE_POWER_OFF, &dev->mphy.state)) { + ret = mt7615_mcu_restart(&dev->mt76); + if (ret) + return ret; + + if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, + MT_TOP_MISC2_FW_PWR_ON, 0, 500)) + return -EIO; + + ret = mt7663u_mcu_power_on(dev); + if (ret) + return ret; + } + + ret = __mt7663_load_firmware(dev); + if (ret) + return ret; + + mt76_clear(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN); + set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state); + + return 0; +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c new file mode 100644 index 000000000..0052d103e --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c @@ -0,0 +1,352 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2020 MediaTek Inc.
+ * + * Author: Lorenzo Bianconi &lt;lorenzo@kernel.org&gt; + * Sean Wang &lt;sean.wang@mediatek.com&gt; + */ + +#include &lt;linux/kernel.h&gt; +#include &lt;linux/module.h&gt; +#include &lt;linux/iopoll.h&gt; + +#include "mt7615.h" +#include "mac.h" +#include "mcu.h" +#include "regs.h" +
+const u32 mt7663_usb_sdio_reg_map[] = { + [MT_TOP_CFG_BASE] = 0x80020000, + [MT_HW_BASE] = 0x80000000, + [MT_DMA_SHDL_BASE] = 0x5000a000, + [MT_HIF_BASE] = 0x50000000, + [MT_CSR_BASE] = 0x40000000, + [MT_EFUSE_ADDR_BASE] = 0x78011000, + [MT_TOP_MISC_BASE] = 0x81020000, + [MT_PLE_BASE] = 0x82060000, + [MT_PSE_BASE] = 0x82068000, + [MT_PP_BASE] = 0x8206c000, + [MT_WTBL_BASE_ADDR] = 0x820e0000, + [MT_CFG_BASE] = 0x820f0000, + [MT_AGG_BASE] = 0x820f2000, + [MT_ARB_BASE] = 0x820f3000, + [MT_TMAC_BASE] = 0x820f4000, + [MT_RMAC_BASE] = 0x820f5000, + [MT_DMA_BASE] = 0x820f7000, + [MT_PF_BASE] = 0x820f8000, + [MT_WTBL_BASE_ON] = 0x820f9000, + [MT_WTBL_BASE_OFF] = 0x820f9800, + [MT_LPON_BASE] = 0x820fb000, + [MT_MIB_BASE] = 0x820fd000, +}; +EXPORT_SYMBOL_GPL(mt7663_usb_sdio_reg_map); +
+static void +mt7663_usb_sdio_write_txwi(struct mt7615_dev *dev, struct mt76_wcid *wcid, + enum mt76_txq_id qid, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, int pid, + struct sk_buff *skb) +{ + __le32 *txwi = (__le32 *)(skb->data - MT_USB_TXD_SIZE); + + memset(txwi, 0, MT_USB_TXD_SIZE); + mt7615_mac_write_txwi(dev, txwi, skb, wcid, sta, pid, key, qid, false); + skb_push(skb, MT_USB_TXD_SIZE); +} +
+static int mt7663_usb_sdio_set_rates(struct mt7615_dev *dev, + struct mt7615_wtbl_rate_desc *wrd) +{ + struct mt7615_rate_desc *rate = &wrd->rate; + struct mt7615_sta *sta = wrd->sta; + u32 w5, w27, addr, val; + u16 idx; + + lockdep_assert_held(&dev->mt76.mutex); + + if (!sta) + return -EINVAL; + + if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000)) + return -ETIMEDOUT; + + addr = mt7615_mac_wtbl_addr(dev, sta->wcid.idx); + + w27 = mt76_rr(dev, addr + 27 * 4); + w27 &= ~MT_WTBL_W27_CC_BW_SEL; + w27 |= FIELD_PREP(MT_WTBL_W27_CC_BW_SEL, rate->bw); + + w5 = mt76_rr(dev, addr + 5 * 4); + w5 &= ~(MT_WTBL_W5_BW_CAP | MT_WTBL_W5_CHANGE_BW_RATE | + MT_WTBL_W5_MPDU_OK_COUNT | + MT_WTBL_W5_MPDU_FAIL_COUNT | + MT_WTBL_W5_RATE_IDX); + w5 |= FIELD_PREP(MT_WTBL_W5_BW_CAP, rate->bw) | + FIELD_PREP(MT_WTBL_W5_CHANGE_BW_RATE, + rate->bw_idx ? rate->bw_idx - 1 : 7); + + mt76_wr(dev, MT_WTBL_RIUCR0, w5); + + mt76_wr(dev, MT_WTBL_RIUCR1, + FIELD_PREP(MT_WTBL_RIUCR1_RATE0, rate->probe_val) | + FIELD_PREP(MT_WTBL_RIUCR1_RATE1, rate->val[0]) | + FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, rate->val[1])); + + mt76_wr(dev, MT_WTBL_RIUCR2, + FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, rate->val[1] >> 8) | + FIELD_PREP(MT_WTBL_RIUCR2_RATE3, rate->val[1]) | + FIELD_PREP(MT_WTBL_RIUCR2_RATE4, rate->val[2]) | + FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, rate->val[2])); + + mt76_wr(dev, MT_WTBL_RIUCR3, + FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, rate->val[2] >> 4) | + FIELD_PREP(MT_WTBL_RIUCR3_RATE6, rate->val[3]) | + FIELD_PREP(MT_WTBL_RIUCR3_RATE7, rate->val[3])); + + mt76_wr(dev, MT_WTBL_UPDATE, + FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, sta->wcid.idx) | + MT_WTBL_UPDATE_RATE_UPDATE | + MT_WTBL_UPDATE_TX_COUNT_CLEAR); + + mt76_wr(dev, addr + 27 * 4, w27); + + sta->rate_probe = sta->rateset[rate->rateset].probe_rate.idx != -1; + + idx = sta->vif->mt76.omac_idx; + idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx; + addr = idx > 1 ?
MT_LPON_TCR2(idx): MT_LPON_TCR0(idx); + + mt76_rmw(dev, addr, MT_LPON_TCR_MODE, MT_LPON_TCR_READ); /* TSF read */ + val = mt76_rr(dev, MT_LPON_UTTR0); + sta->rate_set_tsf = (val & ~BIT(0)) | rate->rateset; + + if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET)) + mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000); + + sta->rate_count = 2 * MT7615_RATE_RETRY * sta->n_rates; + sta->wcid.tx_info |= MT_WCID_TX_INFO_SET; + + return 0; +} + +static void mt7663_usb_sdio_rate_work(struct work_struct *work) +{ + struct mt7615_wtbl_rate_desc *wrd, *wrd_next; + struct list_head wrd_list; + struct mt7615_dev *dev; + + dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev, + rate_work); + + INIT_LIST_HEAD(&wrd_list); + spin_lock_bh(&dev->mt76.lock); + list_splice_init(&dev->wrd_head, &wrd_list); + spin_unlock_bh(&dev->mt76.lock); + + list_for_each_entry_safe(wrd, wrd_next, &wrd_list, node) { + list_del(&wrd->node); + + mt7615_mutex_acquire(dev); + mt7663_usb_sdio_set_rates(dev, wrd); + mt7615_mutex_release(dev); + + kfree(wrd); + } +} + +bool mt7663_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update) +{ + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); + + mt7615_mutex_acquire(dev); + mt7615_mac_sta_poll(dev); + mt7615_mutex_release(dev); + + return false; +} +EXPORT_SYMBOL_GPL(mt7663_usb_sdio_tx_status_data); + +void mt7663_usb_sdio_tx_complete_skb(struct mt76_dev *mdev, + struct mt76_queue_entry *e) +{ + unsigned int headroom = MT_USB_TXD_SIZE; + + if (mt76_is_usb(mdev)) + headroom += MT_USB_HDR_SIZE; + skb_pull(e->skb, headroom); + + mt76_tx_complete_skb(mdev, e->wcid, e->skb); +} +EXPORT_SYMBOL_GPL(mt7663_usb_sdio_tx_complete_skb); + +int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, + enum mt76_txq_id qid, struct mt76_wcid *wcid, + struct ieee80211_sta *sta, + struct mt76_tx_info *tx_info) +{ + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); + struct sk_buff *skb = tx_info->skb; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_key_conf *key = info->control.hw_key; + struct mt7615_sta *msta; + int pad, err, pktid; + + msta = wcid ? container_of(wcid, struct mt7615_sta, wcid) : NULL; + if (!wcid) + wcid = &dev->mt76.global_wcid; + + if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && + msta && !msta->rate_probe) { + /* request to configure sampling rate */ + spin_lock_bh(&dev->mt76.lock); + mt7615_mac_set_rates(&dev->phy, msta, &info->control.rates[0], + msta->rates); + spin_unlock_bh(&dev->mt76.lock); + } + + pktid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb); + mt7663_usb_sdio_write_txwi(dev, wcid, qid, sta, key, pktid, skb); + if (mt76_is_usb(mdev)) { + u32 len = skb->len; + + put_unaligned_le32(len, skb_push(skb, sizeof(len))); + pad = round_up(skb->len, 4) + 4 - skb->len; + } else { + pad = round_up(skb->len, 4) - skb->len; + } + + err = mt76_skb_adjust_pad(skb, pad); + if (err) + /* Release pktid in case of error. */ + idr_remove(&wcid->pktid, pktid); + + return err; +} +EXPORT_SYMBOL_GPL(mt7663_usb_sdio_tx_prepare_skb); + +static int mt7663u_dma_sched_init(struct mt7615_dev *dev) +{ + int i; + + mt76_rmw(dev, MT_DMA_SHDL(MT_DMASHDL_PKT_MAX_SIZE), + MT_DMASHDL_PKT_MAX_SIZE_PLE | MT_DMASHDL_PKT_MAX_SIZE_PSE, + FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PLE, 1) | + FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PSE, 8)); + + /* disable refill group 5 - group 15 and raise group 2 + * and 3 as high priority. 
+ */ + mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_REFILL), 0xffe00006); + mt76_clear(dev, MT_DMA_SHDL(MT_DMASHDL_PAGE), BIT(16)); + + for (i = 0; i < 5; i++) + mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_GROUP_QUOTA(i)), + FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x3) | + FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0x1ff)); + + mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(0)), 0x42104210); + mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(1)), 0x42104210); + + mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(2)), 0x4444); + + /* group pririority from high to low: + * 15 (cmd groups) > 4 > 3 > 2 > 1 > 0. + */ + mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_SCHED_SET0), 0x6501234f); + mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_SCHED_SET1), 0xedcba987); + mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_OPTIONAL), 0x7004801c); + + mt76_wr(dev, MT_UDMA_WLCFG_1, + FIELD_PREP(MT_WL_TX_TMOUT_LMT, 80000) | + FIELD_PREP(MT_WL_RX_AGG_PKT_LMT, 1)); + + /* setup UDMA Rx Flush */ + mt76_clear(dev, MT_UDMA_WLCFG_0, MT_WL_RX_FLUSH); + /* hif reset */ + mt76_set(dev, MT_HIF_RST, MT_HIF_LOGIC_RST_N); + + mt76_set(dev, MT_UDMA_WLCFG_0, + MT_WL_RX_AGG_EN | MT_WL_RX_EN | MT_WL_TX_EN | + MT_WL_RX_MPSZ_PAD0 | MT_TICK_1US_EN | + MT_WL_TX_TMOUT_FUNC_EN); + mt76_rmw(dev, MT_UDMA_WLCFG_0, MT_WL_RX_AGG_LMT | MT_WL_RX_AGG_TO, + FIELD_PREP(MT_WL_RX_AGG_LMT, 32) | + FIELD_PREP(MT_WL_RX_AGG_TO, 100)); + + return 0; +} + +static int mt7663_usb_sdio_init_hardware(struct mt7615_dev *dev) +{ + int ret, idx; + + ret = mt7615_eeprom_init(dev, MT_EFUSE_BASE); + if (ret < 0) + return ret; + + if (mt76_is_usb(&dev->mt76)) { + ret = mt7663u_dma_sched_init(dev); + if (ret) + return ret; + } + + set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state); + + /* Beacon and mgmt frames should occupy wcid 0 */ + idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7615_WTBL_STA - 1); + if (idx) + return -ENOSPC; + + dev->mt76.global_wcid.idx = idx; + dev->mt76.global_wcid.hw_key_idx = -1; + rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid); + + return 0; +} + +int mt7663_usb_sdio_register_device(struct mt7615_dev *dev) +{ + struct ieee80211_hw *hw = mt76_hw(dev); + int err; + + INIT_WORK(&dev->rate_work, mt7663_usb_sdio_rate_work); + INIT_LIST_HEAD(&dev->wrd_head); + mt7615_init_device(dev); + + err = mt7663_usb_sdio_init_hardware(dev); + if (err) + return err; + + hw->extra_tx_headroom += MT_USB_TXD_SIZE; + if (mt76_is_usb(&dev->mt76)) { + hw->extra_tx_headroom += MT_USB_HDR_SIZE; + /* check hw sg support in order to enable AMSDU */ + if (dev->mt76.usb.sg_en) + hw->max_tx_fragments = MT_HW_TXP_MAX_BUF_NUM; + else + hw->max_tx_fragments = 1; + } + + err = mt76_register_device(&dev->mt76, true, mt76_rates, + ARRAY_SIZE(mt76_rates)); + if (err < 0) + return err; + + if (!dev->mt76.usb.sg_en) { + struct ieee80211_sta_vht_cap *vht_cap; + + /* decrease max A-MSDU size if SG is not supported */ + vht_cap = &dev->mphy.sband_5g.sband.vht_cap; + vht_cap->cap &= ~IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454; + } + + ieee80211_queue_work(hw, &dev->mcu_work); + mt7615_init_txpower(dev, &dev->mphy.sband_2g.sband); + mt7615_init_txpower(dev, &dev->mphy.sband_5g.sband); + + return mt7615_init_debugfs(dev); +} +EXPORT_SYMBOL_GPL(mt7663_usb_sdio_register_device); + +MODULE_AUTHOR("Lorenzo Bianconi "); +MODULE_AUTHOR("Sean Wang "); +MODULE_LICENSE("Dual BSD/GPL"); -- cgit v1.2.3