author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit     ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree       b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/net/wireless/mediatek/mt76/mt7996/dma.c
parent     Initial commit. (diff)
Adding upstream version 6.6.15. (upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/wireless/mediatek/mt76/mt7996/dma.c')
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7996/dma.c | 439
1 file changed, 439 insertions, 0 deletions
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/dma.c b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
new file mode 100644
index 0000000000..586e247a1e
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
@@ -0,0 +1,439 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ */
+
+#include "mt7996.h"
+#include "../dma.h"
+#include "mac.h"
+
+static int mt7996_poll_tx(struct napi_struct *napi, int budget)
+{
+ struct mt7996_dev *dev;
+
+ dev = container_of(napi, struct mt7996_dev, mt76.tx_napi);
+
+ mt76_connac_tx_cleanup(&dev->mt76);
+ if (napi_complete_done(napi, 0))
+ mt7996_irq_enable(dev, MT_INT_TX_DONE_MCU);
+
+ return 0;
+}
+
+static void mt7996_dma_config(struct mt7996_dev *dev)
+{
+#define Q_CONFIG(q, wfdma, int, id) do { \
+ if (wfdma) \
+ dev->q_wfdma_mask |= (1 << (q)); \
+ dev->q_int_mask[(q)] = int; \
+ dev->q_id[(q)] = id; \
+} while (0)
+
+#define MCUQ_CONFIG(q, wfdma, int, id) Q_CONFIG(q, (wfdma), (int), (id))
+#define RXQ_CONFIG(q, wfdma, int, id) Q_CONFIG(__RXQ(q), (wfdma), (int), (id))
+#define TXQ_CONFIG(q, wfdma, int, id) Q_CONFIG(__TXQ(q), (wfdma), (int), (id))
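+
+ /* each entry records which WFDMA instance a ring lives on, its interrupt
+ * bit and its hardware ring id; the MT_TXQ_ID()/MT_RXQ_ID()/MT_MCUQ_ID()
+ * helpers used when the rings are set up below presumably read these
+ * tables back, so the mapping only needs to be written down once here
+ */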
+
+ /* rx queue */
+ RXQ_CONFIG(MT_RXQ_MCU, WFDMA0, MT_INT_RX_DONE_WM, MT7996_RXQ_MCU_WM);
+ RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_RX_DONE_WA, MT7996_RXQ_MCU_WA);
+
+ /* band0/band1 */
+ RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0, MT7996_RXQ_BAND0);
+ RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_RX_DONE_WA_MAIN, MT7996_RXQ_MCU_WA_MAIN);
+
+ /* band2 */
+ RXQ_CONFIG(MT_RXQ_BAND2, WFDMA0, MT_INT_RX_DONE_BAND2, MT7996_RXQ_BAND2);
+ RXQ_CONFIG(MT_RXQ_BAND2_WA, WFDMA0, MT_INT_RX_DONE_WA_TRI, MT7996_RXQ_MCU_WA_TRI);
+
+ /* data tx queue */
+ TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, MT7996_TXQ_BAND0);
+ TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7996_TXQ_BAND1);
+ TXQ_CONFIG(2, WFDMA0, MT_INT_TX_DONE_BAND2, MT7996_TXQ_BAND2);
+
+ /* mcu tx queue */
+ MCUQ_CONFIG(MT_MCUQ_WM, WFDMA0, MT_INT_TX_DONE_MCU_WM, MT7996_TXQ_MCU_WM);
+ MCUQ_CONFIG(MT_MCUQ_WA, WFDMA0, MT_INT_TX_DONE_MCU_WA, MT7996_TXQ_MCU_WA);
+ MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA0, MT_INT_TX_DONE_FWDL, MT7996_TXQ_FWDL);
+}
+
+static void __mt7996_dma_prefetch(struct mt7996_dev *dev, u32 ofs)
+{
+#define PREFETCH(_base, _depth) ((_base) << 16 | (_depth))
+ /* prefetch SRAM wrapping boundary for tx/rx ring. */
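+ /* PREFETCH() packs the SRAM base into the upper 16 bits and the prefetch
+ * depth into the lower 16 bits; judging from the constants below, each
+ * unit of depth advances the next ring's base by 0x10, so consecutive
+ * rings start at base + (depth << 4)
+ */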
+ mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_FWDL) + ofs, PREFETCH(0x0, 0x2));
+ mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WM) + ofs, PREFETCH(0x20, 0x2));
+ mt76_wr(dev, MT_TXQ_EXT_CTRL(0) + ofs, PREFETCH(0x40, 0x4));
+ mt76_wr(dev, MT_TXQ_EXT_CTRL(1) + ofs, PREFETCH(0x80, 0x4));
+ mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(0xc0, 0x2));
+ mt76_wr(dev, MT_TXQ_EXT_CTRL(2) + ofs, PREFETCH(0xe0, 0x4));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU) + ofs, PREFETCH(0x120, 0x2));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU_WA) + ofs, PREFETCH(0x140, 0x2));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN_WA) + ofs, PREFETCH(0x160, 0x2));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND2_WA) + ofs, PREFETCH(0x180, 0x2));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN) + ofs, PREFETCH(0x1a0, 0x10));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND2) + ofs, PREFETCH(0x2a0, 0x10));
+
+ mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1 + ofs, WF_WFDMA0_GLO_CFG_EXT1_CALC_MODE);
+}
+
+void mt7996_dma_prefetch(struct mt7996_dev *dev)
+{
+ __mt7996_dma_prefetch(dev, 0);
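+ /* hif2 is the optional second PCIe host interface; its WFDMA block
+ * mirrors WFDMA0 at a fixed register offset, so the same prefetch
+ * layout is programmed again at that offset
+ */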
+ if (dev->hif2)
+ __mt7996_dma_prefetch(dev, MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0));
+}
+
+static void mt7996_dma_disable(struct mt7996_dev *dev, bool reset)
+{
+ u32 hif1_ofs = 0;
+
+ if (dev->hif2)
+ hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
+
+ if (reset) {
+ mt76_clear(dev, MT_WFDMA0_RST,
+ MT_WFDMA0_RST_DMASHDL_ALL_RST |
+ MT_WFDMA0_RST_LOGIC_RST);
+
+ mt76_set(dev, MT_WFDMA0_RST,
+ MT_WFDMA0_RST_DMASHDL_ALL_RST |
+ MT_WFDMA0_RST_LOGIC_RST);
+
+ if (dev->hif2) {
+ mt76_clear(dev, MT_WFDMA0_RST + hif1_ofs,
+ MT_WFDMA0_RST_DMASHDL_ALL_RST |
+ MT_WFDMA0_RST_LOGIC_RST);
+
+ mt76_set(dev, MT_WFDMA0_RST + hif1_ofs,
+ MT_WFDMA0_RST_DMASHDL_ALL_RST |
+ MT_WFDMA0_RST_LOGIC_RST);
+ }
+ }
+
+ /* disable */
+ mt76_clear(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+
+ if (dev->hif2) {
+ mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+ }
+}
+
+void mt7996_dma_start(struct mt7996_dev *dev, bool reset)
+{
+ u32 hif1_ofs = 0;
+ u32 irq_mask;
+
+ if (dev->hif2)
+ hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
+
+ /* enable WFDMA Tx/Rx */
+ if (!reset) {
+ mt76_set(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+
+ if (dev->hif2)
+ mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+ }
+
+ /* enable interrupts for TX/RX rings */
+ irq_mask = MT_INT_MCU_CMD;
+ if (reset)
+ goto done;
+
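+ /* on a normal start the full rx/tx-done mask is programmed below; during
+ * reset only MT_INT_MCU_CMD is kept armed here, and the remaining bits
+ * are presumably restored later in the recovery flow
+ */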
+ irq_mask = MT_INT_RX_DONE_MCU | MT_INT_TX_DONE_MCU;
+
+ if (!dev->mphy.band_idx)
+ irq_mask |= MT_INT_BAND0_RX_DONE;
+
+ if (dev->dbdc_support)
+ irq_mask |= MT_INT_BAND1_RX_DONE;
+
+ if (dev->tbtc_support)
+ irq_mask |= MT_INT_BAND2_RX_DONE;
+
+done:
+ mt7996_irq_enable(dev, irq_mask);
+ mt7996_irq_disable(dev, 0);
+}
+
+static void mt7996_dma_enable(struct mt7996_dev *dev, bool reset)
+{
+ u32 hif1_ofs = 0;
+
+ if (dev->hif2)
+ hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
+
+ /* reset dma idx */
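+ /* (writing all-ones presumably clears the tx DMA index for every ring) */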
+ mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
+ if (dev->hif2)
+ mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR + hif1_ofs, ~0);
+
+ /* disable delayed interrupts */
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1, 0);
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2, 0);
+
+ if (dev->hif2) {
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0 + hif1_ofs, 0);
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1 + hif1_ofs, 0);
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2 + hif1_ofs, 0);
+ }
+
+ /* configure prefetch settings */
+ mt7996_dma_prefetch(dev);
+
+ /* wait for the host interface's WFDMA to go idle */
+ mt76_set(dev, MT_WFDMA0_BUSY_ENA,
+ MT_WFDMA0_BUSY_ENA_TX_FIFO0 |
+ MT_WFDMA0_BUSY_ENA_TX_FIFO1 |
+ MT_WFDMA0_BUSY_ENA_RX_FIFO);
+
+ if (dev->hif2)
+ mt76_set(dev, MT_WFDMA0_BUSY_ENA + hif1_ofs,
+ MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 |
+ MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 |
+ MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO);
+
+ mt76_poll(dev, MT_WFDMA_EXT_CSR_HIF_MISC,
+ MT_WFDMA_EXT_CSR_HIF_MISC_BUSY, 0, 1000);
+
+ /* GLO_CFG_EXT0 */
+ mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT0,
+ WF_WFDMA0_GLO_CFG_EXT0_RX_WB_RXD |
+ WF_WFDMA0_GLO_CFG_EXT0_WED_MERGE_MODE);
+
+ /* GLO_CFG_EXT1 */
+ mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1,
+ WF_WFDMA0_GLO_CFG_EXT1_TX_FCTRL_MODE);
+
+ if (dev->hif2) {
+ /* GLO_CFG_EXT0 */
+ mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT0 + hif1_ofs,
+ WF_WFDMA0_GLO_CFG_EXT0_RX_WB_RXD |
+ WF_WFDMA0_GLO_CFG_EXT0_WED_MERGE_MODE);
+
+ /* GLO_CFG_EXT1 */
+ mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1 + hif1_ofs,
+ WF_WFDMA0_GLO_CFG_EXT1_TX_FCTRL_MODE);
+
+ mt76_set(dev, MT_WFDMA_HOST_CONFIG,
+ MT_WFDMA_HOST_CONFIG_PDMA_BAND);
+ }
+
+ if (dev->hif2) {
+ /* fix hardware limitation: pcie1's rx ring3 is not available,
+ * so redirect pcie0's rx ring3 interrupt to pcie1
+ */
+ mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL,
+ MT_WFDMA0_RX_INT_SEL_RING3);
+
+ /* TODO: redirect rx ring6 interrupt to pcie0 for wed function */
+ }
+
+ mt7996_dma_start(dev, reset);
+}
+
+int mt7996_dma_init(struct mt7996_dev *dev)
+{
+ u32 hif1_ofs = 0;
+ int ret;
+
+ mt7996_dma_config(dev);
+
+ mt76_dma_attach(&dev->mt76);
+
+ if (dev->hif2)
+ hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
+
+ mt7996_dma_disable(dev, true);
+
+ /* init tx queue */
+ ret = mt76_connac_init_tx_queues(dev->phy.mt76,
+ MT_TXQ_ID(dev->mphy.band_idx),
+ MT7996_TX_RING_SIZE,
+ MT_TXQ_RING_BASE(0), 0);
+ if (ret)
+ return ret;
+
+ /* command to WM */
+ ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM,
+ MT_MCUQ_ID(MT_MCUQ_WM),
+ MT7996_TX_MCU_RING_SIZE,
+ MT_MCUQ_RING_BASE(MT_MCUQ_WM));
+ if (ret)
+ return ret;
+
+ /* command to WA */
+ ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA,
+ MT_MCUQ_ID(MT_MCUQ_WA),
+ MT7996_TX_MCU_RING_SIZE,
+ MT_MCUQ_RING_BASE(MT_MCUQ_WA));
+ if (ret)
+ return ret;
+
+ /* firmware download */
+ ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL,
+ MT_MCUQ_ID(MT_MCUQ_FWDL),
+ MT7996_TX_FWDL_RING_SIZE,
+ MT_MCUQ_RING_BASE(MT_MCUQ_FWDL));
+ if (ret)
+ return ret;
+
+ /* event from WM */
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
+ MT_RXQ_ID(MT_RXQ_MCU),
+ MT7996_RX_MCU_RING_SIZE,
+ MT_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_MCU));
+ if (ret)
+ return ret;
+
+ /* event from WA */
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
+ MT_RXQ_ID(MT_RXQ_MCU_WA),
+ MT7996_RX_MCU_RING_SIZE_WA,
+ MT_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_MCU_WA));
+ if (ret)
+ return ret;
+
+ /* rx data queue for band0 and band1 */
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
+ MT_RXQ_ID(MT_RXQ_MAIN),
+ MT7996_RX_RING_SIZE,
+ MT_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_MAIN));
+ if (ret)
+ return ret;
+
+ /* tx free notify event from WA for band0 */
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN_WA],
+ MT_RXQ_ID(MT_RXQ_MAIN_WA),
+ MT7996_RX_MCU_RING_SIZE,
+ MT_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_MAIN_WA));
+ if (ret)
+ return ret;
+
+ if (dev->tbtc_support || dev->mphy.band_idx == MT_BAND2) {
+ /* rx data queue for band2 */
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND2],
+ MT_RXQ_ID(MT_RXQ_BAND2),
+ MT7996_RX_RING_SIZE,
+ MT_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_BAND2) + hif1_ofs);
+ if (ret)
+ return ret;
+
+ /* tx free notify event from WA for band2:
+ * it uses pcie0's rx ring3, but the ring3 interrupt is redirected to pcie1
+ */
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND2_WA],
+ MT_RXQ_ID(MT_RXQ_BAND2_WA),
+ MT7996_RX_MCU_RING_SIZE,
+ MT_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_BAND2_WA));
+ if (ret)
+ return ret;
+ }
+
+ ret = mt76_init_queues(dev, mt76_dma_rx_poll);
+ if (ret < 0)
+ return ret;
+
+ netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
+ mt7996_poll_tx);
+ napi_enable(&dev->mt76.tx_napi);
+
+ mt7996_dma_enable(dev, false);
+
+ return 0;
+}
+
+void mt7996_dma_reset(struct mt7996_dev *dev, bool force)
+{
+ struct mt76_phy *phy2 = dev->mt76.phys[MT_BAND1];
+ struct mt76_phy *phy3 = dev->mt76.phys[MT_BAND2];
+ u32 hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
+ int i;
+
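+ /* stop WFDMA, drain and reset every tx/mcu/rx ring (optionally resetting
+ * the whole wfsys as well) and then restart DMA from a clean state
+ */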
+ mt76_clear(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+
+ if (dev->hif2)
+ mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+
+ usleep_range(1000, 2000);
+
+ for (i = 0; i < __MT_TXQ_MAX; i++) {
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
+ if (phy2)
+ mt76_queue_tx_cleanup(dev, phy2->q_tx[i], true);
+ if (phy3)
+ mt76_queue_tx_cleanup(dev, phy3->q_tx[i], true);
+ }
+
+ for (i = 0; i < __MT_MCUQ_MAX; i++)
+ mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);
+
+ mt76_for_each_q_rx(&dev->mt76, i)
+ mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]);
+
+ mt76_tx_status_check(&dev->mt76, true);
+
+ /* reset wfsys */
+ if (force)
+ mt7996_wfsys_reset(dev);
+
+ mt7996_dma_disable(dev, force);
+
+ /* reset hw queues */
+ for (i = 0; i < __MT_TXQ_MAX; i++) {
+ mt76_queue_reset(dev, dev->mphy.q_tx[i]);
+ if (phy2)
+ mt76_queue_reset(dev, phy2->q_tx[i]);
+ if (phy3)
+ mt76_queue_reset(dev, phy3->q_tx[i]);
+ }
+
+ for (i = 0; i < __MT_MCUQ_MAX; i++)
+ mt76_queue_reset(dev, dev->mt76.q_mcu[i]);
+
+ mt76_for_each_q_rx(&dev->mt76, i)
+ mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
+
+ mt76_tx_status_check(&dev->mt76, true);
+
+ mt76_for_each_q_rx(&dev->mt76, i)
+ mt76_queue_rx_reset(dev, i);
+
+ mt7996_dma_enable(dev, !force);
+}
+
+void mt7996_dma_cleanup(struct mt7996_dev *dev)
+{
+ mt7996_dma_disable(dev, true);
+
+ mt76_dma_cleanup(&dev->mt76);
+}