Diffstat (limited to 'drivers/net/ethernet/mediatek/mtk_wed.c')
-rw-r--r-- | drivers/net/ethernet/mediatek/mtk_wed.c | 1432
1 file changed, 1174 insertions(+), 258 deletions(-)
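Editor's note: the core of this patch is moving chip-specific register offsets and descriptor sizes out of open-coded hw->version checks and into per-SoC tables (mt7622_data, mt7986_data, mt7988_data in the diff below), selected once in mtk_wed_add_hw(). The sketch below condenses that pattern; the struct layout is abbreviated (the full definition presumably lives in the companion mtk_wed.h change, which this file-limited diff does not show) and the selector helper is hypothetical, since the patch open-codes the same switch directly in mtk_wed_add_hw().

/* Condensed sketch of the per-SoC parameterization introduced here.
 * Field names mirror the tables in the diff below; mtk_wed_select_soc()
 * is a hypothetical helper for illustration only.
 */
struct mtk_wed_soc_data {
	struct {
		u32 tx_bm_tkid;
		u32 wpdma_rx_ring0;
		u32 reset_idx_tx_mask;
		u32 reset_idx_rx_mask;
	} regmap;
	u32 tx_ring_desc_size;
	u32 wdma_desc_size;
};

static const struct mtk_wed_soc_data *
mtk_wed_select_soc(int version)
{
	switch (version) {
	case 2:
		return &mt7986_data;	/* v2 WED (MT7986) */
	case 3:
		return &mt7988_data;	/* v3 WED (MT7988) */
	default:
	case 1:
		return &mt7622_data;	/* v1 WED (MT7622) */
	}
}

Callers then read, for example, dev->hw->soc->regmap.tx_bm_tkid or dev->hw->soc->wdma_desc_size instead of branching on the version at every register access.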
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c index 94376aa2b3..9a6744c0d4 100644 --- a/drivers/net/ethernet/mediatek/mtk_wed.c +++ b/drivers/net/ethernet/mediatek/mtk_wed.c @@ -17,17 +17,21 @@ #include <net/flow_offload.h> #include <net/pkt_cls.h> #include "mtk_eth_soc.h" -#include "mtk_wed_regs.h" #include "mtk_wed.h" #include "mtk_ppe.h" #include "mtk_wed_wo.h" #define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000) -#define MTK_WED_PKT_SIZE 1900 +#define MTK_WED_PKT_SIZE 1920 #define MTK_WED_BUF_SIZE 2048 +#define MTK_WED_PAGE_BUF_SIZE 128 #define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048) +#define MTK_WED_RX_BUF_PER_PAGE (PAGE_SIZE / MTK_WED_PAGE_BUF_SIZE) #define MTK_WED_RX_RING_SIZE 1536 +#define MTK_WED_RX_PG_BM_CNT 8192 +#define MTK_WED_AMSDU_BUF_SIZE (PAGE_SIZE << 4) +#define MTK_WED_AMSDU_NPAGES 32 #define MTK_WED_TX_RING_SIZE 2048 #define MTK_WED_WDMA_RING_SIZE 1024 @@ -41,7 +45,10 @@ #define MTK_WED_RRO_QUE_CNT 8192 #define MTK_WED_MIOD_ENTRY_CNT 128 -static struct mtk_wed_hw *hw_list[2]; +#define MTK_WED_TX_BM_DMA_SIZE 65536 +#define MTK_WED_TX_BM_PKT_CNT 32768 + +static struct mtk_wed_hw *hw_list[3]; static DEFINE_MUTEX(hw_lock); struct mtk_wed_flow_block_priv { @@ -49,6 +56,39 @@ struct mtk_wed_flow_block_priv { struct net_device *dev; }; +static const struct mtk_wed_soc_data mt7622_data = { + .regmap = { + .tx_bm_tkid = 0x088, + .wpdma_rx_ring0 = 0x770, + .reset_idx_tx_mask = GENMASK(3, 0), + .reset_idx_rx_mask = GENMASK(17, 16), + }, + .tx_ring_desc_size = sizeof(struct mtk_wdma_desc), + .wdma_desc_size = sizeof(struct mtk_wdma_desc), +}; + +static const struct mtk_wed_soc_data mt7986_data = { + .regmap = { + .tx_bm_tkid = 0x0c8, + .wpdma_rx_ring0 = 0x770, + .reset_idx_tx_mask = GENMASK(1, 0), + .reset_idx_rx_mask = GENMASK(7, 6), + }, + .tx_ring_desc_size = sizeof(struct mtk_wdma_desc), + .wdma_desc_size = 2 * sizeof(struct mtk_wdma_desc), +}; + +static const struct mtk_wed_soc_data mt7988_data = { + .regmap = { + .tx_bm_tkid = 0x0c8, + .wpdma_rx_ring0 = 0x7d0, + .reset_idx_tx_mask = GENMASK(1, 0), + .reset_idx_rx_mask = GENMASK(7, 6), + }, + .tx_ring_desc_size = sizeof(struct mtk_wed_bm_desc), + .wdma_desc_size = 2 * sizeof(struct mtk_wdma_desc), +}; + static void wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val) { @@ -109,6 +149,90 @@ mtk_wdma_read_reset(struct mtk_wed_device *dev) return wdma_r32(dev, MTK_WDMA_GLO_CFG); } +static void +mtk_wdma_v3_rx_reset(struct mtk_wed_device *dev) +{ + u32 status; + + if (!mtk_wed_is_v3_or_greater(dev->hw)) + return; + + wdma_clr(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN); + wdma_clr(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN); + + if (read_poll_timeout(wdma_r32, status, + !(status & MTK_WDMA_PREF_TX_CFG_PREF_BUSY), + 0, 10000, false, dev, MTK_WDMA_PREF_TX_CFG)) + dev_err(dev->hw->dev, "rx reset failed\n"); + + if (read_poll_timeout(wdma_r32, status, + !(status & MTK_WDMA_PREF_RX_CFG_PREF_BUSY), + 0, 10000, false, dev, MTK_WDMA_PREF_RX_CFG)) + dev_err(dev->hw->dev, "rx reset failed\n"); + + wdma_clr(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN); + wdma_clr(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN); + + if (read_poll_timeout(wdma_r32, status, + !(status & MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY), + 0, 10000, false, dev, MTK_WDMA_WRBK_TX_CFG)) + dev_err(dev->hw->dev, "rx reset failed\n"); + + if (read_poll_timeout(wdma_r32, status, + !(status & MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY), + 0, 10000, false, dev, MTK_WDMA_WRBK_RX_CFG)) + 
dev_err(dev->hw->dev, "rx reset failed\n"); + + /* prefetch FIFO */ + wdma_w32(dev, MTK_WDMA_PREF_RX_FIFO_CFG, + MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR | + MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR); + wdma_clr(dev, MTK_WDMA_PREF_RX_FIFO_CFG, + MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR | + MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR); + + /* core FIFO */ + wdma_w32(dev, MTK_WDMA_XDMA_RX_FIFO_CFG, + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR | + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR | + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR | + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR | + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR | + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR | + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR); + wdma_clr(dev, MTK_WDMA_XDMA_RX_FIFO_CFG, + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR | + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR | + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR | + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR | + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR | + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR | + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR); + + /* writeback FIFO */ + wdma_w32(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(0), + MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR); + wdma_w32(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(1), + MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR); + + wdma_clr(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(0), + MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR); + wdma_clr(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(1), + MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR); + + /* prefetch ring status */ + wdma_w32(dev, MTK_WDMA_PREF_SIDX_CFG, + MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR); + wdma_clr(dev, MTK_WDMA_PREF_SIDX_CFG, + MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR); + + /* writeback ring status */ + wdma_w32(dev, MTK_WDMA_WRBK_SIDX_CFG, + MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR); + wdma_clr(dev, MTK_WDMA_WRBK_SIDX_CFG, + MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR); +} + static int mtk_wdma_rx_reset(struct mtk_wed_device *dev) { @@ -121,6 +245,7 @@ mtk_wdma_rx_reset(struct mtk_wed_device *dev) if (ret) dev_err(dev->hw->dev, "rx reset failed\n"); + mtk_wdma_v3_rx_reset(dev); wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX); wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); @@ -135,6 +260,101 @@ mtk_wdma_rx_reset(struct mtk_wed_device *dev) return ret; } +static u32 +mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask) +{ + return !!(wed_r32(dev, reg) & mask); +} + +static int +mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask) +{ + int sleep = 15000; + int timeout = 100 * sleep; + u32 val; + + return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep, + timeout, false, dev, reg, mask); +} + +static void +mtk_wdma_v3_tx_reset(struct mtk_wed_device *dev) +{ + u32 status; + + if (!mtk_wed_is_v3_or_greater(dev->hw)) + return; + + wdma_clr(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN); + wdma_clr(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN); + + if (read_poll_timeout(wdma_r32, status, + !(status & MTK_WDMA_PREF_TX_CFG_PREF_BUSY), + 0, 10000, false, dev, MTK_WDMA_PREF_TX_CFG)) + dev_err(dev->hw->dev, "tx reset failed\n"); + + if (read_poll_timeout(wdma_r32, status, + !(status & MTK_WDMA_PREF_RX_CFG_PREF_BUSY), + 0, 10000, false, dev, MTK_WDMA_PREF_RX_CFG)) + dev_err(dev->hw->dev, "tx reset failed\n"); + + wdma_clr(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN); + wdma_clr(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN); + + if (read_poll_timeout(wdma_r32, status, + !(status & MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY), + 0, 10000, false, dev, MTK_WDMA_WRBK_TX_CFG)) + 
dev_err(dev->hw->dev, "tx reset failed\n"); + + if (read_poll_timeout(wdma_r32, status, + !(status & MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY), + 0, 10000, false, dev, MTK_WDMA_WRBK_RX_CFG)) + dev_err(dev->hw->dev, "tx reset failed\n"); + + /* prefetch FIFO */ + wdma_w32(dev, MTK_WDMA_PREF_TX_FIFO_CFG, + MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR | + MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR); + wdma_clr(dev, MTK_WDMA_PREF_TX_FIFO_CFG, + MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR | + MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR); + + /* core FIFO */ + wdma_w32(dev, MTK_WDMA_XDMA_TX_FIFO_CFG, + MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR | + MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR | + MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR | + MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR); + wdma_clr(dev, MTK_WDMA_XDMA_TX_FIFO_CFG, + MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR | + MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR | + MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR | + MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR); + + /* writeback FIFO */ + wdma_w32(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(0), + MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR); + wdma_w32(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(1), + MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR); + + wdma_clr(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(0), + MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR); + wdma_clr(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(1), + MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR); + + /* prefetch ring status */ + wdma_w32(dev, MTK_WDMA_PREF_SIDX_CFG, + MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR); + wdma_clr(dev, MTK_WDMA_PREF_SIDX_CFG, + MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR); + + /* writeback ring status */ + wdma_w32(dev, MTK_WDMA_WRBK_SIDX_CFG, + MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR); + wdma_clr(dev, MTK_WDMA_WRBK_SIDX_CFG, + MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR); +} + static void mtk_wdma_tx_reset(struct mtk_wed_device *dev) { @@ -146,6 +366,7 @@ mtk_wdma_tx_reset(struct mtk_wed_device *dev) !(status & mask), 0, 10000)) dev_err(dev->hw->dev, "tx reset failed\n"); + mtk_wdma_v3_tx_reset(dev); wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX); wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); @@ -278,7 +499,7 @@ mtk_wed_assign(struct mtk_wed_device *dev) if (!hw->wed_dev) goto out; - if (hw->version == 1) + if (mtk_wed_is_v1(hw)) return NULL; /* MT7986 WED devices do not have any pcie slot restrictions */ @@ -298,35 +519,152 @@ out: } static int +mtk_wed_amsdu_buffer_alloc(struct mtk_wed_device *dev) +{ + struct mtk_wed_hw *hw = dev->hw; + struct mtk_wed_amsdu *wed_amsdu; + int i; + + if (!mtk_wed_is_v3_or_greater(hw)) + return 0; + + wed_amsdu = devm_kcalloc(hw->dev, MTK_WED_AMSDU_NPAGES, + sizeof(*wed_amsdu), GFP_KERNEL); + if (!wed_amsdu) + return -ENOMEM; + + for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++) { + void *ptr; + + /* each segment is 64K */ + ptr = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | + __GFP_ZERO | __GFP_COMP | + GFP_DMA32, + get_order(MTK_WED_AMSDU_BUF_SIZE)); + if (!ptr) + goto error; + + wed_amsdu[i].txd = ptr; + wed_amsdu[i].txd_phy = dma_map_single(hw->dev, ptr, + MTK_WED_AMSDU_BUF_SIZE, + DMA_TO_DEVICE); + if (dma_mapping_error(hw->dev, wed_amsdu[i].txd_phy)) + goto error; + } + dev->hw->wed_amsdu = wed_amsdu; + + return 0; + +error: + for (i--; i >= 0; i--) + dma_unmap_single(hw->dev, wed_amsdu[i].txd_phy, + MTK_WED_AMSDU_BUF_SIZE, DMA_TO_DEVICE); + return -ENOMEM; +} + +static void +mtk_wed_amsdu_free_buffer(struct mtk_wed_device *dev) +{ + struct mtk_wed_amsdu *wed_amsdu = dev->hw->wed_amsdu; + int i; + + if (!wed_amsdu) + return; + + for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++) { + 
dma_unmap_single(dev->hw->dev, wed_amsdu[i].txd_phy, + MTK_WED_AMSDU_BUF_SIZE, DMA_TO_DEVICE); + free_pages((unsigned long)wed_amsdu[i].txd, + get_order(MTK_WED_AMSDU_BUF_SIZE)); + } +} + +static int +mtk_wed_amsdu_init(struct mtk_wed_device *dev) +{ + struct mtk_wed_amsdu *wed_amsdu = dev->hw->wed_amsdu; + int i, ret; + + if (!wed_amsdu) + return 0; + + for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++) + wed_w32(dev, MTK_WED_AMSDU_HIFTXD_BASE_L(i), + wed_amsdu[i].txd_phy); + + /* init all sta parameter */ + wed_w32(dev, MTK_WED_AMSDU_STA_INFO_INIT, MTK_WED_AMSDU_STA_RMVL | + MTK_WED_AMSDU_STA_WTBL_HDRT_MODE | + FIELD_PREP(MTK_WED_AMSDU_STA_MAX_AMSDU_LEN, + dev->wlan.amsdu_max_len >> 8) | + FIELD_PREP(MTK_WED_AMSDU_STA_MAX_AMSDU_NUM, + dev->wlan.amsdu_max_subframes)); + + wed_w32(dev, MTK_WED_AMSDU_STA_INFO, MTK_WED_AMSDU_STA_INFO_DO_INIT); + + ret = mtk_wed_poll_busy(dev, MTK_WED_AMSDU_STA_INFO, + MTK_WED_AMSDU_STA_INFO_DO_INIT); + if (ret) { + dev_err(dev->hw->dev, "amsdu initialization failed\n"); + return ret; + } + + /* init partial amsdu offload txd src */ + wed_set(dev, MTK_WED_AMSDU_HIFTXD_CFG, + FIELD_PREP(MTK_WED_AMSDU_HIFTXD_SRC, dev->hw->index)); + + /* init qmem */ + wed_set(dev, MTK_WED_AMSDU_PSE, MTK_WED_AMSDU_PSE_RESET); + ret = mtk_wed_poll_busy(dev, MTK_WED_MON_AMSDU_QMEM_STS1, BIT(29)); + if (ret) { + pr_info("%s: amsdu qmem initialization failed\n", __func__); + return ret; + } + + /* eagle E1 PCIE1 tx ring 22 flow control issue */ + if (dev->wlan.id == 0x7991) + wed_clr(dev, MTK_WED_AMSDU_FIFO, MTK_WED_AMSDU_IS_PRIOR0_RING); + + wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN); + + return 0; +} + +static int mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev) { - struct mtk_wdma_desc *desc; - dma_addr_t desc_phys; - void **page_list; + u32 desc_size = dev->hw->soc->tx_ring_desc_size; + int i, page_idx = 0, n_pages, ring_size; int token = dev->wlan.token_start; - int ring_size; - int n_pages; - int i, page_idx; + struct mtk_wed_buf *page_list; + dma_addr_t desc_phys; + void *desc_ptr; - ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1); - n_pages = ring_size / MTK_WED_BUF_PER_PAGE; + if (!mtk_wed_is_v3_or_greater(dev->hw)) { + ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1); + dev->tx_buf_ring.size = ring_size; + } else { + dev->tx_buf_ring.size = MTK_WED_TX_BM_DMA_SIZE; + ring_size = MTK_WED_TX_BM_PKT_CNT; + } + n_pages = dev->tx_buf_ring.size / MTK_WED_BUF_PER_PAGE; page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL); if (!page_list) return -ENOMEM; - dev->tx_buf_ring.size = ring_size; dev->tx_buf_ring.pages = page_list; - desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc), - &desc_phys, GFP_KERNEL); - if (!desc) + desc_ptr = dma_alloc_coherent(dev->hw->dev, + dev->tx_buf_ring.size * desc_size, + &desc_phys, GFP_KERNEL); + if (!desc_ptr) return -ENOMEM; - dev->tx_buf_ring.desc = desc; + dev->tx_buf_ring.desc = desc_ptr; dev->tx_buf_ring.desc_phys = desc_phys; - for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) { + for (i = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) { dma_addr_t page_phys, buf_phys; struct page *page; void *buf; @@ -343,7 +681,8 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev) return -ENOMEM; } - page_list[page_idx++] = page; + page_list[page_idx].p = page; + page_list[page_idx++].phy_addr = page_phys; dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE, DMA_BIDIRECTIONAL); @@ -351,28 +690,31 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev) buf_phys = page_phys; for (s = 0; s < 
MTK_WED_BUF_PER_PAGE; s++) { - u32 txd_size; - u32 ctrl; - - txd_size = dev->wlan.init_buf(buf, buf_phys, token++); + struct mtk_wdma_desc *desc = desc_ptr; desc->buf0 = cpu_to_le32(buf_phys); - desc->buf1 = cpu_to_le32(buf_phys + txd_size); - - if (dev->hw->version == 1) - ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) | - FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1, - MTK_WED_BUF_SIZE - txd_size) | - MTK_WDMA_DESC_CTRL_LAST_SEG1; - else - ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) | - FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2, - MTK_WED_BUF_SIZE - txd_size) | - MTK_WDMA_DESC_CTRL_LAST_SEG0; - desc->ctrl = cpu_to_le32(ctrl); - desc->info = 0; - desc++; - + if (!mtk_wed_is_v3_or_greater(dev->hw)) { + u32 txd_size, ctrl; + + txd_size = dev->wlan.init_buf(buf, buf_phys, + token++); + desc->buf1 = cpu_to_le32(buf_phys + txd_size); + ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size); + if (mtk_wed_is_v1(dev->hw)) + ctrl |= MTK_WDMA_DESC_CTRL_LAST_SEG1 | + FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1, + MTK_WED_BUF_SIZE - txd_size); + else + ctrl |= MTK_WDMA_DESC_CTRL_LAST_SEG0 | + FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2, + MTK_WED_BUF_SIZE - txd_size); + desc->ctrl = cpu_to_le32(ctrl); + desc->info = 0; + } else { + desc->ctrl = cpu_to_le32(token << 16); + } + + desc_ptr += desc_size; buf += MTK_WED_BUF_SIZE; buf_phys += MTK_WED_BUF_SIZE; } @@ -387,42 +729,103 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev) static void mtk_wed_free_tx_buffer(struct mtk_wed_device *dev) { - struct mtk_wdma_desc *desc = dev->tx_buf_ring.desc; - void **page_list = dev->tx_buf_ring.pages; - int page_idx; - int i; + struct mtk_wed_buf *page_list = dev->tx_buf_ring.pages; + struct mtk_wed_hw *hw = dev->hw; + int i, page_idx = 0; if (!page_list) return; - if (!desc) + if (!dev->tx_buf_ring.desc) goto free_pagelist; - for (i = 0, page_idx = 0; i < dev->tx_buf_ring.size; - i += MTK_WED_BUF_PER_PAGE) { - void *page = page_list[page_idx++]; - dma_addr_t buf_addr; + for (i = 0; i < dev->tx_buf_ring.size; i += MTK_WED_BUF_PER_PAGE) { + dma_addr_t page_phy = page_list[page_idx].phy_addr; + void *page = page_list[page_idx++].p; if (!page) break; - buf_addr = le32_to_cpu(desc[i].buf0); - dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE, + dma_unmap_page(dev->hw->dev, page_phy, PAGE_SIZE, DMA_BIDIRECTIONAL); __free_page(page); } - dma_free_coherent(dev->hw->dev, dev->tx_buf_ring.size * sizeof(*desc), - desc, dev->tx_buf_ring.desc_phys); + dma_free_coherent(dev->hw->dev, + dev->tx_buf_ring.size * hw->soc->tx_ring_desc_size, + dev->tx_buf_ring.desc, + dev->tx_buf_ring.desc_phys); free_pagelist: kfree(page_list); } static int +mtk_wed_hwrro_buffer_alloc(struct mtk_wed_device *dev) +{ + int n_pages = MTK_WED_RX_PG_BM_CNT / MTK_WED_RX_BUF_PER_PAGE; + struct mtk_wed_buf *page_list; + struct mtk_wed_bm_desc *desc; + dma_addr_t desc_phys; + int i, page_idx = 0; + + if (!dev->wlan.hw_rro) + return 0; + + page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL); + if (!page_list) + return -ENOMEM; + + dev->hw_rro.size = dev->wlan.rx_nbuf & ~(MTK_WED_BUF_PER_PAGE - 1); + dev->hw_rro.pages = page_list; + desc = dma_alloc_coherent(dev->hw->dev, + dev->wlan.rx_nbuf * sizeof(*desc), + &desc_phys, GFP_KERNEL); + if (!desc) + return -ENOMEM; + + dev->hw_rro.desc = desc; + dev->hw_rro.desc_phys = desc_phys; + + for (i = 0; i < MTK_WED_RX_PG_BM_CNT; i += MTK_WED_RX_BUF_PER_PAGE) { + dma_addr_t page_phys, buf_phys; + struct page *page; + int s; + + page = __dev_alloc_page(GFP_KERNEL); + if (!page) + return -ENOMEM; + + page_phys = 
dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev->hw->dev, page_phys)) { + __free_page(page); + return -ENOMEM; + } + + page_list[page_idx].p = page; + page_list[page_idx++].phy_addr = page_phys; + dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE, + DMA_BIDIRECTIONAL); + + buf_phys = page_phys; + for (s = 0; s < MTK_WED_RX_BUF_PER_PAGE; s++) { + desc->buf0 = cpu_to_le32(buf_phys); + buf_phys += MTK_WED_PAGE_BUF_SIZE; + desc++; + } + + dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE, + DMA_BIDIRECTIONAL); + } + + return 0; +} + +static int mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev) { - struct mtk_rxbm_desc *desc; + struct mtk_wed_bm_desc *desc; dma_addr_t desc_phys; dev->rx_buf_ring.size = dev->wlan.rx_nbuf; @@ -436,13 +839,48 @@ mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev) dev->rx_buf_ring.desc_phys = desc_phys; dev->wlan.init_rx_buf(dev, dev->wlan.rx_npkt); - return 0; + return mtk_wed_hwrro_buffer_alloc(dev); +} + +static void +mtk_wed_hwrro_free_buffer(struct mtk_wed_device *dev) +{ + struct mtk_wed_buf *page_list = dev->hw_rro.pages; + struct mtk_wed_bm_desc *desc = dev->hw_rro.desc; + int i, page_idx = 0; + + if (!dev->wlan.hw_rro) + return; + + if (!page_list) + return; + + if (!desc) + goto free_pagelist; + + for (i = 0; i < MTK_WED_RX_PG_BM_CNT; i += MTK_WED_RX_BUF_PER_PAGE) { + dma_addr_t buf_addr = page_list[page_idx].phy_addr; + void *page = page_list[page_idx++].p; + + if (!page) + break; + + dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE, + DMA_BIDIRECTIONAL); + __free_page(page); + } + + dma_free_coherent(dev->hw->dev, dev->hw_rro.size * sizeof(*desc), + desc, dev->hw_rro.desc_phys); + +free_pagelist: + kfree(page_list); } static void mtk_wed_free_rx_buffer(struct mtk_wed_device *dev) { - struct mtk_rxbm_desc *desc = dev->rx_buf_ring.desc; + struct mtk_wed_bm_desc *desc = dev->rx_buf_ring.desc; if (!desc) return; @@ -450,6 +888,28 @@ mtk_wed_free_rx_buffer(struct mtk_wed_device *dev) dev->wlan.release_rx_buf(dev); dma_free_coherent(dev->hw->dev, dev->rx_buf_ring.size * sizeof(*desc), desc, dev->rx_buf_ring.desc_phys); + + mtk_wed_hwrro_free_buffer(dev); +} + +static void +mtk_wed_hwrro_init(struct mtk_wed_device *dev) +{ + if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hw_rro) + return; + + wed_set(dev, MTK_WED_RRO_PG_BM_RX_DMAM, + FIELD_PREP(MTK_WED_RRO_PG_BM_RX_SDL0, 128)); + + wed_w32(dev, MTK_WED_RRO_PG_BM_BASE, dev->hw_rro.desc_phys); + + wed_w32(dev, MTK_WED_RRO_PG_BM_INIT_PTR, + MTK_WED_RRO_PG_BM_INIT_SW_TAIL_IDX | + FIELD_PREP(MTK_WED_RRO_PG_BM_SW_TAIL_IDX, + MTK_WED_RX_PG_BM_CNT)); + + /* enable rx_page_bm to fetch dmad */ + wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_PG_BM_EN); } static void @@ -463,6 +923,8 @@ mtk_wed_rx_buffer_hw_init(struct mtk_wed_device *dev) wed_w32(dev, MTK_WED_RX_BM_DYN_ALLOC_TH, FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_H, 0xffff)); wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN); + + mtk_wed_hwrro_init(dev); } static void @@ -498,13 +960,23 @@ mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en) { u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK; - if (dev->hw->version == 1) + switch (dev->hw->version) { + case 1: mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR; - else + break; + case 2: mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH | MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH | MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT | MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR; + break; + case 3: + mask = MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT | + 
MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; + break; + default: + break; + } if (!dev->hw->num_flows) mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; @@ -516,6 +988,9 @@ mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en) static void mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable) { + if (!mtk_wed_is_v2(dev->hw)) + return; + if (enable) { wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR); wed_w32(dev, MTK_WED_TXP_DW1, @@ -527,22 +1002,15 @@ mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable) } } -#define MTK_WFMDA_RX_DMA_EN BIT(2) -static void -mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx) +static int +mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, + struct mtk_wed_ring *ring) { - u32 val; int i; - if (!(dev->rx_ring[idx].flags & MTK_WED_RING_CONFIGURED)) - return; /* queue is not configured by mt76 */ - for (i = 0; i < 3; i++) { - u32 cur_idx; + u32 cur_idx = readl(ring->wpdma + MTK_WED_RING_OFS_CPU_IDX); - cur_idx = wed_r32(dev, - MTK_WED_WPDMA_RING_RX_DATA(idx) + - MTK_WED_RING_OFS_CPU_IDX); if (cur_idx == MTK_WED_RX_RING_SIZE - 1) break; @@ -551,12 +1019,10 @@ mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx) if (i == 3) { dev_err(dev->hw->dev, "rx dma enable failed\n"); - return; + return -ETIMEDOUT; } - val = wifi_r32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base) | - MTK_WFMDA_RX_DMA_EN; - wifi_w32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base, val); + return 0; } static void @@ -577,7 +1043,7 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev) MTK_WDMA_GLO_CFG_RX_INFO1_PRERES | MTK_WDMA_GLO_CFG_RX_INFO2_PRERES); - if (dev->hw->version == 1) { + if (mtk_wed_is_v1(dev->hw)) { regmap_write(dev->hw->mirror, dev->hw->index * 4, 0); wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_INFO3_PRERES); @@ -590,6 +1056,14 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev) MTK_WED_WPDMA_RX_D_RX_DRV_EN); wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK); + + if (mtk_wed_is_v3_or_greater(dev->hw) && + mtk_wed_get_rx_capa(dev)) { + wdma_clr(dev, MTK_WDMA_PREF_TX_CFG, + MTK_WDMA_PREF_TX_CFG_PREF_EN); + wdma_clr(dev, MTK_WDMA_PREF_RX_CFG, + MTK_WDMA_PREF_RX_CFG_PREF_EN); + } } mtk_wed_set_512_support(dev, false); @@ -606,7 +1080,7 @@ mtk_wed_stop(struct mtk_wed_device *dev) wdma_w32(dev, MTK_WDMA_INT_GRP2, 0); wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0); - if (dev->hw->version == 1) + if (!mtk_wed_get_rx_capa(dev)) return; wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0); @@ -625,13 +1099,21 @@ mtk_wed_deinit(struct mtk_wed_device *dev) MTK_WED_CTRL_WED_TX_BM_EN | MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); - if (dev->hw->version == 1) + if (mtk_wed_is_v1(dev->hw)) return; wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN | MTK_WED_CTRL_WED_RX_BM_EN | MTK_WED_CTRL_RX_RRO_QM_EN); + + if (mtk_wed_is_v3_or_greater(dev->hw)) { + wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN); + wed_clr(dev, MTK_WED_RESET, MTK_WED_RESET_TX_AMSDU); + wed_clr(dev, MTK_WED_PCIE_INT_CTRL, + MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA | + MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER); + } } static void @@ -643,6 +1125,7 @@ __mtk_wed_detach(struct mtk_wed_device *dev) mtk_wdma_rx_reset(dev); mtk_wed_reset(dev, MTK_WED_RESET_WED); + mtk_wed_amsdu_free_buffer(dev); mtk_wed_free_tx_buffer(dev); mtk_wed_free_tx_rings(dev); @@ -681,21 +1164,37 @@ mtk_wed_detach(struct mtk_wed_device *dev) mutex_unlock(&hw_lock); } -#define PCIE_BASE_ADDR0 0x11280000 static void mtk_wed_bus_init(struct mtk_wed_device *dev) { switch (dev->wlan.bus_type) { case MTK_WED_BUS_PCIE: 
{ struct device_node *np = dev->hw->eth->dev->of_node; - struct regmap *regs; - regs = syscon_regmap_lookup_by_phandle(np, - "mediatek,wed-pcie"); - if (IS_ERR(regs)) - break; + if (mtk_wed_is_v2(dev->hw)) { + struct regmap *regs; + + regs = syscon_regmap_lookup_by_phandle(np, + "mediatek,wed-pcie"); + if (IS_ERR(regs)) + break; - regmap_update_bits(regs, 0, BIT(0), BIT(0)); + regmap_update_bits(regs, 0, BIT(0), BIT(0)); + } + + if (dev->wlan.msi) { + wed_w32(dev, MTK_WED_PCIE_CFG_INTM, + dev->hw->pcie_base | 0xc08); + wed_w32(dev, MTK_WED_PCIE_CFG_BASE, + dev->hw->pcie_base | 0xc04); + wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(8)); + } else { + wed_w32(dev, MTK_WED_PCIE_CFG_INTM, + dev->hw->pcie_base | 0x180); + wed_w32(dev, MTK_WED_PCIE_CFG_BASE, + dev->hw->pcie_base | 0x184); + wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24)); + } wed_w32(dev, MTK_WED_PCIE_INT_CTRL, FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2)); @@ -703,19 +1202,9 @@ mtk_wed_bus_init(struct mtk_wed_device *dev) /* pcie interrupt control: pola/source selection */ wed_set(dev, MTK_WED_PCIE_INT_CTRL, MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA | - FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1)); - wed_r32(dev, MTK_WED_PCIE_INT_CTRL); - - wed_w32(dev, MTK_WED_PCIE_CFG_INTM, PCIE_BASE_ADDR0 | 0x180); - wed_w32(dev, MTK_WED_PCIE_CFG_BASE, PCIE_BASE_ADDR0 | 0x184); - - /* pcie interrupt status trigger register */ - wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24)); - wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER); - - /* pola setting */ - wed_set(dev, MTK_WED_PCIE_INT_CTRL, - MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA); + MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER | + FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, + dev->hw->index)); break; } case MTK_WED_BUS_AXI: @@ -731,38 +1220,55 @@ mtk_wed_bus_init(struct mtk_wed_device *dev) static void mtk_wed_set_wpdma(struct mtk_wed_device *dev) { - if (dev->hw->version == 1) { - wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys); - } else { - mtk_wed_bus_init(dev); + int i; - wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int); - wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask); - wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx); - wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree); - wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo); - wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx); + if (mtk_wed_is_v1(dev->hw)) { + wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys); + return; } + + mtk_wed_bus_init(dev); + + wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int); + wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask); + wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx); + wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree); + + if (!mtk_wed_get_rx_capa(dev)) + return; + + wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo); + wed_w32(dev, dev->hw->soc->regmap.wpdma_rx_ring0, dev->wlan.wpdma_rx); + + if (!dev->wlan.hw_rro) + return; + + wed_w32(dev, MTK_WED_RRO_RX_D_CFG(0), dev->wlan.wpdma_rx_rro[0]); + wed_w32(dev, MTK_WED_RRO_RX_D_CFG(1), dev->wlan.wpdma_rx_rro[1]); + for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++) + wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING_CFG(i), + dev->wlan.wpdma_rx_pg + i * 0x10); } static void mtk_wed_hw_init_early(struct mtk_wed_device *dev) { - u32 mask, set; + u32 set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2); + u32 mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE; mtk_wed_deinit(dev); mtk_wed_reset(dev, MTK_WED_RESET_WED); mtk_wed_set_wpdma(dev); - mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE | - 
MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE | - MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE; - set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) | - MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP | - MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY; + if (!mtk_wed_is_v3_or_greater(dev->hw)) { + mask |= MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE | + MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE; + set |= MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP | + MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY; + } wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set); - if (dev->hw->version == 1) { + if (mtk_wed_is_v1(dev->hw)) { u32 offset = dev->hw->index ? 0x04000400 : 0; wdma_set(dev, MTK_WDMA_GLO_CFG, @@ -907,11 +1413,18 @@ mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev) } /* configure RX_ROUTE_QM */ - wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST); - wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT); - wed_set(dev, MTK_WED_RTQM_GLO_CFG, - FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index)); - wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST); + if (mtk_wed_is_v2(dev->hw)) { + wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST); + wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT); + wed_set(dev, MTK_WED_RTQM_GLO_CFG, + FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, + 0x3 + dev->hw->index)); + wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST); + } else { + wed_set(dev, MTK_WED_RTQM_ENQ_CFG0, + FIELD_PREP(MTK_WED_RTQM_ENQ_CFG_TXDMAD_FPORT, + 0x3 + dev->hw->index)); + } /* enable RX_ROUTE_QM */ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN); } @@ -924,34 +1437,30 @@ mtk_wed_hw_init(struct mtk_wed_device *dev) dev->init_done = true; mtk_wed_set_ext_int(dev, false); - wed_w32(dev, MTK_WED_TX_BM_CTRL, - MTK_WED_TX_BM_CTRL_PAUSE | - FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM, - dev->tx_buf_ring.size / 128) | - FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM, - MTK_WED_TX_RING_SIZE / 256)); wed_w32(dev, MTK_WED_TX_BM_BASE, dev->tx_buf_ring.desc_phys); - wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE); - if (dev->hw->version == 1) { - wed_w32(dev, MTK_WED_TX_BM_TKID, - FIELD_PREP(MTK_WED_TX_BM_TKID_START, - dev->wlan.token_start) | - FIELD_PREP(MTK_WED_TX_BM_TKID_END, - dev->wlan.token_start + - dev->wlan.nbuf - 1)); + if (mtk_wed_is_v1(dev->hw)) { + wed_w32(dev, MTK_WED_TX_BM_CTRL, + MTK_WED_TX_BM_CTRL_PAUSE | + FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM, + dev->tx_buf_ring.size / 128) | + FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM, + MTK_WED_TX_RING_SIZE / 256)); wed_w32(dev, MTK_WED_TX_BM_DYN_THR, FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) | MTK_WED_TX_BM_DYN_THR_HI); - } else { - wed_w32(dev, MTK_WED_TX_BM_TKID_V2, - FIELD_PREP(MTK_WED_TX_BM_TKID_START, - dev->wlan.token_start) | - FIELD_PREP(MTK_WED_TX_BM_TKID_END, - dev->wlan.token_start + - dev->wlan.nbuf - 1)); + } else if (mtk_wed_is_v2(dev->hw)) { + wed_w32(dev, MTK_WED_TX_BM_CTRL, + MTK_WED_TX_BM_CTRL_PAUSE | + FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM, + dev->tx_buf_ring.size / 128) | + FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM, + MTK_WED_TX_RING_SIZE / 256)); + wed_w32(dev, MTK_WED_TX_TKID_DYN_THR, + FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) | + MTK_WED_TX_TKID_DYN_THR_HI); wed_w32(dev, MTK_WED_TX_BM_DYN_THR, FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO_V2, 0) | MTK_WED_TX_BM_DYN_THR_HI_V2); @@ -961,31 +1470,71 @@ mtk_wed_hw_init(struct mtk_wed_device *dev) dev->tx_buf_ring.size / 128) | FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM, dev->tx_buf_ring.size / 128)); - wed_w32(dev, MTK_WED_TX_TKID_DYN_THR, - 
FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) | - MTK_WED_TX_TKID_DYN_THR_HI); } + wed_w32(dev, dev->hw->soc->regmap.tx_bm_tkid, + FIELD_PREP(MTK_WED_TX_BM_TKID_START, dev->wlan.token_start) | + FIELD_PREP(MTK_WED_TX_BM_TKID_END, + dev->wlan.token_start + dev->wlan.nbuf - 1)); + mtk_wed_reset(dev, MTK_WED_RESET_TX_BM); - if (dev->hw->version == 1) { + if (mtk_wed_is_v3_or_greater(dev->hw)) { + /* switch to new bm architecture */ + wed_clr(dev, MTK_WED_TX_BM_CTRL, + MTK_WED_TX_BM_CTRL_LEGACY_EN); + + wed_w32(dev, MTK_WED_TX_TKID_CTRL, + MTK_WED_TX_TKID_CTRL_PAUSE | + FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM_V3, + dev->wlan.nbuf / 128) | + FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM_V3, + dev->wlan.nbuf / 128)); + /* return SKBID + SDP back to bm */ + wed_set(dev, MTK_WED_TX_TKID_CTRL, + MTK_WED_TX_TKID_CTRL_FREE_FORMAT); + + wed_w32(dev, MTK_WED_TX_BM_INIT_PTR, + MTK_WED_TX_BM_PKT_CNT | + MTK_WED_TX_BM_INIT_SW_TAIL_IDX); + } + + if (mtk_wed_is_v1(dev->hw)) { wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_BM_EN | MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); - } else { - wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE); + } else if (mtk_wed_get_rx_capa(dev)) { /* rx hw init */ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, MTK_WED_WPDMA_RX_D_RST_CRX_IDX | MTK_WED_WPDMA_RX_D_RST_DRV_IDX); wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0); + /* reset prefetch index of ring */ + wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX, + MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR); + wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX, + MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR); + + wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX, + MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR); + wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX, + MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR); + + /* reset prefetch FIFO of ring */ + wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG, + MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R0_CLR | + MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R1_CLR); + wed_w32(dev, MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG, 0); + mtk_wed_rx_buffer_hw_init(dev); mtk_wed_rro_hw_init(dev); mtk_wed_route_qm_hw_init(dev); } wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE); + if (!mtk_wed_is_v1(dev->hw)) + wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE); } static void @@ -1008,23 +1557,6 @@ mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size, bool tx) } } -static u32 -mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask) -{ - return !!(wed_r32(dev, reg) & mask); -} - -static int -mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask) -{ - int sleep = 15000; - int timeout = 100 * sleep; - u32 val; - - return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep, - timeout, false, dev, reg, mask); -} - static int mtk_wed_rx_reset(struct mtk_wed_device *dev) { @@ -1038,13 +1570,33 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev) if (ret) return ret; + if (dev->wlan.hw_rro) { + wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_IND_CMD_EN); + mtk_wed_poll_busy(dev, MTK_WED_RRO_RX_HW_STS, + MTK_WED_RX_IND_CMD_BUSY); + mtk_wed_reset(dev, MTK_WED_RESET_RRO_RX_TO_PG); + } + wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RX_DRV_EN); ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RX_DRV_BUSY); + if (!ret && mtk_wed_is_v3_or_greater(dev->hw)) + ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_PREF_CFG, + MTK_WED_WPDMA_RX_D_PREF_BUSY); if (ret) { mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT); mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_D_DRV); } else { + if (mtk_wed_is_v3_or_greater(dev->hw)) { + /* 1.a. 
disable prefetch HW */ + wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_CFG, + MTK_WED_WPDMA_RX_D_PREF_EN); + mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_PREF_CFG, + MTK_WED_WPDMA_RX_D_PREF_BUSY); + wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, + MTK_WED_WPDMA_RX_D_RST_DRV_IDX_ALL); + } + wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, MTK_WED_WPDMA_RX_D_RST_CRX_IDX | MTK_WED_WPDMA_RX_D_RST_DRV_IDX); @@ -1072,23 +1624,52 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev) wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0); } + if (dev->wlan.hw_rro) { + /* disable rro msdu page drv */ + wed_clr(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, + MTK_WED_RRO_MSDU_PG_DRV_EN); + + /* disable rro data drv */ + wed_clr(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_RX_D_DRV_EN); + + /* rro msdu page drv reset */ + wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, + MTK_WED_RRO_MSDU_PG_DRV_CLR); + mtk_wed_poll_busy(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, + MTK_WED_RRO_MSDU_PG_DRV_CLR); + + /* rro data drv reset */ + wed_w32(dev, MTK_WED_RRO_RX_D_CFG(2), + MTK_WED_RRO_RX_D_DRV_CLR); + mtk_wed_poll_busy(dev, MTK_WED_RRO_RX_D_CFG(2), + MTK_WED_RRO_RX_D_DRV_CLR); + } + /* reset route qm */ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN); ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_BUSY); - if (ret) + if (ret) { mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM); - else - wed_set(dev, MTK_WED_RTQM_GLO_CFG, - MTK_WED_RTQM_Q_RST); + } else if (mtk_wed_is_v3_or_greater(dev->hw)) { + wed_set(dev, MTK_WED_RTQM_RST, BIT(0)); + wed_clr(dev, MTK_WED_RTQM_RST, BIT(0)); + mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM); + } else { + wed_set(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST); + } /* reset tx wdma */ mtk_wdma_tx_reset(dev); /* reset tx wdma drv */ wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_TX_DRV_EN); - mtk_wed_poll_busy(dev, MTK_WED_CTRL, - MTK_WED_CTRL_WDMA_INT_AGENT_BUSY); + if (mtk_wed_is_v3_or_greater(dev->hw)) + mtk_wed_poll_busy(dev, MTK_WED_WPDMA_STATUS, + MTK_WED_WPDMA_STATUS_TX_DRV); + else + mtk_wed_poll_busy(dev, MTK_WED_CTRL, + MTK_WED_CTRL_WDMA_INT_AGENT_BUSY); mtk_wed_reset(dev, MTK_WED_RESET_WDMA_TX_DRV); /* reset wed rx dma */ @@ -1098,13 +1679,8 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev) if (ret) { mtk_wed_reset(dev, MTK_WED_RESET_WED_RX_DMA); } else { - struct mtk_eth *eth = dev->hw->eth; - - if (mtk_is_netsys_v2_or_greater(eth)) - wed_set(dev, MTK_WED_RESET_IDX, - MTK_WED_RESET_IDX_RX_V2); - else - wed_set(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_RX); + wed_set(dev, MTK_WED_RESET_IDX, + dev->hw->soc->regmap.reset_idx_rx_mask); wed_w32(dev, MTK_WED_RESET_IDX, 0); } @@ -1114,6 +1690,14 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev) MTK_WED_CTRL_WED_RX_BM_BUSY); mtk_wed_reset(dev, MTK_WED_RESET_RX_BM); + if (dev->wlan.hw_rro) { + wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_PG_BM_EN); + mtk_wed_poll_busy(dev, MTK_WED_CTRL, + MTK_WED_CTRL_WED_RX_PG_BM_BUSY); + wed_set(dev, MTK_WED_RESET, MTK_WED_RESET_RX_PG_BM); + wed_clr(dev, MTK_WED_RESET, MTK_WED_RESET_RX_PG_BM); + } + /* wo change to enable state */ val = MTK_WED_WO_STATE_ENABLE; ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, @@ -1131,6 +1715,7 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev) false); } mtk_wed_free_rx_buffer(dev); + mtk_wed_hwrro_free_buffer(dev); return 0; } @@ -1157,21 +1742,48 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev) if (busy) { mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA); } else { - wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_TX); + wed_w32(dev, MTK_WED_RESET_IDX, + dev->hw->soc->regmap.reset_idx_tx_mask); 
wed_w32(dev, MTK_WED_RESET_IDX, 0); } /* 2. reset WDMA rx DMA */ busy = !!mtk_wdma_rx_reset(dev); - wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); + if (mtk_wed_is_v3_or_greater(dev->hw)) { + val = MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE | + wed_r32(dev, MTK_WED_WDMA_GLO_CFG); + val &= ~MTK_WED_WDMA_GLO_CFG_RX_DRV_EN; + wed_w32(dev, MTK_WED_WDMA_GLO_CFG, val); + } else { + wed_clr(dev, MTK_WED_WDMA_GLO_CFG, + MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); + } + if (!busy) busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY); + if (!busy && mtk_wed_is_v3_or_greater(dev->hw)) + busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_RX_PREF_CFG, + MTK_WED_WDMA_RX_PREF_BUSY); if (busy) { mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT); mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV); } else { + if (mtk_wed_is_v3_or_greater(dev->hw)) { + /* 1.a. disable prefetch HW */ + wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG, + MTK_WED_WDMA_RX_PREF_EN); + mtk_wed_poll_busy(dev, MTK_WED_WDMA_RX_PREF_CFG, + MTK_WED_WDMA_RX_PREF_BUSY); + wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG, + MTK_WED_WDMA_RX_PREF_DDONE2_EN); + + /* 2. Reset dma index */ + wed_w32(dev, MTK_WED_WDMA_RESET_IDX, + MTK_WED_WDMA_RESET_IDX_RX_ALL); + } + wed_w32(dev, MTK_WED_WDMA_RESET_IDX, MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV); wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0); @@ -1187,8 +1799,13 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev) wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); for (i = 0; i < 100; i++) { - val = wed_r32(dev, MTK_WED_TX_BM_INTF); - if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40) + if (mtk_wed_is_v1(dev->hw)) + val = FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, + wed_r32(dev, MTK_WED_TX_BM_INTF)); + else + val = FIELD_GET(MTK_WED_TX_TKID_INTF_TKFIFO_FDEP, + wed_r32(dev, MTK_WED_TX_TKID_INTF)); + if (val == 0x40) break; } @@ -1210,6 +1827,8 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev) mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT); mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV); mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV); + if (mtk_wed_is_v3_or_greater(dev->hw)) + wed_w32(dev, MTK_WED_RX1_CTRL2, 0); } else { wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, MTK_WED_WPDMA_RESET_IDX_TX | @@ -1218,7 +1837,7 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev) } dev->init_done = false; - if (dev->hw->version == 1) + if (mtk_wed_is_v1(dev->hw)) return; if (!busy) { @@ -1226,7 +1845,14 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev) wed_w32(dev, MTK_WED_RESET_IDX, 0); } - mtk_wed_rx_reset(dev); + if (mtk_wed_is_v3_or_greater(dev->hw)) { + /* reset amsdu engine */ + wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN); + mtk_wed_reset(dev, MTK_WED_RESET_TX_AMSDU); + } + + if (mtk_wed_get_rx_capa(dev)) + mtk_wed_rx_reset(dev); } static int @@ -1249,7 +1875,6 @@ static int mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size, bool reset) { - u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version; struct mtk_wed_ring *wdma; if (idx >= ARRAY_SIZE(dev->rx_wdma)) @@ -1257,7 +1882,7 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size, wdma = &dev->rx_wdma[idx]; if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, - desc_size, true)) + dev->hw->soc->wdma_desc_size, true)) return -ENOMEM; wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE, @@ -1278,7 +1903,6 @@ static int mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size, bool reset) { - u32 desc_size = sizeof(struct mtk_wdma_desc) * 
dev->hw->version; struct mtk_wed_ring *wdma; if (idx >= ARRAY_SIZE(dev->tx_wdma)) @@ -1286,9 +1910,27 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size, wdma = &dev->tx_wdma[idx]; if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, - desc_size, true)) + dev->hw->soc->wdma_desc_size, true)) return -ENOMEM; + if (mtk_wed_is_v3_or_greater(dev->hw)) { + struct mtk_wdma_desc *desc = wdma->desc; + int i; + + for (i = 0; i < MTK_WED_WDMA_RING_SIZE; i++) { + desc->buf0 = 0; + desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE); + desc->buf1 = 0; + desc->info = cpu_to_le32(MTK_WDMA_TXD0_DESC_INFO_DMA_DONE); + desc++; + desc->buf0 = 0; + desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE); + desc->buf1 = 0; + desc->info = cpu_to_le32(MTK_WDMA_TXD1_DESC_INFO_DMA_DONE); + desc++; + } + } + wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE, wdma->desc_phys); wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT, @@ -1344,7 +1986,7 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask) MTK_WED_CTRL_WED_TX_BM_EN | MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); - if (dev->hw->version == 1) { + if (mtk_wed_is_v1(dev->hw)) { wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, MTK_WED_PCIE_INT_TRIGGER_STATUS); @@ -1354,8 +1996,9 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask) wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask); } else { - wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE, - GENMASK(1, 0)); + if (mtk_wed_is_v3_or_greater(dev->hw)) + wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_TKID_ALI_EN); + /* initail tx interrupt trigger */ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX, MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN | @@ -1374,15 +2017,20 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask) FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG, dev->wlan.txfree_tbit)); - wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX, - MTK_WED_WPDMA_INT_CTRL_RX0_EN | - MTK_WED_WPDMA_INT_CTRL_RX0_CLR | - MTK_WED_WPDMA_INT_CTRL_RX1_EN | - MTK_WED_WPDMA_INT_CTRL_RX1_CLR | - FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG, - dev->wlan.rx_tbit[0]) | - FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG, - dev->wlan.rx_tbit[1])); + if (mtk_wed_get_rx_capa(dev)) { + wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX, + MTK_WED_WPDMA_INT_CTRL_RX0_EN | + MTK_WED_WPDMA_INT_CTRL_RX0_CLR | + MTK_WED_WPDMA_INT_CTRL_RX1_EN | + MTK_WED_WPDMA_INT_CTRL_RX1_CLR | + FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG, + dev->wlan.rx_tbit[0]) | + FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG, + dev->wlan.rx_tbit[1])); + + wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE, + GENMASK(1, 0)); + } wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask); wed_set(dev, MTK_WED_WDMA_INT_CTRL, @@ -1398,55 +2046,280 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask) wed_w32(dev, MTK_WED_INT_MASK, irq_mask); } +#define MTK_WFMDA_RX_DMA_EN BIT(2) static void mtk_wed_dma_enable(struct mtk_wed_device *dev) { - wed_set(dev, MTK_WED_WPDMA_INT_CTRL, MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV); + int i; + + if (!mtk_wed_is_v3_or_greater(dev->hw)) { + wed_set(dev, MTK_WED_WPDMA_INT_CTRL, + MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV); + wed_set(dev, MTK_WED_WPDMA_GLO_CFG, + MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | + MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN); + wdma_set(dev, MTK_WDMA_GLO_CFG, + MTK_WDMA_GLO_CFG_TX_DMA_EN | + MTK_WDMA_GLO_CFG_RX_INFO1_PRERES | + MTK_WDMA_GLO_CFG_RX_INFO2_PRERES); + wed_set(dev, MTK_WED_WPDMA_CTRL, MTK_WED_WPDMA_CTRL_SDL1_FIXED); + } else { + wed_set(dev, MTK_WED_WPDMA_GLO_CFG, + MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | 
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN | + MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR); + wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN); + } wed_set(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_TX_DMA_EN | MTK_WED_GLO_CFG_RX_DMA_EN); - wed_set(dev, MTK_WED_WPDMA_GLO_CFG, - MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | - MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN); + wed_set(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); - wdma_set(dev, MTK_WDMA_GLO_CFG, - MTK_WDMA_GLO_CFG_TX_DMA_EN | - MTK_WDMA_GLO_CFG_RX_INFO1_PRERES | - MTK_WDMA_GLO_CFG_RX_INFO2_PRERES); - - if (dev->hw->version == 1) { + if (mtk_wed_is_v1(dev->hw)) { wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_INFO3_PRERES); - } else { - int i; + return; + } - wed_set(dev, MTK_WED_WPDMA_CTRL, - MTK_WED_WPDMA_CTRL_SDL1_FIXED); + wed_set(dev, MTK_WED_WPDMA_GLO_CFG, + MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC | + MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC); - wed_set(dev, MTK_WED_WDMA_GLO_CFG, - MTK_WED_WDMA_GLO_CFG_TX_DRV_EN | - MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK); + if (mtk_wed_is_v3_or_greater(dev->hw)) { + wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG, + FIELD_PREP(MTK_WED_WDMA_RX_PREF_BURST_SIZE, 0x10) | + FIELD_PREP(MTK_WED_WDMA_RX_PREF_LOW_THRES, 0x8)); + wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG, + MTK_WED_WDMA_RX_PREF_DDONE2_EN); + wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG, MTK_WED_WDMA_RX_PREF_EN); + wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, + MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK_LAST); wed_set(dev, MTK_WED_WPDMA_GLO_CFG, - MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC | - MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC); + MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK | + MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_CHK | + MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNS_VER_FORCE_4); - wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, - MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP | - MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV); + wdma_set(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN); + wdma_set(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN); + } - wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, - MTK_WED_WPDMA_RX_D_RX_DRV_EN | - FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) | - FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL, - 0x2)); + wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, + MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP | + MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV); + + if (!mtk_wed_get_rx_capa(dev)) + return; + + wed_set(dev, MTK_WED_WDMA_GLO_CFG, + MTK_WED_WDMA_GLO_CFG_TX_DRV_EN | + MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK); + + wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RXD_READ_LEN); + wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, + MTK_WED_WPDMA_RX_D_RX_DRV_EN | + FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) | + FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL, 0x2)); + + if (mtk_wed_is_v3_or_greater(dev->hw)) { + wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_CFG, + MTK_WED_WPDMA_RX_D_PREF_EN | + FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_BURST_SIZE, 0x10) | + FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_LOW_THRES, 0x8)); + + wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_RX_D_DRV_EN); + wdma_set(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN); + wdma_set(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN); + } + + for (i = 0; i < MTK_WED_RX_QUEUES; i++) { + struct mtk_wed_ring *ring = &dev->rx_ring[i]; + u32 val; + + if (!(ring->flags & MTK_WED_RING_CONFIGURED)) + continue; /* queue is not configured by mt76 */ + + if (mtk_wed_check_wfdma_rx_fill(dev, ring)) { + dev_err(dev->hw->dev, + "rx_ring(%d) dma enable failed\n", i); + continue; + } + + val = wifi_r32(dev, + dev->wlan.wpdma_rx_glo - + dev->wlan.phy_base) | 
MTK_WFMDA_RX_DMA_EN; + wifi_w32(dev, + dev->wlan.wpdma_rx_glo - dev->wlan.phy_base, + val); + } +} + +static void +mtk_wed_start_hw_rro(struct mtk_wed_device *dev, u32 irq_mask, bool reset) +{ + int i; + + wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask); + wed_w32(dev, MTK_WED_INT_MASK, irq_mask); + + if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hw_rro) + return; + + if (reset) { + wed_set(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, + MTK_WED_RRO_MSDU_PG_DRV_EN); + return; + } + + wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_MSDU_PG_DRV_CLR); + wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, + MTK_WED_RRO_MSDU_PG_DRV_CLR); + + wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_RX, + MTK_WED_WPDMA_INT_CTRL_RRO_RX0_EN | + MTK_WED_WPDMA_INT_CTRL_RRO_RX0_CLR | + MTK_WED_WPDMA_INT_CTRL_RRO_RX1_EN | + MTK_WED_WPDMA_INT_CTRL_RRO_RX1_CLR | + FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX0_DONE_TRIG, + dev->wlan.rro_rx_tbit[0]) | + FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX1_DONE_TRIG, + dev->wlan.rro_rx_tbit[1])); + + wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_MSDU_PG, + MTK_WED_WPDMA_INT_CTRL_RRO_PG0_EN | + MTK_WED_WPDMA_INT_CTRL_RRO_PG0_CLR | + MTK_WED_WPDMA_INT_CTRL_RRO_PG1_EN | + MTK_WED_WPDMA_INT_CTRL_RRO_PG1_CLR | + MTK_WED_WPDMA_INT_CTRL_RRO_PG2_EN | + MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR | + FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG0_DONE_TRIG, + dev->wlan.rx_pg_tbit[0]) | + FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG1_DONE_TRIG, + dev->wlan.rx_pg_tbit[1]) | + FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG, + dev->wlan.rx_pg_tbit[2])); + + /* RRO_MSDU_PG_RING2_CFG1_FLD_DRV_EN should be enabled after + * WM FWDL completed, otherwise RRO_MSDU_PG ring may broken + */ + wed_set(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, + MTK_WED_RRO_MSDU_PG_DRV_EN); + + for (i = 0; i < MTK_WED_RX_QUEUES; i++) { + struct mtk_wed_ring *ring = &dev->rx_rro_ring[i]; + + if (!(ring->flags & MTK_WED_RING_CONFIGURED)) + continue; + + if (mtk_wed_check_wfdma_rx_fill(dev, ring)) + dev_err(dev->hw->dev, + "rx_rro_ring(%d) initialization failed\n", i); + } + + for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++) { + struct mtk_wed_ring *ring = &dev->rx_page_ring[i]; + + if (!(ring->flags & MTK_WED_RING_CONFIGURED)) + continue; + + if (mtk_wed_check_wfdma_rx_fill(dev, ring)) + dev_err(dev->hw->dev, + "rx_page_ring(%d) initialization failed\n", i); + } +} + +static void +mtk_wed_rro_rx_ring_setup(struct mtk_wed_device *dev, int idx, + void __iomem *regs) +{ + struct mtk_wed_ring *ring = &dev->rx_rro_ring[idx]; + + ring->wpdma = regs; + wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_BASE, + readl(regs)); + wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_COUNT, + readl(regs + MTK_WED_RING_OFS_COUNT)); + ring->flags |= MTK_WED_RING_CONFIGURED; +} + +static void +mtk_wed_msdu_pg_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs) +{ + struct mtk_wed_ring *ring = &dev->rx_page_ring[idx]; + + ring->wpdma = regs; + wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_BASE, + readl(regs)); + wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_COUNT, + readl(regs + MTK_WED_RING_OFS_COUNT)); + ring->flags |= MTK_WED_RING_CONFIGURED; +} + +static int +mtk_wed_ind_rx_ring_setup(struct mtk_wed_device *dev, void __iomem *regs) +{ + struct mtk_wed_ring *ring = &dev->ind_cmd_ring; + u32 val = readl(regs + MTK_WED_RING_OFS_COUNT); + int i, count = 0; - for (i = 0; i < MTK_WED_RX_QUEUES; i++) - mtk_wed_check_wfdma_rx_fill(dev, i); + ring->wpdma = regs; + wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_BASE, + 
readl(regs) & 0xfffffff0); + + wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_COUNT, + readl(regs + MTK_WED_RING_OFS_COUNT)); + + /* ack sn cr */ + wed_w32(dev, MTK_WED_RRO_CFG0, dev->wlan.phy_base + + dev->wlan.ind_cmd.ack_sn_addr); + wed_w32(dev, MTK_WED_RRO_CFG1, + FIELD_PREP(MTK_WED_RRO_CFG1_MAX_WIN_SZ, + dev->wlan.ind_cmd.win_size) | + FIELD_PREP(MTK_WED_RRO_CFG1_PARTICL_SE_ID, + dev->wlan.ind_cmd.particular_sid)); + + /* particular session addr element */ + wed_w32(dev, MTK_WED_ADDR_ELEM_CFG0, + dev->wlan.ind_cmd.particular_se_phys); + + for (i = 0; i < dev->wlan.ind_cmd.se_group_nums; i++) { + wed_w32(dev, MTK_WED_RADDR_ELEM_TBL_WDATA, + dev->wlan.ind_cmd.addr_elem_phys[i] >> 4); + wed_w32(dev, MTK_WED_ADDR_ELEM_TBL_CFG, + MTK_WED_ADDR_ELEM_TBL_WR | (i & 0x7f)); + + val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG); + while (!(val & MTK_WED_ADDR_ELEM_TBL_WR_RDY) && count++ < 100) + val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG); + if (count >= 100) + dev_err(dev->hw->dev, + "write ba session base failed\n"); + } + + /* pn check init */ + for (i = 0; i < dev->wlan.ind_cmd.particular_sid; i++) { + wed_w32(dev, MTK_WED_PN_CHECK_WDATA_M, + MTK_WED_PN_CHECK_IS_FIRST); + + wed_w32(dev, MTK_WED_PN_CHECK_CFG, MTK_WED_PN_CHECK_WR | + FIELD_PREP(MTK_WED_PN_CHECK_SE_ID, i)); + + count = 0; + val = wed_r32(dev, MTK_WED_PN_CHECK_CFG); + while (!(val & MTK_WED_PN_CHECK_WR_RDY) && count++ < 100) + val = wed_r32(dev, MTK_WED_PN_CHECK_CFG); + if (count >= 100) + dev_err(dev->hw->dev, + "session(%d) initialization failed\n", i); } + + wed_w32(dev, MTK_WED_RX_IND_CMD_CNT0, MTK_WED_RX_IND_CMD_DBG_CNT_EN); + wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_IND_CMD_EN); + + return 0; } static void @@ -1466,14 +2339,14 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask) mtk_wed_set_ext_int(dev, true); - if (dev->hw->version == 1) { + if (mtk_wed_is_v1(dev->hw)) { u32 val = dev->wlan.wpdma_phys | MTK_PCIE_MIRROR_MAP_EN | FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID, dev->hw->index); val |= BIT(0) | (BIT(1) * !!dev->hw->index); regmap_write(dev->hw->mirror, dev->hw->index * 4, val); - } else { + } else if (mtk_wed_get_rx_capa(dev)) { /* driver set mid ready and only once */ wed_w32(dev, MTK_WED_EXT_INT_MASK1, MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY); @@ -1483,12 +2356,18 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask) wed_r32(dev, MTK_WED_EXT_INT_MASK1); wed_r32(dev, MTK_WED_EXT_INT_MASK2); + if (mtk_wed_is_v3_or_greater(dev->hw)) { + wed_w32(dev, MTK_WED_EXT_INT_MASK3, + MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY); + wed_r32(dev, MTK_WED_EXT_INT_MASK3); + } + if (mtk_wed_rro_cfg(dev)) return; - } mtk_wed_set_512_support(dev, dev->wlan.wcid_512); + mtk_wed_amsdu_init(dev); mtk_wed_dma_enable(dev); dev->running = true; @@ -1535,6 +2414,7 @@ mtk_wed_attach(struct mtk_wed_device *dev) dev->irq = hw->irq; dev->wdma_idx = hw->index; dev->version = hw->version; + dev->hw->pcie_base = mtk_wed_get_pcie_base(dev); if (hw->eth->dma_dev == hw->eth->dev && of_dma_is_coherent(hw->eth->dev->of_node)) @@ -1544,6 +2424,10 @@ mtk_wed_attach(struct mtk_wed_device *dev) if (ret) goto out; + ret = mtk_wed_amsdu_buffer_alloc(dev); + if (ret) + goto out; + if (mtk_wed_get_rx_capa(dev)) { ret = mtk_wed_rro_alloc(dev); if (ret) @@ -1551,13 +2435,14 @@ mtk_wed_attach(struct mtk_wed_device *dev) } mtk_wed_hw_init_early(dev); - if (hw->version == 1) { + if (mtk_wed_is_v1(hw)) regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, BIT(hw->index), 0); - } else { + else dev->rev_id = wed_r32(dev, MTK_WED_REV_ID); + + if 
(mtk_wed_get_rx_capa(dev)) ret = mtk_wed_wo_init(hw); - } out: if (ret) { dev_err(dev->hw->dev, "failed to attach wed device\n"); @@ -1601,6 +2486,23 @@ mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs, ring->reg_base = MTK_WED_RING_TX(idx); ring->wpdma = regs; + if (mtk_wed_is_v3_or_greater(dev->hw) && idx == 1) { + /* reset prefetch index */ + wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG, + MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR | + MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR); + + wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG, + MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR | + MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR); + + /* reset prefetch FIFO */ + wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG, + MTK_WED_WDMA_RX_PREF_FIFO_RX0_CLR | + MTK_WED_WDMA_RX_PREF_FIFO_RX1_CLR); + wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG, 0); + } + /* WED -> WPDMA */ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys); wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE); @@ -1619,7 +2521,7 @@ static int mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs) { struct mtk_wed_ring *ring = &dev->txfree_ring; - int i, index = dev->hw->version == 1; + int i, index = mtk_wed_is_v1(dev->hw); /* * For txfree event handling, the same DMA ring is shared between WED @@ -1675,15 +2577,13 @@ mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs, static u32 mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask) { - u32 val, ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK; + u32 val, ext_mask; - if (dev->hw->version == 1) - ext_mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR; + if (mtk_wed_is_v3_or_greater(dev->hw)) + ext_mask = MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT | + MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; else - ext_mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH | - MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH | - MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT | - MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR; + ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK; val = wed_r32(dev, MTK_WED_EXT_INT_STATUS); wed_w32(dev, MTK_WED_EXT_INT_STATUS, val); @@ -1713,19 +2613,20 @@ mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask) int mtk_wed_flow_add(int index) { struct mtk_wed_hw *hw = hw_list[index]; - int ret; + int ret = 0; - if (!hw || !hw->wed_dev) - return -ENODEV; + mutex_lock(&hw_lock); - if (hw->num_flows) { - hw->num_flows++; - return 0; + if (!hw || !hw->wed_dev) { + ret = -ENODEV; + goto out; } - mutex_lock(&hw_lock); - if (!hw->wed_dev) { - ret = -ENODEV; + if (!hw->wed_dev->wlan.offload_enable) + goto out; + + if (hw->num_flows) { + hw->num_flows++; goto out; } @@ -1744,14 +2645,15 @@ void mtk_wed_flow_remove(int index) { struct mtk_wed_hw *hw = hw_list[index]; - if (!hw) - return; + mutex_lock(&hw_lock); - if (--hw->num_flows) - return; + if (!hw || !hw->wed_dev) + goto out; - mutex_lock(&hw_lock); - if (!hw->wed_dev) + if (!hw->wed_dev->wlan.offload_disable) + goto out; + + if (--hw->num_flows) goto out; hw->wed_dev->wlan.offload_disable(hw->wed_dev); @@ -1842,7 +2744,7 @@ mtk_wed_setup_tc(struct mtk_wed_device *wed, struct net_device *dev, { struct mtk_wed_hw *hw = wed->hw; - if (hw->version < 2) + if (mtk_wed_is_v1(hw)) return -EOPNOTSUPP; switch (type) { @@ -1874,6 +2776,10 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, .detach = mtk_wed_detach, .ppe_check = mtk_wed_ppe_check, .setup_tc = mtk_wed_setup_tc, + .start_hw_rro = mtk_wed_start_hw_rro, + .rro_rx_ring_setup = mtk_wed_rro_rx_ring_setup, + .msdu_pg_rx_ring_setup = mtk_wed_msdu_pg_rx_ring_setup, + 
.ind_rx_ring_setup = mtk_wed_ind_rx_ring_setup, }; struct device_node *eth_np = eth->dev->of_node; struct platform_device *pdev; @@ -1916,9 +2822,17 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, hw->wdma = wdma; hw->index = index; hw->irq = irq; - hw->version = mtk_is_netsys_v1(eth) ? 1 : 2; + hw->version = eth->soc->version; - if (hw->version == 1) { + switch (hw->version) { + case 2: + hw->soc = &mt7986_data; + break; + case 3: + hw->soc = &mt7988_data; + break; + default: + case 1: hw->mirror = syscon_regmap_lookup_by_phandle(eth_np, "mediatek,pcie-mirror"); hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np, @@ -1932,6 +2846,8 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, regmap_write(hw->mirror, 0, 0); regmap_write(hw->mirror, 4, 0); } + hw->soc = &mt7622_data; + break; } mtk_wed_hw_add_debugfs(hw); |
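Editor's note: the busy-wait helpers added above (mtk_wed_poll_busy() and the mtk_wdma_v3_tx_reset()/mtk_wdma_v3_rx_reset() sequences) are all built on the kernel's read_poll_timeout() macro from <linux/iopoll.h>. A minimal sketch of that pattern, assuming the driver's wed_r32() 32-bit register read; poll_bit_clear() itself is a placeholder name, not a function from the patch.

#include <linux/iopoll.h>

/* Poll a status register until a busy bit clears, sleeping ~15 ms between
 * reads and giving up after 100 iterations, as mtk_wed_poll_busy() does.
 * Returns 0 on success or -ETIMEDOUT on timeout.
 */
static int poll_bit_clear(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
	int sleep = 15000;		/* microseconds between reads */
	int timeout = 100 * sleep;	/* total budget, in microseconds */
	u32 val;

	return read_poll_timeout(wed_r32, val, !(val & mask),
				 sleep, timeout, false, dev, reg);
}

The v3 reset helpers use the same macro with a zero sleep and a 10 ms budget, logging with dev_err() on timeout rather than aborting the reset sequence.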
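Editor's note: the new mtk_wed_hwrro_buffer_alloc() and the reworked mtk_wed_tx_buffer_alloc() above share the same buffer-pool shape: allocate a page, DMA-map it, record both the struct page pointer and its DMA address in a struct mtk_wed_buf, then carve the page into fixed-size slots whose bus addresses are written into the descriptor ring. The excerpt below is a condensed sketch of one iteration of that loop, not standalone code; buf_size stands in for MTK_WED_BUF_SIZE or MTK_WED_PAGE_BUF_SIZE, and error unwinding of earlier pages is omitted.

	struct page *page = __dev_alloc_page(GFP_KERNEL);
	dma_addr_t page_phys, buf_phys;
	int s;

	if (!page)
		return -ENOMEM;

	page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
				 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev->hw->dev, page_phys)) {
		__free_page(page);
		return -ENOMEM;
	}

	/* keep both handles so the free path can unmap and release the page */
	page_list[page_idx].p = page;
	page_list[page_idx++].phy_addr = page_phys;

	dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
				DMA_BIDIRECTIONAL);
	buf_phys = page_phys;
	for (s = 0; s < PAGE_SIZE / buf_size; s++) {
		desc->buf0 = cpu_to_le32(buf_phys);	/* one slot per descriptor */
		buf_phys += buf_size;
		desc++;
	}
	dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
				   DMA_BIDIRECTIONAL);

Storing the page's DMA address in the page list (rather than re-deriving it from desc->buf0, as the old free path did) is what lets mtk_wed_free_tx_buffer() and mtk_wed_hwrro_free_buffer() unmap cleanly regardless of descriptor format.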