From 2c3c1048746a4622d8c89a29670120dc8fab93c4 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sun, 7 Apr 2024 20:49:45 +0200 Subject: Adding upstream version 6.1.76. Signed-off-by: Daniel Baumann --- drivers/net/wwan/t7xx/Makefile | 18 + drivers/net/wwan/t7xx/t7xx_cldma.c | 281 ++++++ drivers/net/wwan/t7xx/t7xx_cldma.h | 180 ++++ drivers/net/wwan/t7xx/t7xx_dpmaif.c | 1281 ++++++++++++++++++++++++++ drivers/net/wwan/t7xx/t7xx_dpmaif.h | 179 ++++ drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 1339 ++++++++++++++++++++++++++++ drivers/net/wwan/t7xx/t7xx_hif_cldma.h | 127 +++ drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c | 574 ++++++++++++ drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h | 206 +++++ drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c | 1243 ++++++++++++++++++++++++++ drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h | 116 +++ drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c | 683 ++++++++++++++ drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.h | 78 ++ drivers/net/wwan/t7xx/t7xx_mhccif.c | 122 +++ drivers/net/wwan/t7xx/t7xx_mhccif.h | 37 + drivers/net/wwan/t7xx/t7xx_modem_ops.c | 729 +++++++++++++++ drivers/net/wwan/t7xx/t7xx_modem_ops.h | 88 ++ drivers/net/wwan/t7xx/t7xx_netdev.c | 423 +++++++++ drivers/net/wwan/t7xx/t7xx_netdev.h | 55 ++ drivers/net/wwan/t7xx/t7xx_pci.c | 781 ++++++++++++++++ drivers/net/wwan/t7xx/t7xx_pci.h | 121 +++ drivers/net/wwan/t7xx/t7xx_pcie_mac.c | 262 ++++++ drivers/net/wwan/t7xx/t7xx_pcie_mac.h | 31 + drivers/net/wwan/t7xx/t7xx_port.h | 135 +++ drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c | 273 ++++++ drivers/net/wwan/t7xx/t7xx_port_proxy.c | 509 +++++++++++ drivers/net/wwan/t7xx/t7xx_port_proxy.h | 98 ++ drivers/net/wwan/t7xx/t7xx_port_wwan.c | 176 ++++ drivers/net/wwan/t7xx/t7xx_reg.h | 350 ++++++++ drivers/net/wwan/t7xx/t7xx_state_monitor.c | 550 ++++++++++++ drivers/net/wwan/t7xx/t7xx_state_monitor.h | 135 +++ 31 files changed, 11180 insertions(+) create mode 100644 drivers/net/wwan/t7xx/Makefile create mode 100644 drivers/net/wwan/t7xx/t7xx_cldma.c create mode 100644 drivers/net/wwan/t7xx/t7xx_cldma.h create mode 100644 drivers/net/wwan/t7xx/t7xx_dpmaif.c create mode 100644 drivers/net/wwan/t7xx/t7xx_dpmaif.h create mode 100644 drivers/net/wwan/t7xx/t7xx_hif_cldma.c create mode 100644 drivers/net/wwan/t7xx/t7xx_hif_cldma.h create mode 100644 drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c create mode 100644 drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h create mode 100644 drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c create mode 100644 drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h create mode 100644 drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c create mode 100644 drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.h create mode 100644 drivers/net/wwan/t7xx/t7xx_mhccif.c create mode 100644 drivers/net/wwan/t7xx/t7xx_mhccif.h create mode 100644 drivers/net/wwan/t7xx/t7xx_modem_ops.c create mode 100644 drivers/net/wwan/t7xx/t7xx_modem_ops.h create mode 100644 drivers/net/wwan/t7xx/t7xx_netdev.c create mode 100644 drivers/net/wwan/t7xx/t7xx_netdev.h create mode 100644 drivers/net/wwan/t7xx/t7xx_pci.c create mode 100644 drivers/net/wwan/t7xx/t7xx_pci.h create mode 100644 drivers/net/wwan/t7xx/t7xx_pcie_mac.c create mode 100644 drivers/net/wwan/t7xx/t7xx_pcie_mac.h create mode 100644 drivers/net/wwan/t7xx/t7xx_port.h create mode 100644 drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c create mode 100644 drivers/net/wwan/t7xx/t7xx_port_proxy.c create mode 100644 drivers/net/wwan/t7xx/t7xx_port_proxy.h create mode 100644 drivers/net/wwan/t7xx/t7xx_port_wwan.c create mode 100644 drivers/net/wwan/t7xx/t7xx_reg.h create mode 100644 
drivers/net/wwan/t7xx/t7xx_state_monitor.c create mode 100644 drivers/net/wwan/t7xx/t7xx_state_monitor.h (limited to 'drivers/net/wwan/t7xx') diff --git a/drivers/net/wwan/t7xx/Makefile b/drivers/net/wwan/t7xx/Makefile new file mode 100644 index 000000000..5e6398b52 --- /dev/null +++ b/drivers/net/wwan/t7xx/Makefile @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: GPL-2.0-only + +obj-${CONFIG_MTK_T7XX} := mtk_t7xx.o +mtk_t7xx-y:= t7xx_pci.o \ + t7xx_pcie_mac.o \ + t7xx_mhccif.o \ + t7xx_state_monitor.o \ + t7xx_modem_ops.o \ + t7xx_cldma.o \ + t7xx_hif_cldma.o \ + t7xx_port_proxy.o \ + t7xx_port_ctrl_msg.o \ + t7xx_port_wwan.o \ + t7xx_hif_dpmaif.o \ + t7xx_hif_dpmaif_tx.o \ + t7xx_hif_dpmaif_rx.o \ + t7xx_dpmaif.o \ + t7xx_netdev.o diff --git a/drivers/net/wwan/t7xx/t7xx_cldma.c b/drivers/net/wwan/t7xx/t7xx_cldma.c new file mode 100644 index 000000000..9f43f256d --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_cldma.c @@ -0,0 +1,281 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Haijun Liu + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Amir Hanania + * Andy Shevchenko + * Eliot Lee + * Sreehari Kancharla + */ + +#include +#include +#include +#include +#include + +#include "t7xx_cldma.h" + +#define ADDR_SIZE 8 + +void t7xx_cldma_clear_ip_busy(struct t7xx_cldma_hw *hw_info) +{ + u32 val; + + val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_IP_BUSY); + val |= IP_BUSY_WAKEUP; + iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_IP_BUSY); +} + +/** + * t7xx_cldma_hw_restore() - Restore CLDMA HW registers. + * @hw_info: Pointer to struct t7xx_cldma_hw. + * + * Restore HW after resume. Writes uplink configuration for CLDMA HW. + */ +void t7xx_cldma_hw_restore(struct t7xx_cldma_hw *hw_info) +{ + u32 ul_cfg; + + ul_cfg = ioread32(hw_info->ap_pdn_base + REG_CLDMA_UL_CFG); + ul_cfg &= ~UL_CFG_BIT_MODE_MASK; + + if (hw_info->hw_mode == MODE_BIT_64) + ul_cfg |= UL_CFG_BIT_MODE_64; + else if (hw_info->hw_mode == MODE_BIT_40) + ul_cfg |= UL_CFG_BIT_MODE_40; + else if (hw_info->hw_mode == MODE_BIT_36) + ul_cfg |= UL_CFG_BIT_MODE_36; + + iowrite32(ul_cfg, hw_info->ap_pdn_base + REG_CLDMA_UL_CFG); + /* Disable TX and RX invalid address check */ + iowrite32(UL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_UL_MEM); + iowrite32(DL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_DL_MEM); +} + +void t7xx_cldma_hw_start_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno, + enum mtk_txrx tx_rx) +{ + void __iomem *reg; + u32 val; + + reg = tx_rx == MTK_RX ? hw_info->ap_pdn_base + REG_CLDMA_DL_START_CMD : + hw_info->ap_pdn_base + REG_CLDMA_UL_START_CMD; + val = qno == CLDMA_ALL_Q ? 
CLDMA_ALL_Q : BIT(qno); + iowrite32(val, reg); +} + +void t7xx_cldma_hw_start(struct t7xx_cldma_hw *hw_info) +{ + /* Enable the TX & RX interrupts */ + iowrite32(TXRX_STATUS_BITMASK, hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0); + iowrite32(TXRX_STATUS_BITMASK, hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0); + /* Enable the empty queue interrupt */ + iowrite32(EMPTY_STATUS_BITMASK, hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0); + iowrite32(EMPTY_STATUS_BITMASK, hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0); +} + +void t7xx_cldma_hw_reset(void __iomem *ao_base) +{ + u32 val; + + val = ioread32(ao_base + REG_INFRA_RST2_SET); + val |= RST2_PMIC_SW_RST_SET; + iowrite32(val, ao_base + REG_INFRA_RST2_SET); + val = ioread32(ao_base + REG_INFRA_RST4_SET); + val |= RST4_CLDMA1_SW_RST_SET; + iowrite32(val, ao_base + REG_INFRA_RST4_SET); + udelay(1); + + val = ioread32(ao_base + REG_INFRA_RST4_CLR); + val |= RST4_CLDMA1_SW_RST_CLR; + iowrite32(val, ao_base + REG_INFRA_RST4_CLR); + val = ioread32(ao_base + REG_INFRA_RST2_CLR); + val |= RST2_PMIC_SW_RST_CLR; + iowrite32(val, ao_base + REG_INFRA_RST2_CLR); +} + +bool t7xx_cldma_tx_addr_is_set(struct t7xx_cldma_hw *hw_info, unsigned int qno) +{ + u32 offset = REG_CLDMA_UL_START_ADDRL_0 + qno * ADDR_SIZE; + + return ioread64(hw_info->ap_pdn_base + offset); +} + +void t7xx_cldma_hw_set_start_addr(struct t7xx_cldma_hw *hw_info, unsigned int qno, u64 address, + enum mtk_txrx tx_rx) +{ + u32 offset = qno * ADDR_SIZE; + void __iomem *reg; + + reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_DL_START_ADDRL_0 : + hw_info->ap_pdn_base + REG_CLDMA_UL_START_ADDRL_0; + iowrite64(address, reg + offset); +} + +void t7xx_cldma_hw_resume_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno, + enum mtk_txrx tx_rx) +{ + void __iomem *base = hw_info->ap_pdn_base; + + if (tx_rx == MTK_RX) + iowrite32(BIT(qno), base + REG_CLDMA_DL_RESUME_CMD); + else + iowrite32(BIT(qno), base + REG_CLDMA_UL_RESUME_CMD); +} + +unsigned int t7xx_cldma_hw_queue_status(struct t7xx_cldma_hw *hw_info, unsigned int qno, + enum mtk_txrx tx_rx) +{ + void __iomem *reg; + u32 mask, val; + + mask = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno); + reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_DL_STATUS : + hw_info->ap_pdn_base + REG_CLDMA_UL_STATUS; + val = ioread32(reg); + + return val & mask; +} + +void t7xx_cldma_hw_tx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask) +{ + unsigned int ch_id; + + ch_id = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0); + ch_id &= bitmask; + /* Clear the ch IDs in the TX interrupt status register */ + iowrite32(ch_id, hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0); + ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0); +} + +void t7xx_cldma_hw_rx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask) +{ + unsigned int ch_id; + + ch_id = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0); + ch_id &= bitmask; + /* Clear the ch IDs in the RX interrupt status register */ + iowrite32(ch_id, hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0); + ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0); +} + +unsigned int t7xx_cldma_hw_int_status(struct t7xx_cldma_hw *hw_info, unsigned int bitmask, + enum mtk_txrx tx_rx) +{ + void __iomem *reg; + u32 val; + + reg = tx_rx == MTK_RX ? 
hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0 : + hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0; + val = ioread32(reg); + return val & bitmask; +} + +void t7xx_cldma_hw_irq_dis_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno, + enum mtk_txrx tx_rx) +{ + void __iomem *reg; + u32 val; + + reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMSR0 : + hw_info->ap_pdn_base + REG_CLDMA_L2TIMSR0; + val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno); + iowrite32(val, reg); +} + +void t7xx_cldma_hw_irq_dis_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, enum mtk_txrx tx_rx) +{ + void __iomem *reg; + u32 val; + + reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMSR0 : + hw_info->ap_pdn_base + REG_CLDMA_L2TIMSR0; + val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno); + iowrite32(val << EQ_STA_BIT_OFFSET, reg); +} + +void t7xx_cldma_hw_irq_en_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno, + enum mtk_txrx tx_rx) +{ + void __iomem *reg; + u32 val; + + reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0 : + hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0; + val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno); + iowrite32(val, reg); +} + +void t7xx_cldma_hw_irq_en_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, enum mtk_txrx tx_rx) +{ + void __iomem *reg; + u32 val; + + reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0 : + hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0; + val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno); + iowrite32(val << EQ_STA_BIT_OFFSET, reg); +} + +/** + * t7xx_cldma_hw_init() - Initialize CLDMA HW. + * @hw_info: Pointer to struct t7xx_cldma_hw. + * + * Write uplink and downlink configuration to CLDMA HW. + */ +void t7xx_cldma_hw_init(struct t7xx_cldma_hw *hw_info) +{ + u32 ul_cfg, dl_cfg; + + ul_cfg = ioread32(hw_info->ap_pdn_base + REG_CLDMA_UL_CFG); + dl_cfg = ioread32(hw_info->ap_ao_base + REG_CLDMA_DL_CFG); + /* Configure the DRAM address mode */ + ul_cfg &= ~UL_CFG_BIT_MODE_MASK; + dl_cfg &= ~DL_CFG_BIT_MODE_MASK; + + if (hw_info->hw_mode == MODE_BIT_64) { + ul_cfg |= UL_CFG_BIT_MODE_64; + dl_cfg |= DL_CFG_BIT_MODE_64; + } else if (hw_info->hw_mode == MODE_BIT_40) { + ul_cfg |= UL_CFG_BIT_MODE_40; + dl_cfg |= DL_CFG_BIT_MODE_40; + } else if (hw_info->hw_mode == MODE_BIT_36) { + ul_cfg |= UL_CFG_BIT_MODE_36; + dl_cfg |= DL_CFG_BIT_MODE_36; + } + + iowrite32(ul_cfg, hw_info->ap_pdn_base + REG_CLDMA_UL_CFG); + dl_cfg |= DL_CFG_UP_HW_LAST; + iowrite32(dl_cfg, hw_info->ap_ao_base + REG_CLDMA_DL_CFG); + iowrite32(0, hw_info->ap_ao_base + REG_CLDMA_INT_MASK); + iowrite32(BUSY_MASK_MD, hw_info->ap_ao_base + REG_CLDMA_BUSY_MASK); + iowrite32(UL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_UL_MEM); + iowrite32(DL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_DL_MEM); +} + +void t7xx_cldma_hw_stop_all_qs(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx) +{ + void __iomem *reg; + + reg = tx_rx == MTK_RX ? hw_info->ap_pdn_base + REG_CLDMA_DL_STOP_CMD : + hw_info->ap_pdn_base + REG_CLDMA_UL_STOP_CMD; + iowrite32(CLDMA_ALL_Q, reg); +} + +void t7xx_cldma_hw_stop(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx) +{ + void __iomem *reg; + + reg = tx_rx == MTK_RX ? 
hw_info->ap_ao_base + REG_CLDMA_L2RIMSR0 : + hw_info->ap_pdn_base + REG_CLDMA_L2TIMSR0; + iowrite32(TXRX_STATUS_BITMASK, reg); + iowrite32(EMPTY_STATUS_BITMASK, reg); +} diff --git a/drivers/net/wwan/t7xx/t7xx_cldma.h b/drivers/net/wwan/t7xx/t7xx_cldma.h new file mode 100644 index 000000000..8949e8377 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_cldma.h @@ -0,0 +1,180 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Haijun Liu + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Amir Hanania + * Andy Shevchenko + * Sreehari Kancharla + */ + +#ifndef __T7XX_CLDMA_H__ +#define __T7XX_CLDMA_H__ + +#include +#include + +#define CLDMA_TXQ_NUM 8 +#define CLDMA_RXQ_NUM 8 +#define CLDMA_ALL_Q GENMASK(7, 0) + +/* Interrupt status bits */ +#define EMPTY_STATUS_BITMASK GENMASK(15, 8) +#define TXRX_STATUS_BITMASK GENMASK(7, 0) +#define EQ_STA_BIT_OFFSET 8 +#define L2_INT_BIT_COUNT 16 +#define EQ_STA_BIT(index) (BIT((index) + EQ_STA_BIT_OFFSET) & EMPTY_STATUS_BITMASK) + +#define TQ_ERR_INT_BITMASK GENMASK(23, 16) +#define TQ_ACTIVE_START_ERR_INT_BITMASK GENMASK(31, 24) + +#define RQ_ERR_INT_BITMASK GENMASK(23, 16) +#define RQ_ACTIVE_START_ERR_INT_BITMASK GENMASK(31, 24) + +#define CLDMA0_AO_BASE 0x10049000 +#define CLDMA0_PD_BASE 0x1021d000 +#define CLDMA1_AO_BASE 0x1004b000 +#define CLDMA1_PD_BASE 0x1021f000 + +#define CLDMA_R_AO_BASE 0x10023000 +#define CLDMA_R_PD_BASE 0x1023d000 + +/* CLDMA TX */ +#define REG_CLDMA_UL_START_ADDRL_0 0x0004 +#define REG_CLDMA_UL_START_ADDRH_0 0x0008 +#define REG_CLDMA_UL_CURRENT_ADDRL_0 0x0044 +#define REG_CLDMA_UL_CURRENT_ADDRH_0 0x0048 +#define REG_CLDMA_UL_STATUS 0x0084 +#define REG_CLDMA_UL_START_CMD 0x0088 +#define REG_CLDMA_UL_RESUME_CMD 0x008c +#define REG_CLDMA_UL_STOP_CMD 0x0090 +#define REG_CLDMA_UL_ERROR 0x0094 +#define REG_CLDMA_UL_CFG 0x0098 +#define UL_CFG_BIT_MODE_36 BIT(5) +#define UL_CFG_BIT_MODE_40 BIT(6) +#define UL_CFG_BIT_MODE_64 BIT(7) +#define UL_CFG_BIT_MODE_MASK GENMASK(7, 5) + +#define REG_CLDMA_UL_MEM 0x009c +#define UL_MEM_CHECK_DIS BIT(0) + +/* CLDMA RX */ +#define REG_CLDMA_DL_START_CMD 0x05bc +#define REG_CLDMA_DL_RESUME_CMD 0x05c0 +#define REG_CLDMA_DL_STOP_CMD 0x05c4 +#define REG_CLDMA_DL_MEM 0x0508 +#define DL_MEM_CHECK_DIS BIT(0) + +#define REG_CLDMA_DL_CFG 0x0404 +#define DL_CFG_UP_HW_LAST BIT(2) +#define DL_CFG_BIT_MODE_36 BIT(10) +#define DL_CFG_BIT_MODE_40 BIT(11) +#define DL_CFG_BIT_MODE_64 BIT(12) +#define DL_CFG_BIT_MODE_MASK GENMASK(12, 10) + +#define REG_CLDMA_DL_START_ADDRL_0 0x0478 +#define REG_CLDMA_DL_START_ADDRH_0 0x047c +#define REG_CLDMA_DL_CURRENT_ADDRL_0 0x04b8 +#define REG_CLDMA_DL_CURRENT_ADDRH_0 0x04bc +#define REG_CLDMA_DL_STATUS 0x04f8 + +/* CLDMA MISC */ +#define REG_CLDMA_L2TISAR0 0x0810 +#define REG_CLDMA_L2TISAR1 0x0814 +#define REG_CLDMA_L2TIMR0 0x0818 +#define REG_CLDMA_L2TIMR1 0x081c +#define REG_CLDMA_L2TIMCR0 0x0820 +#define REG_CLDMA_L2TIMCR1 0x0824 +#define REG_CLDMA_L2TIMSR0 0x0828 +#define REG_CLDMA_L2TIMSR1 0x082c +#define REG_CLDMA_L3TISAR0 0x0830 +#define REG_CLDMA_L3TISAR1 0x0834 +#define REG_CLDMA_L2RISAR0 0x0850 +#define REG_CLDMA_L2RISAR1 0x0854 +#define REG_CLDMA_L3RISAR0 0x0870 +#define REG_CLDMA_L3RISAR1 0x0874 +#define REG_CLDMA_IP_BUSY 0x08b4 +#define IP_BUSY_WAKEUP BIT(0) +#define CLDMA_L2TISAR0_ALL_INT_MASK GENMASK(15, 0) +#define CLDMA_L2RISAR0_ALL_INT_MASK GENMASK(15, 0) + +/* CLDMA MISC */ +#define REG_CLDMA_L2RIMR0 0x0858 +#define REG_CLDMA_L2RIMR1 0x085c 
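/*
 * Editorial illustration, not part of this patch: the CLDMA helpers in
 * t7xx_cldma.c above apply a plain read-modify-write pattern to the registers
 * defined here: read the register, set or clear the relevant bits, and write
 * the value back. Below is a minimal sketch of that pattern; the helper name
 * t7xx_cldma_hw_set_bits() is hypothetical and does not exist in the driver,
 * and ioread32()/iowrite32() come from <linux/io.h>.
 */
static inline void t7xx_cldma_hw_set_bits(void __iomem *reg, u32 bits)
{
	u32 val;

	val = ioread32(reg);	/* Read the current register contents */
	val |= bits;		/* Set only the requested bits */
	iowrite32(val, reg);	/* Write the updated value back */
}

/*
 * Usage note (illustrative): with such a helper, t7xx_cldma_clear_ip_busy()
 * above would reduce to a single call:
 *	t7xx_cldma_hw_set_bits(hw_info->ap_pdn_base + REG_CLDMA_IP_BUSY, IP_BUSY_WAKEUP);
 * The driver open-codes this pattern at each call site instead.
 */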
+#define REG_CLDMA_L2RIMCR0 0x0860 +#define REG_CLDMA_L2RIMCR1 0x0864 +#define REG_CLDMA_L2RIMSR0 0x0868 +#define REG_CLDMA_L2RIMSR1 0x086c +#define REG_CLDMA_BUSY_MASK 0x0954 +#define BUSY_MASK_PCIE BIT(0) +#define BUSY_MASK_AP BIT(1) +#define BUSY_MASK_MD BIT(2) + +#define REG_CLDMA_INT_MASK 0x0960 + +/* CLDMA RESET */ +#define REG_INFRA_RST4_SET 0x0730 +#define RST4_CLDMA1_SW_RST_SET BIT(20) + +#define REG_INFRA_RST4_CLR 0x0734 +#define RST4_CLDMA1_SW_RST_CLR BIT(20) + +#define REG_INFRA_RST2_SET 0x0140 +#define RST2_PMIC_SW_RST_SET BIT(18) + +#define REG_INFRA_RST2_CLR 0x0144 +#define RST2_PMIC_SW_RST_CLR BIT(18) + +enum mtk_txrx { + MTK_TX, + MTK_RX, +}; + +enum t7xx_hw_mode { + MODE_BIT_32, + MODE_BIT_36, + MODE_BIT_40, + MODE_BIT_64, +}; + +struct t7xx_cldma_hw { + enum t7xx_hw_mode hw_mode; + void __iomem *ap_ao_base; + void __iomem *ap_pdn_base; + u32 phy_interrupt_id; +}; + +void t7xx_cldma_hw_irq_dis_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno, + enum mtk_txrx tx_rx); +void t7xx_cldma_hw_irq_dis_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, + enum mtk_txrx tx_rx); +void t7xx_cldma_hw_irq_en_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno, + enum mtk_txrx tx_rx); +void t7xx_cldma_hw_irq_en_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, enum mtk_txrx tx_rx); +unsigned int t7xx_cldma_hw_queue_status(struct t7xx_cldma_hw *hw_info, unsigned int qno, + enum mtk_txrx tx_rx); +void t7xx_cldma_hw_init(struct t7xx_cldma_hw *hw_info); +void t7xx_cldma_hw_resume_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno, + enum mtk_txrx tx_rx); +void t7xx_cldma_hw_start(struct t7xx_cldma_hw *hw_info); +void t7xx_cldma_hw_start_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno, + enum mtk_txrx tx_rx); +void t7xx_cldma_hw_tx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask); +void t7xx_cldma_hw_rx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask); +void t7xx_cldma_hw_stop_all_qs(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx); +void t7xx_cldma_hw_set_start_addr(struct t7xx_cldma_hw *hw_info, + unsigned int qno, u64 address, enum mtk_txrx tx_rx); +void t7xx_cldma_hw_reset(void __iomem *ao_base); +void t7xx_cldma_hw_stop(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx); +unsigned int t7xx_cldma_hw_int_status(struct t7xx_cldma_hw *hw_info, unsigned int bitmask, + enum mtk_txrx tx_rx); +void t7xx_cldma_hw_restore(struct t7xx_cldma_hw *hw_info); +void t7xx_cldma_clear_ip_busy(struct t7xx_cldma_hw *hw_info); +bool t7xx_cldma_tx_addr_is_set(struct t7xx_cldma_hw *hw_info, unsigned int qno); +#endif diff --git a/drivers/net/wwan/t7xx/t7xx_dpmaif.c b/drivers/net/wwan/t7xx/t7xx_dpmaif.c new file mode 100644 index 000000000..6d3edadec --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_dpmaif.c @@ -0,0 +1,1281 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. 
+ * + * Authors: + * Amir Hanania + * Haijun Liu + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Andy Shevchenko + * Chiranjeevi Rapolu + * Eliot Lee + * Sreehari Kancharla + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_dpmaif.h" +#include "t7xx_reg.h" + +#define ioread32_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ + readx_poll_timeout_atomic(ioread32, addr, val, cond, delay_us, timeout_us) + +static int t7xx_dpmaif_init_intr(struct dpmaif_hw_info *hw_info) +{ + struct dpmaif_isr_en_mask *isr_en_msk = &hw_info->isr_en_mask; + u32 value, ul_intr_enable, dl_intr_enable; + int ret; + + ul_intr_enable = DP_UL_INT_ERR_MSK | DP_UL_INT_QDONE_MSK; + isr_en_msk->ap_ul_l2intr_en_msk = ul_intr_enable; + iowrite32(DPMAIF_AP_ALL_L2TISAR0_MASK, hw_info->pcie_base + DPMAIF_AP_L2TISAR0); + + /* Set interrupt enable mask */ + iowrite32(ul_intr_enable, hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMCR0); + iowrite32(~ul_intr_enable, hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMSR0); + + /* Check mask status */ + ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0, + value, (value & ul_intr_enable) != ul_intr_enable, 0, + DPMAIF_CHECK_INIT_TIMEOUT_US); + if (ret) + return ret; + + dl_intr_enable = DP_DL_INT_PITCNT_LEN_ERR | DP_DL_INT_BATCNT_LEN_ERR; + isr_en_msk->ap_dl_l2intr_err_en_msk = dl_intr_enable; + ul_intr_enable = DPMAIF_DL_INT_DLQ0_QDONE | DPMAIF_DL_INT_DLQ0_PITCNT_LEN | + DPMAIF_DL_INT_DLQ1_QDONE | DPMAIF_DL_INT_DLQ1_PITCNT_LEN; + isr_en_msk->ap_ul_l2intr_en_msk = ul_intr_enable; + iowrite32(DPMAIF_AP_APDL_ALL_L2TISAR0_MASK, hw_info->pcie_base + DPMAIF_AP_APDL_L2TISAR0); + + /* Set DL ISR PD enable mask */ + iowrite32(~ul_intr_enable, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0); + ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMR0, + value, (value & ul_intr_enable) != ul_intr_enable, 0, + DPMAIF_CHECK_INIT_TIMEOUT_US); + if (ret) + return ret; + + isr_en_msk->ap_udl_ip_busy_en_msk = DPMAIF_UDL_IP_BUSY; + iowrite32(DPMAIF_AP_IP_BUSY_MASK, hw_info->pcie_base + DPMAIF_AP_IP_BUSY); + iowrite32(isr_en_msk->ap_udl_ip_busy_en_msk, + hw_info->pcie_base + DPMAIF_AO_AP_DLUL_IP_BUSY_MASK); + value = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_AP_L1TIMR0); + value |= DPMAIF_DL_INT_Q2APTOP | DPMAIF_DL_INT_Q2TOQ1; + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_UL_AP_L1TIMR0); + iowrite32(DPMA_HPC_ALL_INT_MASK, hw_info->pcie_base + DPMAIF_HPC_INTR_MASK); + + return 0; +} + +static void t7xx_dpmaif_mask_ulq_intr(struct dpmaif_hw_info *hw_info, unsigned int q_num) +{ + struct dpmaif_isr_en_mask *isr_en_msk; + u32 value, ul_int_que_done; + int ret; + + isr_en_msk = &hw_info->isr_en_mask; + ul_int_que_done = BIT(q_num + DP_UL_INT_DONE_OFFSET) & DP_UL_INT_QDONE_MSK; + isr_en_msk->ap_ul_l2intr_en_msk &= ~ul_int_que_done; + iowrite32(ul_int_que_done, hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMSR0); + + ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0, + value, (value & ul_int_que_done) == ul_int_que_done, 0, + DPMAIF_CHECK_TIMEOUT_US); + if (ret) + dev_err(hw_info->dev, + "Could not mask the UL interrupt. 
DPMAIF_AO_UL_AP_L2TIMR0 is 0x%x\n", + value); +} + +void t7xx_dpmaif_unmask_ulq_intr(struct dpmaif_hw_info *hw_info, unsigned int q_num) +{ + struct dpmaif_isr_en_mask *isr_en_msk; + u32 value, ul_int_que_done; + int ret; + + isr_en_msk = &hw_info->isr_en_mask; + ul_int_que_done = BIT(q_num + DP_UL_INT_DONE_OFFSET) & DP_UL_INT_QDONE_MSK; + isr_en_msk->ap_ul_l2intr_en_msk |= ul_int_que_done; + iowrite32(ul_int_que_done, hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMCR0); + + ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0, + value, (value & ul_int_que_done) != ul_int_que_done, 0, + DPMAIF_CHECK_TIMEOUT_US); + if (ret) + dev_err(hw_info->dev, + "Could not unmask the UL interrupt. DPMAIF_AO_UL_AP_L2TIMR0 is 0x%x\n", + value); +} + +void t7xx_dpmaif_dl_unmask_batcnt_len_err_intr(struct dpmaif_hw_info *hw_info) +{ + hw_info->isr_en_mask.ap_dl_l2intr_en_msk |= DP_DL_INT_BATCNT_LEN_ERR; + iowrite32(DP_DL_INT_BATCNT_LEN_ERR, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMCR0); +} + +void t7xx_dpmaif_dl_unmask_pitcnt_len_err_intr(struct dpmaif_hw_info *hw_info) +{ + hw_info->isr_en_mask.ap_dl_l2intr_en_msk |= DP_DL_INT_PITCNT_LEN_ERR; + iowrite32(DP_DL_INT_PITCNT_LEN_ERR, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMCR0); +} + +static u32 t7xx_update_dlq_intr(struct dpmaif_hw_info *hw_info, u32 q_done) +{ + u32 value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0); + iowrite32(q_done, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0); + return value; +} + +static int t7xx_mask_dlq_intr(struct dpmaif_hw_info *hw_info, unsigned int qno) +{ + u32 value, q_done; + int ret; + + q_done = qno == DPF_RX_QNO0 ? DPMAIF_DL_INT_DLQ0_QDONE : DPMAIF_DL_INT_DLQ1_QDONE; + iowrite32(q_done, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0); + + ret = read_poll_timeout_atomic(t7xx_update_dlq_intr, value, value & q_done, + 0, DPMAIF_CHECK_TIMEOUT_US, false, hw_info, q_done); + if (ret) { + dev_err(hw_info->dev, + "Could not mask the DL interrupt. DPMAIF_AO_UL_AP_L2TIMR0 is 0x%x\n", + value); + return -ETIMEDOUT; + } + + hw_info->isr_en_mask.ap_dl_l2intr_en_msk &= ~q_done; + return 0; +} + +void t7xx_dpmaif_dlq_unmask_rx_done(struct dpmaif_hw_info *hw_info, unsigned int qno) +{ + u32 mask; + + mask = qno == DPF_RX_QNO0 ? 
DPMAIF_DL_INT_DLQ0_QDONE : DPMAIF_DL_INT_DLQ1_QDONE; + iowrite32(mask, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMCR0); + hw_info->isr_en_mask.ap_dl_l2intr_en_msk |= mask; +} + +void t7xx_dpmaif_clr_ip_busy_sts(struct dpmaif_hw_info *hw_info) +{ + u32 ip_busy_sts; + + ip_busy_sts = ioread32(hw_info->pcie_base + DPMAIF_AP_IP_BUSY); + iowrite32(ip_busy_sts, hw_info->pcie_base + DPMAIF_AP_IP_BUSY); +} + +static void t7xx_dpmaif_dlq_mask_rx_pitcnt_len_err_intr(struct dpmaif_hw_info *hw_info, + unsigned int qno) +{ + if (qno == DPF_RX_QNO0) + iowrite32(DPMAIF_DL_INT_DLQ0_PITCNT_LEN, + hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0); + else + iowrite32(DPMAIF_DL_INT_DLQ1_PITCNT_LEN, + hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0); +} + +void t7xx_dpmaif_dlq_unmask_pitcnt_len_err_intr(struct dpmaif_hw_info *hw_info, + unsigned int qno) +{ + if (qno == DPF_RX_QNO0) + iowrite32(DPMAIF_DL_INT_DLQ0_PITCNT_LEN, + hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMCR0); + else + iowrite32(DPMAIF_DL_INT_DLQ1_PITCNT_LEN, + hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMCR0); +} + +void t7xx_dpmaif_ul_clr_all_intr(struct dpmaif_hw_info *hw_info) +{ + iowrite32(DPMAIF_AP_ALL_L2TISAR0_MASK, hw_info->pcie_base + DPMAIF_AP_L2TISAR0); +} + +void t7xx_dpmaif_dl_clr_all_intr(struct dpmaif_hw_info *hw_info) +{ + iowrite32(DPMAIF_AP_APDL_ALL_L2TISAR0_MASK, hw_info->pcie_base + DPMAIF_AP_APDL_L2TISAR0); +} + +static void t7xx_dpmaif_set_intr_para(struct dpmaif_hw_intr_st_para *para, + enum dpmaif_hw_intr_type intr_type, unsigned int intr_queue) +{ + para->intr_types[para->intr_cnt] = intr_type; + para->intr_queues[para->intr_cnt] = intr_queue; + para->intr_cnt++; +} + +/* The para->intr_cnt counter is set to zero before this function is called. + * It does not check for overflow as there is no risk of overflowing intr_types or intr_queues. + */ +static void t7xx_dpmaif_hw_check_tx_intr(struct dpmaif_hw_info *hw_info, + unsigned int intr_status, + struct dpmaif_hw_intr_st_para *para) +{ + unsigned long value; + + value = FIELD_GET(DP_UL_INT_QDONE_MSK, intr_status); + if (value) { + unsigned int index; + + t7xx_dpmaif_set_intr_para(para, DPF_INTR_UL_DONE, value); + + for_each_set_bit(index, &value, DPMAIF_TXQ_NUM) + t7xx_dpmaif_mask_ulq_intr(hw_info, index); + } + + value = FIELD_GET(DP_UL_INT_EMPTY_MSK, intr_status); + if (value) + t7xx_dpmaif_set_intr_para(para, DPF_INTR_UL_DRB_EMPTY, value); + + value = FIELD_GET(DP_UL_INT_MD_NOTREADY_MSK, intr_status); + if (value) + t7xx_dpmaif_set_intr_para(para, DPF_INTR_UL_MD_NOTREADY, value); + + value = FIELD_GET(DP_UL_INT_MD_PWR_NOTREADY_MSK, intr_status); + if (value) + t7xx_dpmaif_set_intr_para(para, DPF_INTR_UL_MD_PWR_NOTREADY, value); + + value = FIELD_GET(DP_UL_INT_ERR_MSK, intr_status); + if (value) + t7xx_dpmaif_set_intr_para(para, DPF_INTR_UL_LEN_ERR, value); + + /* Clear interrupt status */ + iowrite32(intr_status, hw_info->pcie_base + DPMAIF_AP_L2TISAR0); +} + +/* The para->intr_cnt counter is set to zero before this function is called. + * It does not check for overflow as there is no risk of overflowing intr_types or intr_queues. 
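 * (Editorial note: overflow cannot occur because each DPF_INTR_* type is
 * recorded at most once per pass and intr_types/intr_queues provide one slot
 * per valid type, i.e. DPF_INTR_INVALID_MAX - 1 entries.)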
+ */ +static void t7xx_dpmaif_hw_check_rx_intr(struct dpmaif_hw_info *hw_info, + unsigned int intr_status, + struct dpmaif_hw_intr_st_para *para, int qno) +{ + if (qno == DPF_RX_QNO_DFT) { + if (intr_status & DP_DL_INT_SKB_LEN_ERR) + t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_SKB_LEN_ERR, DPF_RX_QNO_DFT); + + if (intr_status & DP_DL_INT_BATCNT_LEN_ERR) { + t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_BATCNT_LEN_ERR, DPF_RX_QNO_DFT); + hw_info->isr_en_mask.ap_dl_l2intr_en_msk &= ~DP_DL_INT_BATCNT_LEN_ERR; + iowrite32(DP_DL_INT_BATCNT_LEN_ERR, + hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0); + } + + if (intr_status & DP_DL_INT_PITCNT_LEN_ERR) { + t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_PITCNT_LEN_ERR, DPF_RX_QNO_DFT); + hw_info->isr_en_mask.ap_dl_l2intr_en_msk &= ~DP_DL_INT_PITCNT_LEN_ERR; + iowrite32(DP_DL_INT_PITCNT_LEN_ERR, + hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0); + } + + if (intr_status & DP_DL_INT_PKT_EMPTY_MSK) + t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_PKT_EMPTY_SET, DPF_RX_QNO_DFT); + + if (intr_status & DP_DL_INT_FRG_EMPTY_MSK) + t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_FRG_EMPTY_SET, DPF_RX_QNO_DFT); + + if (intr_status & DP_DL_INT_MTU_ERR_MSK) + t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_MTU_ERR, DPF_RX_QNO_DFT); + + if (intr_status & DP_DL_INT_FRG_LEN_ERR_MSK) + t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_FRGCNT_LEN_ERR, DPF_RX_QNO_DFT); + + if (intr_status & DP_DL_INT_Q0_PITCNT_LEN_ERR) { + t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_Q0_PITCNT_LEN_ERR, BIT(qno)); + t7xx_dpmaif_dlq_mask_rx_pitcnt_len_err_intr(hw_info, qno); + } + + if (intr_status & DP_DL_INT_HPC_ENT_TYPE_ERR) + t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_HPC_ENT_TYPE_ERR, + DPF_RX_QNO_DFT); + + if (intr_status & DP_DL_INT_Q0_DONE) { + /* Mask RX done interrupt immediately after it occurs, do not clear + * the interrupt if the mask operation fails. + */ + if (!t7xx_mask_dlq_intr(hw_info, qno)) + t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_Q0_DONE, BIT(qno)); + else + intr_status &= ~DP_DL_INT_Q0_DONE; + } + } else { + if (intr_status & DP_DL_INT_Q1_PITCNT_LEN_ERR) { + t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_Q1_PITCNT_LEN_ERR, BIT(qno)); + t7xx_dpmaif_dlq_mask_rx_pitcnt_len_err_intr(hw_info, qno); + } + + if (intr_status & DP_DL_INT_Q1_DONE) { + if (!t7xx_mask_dlq_intr(hw_info, qno)) + t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_Q1_DONE, BIT(qno)); + else + intr_status &= ~DP_DL_INT_Q1_DONE; + } + } + + intr_status |= DP_DL_INT_BATCNT_LEN_ERR; + /* Clear interrupt status */ + iowrite32(intr_status, hw_info->pcie_base + DPMAIF_AP_APDL_L2TISAR0); +} + +/** + * t7xx_dpmaif_hw_get_intr_cnt() - Reads interrupt status and count from HW. + * @hw_info: Pointer to struct hw_info. + * @para: Pointer to struct dpmaif_hw_intr_st_para. + * @qno: Queue number. + * + * Reads RX/TX interrupt status from HW and clears UL/DL status as needed. + * + * Return: Interrupt count. + */ +int t7xx_dpmaif_hw_get_intr_cnt(struct dpmaif_hw_info *hw_info, + struct dpmaif_hw_intr_st_para *para, int qno) +{ + u32 rx_intr_status, tx_intr_status = 0; + u32 rx_intr_qdone, tx_intr_qdone = 0; + + rx_intr_status = ioread32(hw_info->pcie_base + DPMAIF_AP_APDL_L2TISAR0); + rx_intr_qdone = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMR0); + + /* TX interrupt status */ + if (qno == DPF_RX_QNO_DFT) { + /* All ULQ and DLQ0 interrupts use the same source no need to check ULQ interrupts + * when a DLQ1 interrupt has occurred. 
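 * (Editorial note: in other words, ULQ (TX) completion status is read and
 * dispatched only from this default/DLQ0 interrupt path; the DLQ1 path below
 * leaves tx_intr_status at zero and never touches the ULQ registers.)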
+ */ + tx_intr_status = ioread32(hw_info->pcie_base + DPMAIF_AP_L2TISAR0); + tx_intr_qdone = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0); + } + + t7xx_dpmaif_clr_ip_busy_sts(hw_info); + + if (qno == DPF_RX_QNO_DFT) { + /* Do not schedule bottom half again or clear UL interrupt status when we + * have already masked it. + */ + tx_intr_status &= ~tx_intr_qdone; + if (tx_intr_status) + t7xx_dpmaif_hw_check_tx_intr(hw_info, tx_intr_status, para); + } + + if (rx_intr_status) { + if (qno == DPF_RX_QNO0) { + rx_intr_status &= DP_DL_Q0_STATUS_MASK; + if (rx_intr_qdone & DPMAIF_DL_INT_DLQ0_QDONE) + /* Do not schedule bottom half again or clear DL + * queue done interrupt status when we have already masked it. + */ + rx_intr_status &= ~DP_DL_INT_Q0_DONE; + } else { + rx_intr_status &= DP_DL_Q1_STATUS_MASK; + if (rx_intr_qdone & DPMAIF_DL_INT_DLQ1_QDONE) + rx_intr_status &= ~DP_DL_INT_Q1_DONE; + } + + if (rx_intr_status) + t7xx_dpmaif_hw_check_rx_intr(hw_info, rx_intr_status, para, qno); + } + + return para->intr_cnt; +} + +static int t7xx_dpmaif_sram_init(struct dpmaif_hw_info *hw_info) +{ + u32 value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AP_MEM_CLR); + value |= DPMAIF_MEM_CLR; + iowrite32(value, hw_info->pcie_base + DPMAIF_AP_MEM_CLR); + + return ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AP_MEM_CLR, + value, !(value & DPMAIF_MEM_CLR), 0, + DPMAIF_CHECK_INIT_TIMEOUT_US); +} + +static void t7xx_dpmaif_hw_reset(struct dpmaif_hw_info *hw_info) +{ + iowrite32(DPMAIF_AP_AO_RST_BIT, hw_info->pcie_base + DPMAIF_AP_AO_RGU_ASSERT); + udelay(2); + iowrite32(DPMAIF_AP_RST_BIT, hw_info->pcie_base + DPMAIF_AP_RGU_ASSERT); + udelay(2); + iowrite32(DPMAIF_AP_AO_RST_BIT, hw_info->pcie_base + DPMAIF_AP_AO_RGU_DEASSERT); + udelay(2); + iowrite32(DPMAIF_AP_RST_BIT, hw_info->pcie_base + DPMAIF_AP_RGU_DEASSERT); + udelay(2); +} + +static int t7xx_dpmaif_hw_config(struct dpmaif_hw_info *hw_info) +{ + u32 ap_port_mode; + int ret; + + t7xx_dpmaif_hw_reset(hw_info); + + ret = t7xx_dpmaif_sram_init(hw_info); + if (ret) + return ret; + + ap_port_mode = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES); + ap_port_mode |= DPMAIF_PORT_MODE_PCIE; + iowrite32(ap_port_mode, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES); + iowrite32(DPMAIF_CG_EN, hw_info->pcie_base + DPMAIF_AP_CG_EN); + return 0; +} + +static void t7xx_dpmaif_pcie_dpmaif_sign(struct dpmaif_hw_info *hw_info) +{ + iowrite32(DPMAIF_PCIE_MODE_SET_VALUE, hw_info->pcie_base + DPMAIF_UL_RESERVE_AO_RW); +} + +static void t7xx_dpmaif_dl_performance(struct dpmaif_hw_info *hw_info) +{ + u32 enable_bat_cache, enable_pit_burst; + + enable_bat_cache = ioread32(hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1); + enable_bat_cache |= DPMAIF_DL_BAT_CACHE_PRI; + iowrite32(enable_bat_cache, hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1); + + enable_pit_burst = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES); + enable_pit_burst |= DPMAIF_DL_BURST_PIT_EN; + iowrite32(enable_pit_burst, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES); +} + + /* DPMAIF DL DLQ part HW setting */ + +static void t7xx_dpmaif_hw_hpc_cntl_set(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = DPMAIF_HPC_DLQ_PATH_MODE | DPMAIF_HPC_ADD_MODE_DF << 2; + value |= DPMAIF_HASH_PRIME_DF << 4; + value |= DPMAIF_HPC_TOTAL_NUM << 8; + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_HPC_CNTL); +} + +static void t7xx_dpmaif_hw_agg_cfg_set(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = DPMAIF_AGG_MAX_LEN_DF | 
DPMAIF_AGG_TBL_ENT_NUM_DF << 16; + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_DLQ_AGG_CFG); +} + +static void t7xx_dpmaif_hw_hash_bit_choose_set(struct dpmaif_hw_info *hw_info) +{ + iowrite32(DPMAIF_DLQ_HASH_BIT_CHOOSE_DF, + hw_info->pcie_base + DPMAIF_AO_DL_DLQPIT_INIT_CON5); +} + +static void t7xx_dpmaif_hw_mid_pit_timeout_thres_set(struct dpmaif_hw_info *hw_info) +{ + iowrite32(DPMAIF_MID_TIMEOUT_THRES_DF, hw_info->pcie_base + DPMAIF_AO_DL_DLQPIT_TIMEOUT0); +} + +static void t7xx_dpmaif_hw_dlq_timeout_thres_set(struct dpmaif_hw_info *hw_info) +{ + unsigned int value, i; + + /* Each register holds two DLQ threshold timeout values */ + for (i = 0; i < DPMAIF_HPC_MAX_TOTAL_NUM / 2; i++) { + value = FIELD_PREP(DPMAIF_DLQ_LOW_TIMEOUT_THRES_MKS, DPMAIF_DLQ_TIMEOUT_THRES_DF); + value |= FIELD_PREP(DPMAIF_DLQ_HIGH_TIMEOUT_THRES_MSK, + DPMAIF_DLQ_TIMEOUT_THRES_DF); + iowrite32(value, + hw_info->pcie_base + DPMAIF_AO_DL_DLQPIT_TIMEOUT1 + sizeof(u32) * i); + } +} + +static void t7xx_dpmaif_hw_dlq_start_prs_thres_set(struct dpmaif_hw_info *hw_info) +{ + iowrite32(DPMAIF_DLQ_PRS_THRES_DF, hw_info->pcie_base + DPMAIF_AO_DL_DLQPIT_TRIG_THRES); +} + +static void t7xx_dpmaif_dl_dlq_hpc_hw_init(struct dpmaif_hw_info *hw_info) +{ + t7xx_dpmaif_hw_hpc_cntl_set(hw_info); + t7xx_dpmaif_hw_agg_cfg_set(hw_info); + t7xx_dpmaif_hw_hash_bit_choose_set(hw_info); + t7xx_dpmaif_hw_mid_pit_timeout_thres_set(hw_info); + t7xx_dpmaif_hw_dlq_timeout_thres_set(hw_info); + t7xx_dpmaif_hw_dlq_start_prs_thres_set(hw_info); +} + +static int t7xx_dpmaif_dl_bat_init_done(struct dpmaif_hw_info *hw_info, bool frg_en) +{ + u32 value, dl_bat_init = 0; + int ret; + + if (frg_en) + dl_bat_init = DPMAIF_DL_BAT_FRG_INIT; + + dl_bat_init |= DPMAIF_DL_BAT_INIT_ALLSET; + dl_bat_init |= DPMAIF_DL_BAT_INIT_EN; + + ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_INIT, + value, !(value & DPMAIF_DL_BAT_INIT_NOT_READY), 0, + DPMAIF_CHECK_INIT_TIMEOUT_US); + if (ret) { + dev_err(hw_info->dev, "Data plane modem DL BAT is not ready\n"); + return ret; + } + + iowrite32(dl_bat_init, hw_info->pcie_base + DPMAIF_DL_BAT_INIT); + + ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_INIT, + value, !(value & DPMAIF_DL_BAT_INIT_NOT_READY), 0, + DPMAIF_CHECK_INIT_TIMEOUT_US); + if (ret) + dev_err(hw_info->dev, "Data plane modem DL BAT initialization failed\n"); + + return ret; +} + +static void t7xx_dpmaif_dl_set_bat_base_addr(struct dpmaif_hw_info *hw_info, + dma_addr_t addr) +{ + iowrite32(lower_32_bits(addr), hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON0); + iowrite32(upper_32_bits(addr), hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON3); +} + +static void t7xx_dpmaif_dl_set_bat_size(struct dpmaif_hw_info *hw_info, unsigned int size) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1); + value &= ~DPMAIF_BAT_SIZE_MSK; + value |= size & DPMAIF_BAT_SIZE_MSK; + iowrite32(value, hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1); +} + +static void t7xx_dpmaif_dl_bat_en(struct dpmaif_hw_info *hw_info, bool enable) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1); + + if (enable) + value |= DPMAIF_BAT_EN_MSK; + else + value &= ~DPMAIF_BAT_EN_MSK; + + iowrite32(value, hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1); +} + +static void t7xx_dpmaif_dl_set_ao_bid_maxcnt(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON0); + value &= ~DPMAIF_BAT_BID_MAXCNT_MSK; + value |= 
FIELD_PREP(DPMAIF_BAT_BID_MAXCNT_MSK, DPMAIF_HW_PKT_BIDCNT); + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON0); +} + +static void t7xx_dpmaif_dl_set_ao_mtu(struct dpmaif_hw_info *hw_info) +{ + iowrite32(DPMAIF_HW_MTU_SIZE, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON1); +} + +static void t7xx_dpmaif_dl_set_ao_pit_chknum(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2); + value &= ~DPMAIF_PIT_CHK_NUM_MSK; + value |= FIELD_PREP(DPMAIF_PIT_CHK_NUM_MSK, DPMAIF_HW_CHK_PIT_NUM); + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2); +} + +static void t7xx_dpmaif_dl_set_ao_remain_minsz(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON0); + value &= ~DPMAIF_BAT_REMAIN_MINSZ_MSK; + value |= FIELD_PREP(DPMAIF_BAT_REMAIN_MINSZ_MSK, + DPMAIF_HW_BAT_REMAIN / DPMAIF_BAT_REMAIN_SZ_BASE); + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON0); +} + +static void t7xx_dpmaif_dl_set_ao_bat_bufsz(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2); + value &= ~DPMAIF_BAT_BUF_SZ_MSK; + value |= FIELD_PREP(DPMAIF_BAT_BUF_SZ_MSK, + DPMAIF_HW_BAT_PKTBUF / DPMAIF_BAT_BUFFER_SZ_BASE); + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2); +} + +static void t7xx_dpmaif_dl_set_ao_bat_rsv_length(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2); + value &= ~DPMAIF_BAT_RSV_LEN_MSK; + value |= DPMAIF_HW_BAT_RSVLEN; + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2); +} + +static void t7xx_dpmaif_dl_set_pkt_alignment(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES); + value &= ~DPMAIF_PKT_ALIGN_MSK; + value |= DPMAIF_PKT_ALIGN_EN; + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES); +} + +static void t7xx_dpmaif_dl_set_pkt_checksum(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES); + value |= DPMAIF_DL_PKT_CHECKSUM_EN; + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES); +} + +static void t7xx_dpmaif_dl_set_ao_frg_check_thres(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES); + value &= ~DPMAIF_FRG_CHECK_THRES_MSK; + value |= DPMAIF_HW_CHK_FRG_NUM; + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES); +} + +static void t7xx_dpmaif_dl_set_ao_frg_bufsz(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES); + value &= ~DPMAIF_FRG_BUF_SZ_MSK; + value |= FIELD_PREP(DPMAIF_FRG_BUF_SZ_MSK, + DPMAIF_HW_FRG_PKTBUF / DPMAIF_FRG_BUFFER_SZ_BASE); + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES); +} + +static void t7xx_dpmaif_dl_frg_ao_en(struct dpmaif_hw_info *hw_info, bool enable) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES); + + if (enable) + value |= DPMAIF_FRG_EN_MSK; + else + value &= ~DPMAIF_FRG_EN_MSK; + + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES); +} + +static void t7xx_dpmaif_dl_set_ao_bat_check_thres(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + 
DPMAIF_AO_DL_RDY_CHK_THRES); + value &= ~DPMAIF_BAT_CHECK_THRES_MSK; + value |= FIELD_PREP(DPMAIF_BAT_CHECK_THRES_MSK, DPMAIF_HW_CHK_BAT_NUM); + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES); +} + +static void t7xx_dpmaif_dl_set_pit_seqnum(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PIT_SEQ_END); + value &= ~DPMAIF_DL_PIT_SEQ_MSK; + value |= DPMAIF_DL_PIT_SEQ_VALUE; + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PIT_SEQ_END); +} + +static void t7xx_dpmaif_dl_set_dlq_pit_base_addr(struct dpmaif_hw_info *hw_info, + dma_addr_t addr) +{ + iowrite32(lower_32_bits(addr), hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON0); + iowrite32(upper_32_bits(addr), hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON4); +} + +static void t7xx_dpmaif_dl_set_dlq_pit_size(struct dpmaif_hw_info *hw_info, unsigned int size) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON1); + value &= ~DPMAIF_PIT_SIZE_MSK; + value |= size & DPMAIF_PIT_SIZE_MSK; + iowrite32(value, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON1); + iowrite32(0, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON2); + iowrite32(0, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON3); + iowrite32(0, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON5); + iowrite32(0, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON6); +} + +static void t7xx_dpmaif_dl_dlq_pit_en(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON3); + value |= DPMAIF_DLQPIT_EN_MSK; + iowrite32(value, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON3); +} + +static void t7xx_dpmaif_dl_dlq_pit_init_done(struct dpmaif_hw_info *hw_info, + unsigned int pit_idx) +{ + unsigned int dl_pit_init; + int timeout; + u32 value; + + dl_pit_init = DPMAIF_DL_PIT_INIT_ALLSET; + dl_pit_init |= (pit_idx << DPMAIF_DLQPIT_CHAN_OFS); + dl_pit_init |= DPMAIF_DL_PIT_INIT_EN; + + timeout = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT, + value, !(value & DPMAIF_DL_PIT_INIT_NOT_READY), + DPMAIF_CHECK_DELAY_US, + DPMAIF_CHECK_INIT_TIMEOUT_US); + if (timeout) { + dev_err(hw_info->dev, "Data plane modem DL PIT is not ready\n"); + return; + } + + iowrite32(dl_pit_init, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT); + timeout = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT, + value, !(value & DPMAIF_DL_PIT_INIT_NOT_READY), + DPMAIF_CHECK_DELAY_US, + DPMAIF_CHECK_INIT_TIMEOUT_US); + if (timeout) + dev_err(hw_info->dev, "Data plane modem DL PIT initialization failed\n"); +} + +static void t7xx_dpmaif_config_dlq_pit_hw(struct dpmaif_hw_info *hw_info, unsigned int q_num, + struct dpmaif_dl *dl_que) +{ + t7xx_dpmaif_dl_set_dlq_pit_base_addr(hw_info, dl_que->pit_base); + t7xx_dpmaif_dl_set_dlq_pit_size(hw_info, dl_que->pit_size_cnt); + t7xx_dpmaif_dl_dlq_pit_en(hw_info); + t7xx_dpmaif_dl_dlq_pit_init_done(hw_info, q_num); +} + +static void t7xx_dpmaif_config_all_dlq_hw(struct dpmaif_hw_info *hw_info) +{ + int i; + + for (i = 0; i < DPMAIF_RXQ_NUM; i++) + t7xx_dpmaif_config_dlq_pit_hw(hw_info, i, &hw_info->dl_que[i]); +} + +static void t7xx_dpmaif_dl_all_q_en(struct dpmaif_hw_info *hw_info, bool enable) +{ + u32 dl_bat_init, value; + int timeout; + + value = ioread32(hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1); + + if (enable) + value |= DPMAIF_BAT_EN_MSK; + else + value &= ~DPMAIF_BAT_EN_MSK; + + iowrite32(value, hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1); + dl_bat_init = 
DPMAIF_DL_BAT_INIT_ONLY_ENABLE_BIT; + dl_bat_init |= DPMAIF_DL_BAT_INIT_EN; + + timeout = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_INIT, + value, !(value & DPMAIF_DL_BAT_INIT_NOT_READY), 0, + DPMAIF_CHECK_TIMEOUT_US); + if (timeout) + dev_err(hw_info->dev, "Timeout updating BAT setting to HW\n"); + + iowrite32(dl_bat_init, hw_info->pcie_base + DPMAIF_DL_BAT_INIT); + timeout = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_INIT, + value, !(value & DPMAIF_DL_BAT_INIT_NOT_READY), 0, + DPMAIF_CHECK_TIMEOUT_US); + if (timeout) + dev_err(hw_info->dev, "Data plane modem DL BAT is not ready\n"); +} + +static int t7xx_dpmaif_config_dlq_hw(struct dpmaif_hw_info *hw_info) +{ + struct dpmaif_dl *dl_que; + int ret; + + t7xx_dpmaif_dl_dlq_hpc_hw_init(hw_info); + + dl_que = &hw_info->dl_que[0]; /* All queues share one BAT/frag BAT table */ + if (!dl_que->que_started) + return -EBUSY; + + t7xx_dpmaif_dl_set_ao_remain_minsz(hw_info); + t7xx_dpmaif_dl_set_ao_bat_bufsz(hw_info); + t7xx_dpmaif_dl_set_ao_frg_bufsz(hw_info); + t7xx_dpmaif_dl_set_ao_bat_rsv_length(hw_info); + t7xx_dpmaif_dl_set_ao_bid_maxcnt(hw_info); + t7xx_dpmaif_dl_set_pkt_alignment(hw_info); + t7xx_dpmaif_dl_set_pit_seqnum(hw_info); + t7xx_dpmaif_dl_set_ao_mtu(hw_info); + t7xx_dpmaif_dl_set_ao_pit_chknum(hw_info); + t7xx_dpmaif_dl_set_ao_bat_check_thres(hw_info); + t7xx_dpmaif_dl_set_ao_frg_check_thres(hw_info); + t7xx_dpmaif_dl_frg_ao_en(hw_info, true); + + t7xx_dpmaif_dl_set_bat_base_addr(hw_info, dl_que->frg_base); + t7xx_dpmaif_dl_set_bat_size(hw_info, dl_que->frg_size_cnt); + t7xx_dpmaif_dl_bat_en(hw_info, true); + + ret = t7xx_dpmaif_dl_bat_init_done(hw_info, true); + if (ret) + return ret; + + t7xx_dpmaif_dl_set_bat_base_addr(hw_info, dl_que->bat_base); + t7xx_dpmaif_dl_set_bat_size(hw_info, dl_que->bat_size_cnt); + t7xx_dpmaif_dl_bat_en(hw_info, false); + + ret = t7xx_dpmaif_dl_bat_init_done(hw_info, false); + if (ret) + return ret; + + /* Init PIT (two PIT table) */ + t7xx_dpmaif_config_all_dlq_hw(hw_info); + t7xx_dpmaif_dl_all_q_en(hw_info, true); + t7xx_dpmaif_dl_set_pkt_checksum(hw_info); + return 0; +} + +static void t7xx_dpmaif_ul_update_drb_size(struct dpmaif_hw_info *hw_info, + unsigned int q_num, unsigned int size) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_UL_DRBSIZE_ADDRH_n(q_num)); + value &= ~DPMAIF_DRB_SIZE_MSK; + value |= size & DPMAIF_DRB_SIZE_MSK; + iowrite32(value, hw_info->pcie_base + DPMAIF_UL_DRBSIZE_ADDRH_n(q_num)); +} + +static void t7xx_dpmaif_ul_update_drb_base_addr(struct dpmaif_hw_info *hw_info, + unsigned int q_num, dma_addr_t addr) +{ + iowrite32(lower_32_bits(addr), hw_info->pcie_base + DPMAIF_ULQSAR_n(q_num)); + iowrite32(upper_32_bits(addr), hw_info->pcie_base + DPMAIF_UL_DRB_ADDRH_n(q_num)); +} + +static void t7xx_dpmaif_ul_rdy_en(struct dpmaif_hw_info *hw_info, + unsigned int q_num, bool ready) +{ + u32 value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0); + + if (ready) + value |= BIT(q_num); + else + value &= ~BIT(q_num); + + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0); +} + +static void t7xx_dpmaif_ul_arb_en(struct dpmaif_hw_info *hw_info, + unsigned int q_num, bool enable) +{ + u32 value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0); + + if (enable) + value |= BIT(q_num + 8); + else + value &= ~BIT(q_num + 8); + + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0); +} + +static void t7xx_dpmaif_config_ulq_hw(struct dpmaif_hw_info *hw_info) +{ + struct 
dpmaif_ul *ul_que; + int i; + + for (i = 0; i < DPMAIF_TXQ_NUM; i++) { + ul_que = &hw_info->ul_que[i]; + if (ul_que->que_started) { + t7xx_dpmaif_ul_update_drb_size(hw_info, i, ul_que->drb_size_cnt * + DPMAIF_UL_DRB_SIZE_WORD); + t7xx_dpmaif_ul_update_drb_base_addr(hw_info, i, ul_que->drb_base); + t7xx_dpmaif_ul_rdy_en(hw_info, i, true); + t7xx_dpmaif_ul_arb_en(hw_info, i, true); + } else { + t7xx_dpmaif_ul_arb_en(hw_info, i, false); + } + } +} + +static int t7xx_dpmaif_hw_init_done(struct dpmaif_hw_info *hw_info) +{ + u32 ap_cfg; + int ret; + + ap_cfg = ioread32(hw_info->pcie_base + DPMAIF_AP_OVERWRITE_CFG); + ap_cfg |= DPMAIF_SRAM_SYNC; + iowrite32(ap_cfg, hw_info->pcie_base + DPMAIF_AP_OVERWRITE_CFG); + + ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AP_OVERWRITE_CFG, + ap_cfg, !(ap_cfg & DPMAIF_SRAM_SYNC), 0, + DPMAIF_CHECK_TIMEOUT_US); + if (ret) + return ret; + + iowrite32(DPMAIF_UL_INIT_DONE, hw_info->pcie_base + DPMAIF_AO_UL_INIT_SET); + iowrite32(DPMAIF_DL_INIT_DONE, hw_info->pcie_base + DPMAIF_AO_DL_INIT_SET); + return 0; +} + +static bool t7xx_dpmaif_dl_idle_check(struct dpmaif_hw_info *hw_info) +{ + u32 dpmaif_dl_is_busy = ioread32(hw_info->pcie_base + DPMAIF_DL_CHK_BUSY); + + return !(dpmaif_dl_is_busy & DPMAIF_DL_IDLE_STS); +} + +static void t7xx_dpmaif_ul_all_q_en(struct dpmaif_hw_info *hw_info, bool enable) +{ + u32 ul_arb_en = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0); + + if (enable) + ul_arb_en |= DPMAIF_UL_ALL_QUE_ARB_EN; + else + ul_arb_en &= ~DPMAIF_UL_ALL_QUE_ARB_EN; + + iowrite32(ul_arb_en, hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0); +} + +static bool t7xx_dpmaif_ul_idle_check(struct dpmaif_hw_info *hw_info) +{ + u32 dpmaif_ul_is_busy = ioread32(hw_info->pcie_base + DPMAIF_UL_CHK_BUSY); + + return !(dpmaif_ul_is_busy & DPMAIF_UL_IDLE_STS); +} + +void t7xx_dpmaif_ul_update_hw_drb_cnt(struct dpmaif_hw_info *hw_info, unsigned int q_num, + unsigned int drb_entry_cnt) +{ + u32 ul_update, value; + int err; + + ul_update = drb_entry_cnt & DPMAIF_UL_ADD_COUNT_MASK; + ul_update |= DPMAIF_UL_ADD_UPDATE; + + err = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_ULQ_ADD_DESC_CH_n(q_num), + value, !(value & DPMAIF_UL_ADD_NOT_READY), 0, + DPMAIF_CHECK_TIMEOUT_US); + if (err) { + dev_err(hw_info->dev, "UL add is not ready\n"); + return; + } + + iowrite32(ul_update, hw_info->pcie_base + DPMAIF_ULQ_ADD_DESC_CH_n(q_num)); + + err = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_ULQ_ADD_DESC_CH_n(q_num), + value, !(value & DPMAIF_UL_ADD_NOT_READY), 0, + DPMAIF_CHECK_TIMEOUT_US); + if (err) + dev_err(hw_info->dev, "Timeout updating UL add\n"); +} + +unsigned int t7xx_dpmaif_ul_get_rd_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num) +{ + unsigned int value = ioread32(hw_info->pcie_base + DPMAIF_ULQ_STA0_n(q_num)); + + return FIELD_GET(DPMAIF_UL_DRB_RIDX_MSK, value) / DPMAIF_UL_DRB_SIZE_WORD; +} + +int t7xx_dpmaif_dlq_add_pit_remain_cnt(struct dpmaif_hw_info *hw_info, unsigned int dlq_pit_idx, + unsigned int pit_remain_cnt) +{ + u32 dl_update, value; + int ret; + + dl_update = pit_remain_cnt & DPMAIF_PIT_REM_CNT_MSK; + dl_update |= DPMAIF_DL_ADD_UPDATE | (dlq_pit_idx << DPMAIF_ADD_DLQ_PIT_CHAN_OFS); + + ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_DLQPIT_ADD, + value, !(value & DPMAIF_DL_ADD_NOT_READY), 0, + DPMAIF_CHECK_TIMEOUT_US); + if (ret) { + dev_err(hw_info->dev, "Data plane modem is not ready to add dlq\n"); + return ret; + } + + iowrite32(dl_update, hw_info->pcie_base + DPMAIF_DL_DLQPIT_ADD); + + ret = 
ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_DLQPIT_ADD, + value, !(value & DPMAIF_DL_ADD_NOT_READY), 0, + DPMAIF_CHECK_TIMEOUT_US); + if (ret) { + dev_err(hw_info->dev, "Data plane modem add dlq failed\n"); + return ret; + } + + return 0; +} + +unsigned int t7xx_dpmaif_dl_dlq_pit_get_wr_idx(struct dpmaif_hw_info *hw_info, + unsigned int dlq_pit_idx) +{ + u32 value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_DLQ_WR_IDX + + dlq_pit_idx * DLQ_PIT_IDX_SIZE); + return value & DPMAIF_DL_RD_WR_IDX_MSK; +} + +static int t7xx_dl_add_timedout(struct dpmaif_hw_info *hw_info) +{ + u32 value; + + return ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_ADD, + value, !(value & DPMAIF_DL_ADD_NOT_READY), 0, + DPMAIF_CHECK_TIMEOUT_US); +} + +int t7xx_dpmaif_dl_snd_hw_bat_cnt(struct dpmaif_hw_info *hw_info, unsigned int bat_entry_cnt) +{ + unsigned int value; + + if (t7xx_dl_add_timedout(hw_info)) { + dev_err(hw_info->dev, "DL add BAT not ready\n"); + return -EBUSY; + } + + value = bat_entry_cnt & DPMAIF_DL_ADD_COUNT_MASK; + value |= DPMAIF_DL_ADD_UPDATE; + iowrite32(value, hw_info->pcie_base + DPMAIF_DL_BAT_ADD); + + if (t7xx_dl_add_timedout(hw_info)) { + dev_err(hw_info->dev, "DL add BAT timeout\n"); + return -EBUSY; + } + + return 0; +} + +unsigned int t7xx_dpmaif_dl_get_bat_rd_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num) +{ + u32 value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_BAT_RD_IDX); + return value & DPMAIF_DL_RD_WR_IDX_MSK; +} + +unsigned int t7xx_dpmaif_dl_get_bat_wr_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num) +{ + u32 value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_BAT_WR_IDX); + return value & DPMAIF_DL_RD_WR_IDX_MSK; +} + +int t7xx_dpmaif_dl_snd_hw_frg_cnt(struct dpmaif_hw_info *hw_info, unsigned int frg_entry_cnt) +{ + unsigned int value; + + if (t7xx_dl_add_timedout(hw_info)) { + dev_err(hw_info->dev, "Data plane modem is not ready to add frag DLQ\n"); + return -EBUSY; + } + + value = frg_entry_cnt & DPMAIF_DL_ADD_COUNT_MASK; + value |= DPMAIF_DL_FRG_ADD_UPDATE | DPMAIF_DL_ADD_UPDATE; + iowrite32(value, hw_info->pcie_base + DPMAIF_DL_BAT_ADD); + + if (t7xx_dl_add_timedout(hw_info)) { + dev_err(hw_info->dev, "Data plane modem add frag DLQ failed"); + return -EBUSY; + } + + return 0; +} + +unsigned int t7xx_dpmaif_dl_get_frg_rd_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num) +{ + u32 value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_FRGBAT_RD_IDX); + return value & DPMAIF_DL_RD_WR_IDX_MSK; +} + +static void t7xx_dpmaif_set_queue_property(struct dpmaif_hw_info *hw_info, + struct dpmaif_hw_params *init_para) +{ + struct dpmaif_dl *dl_que; + struct dpmaif_ul *ul_que; + int i; + + for (i = 0; i < DPMAIF_RXQ_NUM; i++) { + dl_que = &hw_info->dl_que[i]; + dl_que->bat_base = init_para->pkt_bat_base_addr[i]; + dl_que->bat_size_cnt = init_para->pkt_bat_size_cnt[i]; + dl_que->pit_base = init_para->pit_base_addr[i]; + dl_que->pit_size_cnt = init_para->pit_size_cnt[i]; + dl_que->frg_base = init_para->frg_bat_base_addr[i]; + dl_que->frg_size_cnt = init_para->frg_bat_size_cnt[i]; + dl_que->que_started = true; + } + + for (i = 0; i < DPMAIF_TXQ_NUM; i++) { + ul_que = &hw_info->ul_que[i]; + ul_que->drb_base = init_para->drb_base_addr[i]; + ul_que->drb_size_cnt = init_para->drb_size_cnt[i]; + ul_que->que_started = true; + } +} + +/** + * t7xx_dpmaif_hw_stop_all_txq() - Stop all TX queues. + * @hw_info: Pointer to struct hw_info. + * + * Disable HW UL queues. 
Checks busy UL queues to go to idle + * with an attempt count of 1000000. + * + * Return: + * * 0 - Success + * * -ETIMEDOUT - Timed out checking busy queues + */ +int t7xx_dpmaif_hw_stop_all_txq(struct dpmaif_hw_info *hw_info) +{ + int count = 0; + + t7xx_dpmaif_ul_all_q_en(hw_info, false); + while (t7xx_dpmaif_ul_idle_check(hw_info)) { + if (++count >= DPMAIF_MAX_CHECK_COUNT) { + dev_err(hw_info->dev, "Failed to stop TX, status: 0x%x\n", + ioread32(hw_info->pcie_base + DPMAIF_UL_CHK_BUSY)); + return -ETIMEDOUT; + } + } + + return 0; +} + +/** + * t7xx_dpmaif_hw_stop_all_rxq() - Stop all RX queues. + * @hw_info: Pointer to struct hw_info. + * + * Disable HW DL queue. Checks busy UL queues to go to idle + * with an attempt count of 1000000. + * Check that HW PIT write index equals read index with the same + * attempt count. + * + * Return: + * * 0 - Success. + * * -ETIMEDOUT - Timed out checking busy queues. + */ +int t7xx_dpmaif_hw_stop_all_rxq(struct dpmaif_hw_info *hw_info) +{ + unsigned int wr_idx, rd_idx; + int count = 0; + + t7xx_dpmaif_dl_all_q_en(hw_info, false); + while (t7xx_dpmaif_dl_idle_check(hw_info)) { + if (++count >= DPMAIF_MAX_CHECK_COUNT) { + dev_err(hw_info->dev, "Failed to stop RX, status: 0x%x\n", + ioread32(hw_info->pcie_base + DPMAIF_DL_CHK_BUSY)); + return -ETIMEDOUT; + } + } + + /* Check middle PIT sync done */ + count = 0; + do { + wr_idx = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PIT_WR_IDX); + wr_idx &= DPMAIF_DL_RD_WR_IDX_MSK; + rd_idx = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PIT_RD_IDX); + rd_idx &= DPMAIF_DL_RD_WR_IDX_MSK; + + if (wr_idx == rd_idx) + return 0; + } while (++count < DPMAIF_MAX_CHECK_COUNT); + + dev_err(hw_info->dev, "Check middle PIT sync fail\n"); + return -ETIMEDOUT; +} + +void t7xx_dpmaif_start_hw(struct dpmaif_hw_info *hw_info) +{ + t7xx_dpmaif_ul_all_q_en(hw_info, true); + t7xx_dpmaif_dl_all_q_en(hw_info, true); +} + +/** + * t7xx_dpmaif_hw_init() - Initialize HW data path API. + * @hw_info: Pointer to struct hw_info. + * @init_param: Pointer to struct dpmaif_hw_params. + * + * Configures port mode, clock config, HW interrupt initialization, and HW queue. + * + * Return: + * * 0 - Success. + * * -ERROR - Error code from failure sub-initializations. 
+ */ +int t7xx_dpmaif_hw_init(struct dpmaif_hw_info *hw_info, struct dpmaif_hw_params *init_param) +{ + int ret; + + ret = t7xx_dpmaif_hw_config(hw_info); + if (ret) { + dev_err(hw_info->dev, "DPMAIF HW config failed\n"); + return ret; + } + + ret = t7xx_dpmaif_init_intr(hw_info); + if (ret) { + dev_err(hw_info->dev, "DPMAIF HW interrupts init failed\n"); + return ret; + } + + t7xx_dpmaif_set_queue_property(hw_info, init_param); + t7xx_dpmaif_pcie_dpmaif_sign(hw_info); + t7xx_dpmaif_dl_performance(hw_info); + + ret = t7xx_dpmaif_config_dlq_hw(hw_info); + if (ret) { + dev_err(hw_info->dev, "DPMAIF HW dlq config failed\n"); + return ret; + } + + t7xx_dpmaif_config_ulq_hw(hw_info); + + ret = t7xx_dpmaif_hw_init_done(hw_info); + if (ret) + dev_err(hw_info->dev, "DPMAIF HW queue init failed\n"); + + return ret; +} + +bool t7xx_dpmaif_ul_clr_done(struct dpmaif_hw_info *hw_info, unsigned int qno) +{ + u32 intr_status; + + intr_status = ioread32(hw_info->pcie_base + DPMAIF_AP_L2TISAR0); + intr_status &= BIT(DP_UL_INT_DONE_OFFSET + qno); + if (intr_status) { + iowrite32(intr_status, hw_info->pcie_base + DPMAIF_AP_L2TISAR0); + return true; + } + + return false; +} diff --git a/drivers/net/wwan/t7xx/t7xx_dpmaif.h b/drivers/net/wwan/t7xx/t7xx_dpmaif.h new file mode 100644 index 000000000..ae292355a --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_dpmaif.h @@ -0,0 +1,179 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Amir Hanania + * Haijun Liu + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Chiranjeevi Rapolu + * Eliot Lee + * Sreehari Kancharla + */ + +#ifndef __T7XX_DPMAIF_H__ +#define __T7XX_DPMAIF_H__ + +#include +#include + +#define DPMAIF_DL_PIT_SEQ_VALUE 251 +#define DPMAIF_UL_DRB_SIZE_WORD 4 + +#define DPMAIF_MAX_CHECK_COUNT 1000000 +#define DPMAIF_CHECK_TIMEOUT_US 10000 +#define DPMAIF_CHECK_INIT_TIMEOUT_US 100000 +#define DPMAIF_CHECK_DELAY_US 10 + +#define DPMAIF_RXQ_NUM 2 +#define DPMAIF_TXQ_NUM 5 + +struct dpmaif_isr_en_mask { + unsigned int ap_ul_l2intr_en_msk; + unsigned int ap_dl_l2intr_en_msk; + unsigned int ap_udl_ip_busy_en_msk; + unsigned int ap_dl_l2intr_err_en_msk; +}; + +struct dpmaif_ul { + bool que_started; + unsigned char reserved[3]; + dma_addr_t drb_base; + unsigned int drb_size_cnt; +}; + +struct dpmaif_dl { + bool que_started; + unsigned char reserved[3]; + dma_addr_t pit_base; + unsigned int pit_size_cnt; + dma_addr_t bat_base; + unsigned int bat_size_cnt; + dma_addr_t frg_base; + unsigned int frg_size_cnt; + unsigned int pit_seq; +}; + +struct dpmaif_hw_info { + struct device *dev; + void __iomem *pcie_base; + struct dpmaif_dl dl_que[DPMAIF_RXQ_NUM]; + struct dpmaif_ul ul_que[DPMAIF_TXQ_NUM]; + struct dpmaif_isr_en_mask isr_en_mask; +}; + +/* DPMAIF HW Initialization parameter structure */ +struct dpmaif_hw_params { + /* UL part */ + dma_addr_t drb_base_addr[DPMAIF_TXQ_NUM]; + unsigned int drb_size_cnt[DPMAIF_TXQ_NUM]; + /* DL part */ + dma_addr_t pkt_bat_base_addr[DPMAIF_RXQ_NUM]; + unsigned int pkt_bat_size_cnt[DPMAIF_RXQ_NUM]; + dma_addr_t frg_bat_base_addr[DPMAIF_RXQ_NUM]; + unsigned int frg_bat_size_cnt[DPMAIF_RXQ_NUM]; + dma_addr_t pit_base_addr[DPMAIF_RXQ_NUM]; + unsigned int pit_size_cnt[DPMAIF_RXQ_NUM]; +}; + +enum dpmaif_hw_intr_type { + DPF_INTR_INVALID_MIN, + DPF_INTR_UL_DONE, + DPF_INTR_UL_DRB_EMPTY, + DPF_INTR_UL_MD_NOTREADY, + DPF_INTR_UL_MD_PWR_NOTREADY, + DPF_INTR_UL_LEN_ERR, + DPF_INTR_DL_DONE, + DPF_INTR_DL_SKB_LEN_ERR, + 
DPF_INTR_DL_BATCNT_LEN_ERR, + DPF_INTR_DL_PITCNT_LEN_ERR, + DPF_INTR_DL_PKT_EMPTY_SET, + DPF_INTR_DL_FRG_EMPTY_SET, + DPF_INTR_DL_MTU_ERR, + DPF_INTR_DL_FRGCNT_LEN_ERR, + DPF_INTR_DL_Q0_PITCNT_LEN_ERR, + DPF_INTR_DL_Q1_PITCNT_LEN_ERR, + DPF_INTR_DL_HPC_ENT_TYPE_ERR, + DPF_INTR_DL_Q0_DONE, + DPF_INTR_DL_Q1_DONE, + DPF_INTR_INVALID_MAX +}; + +#define DPF_RX_QNO0 0 +#define DPF_RX_QNO1 1 +#define DPF_RX_QNO_DFT DPF_RX_QNO0 + +struct dpmaif_hw_intr_st_para { + unsigned int intr_cnt; + enum dpmaif_hw_intr_type intr_types[DPF_INTR_INVALID_MAX - 1]; + unsigned int intr_queues[DPF_INTR_INVALID_MAX - 1]; +}; + +#define DPMAIF_HW_BAT_REMAIN 64 +#define DPMAIF_HW_BAT_PKTBUF (128 * 28) +#define DPMAIF_HW_FRG_PKTBUF 128 +#define DPMAIF_HW_BAT_RSVLEN 64 +#define DPMAIF_HW_PKT_BIDCNT 1 +#define DPMAIF_HW_MTU_SIZE (3 * 1024 + 8) +#define DPMAIF_HW_CHK_BAT_NUM 62 +#define DPMAIF_HW_CHK_FRG_NUM 3 +#define DPMAIF_HW_CHK_PIT_NUM (2 * DPMAIF_HW_CHK_BAT_NUM) + +#define DP_UL_INT_DONE_OFFSET 0 +#define DP_UL_INT_QDONE_MSK GENMASK(4, 0) +#define DP_UL_INT_EMPTY_MSK GENMASK(9, 5) +#define DP_UL_INT_MD_NOTREADY_MSK GENMASK(14, 10) +#define DP_UL_INT_MD_PWR_NOTREADY_MSK GENMASK(19, 15) +#define DP_UL_INT_ERR_MSK GENMASK(24, 20) + +#define DP_DL_INT_QDONE_MSK BIT(0) +#define DP_DL_INT_SKB_LEN_ERR BIT(1) +#define DP_DL_INT_BATCNT_LEN_ERR BIT(2) +#define DP_DL_INT_PITCNT_LEN_ERR BIT(3) +#define DP_DL_INT_PKT_EMPTY_MSK BIT(4) +#define DP_DL_INT_FRG_EMPTY_MSK BIT(5) +#define DP_DL_INT_MTU_ERR_MSK BIT(6) +#define DP_DL_INT_FRG_LEN_ERR_MSK BIT(7) +#define DP_DL_INT_Q0_PITCNT_LEN_ERR BIT(8) +#define DP_DL_INT_Q1_PITCNT_LEN_ERR BIT(9) +#define DP_DL_INT_HPC_ENT_TYPE_ERR BIT(10) +#define DP_DL_INT_Q0_DONE BIT(13) +#define DP_DL_INT_Q1_DONE BIT(14) + +#define DP_DL_Q0_STATUS_MASK (DP_DL_INT_Q0_PITCNT_LEN_ERR | DP_DL_INT_Q0_DONE) +#define DP_DL_Q1_STATUS_MASK (DP_DL_INT_Q1_PITCNT_LEN_ERR | DP_DL_INT_Q1_DONE) + +int t7xx_dpmaif_hw_init(struct dpmaif_hw_info *hw_info, struct dpmaif_hw_params *init_param); +int t7xx_dpmaif_hw_stop_all_txq(struct dpmaif_hw_info *hw_info); +int t7xx_dpmaif_hw_stop_all_rxq(struct dpmaif_hw_info *hw_info); +void t7xx_dpmaif_start_hw(struct dpmaif_hw_info *hw_info); +int t7xx_dpmaif_hw_get_intr_cnt(struct dpmaif_hw_info *hw_info, + struct dpmaif_hw_intr_st_para *para, int qno); +void t7xx_dpmaif_unmask_ulq_intr(struct dpmaif_hw_info *hw_info, unsigned int q_num); +void t7xx_dpmaif_ul_update_hw_drb_cnt(struct dpmaif_hw_info *hw_info, unsigned int q_num, + unsigned int drb_entry_cnt); +int t7xx_dpmaif_dl_snd_hw_bat_cnt(struct dpmaif_hw_info *hw_info, unsigned int bat_entry_cnt); +int t7xx_dpmaif_dl_snd_hw_frg_cnt(struct dpmaif_hw_info *hw_info, unsigned int frg_entry_cnt); +int t7xx_dpmaif_dlq_add_pit_remain_cnt(struct dpmaif_hw_info *hw_info, unsigned int dlq_pit_idx, + unsigned int pit_remain_cnt); +void t7xx_dpmaif_dlq_unmask_pitcnt_len_err_intr(struct dpmaif_hw_info *hw_info, + unsigned int qno); +void t7xx_dpmaif_dlq_unmask_rx_done(struct dpmaif_hw_info *hw_info, unsigned int qno); +bool t7xx_dpmaif_ul_clr_done(struct dpmaif_hw_info *hw_info, unsigned int qno); +void t7xx_dpmaif_ul_clr_all_intr(struct dpmaif_hw_info *hw_info); +void t7xx_dpmaif_dl_clr_all_intr(struct dpmaif_hw_info *hw_info); +void t7xx_dpmaif_clr_ip_busy_sts(struct dpmaif_hw_info *hw_info); +void t7xx_dpmaif_dl_unmask_batcnt_len_err_intr(struct dpmaif_hw_info *hw_info); +void t7xx_dpmaif_dl_unmask_pitcnt_len_err_intr(struct dpmaif_hw_info *hw_info); +unsigned int t7xx_dpmaif_ul_get_rd_idx(struct dpmaif_hw_info *hw_info, unsigned 
int q_num); +unsigned int t7xx_dpmaif_dl_get_bat_rd_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num); +unsigned int t7xx_dpmaif_dl_get_bat_wr_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num); +unsigned int t7xx_dpmaif_dl_get_frg_rd_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num); +unsigned int t7xx_dpmaif_dl_dlq_pit_get_wr_idx(struct dpmaif_hw_info *hw_info, + unsigned int dlq_pit_idx); + +#endif /* __T7XX_DPMAIF_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c new file mode 100644 index 000000000..6ff30cb8e --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c @@ -0,0 +1,1339 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Amir Hanania + * Haijun Liu + * Moises Veleta + * Ricardo Martinez + * Sreehari Kancharla + * + * Contributors: + * Andy Shevchenko + * Chiranjeevi Rapolu + * Eliot Lee + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_cldma.h" +#include "t7xx_hif_cldma.h" +#include "t7xx_mhccif.h" +#include "t7xx_pci.h" +#include "t7xx_pcie_mac.h" +#include "t7xx_port_proxy.h" +#include "t7xx_reg.h" +#include "t7xx_state_monitor.h" + +#define MAX_TX_BUDGET 16 +#define MAX_RX_BUDGET 16 + +#define CHECK_Q_STOP_TIMEOUT_US 1000000 +#define CHECK_Q_STOP_STEP_US 10000 + +#define CLDMA_JUMBO_BUFF_SZ (63 * 1024 + sizeof(struct ccci_header)) + +static void md_cd_queue_struct_reset(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl, + enum mtk_txrx tx_rx, unsigned int index) +{ + queue->dir = tx_rx; + queue->index = index; + queue->md_ctrl = md_ctrl; + queue->tr_ring = NULL; + queue->tr_done = NULL; + queue->tx_next = NULL; +} + +static void md_cd_queue_struct_init(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl, + enum mtk_txrx tx_rx, unsigned int index) +{ + md_cd_queue_struct_reset(queue, md_ctrl, tx_rx, index); + init_waitqueue_head(&queue->req_wq); + spin_lock_init(&queue->ring_lock); +} + +static void t7xx_cldma_gpd_set_data_ptr(struct cldma_gpd *gpd, dma_addr_t data_ptr) +{ + gpd->data_buff_bd_ptr_h = cpu_to_le32(upper_32_bits(data_ptr)); + gpd->data_buff_bd_ptr_l = cpu_to_le32(lower_32_bits(data_ptr)); +} + +static void t7xx_cldma_gpd_set_next_ptr(struct cldma_gpd *gpd, dma_addr_t next_ptr) +{ + gpd->next_gpd_ptr_h = cpu_to_le32(upper_32_bits(next_ptr)); + gpd->next_gpd_ptr_l = cpu_to_le32(lower_32_bits(next_ptr)); +} + +static int t7xx_cldma_alloc_and_map_skb(struct cldma_ctrl *md_ctrl, struct cldma_request *req, + size_t size, gfp_t gfp_mask) +{ + req->skb = __dev_alloc_skb(size, gfp_mask); + if (!req->skb) + return -ENOMEM; + + req->mapped_buff = dma_map_single(md_ctrl->dev, req->skb->data, size, DMA_FROM_DEVICE); + if (dma_mapping_error(md_ctrl->dev, req->mapped_buff)) { + dev_kfree_skb_any(req->skb); + req->skb = NULL; + req->mapped_buff = 0; + dev_err(md_ctrl->dev, "DMA mapping failed\n"); + return -ENOMEM; + } + + return 0; +} + +static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool *over_budget) +{ + struct cldma_ctrl *md_ctrl = queue->md_ctrl; + unsigned int hwo_polling_count = 0; + struct t7xx_cldma_hw *hw_info; + bool rx_not_done = true; + unsigned long flags; + int count = 0; + + hw_info = &md_ctrl->hw_info; + + do { + struct 
cldma_request *req; + struct cldma_gpd *gpd; + struct sk_buff *skb; + int ret; + + req = queue->tr_done; + if (!req) + return -ENODATA; + + gpd = req->gpd; + if ((gpd->flags & GPD_FLAGS_HWO) || !req->skb) { + dma_addr_t gpd_addr; + + if (!pci_device_is_present(to_pci_dev(md_ctrl->dev))) { + dev_err(md_ctrl->dev, "PCIe Link disconnected\n"); + return -ENODEV; + } + + gpd_addr = ioread64(hw_info->ap_pdn_base + REG_CLDMA_DL_CURRENT_ADDRL_0 + + queue->index * sizeof(u64)); + if (req->gpd_addr == gpd_addr || hwo_polling_count++ >= 100) + return 0; + + udelay(1); + continue; + } + + hwo_polling_count = 0; + skb = req->skb; + + if (req->mapped_buff) { + dma_unmap_single(md_ctrl->dev, req->mapped_buff, + queue->tr_ring->pkt_size, DMA_FROM_DEVICE); + req->mapped_buff = 0; + } + + skb->len = 0; + skb_reset_tail_pointer(skb); + skb_put(skb, le16_to_cpu(gpd->data_buff_len)); + + ret = md_ctrl->recv_skb(queue, skb); + /* Break processing, will try again later */ + if (ret < 0) + return ret; + + req->skb = NULL; + t7xx_cldma_gpd_set_data_ptr(gpd, 0); + + spin_lock_irqsave(&queue->ring_lock, flags); + queue->tr_done = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry); + spin_unlock_irqrestore(&queue->ring_lock, flags); + req = queue->rx_refill; + + ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size, GFP_KERNEL); + if (ret) + return ret; + + gpd = req->gpd; + t7xx_cldma_gpd_set_data_ptr(gpd, req->mapped_buff); + gpd->data_buff_len = 0; + gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO; + + spin_lock_irqsave(&queue->ring_lock, flags); + queue->rx_refill = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry); + spin_unlock_irqrestore(&queue->ring_lock, flags); + + rx_not_done = ++count < budget || !need_resched(); + } while (rx_not_done); + + *over_budget = true; + return 0; +} + +static int t7xx_cldma_gpd_rx_collect(struct cldma_queue *queue, int budget) +{ + struct cldma_ctrl *md_ctrl = queue->md_ctrl; + struct t7xx_cldma_hw *hw_info; + unsigned int pending_rx_int; + bool over_budget = false; + unsigned long flags; + int ret; + + hw_info = &md_ctrl->hw_info; + + do { + ret = t7xx_cldma_gpd_rx_from_q(queue, budget, &over_budget); + if (ret == -ENODATA) + return 0; + else if (ret) + return ret; + + pending_rx_int = 0; + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + if (md_ctrl->rxq_active & BIT(queue->index)) { + if (!t7xx_cldma_hw_queue_status(hw_info, queue->index, MTK_RX)) + t7xx_cldma_hw_resume_queue(hw_info, queue->index, MTK_RX); + + pending_rx_int = t7xx_cldma_hw_int_status(hw_info, BIT(queue->index), + MTK_RX); + if (pending_rx_int) { + t7xx_cldma_hw_rx_done(hw_info, pending_rx_int); + + if (over_budget) { + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + return -EAGAIN; + } + } + } + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + } while (pending_rx_int); + + return 0; +} + +static void t7xx_cldma_rx_done(struct work_struct *work) +{ + struct cldma_queue *queue = container_of(work, struct cldma_queue, cldma_work); + struct cldma_ctrl *md_ctrl = queue->md_ctrl; + int value; + + value = t7xx_cldma_gpd_rx_collect(queue, queue->budget); + if (value && md_ctrl->rxq_active & BIT(queue->index)) { + queue_work(queue->worker, &queue->cldma_work); + return; + } + + t7xx_cldma_clear_ip_busy(&md_ctrl->hw_info); + t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, queue->index, MTK_RX); + t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, queue->index, MTK_RX); + pm_runtime_mark_last_busy(md_ctrl->dev); + pm_runtime_put_autosuspend(md_ctrl->dev); +} + +static 
int t7xx_cldma_gpd_tx_collect(struct cldma_queue *queue) +{ + struct cldma_ctrl *md_ctrl = queue->md_ctrl; + unsigned int dma_len, count = 0; + struct cldma_request *req; + struct cldma_gpd *gpd; + unsigned long flags; + dma_addr_t dma_free; + struct sk_buff *skb; + + while (!kthread_should_stop()) { + spin_lock_irqsave(&queue->ring_lock, flags); + req = queue->tr_done; + if (!req) { + spin_unlock_irqrestore(&queue->ring_lock, flags); + break; + } + gpd = req->gpd; + if ((gpd->flags & GPD_FLAGS_HWO) || !req->skb) { + spin_unlock_irqrestore(&queue->ring_lock, flags); + break; + } + queue->budget++; + dma_free = req->mapped_buff; + dma_len = le16_to_cpu(gpd->data_buff_len); + skb = req->skb; + req->skb = NULL; + queue->tr_done = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry); + spin_unlock_irqrestore(&queue->ring_lock, flags); + + count++; + dma_unmap_single(md_ctrl->dev, dma_free, dma_len, DMA_TO_DEVICE); + dev_kfree_skb_any(skb); + } + + if (count) + wake_up_nr(&queue->req_wq, count); + + return count; +} + +static void t7xx_cldma_txq_empty_hndl(struct cldma_queue *queue) +{ + struct cldma_ctrl *md_ctrl = queue->md_ctrl; + struct cldma_request *req; + dma_addr_t ul_curr_addr; + unsigned long flags; + bool pending_gpd; + + if (!(md_ctrl->txq_active & BIT(queue->index))) + return; + + spin_lock_irqsave(&queue->ring_lock, flags); + req = list_prev_entry_circular(queue->tx_next, &queue->tr_ring->gpd_ring, entry); + spin_unlock_irqrestore(&queue->ring_lock, flags); + + pending_gpd = (req->gpd->flags & GPD_FLAGS_HWO) && req->skb; + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + if (pending_gpd) { + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; + + /* Check current processing TGPD, 64-bit address is in a table by Q index */ + ul_curr_addr = ioread64(hw_info->ap_pdn_base + REG_CLDMA_UL_CURRENT_ADDRL_0 + + queue->index * sizeof(u64)); + if (req->gpd_addr != ul_curr_addr) { + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + dev_err(md_ctrl->dev, "CLDMA%d queue %d is not empty\n", + md_ctrl->hif_id, queue->index); + return; + } + + t7xx_cldma_hw_resume_queue(hw_info, queue->index, MTK_TX); + } + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); +} + +static void t7xx_cldma_tx_done(struct work_struct *work) +{ + struct cldma_queue *queue = container_of(work, struct cldma_queue, cldma_work); + struct cldma_ctrl *md_ctrl = queue->md_ctrl; + struct t7xx_cldma_hw *hw_info; + unsigned int l2_tx_int; + unsigned long flags; + + hw_info = &md_ctrl->hw_info; + t7xx_cldma_gpd_tx_collect(queue); + l2_tx_int = t7xx_cldma_hw_int_status(hw_info, BIT(queue->index) | EQ_STA_BIT(queue->index), + MTK_TX); + if (l2_tx_int & EQ_STA_BIT(queue->index)) { + t7xx_cldma_hw_tx_done(hw_info, EQ_STA_BIT(queue->index)); + t7xx_cldma_txq_empty_hndl(queue); + } + + if (l2_tx_int & BIT(queue->index)) { + t7xx_cldma_hw_tx_done(hw_info, BIT(queue->index)); + queue_work(queue->worker, &queue->cldma_work); + return; + } + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + if (md_ctrl->txq_active & BIT(queue->index)) { + t7xx_cldma_clear_ip_busy(hw_info); + t7xx_cldma_hw_irq_en_eq(hw_info, queue->index, MTK_TX); + t7xx_cldma_hw_irq_en_txrx(hw_info, queue->index, MTK_TX); + } + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + + pm_runtime_mark_last_busy(md_ctrl->dev); + pm_runtime_put_autosuspend(md_ctrl->dev); +} + +static void t7xx_cldma_ring_free(struct cldma_ctrl *md_ctrl, + struct cldma_ring *ring, enum dma_data_direction tx_rx) +{ + struct cldma_request *req_cur, *req_next; + + 
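+	/* Walk the GPD ring: unmap and free any attached skb, return each GPD to
+	 * the DMA pool and then free the request node itself.
+	 */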
list_for_each_entry_safe(req_cur, req_next, &ring->gpd_ring, entry) { + if (req_cur->mapped_buff && req_cur->skb) { + dma_unmap_single(md_ctrl->dev, req_cur->mapped_buff, + ring->pkt_size, tx_rx); + req_cur->mapped_buff = 0; + } + + dev_kfree_skb_any(req_cur->skb); + + if (req_cur->gpd) + dma_pool_free(md_ctrl->gpd_dmapool, req_cur->gpd, req_cur->gpd_addr); + + list_del(&req_cur->entry); + kfree(req_cur); + } +} + +static struct cldma_request *t7xx_alloc_rx_request(struct cldma_ctrl *md_ctrl, size_t pkt_size) +{ + struct cldma_request *req; + int val; + + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) + return NULL; + + req->gpd = dma_pool_zalloc(md_ctrl->gpd_dmapool, GFP_KERNEL, &req->gpd_addr); + if (!req->gpd) + goto err_free_req; + + val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size, GFP_KERNEL); + if (val) + goto err_free_pool; + + return req; + +err_free_pool: + dma_pool_free(md_ctrl->gpd_dmapool, req->gpd, req->gpd_addr); + +err_free_req: + kfree(req); + + return NULL; +} + +static int t7xx_cldma_rx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring) +{ + struct cldma_request *req; + struct cldma_gpd *gpd; + int i; + + INIT_LIST_HEAD(&ring->gpd_ring); + ring->length = MAX_RX_BUDGET; + + for (i = 0; i < ring->length; i++) { + req = t7xx_alloc_rx_request(md_ctrl, ring->pkt_size); + if (!req) { + t7xx_cldma_ring_free(md_ctrl, ring, DMA_FROM_DEVICE); + return -ENOMEM; + } + + gpd = req->gpd; + t7xx_cldma_gpd_set_data_ptr(gpd, req->mapped_buff); + gpd->rx_data_allow_len = cpu_to_le16(ring->pkt_size); + gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO; + INIT_LIST_HEAD(&req->entry); + list_add_tail(&req->entry, &ring->gpd_ring); + } + + /* Link previous GPD to next GPD, circular */ + list_for_each_entry(req, &ring->gpd_ring, entry) { + t7xx_cldma_gpd_set_next_ptr(gpd, req->gpd_addr); + gpd = req->gpd; + } + + return 0; +} + +static struct cldma_request *t7xx_alloc_tx_request(struct cldma_ctrl *md_ctrl) +{ + struct cldma_request *req; + + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) + return NULL; + + req->gpd = dma_pool_zalloc(md_ctrl->gpd_dmapool, GFP_KERNEL, &req->gpd_addr); + if (!req->gpd) { + kfree(req); + return NULL; + } + + return req; +} + +static int t7xx_cldma_tx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring) +{ + struct cldma_request *req; + struct cldma_gpd *gpd; + int i; + + INIT_LIST_HEAD(&ring->gpd_ring); + ring->length = MAX_TX_BUDGET; + + for (i = 0; i < ring->length; i++) { + req = t7xx_alloc_tx_request(md_ctrl); + if (!req) { + t7xx_cldma_ring_free(md_ctrl, ring, DMA_TO_DEVICE); + return -ENOMEM; + } + + gpd = req->gpd; + gpd->flags = GPD_FLAGS_IOC; + INIT_LIST_HEAD(&req->entry); + list_add_tail(&req->entry, &ring->gpd_ring); + } + + /* Link previous GPD to next GPD, circular */ + list_for_each_entry(req, &ring->gpd_ring, entry) { + t7xx_cldma_gpd_set_next_ptr(gpd, req->gpd_addr); + gpd = req->gpd; + } + + return 0; +} + +/** + * t7xx_cldma_q_reset() - Reset CLDMA request pointers to their initial values. + * @queue: Pointer to the queue structure. 
+ * + * Called with ring_lock (unless called during initialization phase) + */ +static void t7xx_cldma_q_reset(struct cldma_queue *queue) +{ + struct cldma_request *req; + + req = list_first_entry(&queue->tr_ring->gpd_ring, struct cldma_request, entry); + queue->tr_done = req; + queue->budget = queue->tr_ring->length; + + if (queue->dir == MTK_TX) + queue->tx_next = req; + else + queue->rx_refill = req; +} + +static void t7xx_cldma_rxq_init(struct cldma_queue *queue) +{ + struct cldma_ctrl *md_ctrl = queue->md_ctrl; + + queue->dir = MTK_RX; + queue->tr_ring = &md_ctrl->rx_ring[queue->index]; + t7xx_cldma_q_reset(queue); +} + +static void t7xx_cldma_txq_init(struct cldma_queue *queue) +{ + struct cldma_ctrl *md_ctrl = queue->md_ctrl; + + queue->dir = MTK_TX; + queue->tr_ring = &md_ctrl->tx_ring[queue->index]; + t7xx_cldma_q_reset(queue); +} + +static void t7xx_cldma_enable_irq(struct cldma_ctrl *md_ctrl) +{ + t7xx_pcie_mac_set_int(md_ctrl->t7xx_dev, md_ctrl->hw_info.phy_interrupt_id); +} + +static void t7xx_cldma_disable_irq(struct cldma_ctrl *md_ctrl) +{ + t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, md_ctrl->hw_info.phy_interrupt_id); +} + +static void t7xx_cldma_irq_work_cb(struct cldma_ctrl *md_ctrl) +{ + unsigned long l2_tx_int_msk, l2_rx_int_msk, l2_tx_int, l2_rx_int, val; + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; + int i; + + /* L2 raw interrupt status */ + l2_tx_int = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0); + l2_rx_int = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0); + l2_tx_int_msk = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TIMR0); + l2_rx_int_msk = ioread32(hw_info->ap_ao_base + REG_CLDMA_L2RIMR0); + l2_tx_int &= ~l2_tx_int_msk; + l2_rx_int &= ~l2_rx_int_msk; + + if (l2_tx_int) { + if (l2_tx_int & (TQ_ERR_INT_BITMASK | TQ_ACTIVE_START_ERR_INT_BITMASK)) { + /* Read and clear L3 TX interrupt status */ + val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3TISAR0); + iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3TISAR0); + val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3TISAR1); + iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3TISAR1); + } + + t7xx_cldma_hw_tx_done(hw_info, l2_tx_int); + if (l2_tx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) { + for_each_set_bit(i, &l2_tx_int, L2_INT_BIT_COUNT) { + if (i < CLDMA_TXQ_NUM) { + pm_runtime_get(md_ctrl->dev); + t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_TX); + t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_TX); + queue_work(md_ctrl->txq[i].worker, + &md_ctrl->txq[i].cldma_work); + } else { + t7xx_cldma_txq_empty_hndl(&md_ctrl->txq[i - CLDMA_TXQ_NUM]); + } + } + } + } + + if (l2_rx_int) { + if (l2_rx_int & (RQ_ERR_INT_BITMASK | RQ_ACTIVE_START_ERR_INT_BITMASK)) { + /* Read and clear L3 RX interrupt status */ + val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3RISAR0); + iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3RISAR0); + val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3RISAR1); + iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3RISAR1); + } + + t7xx_cldma_hw_rx_done(hw_info, l2_rx_int); + if (l2_rx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) { + l2_rx_int |= l2_rx_int >> CLDMA_RXQ_NUM; + for_each_set_bit(i, &l2_rx_int, CLDMA_RXQ_NUM) { + pm_runtime_get(md_ctrl->dev); + t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_RX); + t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_RX); + queue_work(md_ctrl->rxq[i].worker, &md_ctrl->rxq[i].cldma_work); + } + } + } +} + +static bool t7xx_cldma_qs_are_active(struct cldma_ctrl *md_ctrl) +{ + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; + unsigned int 
tx_active; + unsigned int rx_active; + + if (!pci_device_is_present(to_pci_dev(md_ctrl->dev))) + return false; + + tx_active = t7xx_cldma_hw_queue_status(hw_info, CLDMA_ALL_Q, MTK_TX); + rx_active = t7xx_cldma_hw_queue_status(hw_info, CLDMA_ALL_Q, MTK_RX); + + return tx_active || rx_active; +} + +/** + * t7xx_cldma_stop() - Stop CLDMA. + * @md_ctrl: CLDMA context structure. + * + * Stop TX and RX queues. Disable L1 and L2 interrupts. + * Clear status registers. + * + * Return: + * * 0 - Success. + * * -ERROR - Error code from polling cldma_queues_active. + */ +int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl) +{ + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; + bool active; + int i, ret; + + md_ctrl->rxq_active = 0; + t7xx_cldma_hw_stop_all_qs(hw_info, MTK_RX); + md_ctrl->txq_active = 0; + t7xx_cldma_hw_stop_all_qs(hw_info, MTK_TX); + md_ctrl->txq_started = 0; + t7xx_cldma_disable_irq(md_ctrl); + t7xx_cldma_hw_stop(hw_info, MTK_RX); + t7xx_cldma_hw_stop(hw_info, MTK_TX); + t7xx_cldma_hw_tx_done(hw_info, CLDMA_L2TISAR0_ALL_INT_MASK); + t7xx_cldma_hw_rx_done(hw_info, CLDMA_L2RISAR0_ALL_INT_MASK); + + if (md_ctrl->is_late_init) { + for (i = 0; i < CLDMA_TXQ_NUM; i++) + flush_work(&md_ctrl->txq[i].cldma_work); + + for (i = 0; i < CLDMA_RXQ_NUM; i++) + flush_work(&md_ctrl->rxq[i].cldma_work); + } + + ret = read_poll_timeout(t7xx_cldma_qs_are_active, active, !active, CHECK_Q_STOP_STEP_US, + CHECK_Q_STOP_TIMEOUT_US, true, md_ctrl); + if (ret) + dev_err(md_ctrl->dev, "Could not stop CLDMA%d queues", md_ctrl->hif_id); + + return ret; +} + +static void t7xx_cldma_late_release(struct cldma_ctrl *md_ctrl) +{ + int i; + + if (!md_ctrl->is_late_init) + return; + + for (i = 0; i < CLDMA_TXQ_NUM; i++) + t7xx_cldma_ring_free(md_ctrl, &md_ctrl->tx_ring[i], DMA_TO_DEVICE); + + for (i = 0; i < CLDMA_RXQ_NUM; i++) + t7xx_cldma_ring_free(md_ctrl, &md_ctrl->rx_ring[i], DMA_FROM_DEVICE); + + dma_pool_destroy(md_ctrl->gpd_dmapool); + md_ctrl->gpd_dmapool = NULL; + md_ctrl->is_late_init = false; +} + +void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl) +{ + unsigned long flags; + int i; + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + md_ctrl->txq_active = 0; + md_ctrl->rxq_active = 0; + t7xx_cldma_disable_irq(md_ctrl); + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + + for (i = 0; i < CLDMA_TXQ_NUM; i++) { + cancel_work_sync(&md_ctrl->txq[i].cldma_work); + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + md_cd_queue_struct_reset(&md_ctrl->txq[i], md_ctrl, MTK_TX, i); + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + } + + for (i = 0; i < CLDMA_RXQ_NUM; i++) { + cancel_work_sync(&md_ctrl->rxq[i].cldma_work); + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + md_cd_queue_struct_reset(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i); + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + } + + t7xx_cldma_late_release(md_ctrl); +} + +/** + * t7xx_cldma_start() - Start CLDMA. + * @md_ctrl: CLDMA context structure. + * + * Set TX/RX start address. + * Start all RX queues and enable L2 interrupt. 
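+ * TX queues only get their start addresses programmed here; they are
+ * started (or resumed) on demand from the send path.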
+ */ +void t7xx_cldma_start(struct cldma_ctrl *md_ctrl) +{ + unsigned long flags; + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + if (md_ctrl->is_late_init) { + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; + int i; + + t7xx_cldma_enable_irq(md_ctrl); + + for (i = 0; i < CLDMA_TXQ_NUM; i++) { + if (md_ctrl->txq[i].tr_done) + t7xx_cldma_hw_set_start_addr(hw_info, i, + md_ctrl->txq[i].tr_done->gpd_addr, + MTK_TX); + } + + for (i = 0; i < CLDMA_RXQ_NUM; i++) { + if (md_ctrl->rxq[i].tr_done) + t7xx_cldma_hw_set_start_addr(hw_info, i, + md_ctrl->rxq[i].tr_done->gpd_addr, + MTK_RX); + } + + /* Enable L2 interrupt */ + t7xx_cldma_hw_start_queue(hw_info, CLDMA_ALL_Q, MTK_RX); + t7xx_cldma_hw_start(hw_info); + md_ctrl->txq_started = 0; + md_ctrl->txq_active |= TXRX_STATUS_BITMASK; + md_ctrl->rxq_active |= TXRX_STATUS_BITMASK; + } + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); +} + +static void t7xx_cldma_clear_txq(struct cldma_ctrl *md_ctrl, int qnum) +{ + struct cldma_queue *txq = &md_ctrl->txq[qnum]; + struct cldma_request *req; + struct cldma_gpd *gpd; + unsigned long flags; + + spin_lock_irqsave(&txq->ring_lock, flags); + t7xx_cldma_q_reset(txq); + list_for_each_entry(req, &txq->tr_ring->gpd_ring, entry) { + gpd = req->gpd; + gpd->flags &= ~GPD_FLAGS_HWO; + t7xx_cldma_gpd_set_data_ptr(gpd, 0); + gpd->data_buff_len = 0; + dev_kfree_skb_any(req->skb); + req->skb = NULL; + } + spin_unlock_irqrestore(&txq->ring_lock, flags); +} + +static int t7xx_cldma_clear_rxq(struct cldma_ctrl *md_ctrl, int qnum) +{ + struct cldma_queue *rxq = &md_ctrl->rxq[qnum]; + struct cldma_request *req; + struct cldma_gpd *gpd; + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&rxq->ring_lock, flags); + t7xx_cldma_q_reset(rxq); + list_for_each_entry(req, &rxq->tr_ring->gpd_ring, entry) { + gpd = req->gpd; + gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO; + gpd->data_buff_len = 0; + + if (req->skb) { + req->skb->len = 0; + skb_reset_tail_pointer(req->skb); + } + } + + list_for_each_entry(req, &rxq->tr_ring->gpd_ring, entry) { + if (req->skb) + continue; + + ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size, GFP_ATOMIC); + if (ret) + break; + + t7xx_cldma_gpd_set_data_ptr(req->gpd, req->mapped_buff); + } + spin_unlock_irqrestore(&rxq->ring_lock, flags); + + return ret; +} + +void t7xx_cldma_clear_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx) +{ + int i; + + if (tx_rx == MTK_TX) { + for (i = 0; i < CLDMA_TXQ_NUM; i++) + t7xx_cldma_clear_txq(md_ctrl, i); + } else { + for (i = 0; i < CLDMA_RXQ_NUM; i++) + t7xx_cldma_clear_rxq(md_ctrl, i); + } +} + +void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx) +{ + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; + unsigned long flags; + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, tx_rx); + t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, tx_rx); + if (tx_rx == MTK_RX) + md_ctrl->rxq_active &= ~TXRX_STATUS_BITMASK; + else + md_ctrl->txq_active &= ~TXRX_STATUS_BITMASK; + t7xx_cldma_hw_stop_all_qs(hw_info, tx_rx); + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); +} + +static int t7xx_cldma_gpd_handle_tx_request(struct cldma_queue *queue, struct cldma_request *tx_req, + struct sk_buff *skb) +{ + struct cldma_ctrl *md_ctrl = queue->md_ctrl; + struct cldma_gpd *gpd = tx_req->gpd; + unsigned long flags; + + /* Update GPD */ + tx_req->mapped_buff = dma_map_single(md_ctrl->dev, skb->data, skb->len, DMA_TO_DEVICE); + + if (dma_mapping_error(md_ctrl->dev, 
tx_req->mapped_buff)) { + dev_err(md_ctrl->dev, "DMA mapping failed\n"); + return -ENOMEM; + } + + t7xx_cldma_gpd_set_data_ptr(gpd, tx_req->mapped_buff); + gpd->data_buff_len = cpu_to_le16(skb->len); + + /* This lock must cover TGPD setting, as even without a resume operation, + * CLDMA can send next HWO=1 if last TGPD just finished. + */ + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + if (md_ctrl->txq_active & BIT(queue->index)) + gpd->flags |= GPD_FLAGS_HWO; + + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + + tx_req->skb = skb; + return 0; +} + +/* Called with cldma_lock */ +static void t7xx_cldma_hw_start_send(struct cldma_ctrl *md_ctrl, int qno, + struct cldma_request *prev_req) +{ + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; + + /* Check whether the device was powered off (CLDMA start address is not set) */ + if (!t7xx_cldma_tx_addr_is_set(hw_info, qno)) { + t7xx_cldma_hw_init(hw_info); + t7xx_cldma_hw_set_start_addr(hw_info, qno, prev_req->gpd_addr, MTK_TX); + md_ctrl->txq_started &= ~BIT(qno); + } + + if (!t7xx_cldma_hw_queue_status(hw_info, qno, MTK_TX)) { + if (md_ctrl->txq_started & BIT(qno)) + t7xx_cldma_hw_resume_queue(hw_info, qno, MTK_TX); + else + t7xx_cldma_hw_start_queue(hw_info, qno, MTK_TX); + + md_ctrl->txq_started |= BIT(qno); + } +} + +/** + * t7xx_cldma_set_recv_skb() - Set the callback to handle RX packets. + * @md_ctrl: CLDMA context structure. + * @recv_skb: Receiving skb callback. + */ +void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl, + int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb)) +{ + md_ctrl->recv_skb = recv_skb; +} + +/** + * t7xx_cldma_send_skb() - Send control data to modem. + * @md_ctrl: CLDMA context structure. + * @qno: Queue number. + * @skb: Socket buffer. + * + * Return: + * * 0 - Success. + * * -ENOMEM - Allocation failure. + * * -EINVAL - Invalid queue request. + * * -EIO - Queue is not active. + * * -ETIMEDOUT - Timeout waiting for the device to wake up. + */ +int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb) +{ + struct cldma_request *tx_req; + struct cldma_queue *queue; + unsigned long flags; + int ret; + + if (qno >= CLDMA_TXQ_NUM) + return -EINVAL; + + ret = pm_runtime_resume_and_get(md_ctrl->dev); + if (ret < 0 && ret != -EACCES) + return ret; + + t7xx_pci_disable_sleep(md_ctrl->t7xx_dev); + queue = &md_ctrl->txq[qno]; + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + if (!(md_ctrl->txq_active & BIT(qno))) { + ret = -EIO; + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + goto allow_sleep; + } + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + + do { + spin_lock_irqsave(&queue->ring_lock, flags); + tx_req = queue->tx_next; + if (queue->budget > 0 && !tx_req->skb) { + struct list_head *gpd_ring = &queue->tr_ring->gpd_ring; + + queue->budget--; + t7xx_cldma_gpd_handle_tx_request(queue, tx_req, skb); + queue->tx_next = list_next_entry_circular(tx_req, gpd_ring, entry); + spin_unlock_irqrestore(&queue->ring_lock, flags); + + if (!t7xx_pci_sleep_disable_complete(md_ctrl->t7xx_dev)) { + ret = -ETIMEDOUT; + break; + } + + /* Protect the access to the modem for queues operations (resume/start) + * which access shared locations by all the queues. + * cldma_lock is independent of ring_lock which is per queue. 
+ */ + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + t7xx_cldma_hw_start_send(md_ctrl, qno, tx_req); + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + + break; + } + spin_unlock_irqrestore(&queue->ring_lock, flags); + + if (!t7xx_pci_sleep_disable_complete(md_ctrl->t7xx_dev)) { + ret = -ETIMEDOUT; + break; + } + + if (!t7xx_cldma_hw_queue_status(&md_ctrl->hw_info, qno, MTK_TX)) { + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + t7xx_cldma_hw_resume_queue(&md_ctrl->hw_info, qno, MTK_TX); + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + } + + ret = wait_event_interruptible_exclusive(queue->req_wq, queue->budget > 0); + } while (!ret); + +allow_sleep: + t7xx_pci_enable_sleep(md_ctrl->t7xx_dev); + pm_runtime_mark_last_busy(md_ctrl->dev); + pm_runtime_put_autosuspend(md_ctrl->dev); + return ret; +} + +static int t7xx_cldma_late_init(struct cldma_ctrl *md_ctrl) +{ + char dma_pool_name[32]; + int i, j, ret; + + if (md_ctrl->is_late_init) { + dev_err(md_ctrl->dev, "CLDMA late init was already done\n"); + return -EALREADY; + } + + snprintf(dma_pool_name, sizeof(dma_pool_name), "cldma_req_hif%d", md_ctrl->hif_id); + + md_ctrl->gpd_dmapool = dma_pool_create(dma_pool_name, md_ctrl->dev, + sizeof(struct cldma_gpd), GPD_DMAPOOL_ALIGN, 0); + if (!md_ctrl->gpd_dmapool) { + dev_err(md_ctrl->dev, "DMA pool alloc fail\n"); + return -ENOMEM; + } + + for (i = 0; i < CLDMA_TXQ_NUM; i++) { + ret = t7xx_cldma_tx_ring_init(md_ctrl, &md_ctrl->tx_ring[i]); + if (ret) { + dev_err(md_ctrl->dev, "control TX ring init fail\n"); + goto err_free_tx_ring; + } + } + + for (j = 0; j < CLDMA_RXQ_NUM; j++) { + md_ctrl->rx_ring[j].pkt_size = CLDMA_MTU; + + if (j == CLDMA_RXQ_NUM - 1) + md_ctrl->rx_ring[j].pkt_size = CLDMA_JUMBO_BUFF_SZ; + + ret = t7xx_cldma_rx_ring_init(md_ctrl, &md_ctrl->rx_ring[j]); + if (ret) { + dev_err(md_ctrl->dev, "Control RX ring init fail\n"); + goto err_free_rx_ring; + } + } + + for (i = 0; i < CLDMA_TXQ_NUM; i++) + t7xx_cldma_txq_init(&md_ctrl->txq[i]); + + for (j = 0; j < CLDMA_RXQ_NUM; j++) + t7xx_cldma_rxq_init(&md_ctrl->rxq[j]); + + md_ctrl->is_late_init = true; + return 0; + +err_free_rx_ring: + while (j--) + t7xx_cldma_ring_free(md_ctrl, &md_ctrl->rx_ring[j], DMA_FROM_DEVICE); + +err_free_tx_ring: + while (i--) + t7xx_cldma_ring_free(md_ctrl, &md_ctrl->tx_ring[i], DMA_TO_DEVICE); + + return ret; +} + +static void __iomem *t7xx_pcie_addr_transfer(void __iomem *addr, u32 addr_trs1, u32 phy_addr) +{ + return addr + phy_addr - addr_trs1; +} + +static void t7xx_hw_info_init(struct cldma_ctrl *md_ctrl) +{ + struct t7xx_addr_base *pbase = &md_ctrl->t7xx_dev->base_addr; + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; + u32 phy_ao_base, phy_pd_base; + + if (md_ctrl->hif_id != CLDMA_ID_MD) + return; + + phy_ao_base = CLDMA1_AO_BASE; + phy_pd_base = CLDMA1_PD_BASE; + hw_info->phy_interrupt_id = CLDMA1_INT; + hw_info->hw_mode = MODE_BIT_64; + hw_info->ap_ao_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base, + pbase->pcie_dev_reg_trsl_addr, phy_ao_base); + hw_info->ap_pdn_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base, + pbase->pcie_dev_reg_trsl_addr, phy_pd_base); +} + +static int t7xx_cldma_default_recv_skb(struct cldma_queue *queue, struct sk_buff *skb) +{ + dev_kfree_skb_any(skb); + return 0; +} + +int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev) +{ + struct device *dev = &t7xx_dev->pdev->dev; + struct cldma_ctrl *md_ctrl; + + md_ctrl = devm_kzalloc(dev, sizeof(*md_ctrl), GFP_KERNEL); + if (!md_ctrl) + return -ENOMEM; + + 
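+	/* Fill in the context pointers and a default RX callback that simply
+	 * frees the skb; a real handler can be installed later through
+	 * t7xx_cldma_set_recv_skb().
+	 */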
md_ctrl->t7xx_dev = t7xx_dev; + md_ctrl->dev = dev; + md_ctrl->hif_id = hif_id; + md_ctrl->recv_skb = t7xx_cldma_default_recv_skb; + t7xx_hw_info_init(md_ctrl); + t7xx_dev->md->md_ctrl[hif_id] = md_ctrl; + return 0; +} + +static void t7xx_cldma_resume_early(struct t7xx_pci_dev *t7xx_dev, void *entity_param) +{ + struct cldma_ctrl *md_ctrl = entity_param; + struct t7xx_cldma_hw *hw_info; + unsigned long flags; + int qno_t; + + hw_info = &md_ctrl->hw_info; + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + t7xx_cldma_hw_restore(hw_info); + for (qno_t = 0; qno_t < CLDMA_TXQ_NUM; qno_t++) { + t7xx_cldma_hw_set_start_addr(hw_info, qno_t, md_ctrl->txq[qno_t].tx_next->gpd_addr, + MTK_TX); + t7xx_cldma_hw_set_start_addr(hw_info, qno_t, md_ctrl->rxq[qno_t].tr_done->gpd_addr, + MTK_RX); + } + t7xx_cldma_enable_irq(md_ctrl); + t7xx_cldma_hw_start_queue(hw_info, CLDMA_ALL_Q, MTK_RX); + md_ctrl->rxq_active |= TXRX_STATUS_BITMASK; + t7xx_cldma_hw_irq_en_eq(hw_info, CLDMA_ALL_Q, MTK_RX); + t7xx_cldma_hw_irq_en_txrx(hw_info, CLDMA_ALL_Q, MTK_RX); + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); +} + +static int t7xx_cldma_resume(struct t7xx_pci_dev *t7xx_dev, void *entity_param) +{ + struct cldma_ctrl *md_ctrl = entity_param; + unsigned long flags; + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + md_ctrl->txq_active |= TXRX_STATUS_BITMASK; + t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, CLDMA_ALL_Q, MTK_TX); + t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, CLDMA_ALL_Q, MTK_TX); + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + + if (md_ctrl->hif_id == CLDMA_ID_MD) + t7xx_mhccif_mask_clr(t7xx_dev, D2H_SW_INT_MASK); + + return 0; +} + +static void t7xx_cldma_suspend_late(struct t7xx_pci_dev *t7xx_dev, void *entity_param) +{ + struct cldma_ctrl *md_ctrl = entity_param; + struct t7xx_cldma_hw *hw_info; + unsigned long flags; + + hw_info = &md_ctrl->hw_info; + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, MTK_RX); + t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, MTK_RX); + md_ctrl->rxq_active &= ~TXRX_STATUS_BITMASK; + t7xx_cldma_hw_stop_all_qs(hw_info, MTK_RX); + t7xx_cldma_clear_ip_busy(hw_info); + t7xx_cldma_disable_irq(md_ctrl); + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); +} + +static int t7xx_cldma_suspend(struct t7xx_pci_dev *t7xx_dev, void *entity_param) +{ + struct cldma_ctrl *md_ctrl = entity_param; + struct t7xx_cldma_hw *hw_info; + unsigned long flags; + + if (md_ctrl->hif_id == CLDMA_ID_MD) + t7xx_mhccif_mask_set(t7xx_dev, D2H_SW_INT_MASK); + + hw_info = &md_ctrl->hw_info; + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, MTK_TX); + t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, MTK_TX); + md_ctrl->txq_active &= ~TXRX_STATUS_BITMASK; + t7xx_cldma_hw_stop_all_qs(hw_info, MTK_TX); + md_ctrl->txq_started = 0; + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + + return 0; +} + +static int t7xx_cldma_pm_init(struct cldma_ctrl *md_ctrl) +{ + md_ctrl->pm_entity = kzalloc(sizeof(*md_ctrl->pm_entity), GFP_KERNEL); + if (!md_ctrl->pm_entity) + return -ENOMEM; + + md_ctrl->pm_entity->entity_param = md_ctrl; + + if (md_ctrl->hif_id == CLDMA_ID_MD) + md_ctrl->pm_entity->id = PM_ENTITY_ID_CTRL1; + else + md_ctrl->pm_entity->id = PM_ENTITY_ID_CTRL2; + + md_ctrl->pm_entity->suspend = t7xx_cldma_suspend; + md_ctrl->pm_entity->suspend_late = t7xx_cldma_suspend_late; + md_ctrl->pm_entity->resume = t7xx_cldma_resume; + md_ctrl->pm_entity->resume_early = t7xx_cldma_resume_early; + 
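+	/* Hand the populated entity to the PCIe PM framework; it is unregistered
+	 * and freed again by t7xx_cldma_pm_uninit().
+	 */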
+ return t7xx_pci_pm_entity_register(md_ctrl->t7xx_dev, md_ctrl->pm_entity); +} + +static int t7xx_cldma_pm_uninit(struct cldma_ctrl *md_ctrl) +{ + if (!md_ctrl->pm_entity) + return -EINVAL; + + t7xx_pci_pm_entity_unregister(md_ctrl->t7xx_dev, md_ctrl->pm_entity); + kfree(md_ctrl->pm_entity); + md_ctrl->pm_entity = NULL; + return 0; +} + +void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl) +{ + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; + unsigned long flags; + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + t7xx_cldma_hw_stop(hw_info, MTK_TX); + t7xx_cldma_hw_stop(hw_info, MTK_RX); + t7xx_cldma_hw_rx_done(hw_info, EMPTY_STATUS_BITMASK | TXRX_STATUS_BITMASK); + t7xx_cldma_hw_tx_done(hw_info, EMPTY_STATUS_BITMASK | TXRX_STATUS_BITMASK); + t7xx_cldma_hw_init(hw_info); + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); +} + +static irqreturn_t t7xx_cldma_isr_handler(int irq, void *data) +{ + struct cldma_ctrl *md_ctrl = data; + u32 interrupt; + + interrupt = md_ctrl->hw_info.phy_interrupt_id; + t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, interrupt); + t7xx_cldma_irq_work_cb(md_ctrl); + t7xx_pcie_mac_clear_int_status(md_ctrl->t7xx_dev, interrupt); + t7xx_pcie_mac_set_int(md_ctrl->t7xx_dev, interrupt); + return IRQ_HANDLED; +} + +static void t7xx_cldma_destroy_wqs(struct cldma_ctrl *md_ctrl) +{ + int i; + + for (i = 0; i < CLDMA_TXQ_NUM; i++) { + if (md_ctrl->txq[i].worker) { + destroy_workqueue(md_ctrl->txq[i].worker); + md_ctrl->txq[i].worker = NULL; + } + } + + for (i = 0; i < CLDMA_RXQ_NUM; i++) { + if (md_ctrl->rxq[i].worker) { + destroy_workqueue(md_ctrl->rxq[i].worker); + md_ctrl->rxq[i].worker = NULL; + } + } +} + +/** + * t7xx_cldma_init() - Initialize CLDMA. + * @md_ctrl: CLDMA context structure. + * + * Allocate and initialize device power management entity. + * Initialize HIF TX/RX queue structure. + * Register CLDMA callback ISR with PCIe driver. + * + * Return: + * * 0 - Success. + * * -ERROR - Error code from failure sub-initializations. + */ +int t7xx_cldma_init(struct cldma_ctrl *md_ctrl) +{ + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; + int ret, i; + + md_ctrl->txq_active = 0; + md_ctrl->rxq_active = 0; + md_ctrl->is_late_init = false; + + ret = t7xx_cldma_pm_init(md_ctrl); + if (ret) + return ret; + + spin_lock_init(&md_ctrl->cldma_lock); + + for (i = 0; i < CLDMA_TXQ_NUM; i++) { + md_cd_queue_struct_init(&md_ctrl->txq[i], md_ctrl, MTK_TX, i); + md_ctrl->txq[i].worker = + alloc_workqueue("md_hif%d_tx%d_worker", + WQ_UNBOUND | WQ_MEM_RECLAIM | (i ? 
0 : WQ_HIGHPRI), + 1, md_ctrl->hif_id, i); + if (!md_ctrl->txq[i].worker) + goto err_workqueue; + + INIT_WORK(&md_ctrl->txq[i].cldma_work, t7xx_cldma_tx_done); + } + + for (i = 0; i < CLDMA_RXQ_NUM; i++) { + md_cd_queue_struct_init(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i); + INIT_WORK(&md_ctrl->rxq[i].cldma_work, t7xx_cldma_rx_done); + + md_ctrl->rxq[i].worker = alloc_workqueue("md_hif%d_rx%d_worker", + WQ_UNBOUND | WQ_MEM_RECLAIM, + 1, md_ctrl->hif_id, i); + if (!md_ctrl->rxq[i].worker) + goto err_workqueue; + } + + t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, hw_info->phy_interrupt_id); + md_ctrl->t7xx_dev->intr_handler[hw_info->phy_interrupt_id] = t7xx_cldma_isr_handler; + md_ctrl->t7xx_dev->intr_thread[hw_info->phy_interrupt_id] = NULL; + md_ctrl->t7xx_dev->callback_param[hw_info->phy_interrupt_id] = md_ctrl; + t7xx_pcie_mac_clear_int_status(md_ctrl->t7xx_dev, hw_info->phy_interrupt_id); + return 0; + +err_workqueue: + t7xx_cldma_destroy_wqs(md_ctrl); + t7xx_cldma_pm_uninit(md_ctrl); + return -ENOMEM; +} + +void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl) +{ + t7xx_cldma_late_release(md_ctrl); + t7xx_cldma_late_init(md_ctrl); +} + +void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl) +{ + t7xx_cldma_stop(md_ctrl); + t7xx_cldma_late_release(md_ctrl); + t7xx_cldma_destroy_wqs(md_ctrl); + t7xx_cldma_pm_uninit(md_ctrl); +} diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.h b/drivers/net/wwan/t7xx/t7xx_hif_cldma.h new file mode 100644 index 000000000..47a35e552 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.h @@ -0,0 +1,127 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Haijun Liu + * Moises Veleta + * Ricardo Martinez + * Sreehari Kancharla + * + * Contributors: + * Amir Hanania + * Chiranjeevi Rapolu + * Eliot Lee + */ + +#ifndef __T7XX_HIF_CLDMA_H__ +#define __T7XX_HIF_CLDMA_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_cldma.h" +#include "t7xx_pci.h" + +/** + * enum cldma_id - Identifiers for CLDMA HW units. + * @CLDMA_ID_MD: Modem control channel. + * @CLDMA_ID_AP: Application Processor control channel (not used at the moment). + * @CLDMA_NUM: Number of CLDMA HW units available. 
+ */ +enum cldma_id { + CLDMA_ID_MD, + CLDMA_ID_AP, + CLDMA_NUM +}; + +struct cldma_gpd { + u8 flags; + u8 not_used1; + __le16 rx_data_allow_len; + __le32 next_gpd_ptr_h; + __le32 next_gpd_ptr_l; + __le32 data_buff_bd_ptr_h; + __le32 data_buff_bd_ptr_l; + __le16 data_buff_len; + __le16 not_used2; +}; + +struct cldma_request { + struct cldma_gpd *gpd; /* Virtual address for CPU */ + dma_addr_t gpd_addr; /* Physical address for DMA */ + struct sk_buff *skb; + dma_addr_t mapped_buff; + struct list_head entry; +}; + +struct cldma_ring { + struct list_head gpd_ring; /* Ring of struct cldma_request */ + unsigned int length; /* Number of struct cldma_request */ + int pkt_size; +}; + +struct cldma_queue { + struct cldma_ctrl *md_ctrl; + enum mtk_txrx dir; + unsigned int index; + struct cldma_ring *tr_ring; + struct cldma_request *tr_done; + struct cldma_request *rx_refill; + struct cldma_request *tx_next; + int budget; /* Same as ring buffer size by default */ + spinlock_t ring_lock; + wait_queue_head_t req_wq; /* Only for TX */ + struct workqueue_struct *worker; + struct work_struct cldma_work; +}; + +struct cldma_ctrl { + enum cldma_id hif_id; + struct device *dev; + struct t7xx_pci_dev *t7xx_dev; + struct cldma_queue txq[CLDMA_TXQ_NUM]; + struct cldma_queue rxq[CLDMA_RXQ_NUM]; + unsigned short txq_active; + unsigned short rxq_active; + unsigned short txq_started; + spinlock_t cldma_lock; /* Protects CLDMA structure */ + /* Assumes T/R GPD/BD/SPD have the same size */ + struct dma_pool *gpd_dmapool; + struct cldma_ring tx_ring[CLDMA_TXQ_NUM]; + struct cldma_ring rx_ring[CLDMA_RXQ_NUM]; + struct md_pm_entity *pm_entity; + struct t7xx_cldma_hw hw_info; + bool is_late_init; + int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb); +}; + +#define GPD_FLAGS_HWO BIT(0) +#define GPD_FLAGS_IOC BIT(7) +#define GPD_DMAPOOL_ALIGN 16 + +#define CLDMA_MTU 3584 /* 3.5kB */ + +int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev); +void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl); +int t7xx_cldma_init(struct cldma_ctrl *md_ctrl); +void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl); +void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl); +void t7xx_cldma_start(struct cldma_ctrl *md_ctrl); +int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl); +void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl); +void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl, + int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb)); +int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb); +void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx); +void t7xx_cldma_clear_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx); + +#endif /* __T7XX_HIF_CLDMA_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c new file mode 100644 index 000000000..7eff3531b --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c @@ -0,0 +1,574 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. 
+ * + * Authors: + * Amir Hanania + * Haijun Liu + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Chiranjeevi Rapolu + * Eliot Lee + * Sreehari Kancharla + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_dpmaif.h" +#include "t7xx_hif_dpmaif.h" +#include "t7xx_hif_dpmaif_rx.h" +#include "t7xx_hif_dpmaif_tx.h" +#include "t7xx_pci.h" +#include "t7xx_pcie_mac.h" +#include "t7xx_state_monitor.h" + +unsigned int t7xx_ring_buf_get_next_wr_idx(unsigned int buf_len, unsigned int buf_idx) +{ + buf_idx++; + + return buf_idx < buf_len ? buf_idx : 0; +} + +unsigned int t7xx_ring_buf_rd_wr_count(unsigned int total_cnt, unsigned int rd_idx, + unsigned int wr_idx, enum dpmaif_rdwr rd_wr) +{ + int pkt_cnt; + + if (rd_wr == DPMAIF_READ) + pkt_cnt = wr_idx - rd_idx; + else + pkt_cnt = rd_idx - wr_idx - 1; + + if (pkt_cnt < 0) + pkt_cnt += total_cnt; + + return (unsigned int)pkt_cnt; +} + +static void t7xx_dpmaif_enable_irq(struct dpmaif_ctrl *dpmaif_ctrl) +{ + struct dpmaif_isr_para *isr_para; + int i; + + for (i = 0; i < ARRAY_SIZE(dpmaif_ctrl->isr_para); i++) { + isr_para = &dpmaif_ctrl->isr_para[i]; + t7xx_pcie_mac_set_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int); + } +} + +static void t7xx_dpmaif_disable_irq(struct dpmaif_ctrl *dpmaif_ctrl) +{ + struct dpmaif_isr_para *isr_para; + int i; + + for (i = 0; i < ARRAY_SIZE(dpmaif_ctrl->isr_para); i++) { + isr_para = &dpmaif_ctrl->isr_para[i]; + t7xx_pcie_mac_clear_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int); + } +} + +static void t7xx_dpmaif_irq_cb(struct dpmaif_isr_para *isr_para) +{ + struct dpmaif_ctrl *dpmaif_ctrl = isr_para->dpmaif_ctrl; + struct dpmaif_hw_intr_st_para intr_status; + struct device *dev = dpmaif_ctrl->dev; + struct dpmaif_hw_info *hw_info; + int i; + + memset(&intr_status, 0, sizeof(intr_status)); + hw_info = &dpmaif_ctrl->hw_info; + + if (t7xx_dpmaif_hw_get_intr_cnt(hw_info, &intr_status, isr_para->dlq_id) < 0) { + dev_err(dev, "Failed to get HW interrupt count\n"); + return; + } + + t7xx_pcie_mac_clear_int_status(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int); + + for (i = 0; i < intr_status.intr_cnt; i++) { + switch (intr_status.intr_types[i]) { + case DPF_INTR_UL_DONE: + t7xx_dpmaif_irq_tx_done(dpmaif_ctrl, intr_status.intr_queues[i]); + break; + + case DPF_INTR_UL_DRB_EMPTY: + case DPF_INTR_UL_MD_NOTREADY: + case DPF_INTR_UL_MD_PWR_NOTREADY: + /* No need to log an error for these */ + break; + + case DPF_INTR_DL_BATCNT_LEN_ERR: + dev_err_ratelimited(dev, "DL interrupt: packet BAT count length error\n"); + t7xx_dpmaif_dl_unmask_batcnt_len_err_intr(hw_info); + break; + + case DPF_INTR_DL_PITCNT_LEN_ERR: + dev_err_ratelimited(dev, "DL interrupt: PIT count length error\n"); + t7xx_dpmaif_dl_unmask_pitcnt_len_err_intr(hw_info); + break; + + case DPF_INTR_DL_Q0_PITCNT_LEN_ERR: + dev_err_ratelimited(dev, "DL interrupt: DLQ0 PIT count length error\n"); + t7xx_dpmaif_dlq_unmask_pitcnt_len_err_intr(hw_info, DPF_RX_QNO_DFT); + break; + + case DPF_INTR_DL_Q1_PITCNT_LEN_ERR: + dev_err_ratelimited(dev, "DL interrupt: DLQ1 PIT count length error\n"); + t7xx_dpmaif_dlq_unmask_pitcnt_len_err_intr(hw_info, DPF_RX_QNO1); + break; + + case DPF_INTR_DL_DONE: + case DPF_INTR_DL_Q0_DONE: + case DPF_INTR_DL_Q1_DONE: + t7xx_dpmaif_irq_rx_done(dpmaif_ctrl, intr_status.intr_queues[i]); + break; + + default: + dev_err_ratelimited(dev, "DL interrupt error: unknown type : %d\n", + intr_status.intr_types[i]); + } + } +} + +static irqreturn_t t7xx_dpmaif_isr_handler(int irq, void *data) +{ + 
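+	/* DPMAIF RX interrupt top half: bail out (with an error message) if the
+	 * interrupt arrives before the HW is powered on; otherwise mask the PCIe
+	 * interrupt, dispatch the status through t7xx_dpmaif_irq_cb() and unmask
+	 * it again.
+	 */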
struct dpmaif_isr_para *isr_para = data; + struct dpmaif_ctrl *dpmaif_ctrl; + + dpmaif_ctrl = isr_para->dpmaif_ctrl; + if (dpmaif_ctrl->state != DPMAIF_STATE_PWRON) { + dev_err(dpmaif_ctrl->dev, "Interrupt received before initializing DPMAIF\n"); + return IRQ_HANDLED; + } + + t7xx_pcie_mac_clear_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int); + t7xx_dpmaif_irq_cb(isr_para); + t7xx_pcie_mac_set_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int); + return IRQ_HANDLED; +} + +static void t7xx_dpmaif_isr_parameter_init(struct dpmaif_ctrl *dpmaif_ctrl) +{ + struct dpmaif_isr_para *isr_para; + unsigned char i; + + dpmaif_ctrl->rxq_int_mapping[DPF_RX_QNO0] = DPMAIF_INT; + dpmaif_ctrl->rxq_int_mapping[DPF_RX_QNO1] = DPMAIF2_INT; + + for (i = 0; i < DPMAIF_RXQ_NUM; i++) { + isr_para = &dpmaif_ctrl->isr_para[i]; + isr_para->dpmaif_ctrl = dpmaif_ctrl; + isr_para->dlq_id = i; + isr_para->pcie_int = dpmaif_ctrl->rxq_int_mapping[i]; + } +} + +static void t7xx_dpmaif_register_pcie_irq(struct dpmaif_ctrl *dpmaif_ctrl) +{ + struct t7xx_pci_dev *t7xx_dev = dpmaif_ctrl->t7xx_dev; + struct dpmaif_isr_para *isr_para; + enum t7xx_int int_type; + int i; + + t7xx_dpmaif_isr_parameter_init(dpmaif_ctrl); + + for (i = 0; i < DPMAIF_RXQ_NUM; i++) { + isr_para = &dpmaif_ctrl->isr_para[i]; + int_type = isr_para->pcie_int; + t7xx_pcie_mac_clear_int(t7xx_dev, int_type); + + t7xx_dev->intr_handler[int_type] = t7xx_dpmaif_isr_handler; + t7xx_dev->intr_thread[int_type] = NULL; + t7xx_dev->callback_param[int_type] = isr_para; + + t7xx_pcie_mac_clear_int_status(t7xx_dev, int_type); + t7xx_pcie_mac_set_int(t7xx_dev, int_type); + } +} + +static int t7xx_dpmaif_rxtx_sw_allocs(struct dpmaif_ctrl *dpmaif_ctrl) +{ + struct dpmaif_rx_queue *rx_q; + struct dpmaif_tx_queue *tx_q; + int ret, rx_idx, tx_idx, i; + + ret = t7xx_dpmaif_bat_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_req, BAT_TYPE_NORMAL); + if (ret) { + dev_err(dpmaif_ctrl->dev, "Failed to allocate normal BAT table: %d\n", ret); + return ret; + } + + ret = t7xx_dpmaif_bat_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_frag, BAT_TYPE_FRAG); + if (ret) { + dev_err(dpmaif_ctrl->dev, "Failed to allocate frag BAT table: %d\n", ret); + goto err_free_normal_bat; + } + + for (rx_idx = 0; rx_idx < DPMAIF_RXQ_NUM; rx_idx++) { + rx_q = &dpmaif_ctrl->rxq[rx_idx]; + rx_q->index = rx_idx; + rx_q->dpmaif_ctrl = dpmaif_ctrl; + ret = t7xx_dpmaif_rxq_init(rx_q); + if (ret) + goto err_free_rxq; + } + + for (tx_idx = 0; tx_idx < DPMAIF_TXQ_NUM; tx_idx++) { + tx_q = &dpmaif_ctrl->txq[tx_idx]; + tx_q->index = tx_idx; + tx_q->dpmaif_ctrl = dpmaif_ctrl; + ret = t7xx_dpmaif_txq_init(tx_q); + if (ret) + goto err_free_txq; + } + + ret = t7xx_dpmaif_tx_thread_init(dpmaif_ctrl); + if (ret) { + dev_err(dpmaif_ctrl->dev, "Failed to start TX thread\n"); + goto err_free_txq; + } + + ret = t7xx_dpmaif_bat_rel_wq_alloc(dpmaif_ctrl); + if (ret) + goto err_thread_rel; + + return 0; + +err_thread_rel: + t7xx_dpmaif_tx_thread_rel(dpmaif_ctrl); + +err_free_txq: + for (i = 0; i < tx_idx; i++) { + tx_q = &dpmaif_ctrl->txq[i]; + t7xx_dpmaif_txq_free(tx_q); + } + +err_free_rxq: + for (i = 0; i < rx_idx; i++) { + rx_q = &dpmaif_ctrl->rxq[i]; + t7xx_dpmaif_rxq_free(rx_q); + } + + t7xx_dpmaif_bat_free(dpmaif_ctrl, &dpmaif_ctrl->bat_frag); + +err_free_normal_bat: + t7xx_dpmaif_bat_free(dpmaif_ctrl, &dpmaif_ctrl->bat_req); + + return ret; +} + +static void t7xx_dpmaif_sw_release(struct dpmaif_ctrl *dpmaif_ctrl) +{ + struct dpmaif_rx_queue *rx_q; + struct dpmaif_tx_queue *tx_q; + int i; + + t7xx_dpmaif_tx_thread_rel(dpmaif_ctrl); + 
t7xx_dpmaif_bat_wq_rel(dpmaif_ctrl); + + for (i = 0; i < DPMAIF_TXQ_NUM; i++) { + tx_q = &dpmaif_ctrl->txq[i]; + t7xx_dpmaif_txq_free(tx_q); + } + + for (i = 0; i < DPMAIF_RXQ_NUM; i++) { + rx_q = &dpmaif_ctrl->rxq[i]; + t7xx_dpmaif_rxq_free(rx_q); + } +} + +static int t7xx_dpmaif_start(struct dpmaif_ctrl *dpmaif_ctrl) +{ + struct dpmaif_hw_info *hw_info = &dpmaif_ctrl->hw_info; + struct dpmaif_hw_params hw_init_para; + struct dpmaif_rx_queue *rxq; + struct dpmaif_tx_queue *txq; + unsigned int buf_cnt; + int i, ret = 0; + + if (dpmaif_ctrl->state == DPMAIF_STATE_PWRON) + return -EFAULT; + + memset(&hw_init_para, 0, sizeof(hw_init_para)); + + for (i = 0; i < DPMAIF_RXQ_NUM; i++) { + rxq = &dpmaif_ctrl->rxq[i]; + rxq->que_started = true; + rxq->index = i; + rxq->budget = rxq->bat_req->bat_size_cnt - 1; + + hw_init_para.pkt_bat_base_addr[i] = rxq->bat_req->bat_bus_addr; + hw_init_para.pkt_bat_size_cnt[i] = rxq->bat_req->bat_size_cnt; + hw_init_para.pit_base_addr[i] = rxq->pit_bus_addr; + hw_init_para.pit_size_cnt[i] = rxq->pit_size_cnt; + hw_init_para.frg_bat_base_addr[i] = rxq->bat_frag->bat_bus_addr; + hw_init_para.frg_bat_size_cnt[i] = rxq->bat_frag->bat_size_cnt; + } + + bitmap_zero(dpmaif_ctrl->bat_req.bat_bitmap, dpmaif_ctrl->bat_req.bat_size_cnt); + buf_cnt = dpmaif_ctrl->bat_req.bat_size_cnt - 1; + ret = t7xx_dpmaif_rx_buf_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_req, 0, buf_cnt, true); + if (ret) { + dev_err(dpmaif_ctrl->dev, "Failed to allocate RX buffer: %d\n", ret); + return ret; + } + + buf_cnt = dpmaif_ctrl->bat_frag.bat_size_cnt - 1; + ret = t7xx_dpmaif_rx_frag_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_frag, buf_cnt, true); + if (ret) { + dev_err(dpmaif_ctrl->dev, "Failed to allocate frag RX buffer: %d\n", ret); + goto err_free_normal_bat; + } + + for (i = 0; i < DPMAIF_TXQ_NUM; i++) { + txq = &dpmaif_ctrl->txq[i]; + txq->que_started = true; + + hw_init_para.drb_base_addr[i] = txq->drb_bus_addr; + hw_init_para.drb_size_cnt[i] = txq->drb_size_cnt; + } + + ret = t7xx_dpmaif_hw_init(hw_info, &hw_init_para); + if (ret) { + dev_err(dpmaif_ctrl->dev, "Failed to initialize DPMAIF HW: %d\n", ret); + goto err_free_frag_bat; + } + + ret = t7xx_dpmaif_dl_snd_hw_bat_cnt(hw_info, rxq->bat_req->bat_size_cnt - 1); + if (ret) + goto err_free_frag_bat; + + ret = t7xx_dpmaif_dl_snd_hw_frg_cnt(hw_info, rxq->bat_frag->bat_size_cnt - 1); + if (ret) + goto err_free_frag_bat; + + t7xx_dpmaif_ul_clr_all_intr(hw_info); + t7xx_dpmaif_dl_clr_all_intr(hw_info); + dpmaif_ctrl->state = DPMAIF_STATE_PWRON; + t7xx_dpmaif_enable_irq(dpmaif_ctrl); + wake_up(&dpmaif_ctrl->tx_wq); + return 0; + +err_free_frag_bat: + t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_frag); + +err_free_normal_bat: + t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_req); + + return ret; +} + +static void t7xx_dpmaif_stop_sw(struct dpmaif_ctrl *dpmaif_ctrl) +{ + t7xx_dpmaif_tx_stop(dpmaif_ctrl); + t7xx_dpmaif_rx_stop(dpmaif_ctrl); +} + +static void t7xx_dpmaif_stop_hw(struct dpmaif_ctrl *dpmaif_ctrl) +{ + t7xx_dpmaif_hw_stop_all_txq(&dpmaif_ctrl->hw_info); + t7xx_dpmaif_hw_stop_all_rxq(&dpmaif_ctrl->hw_info); +} + +static int t7xx_dpmaif_stop(struct dpmaif_ctrl *dpmaif_ctrl) +{ + if (!dpmaif_ctrl->dpmaif_sw_init_done) { + dev_err(dpmaif_ctrl->dev, "dpmaif SW init fail\n"); + return -EFAULT; + } + + if (dpmaif_ctrl->state == DPMAIF_STATE_PWROFF) + return -EFAULT; + + t7xx_dpmaif_disable_irq(dpmaif_ctrl); + dpmaif_ctrl->state = DPMAIF_STATE_PWROFF; + t7xx_dpmaif_stop_sw(dpmaif_ctrl); + t7xx_dpmaif_tx_clear(dpmaif_ctrl); + 
t7xx_dpmaif_rx_clear(dpmaif_ctrl); + return 0; +} + +static int t7xx_dpmaif_suspend(struct t7xx_pci_dev *t7xx_dev, void *param) +{ + struct dpmaif_ctrl *dpmaif_ctrl = param; + + t7xx_dpmaif_tx_stop(dpmaif_ctrl); + t7xx_dpmaif_hw_stop_all_txq(&dpmaif_ctrl->hw_info); + t7xx_dpmaif_hw_stop_all_rxq(&dpmaif_ctrl->hw_info); + t7xx_dpmaif_disable_irq(dpmaif_ctrl); + t7xx_dpmaif_rx_stop(dpmaif_ctrl); + return 0; +} + +static void t7xx_dpmaif_unmask_dlq_intr(struct dpmaif_ctrl *dpmaif_ctrl) +{ + int qno; + + for (qno = 0; qno < DPMAIF_RXQ_NUM; qno++) + t7xx_dpmaif_dlq_unmask_rx_done(&dpmaif_ctrl->hw_info, qno); +} + +static void t7xx_dpmaif_start_txrx_qs(struct dpmaif_ctrl *dpmaif_ctrl) +{ + struct dpmaif_rx_queue *rxq; + struct dpmaif_tx_queue *txq; + unsigned int que_cnt; + + for (que_cnt = 0; que_cnt < DPMAIF_TXQ_NUM; que_cnt++) { + txq = &dpmaif_ctrl->txq[que_cnt]; + txq->que_started = true; + } + + for (que_cnt = 0; que_cnt < DPMAIF_RXQ_NUM; que_cnt++) { + rxq = &dpmaif_ctrl->rxq[que_cnt]; + rxq->que_started = true; + } +} + +static int t7xx_dpmaif_resume(struct t7xx_pci_dev *t7xx_dev, void *param) +{ + struct dpmaif_ctrl *dpmaif_ctrl = param; + + if (!dpmaif_ctrl) + return 0; + + t7xx_dpmaif_start_txrx_qs(dpmaif_ctrl); + t7xx_dpmaif_enable_irq(dpmaif_ctrl); + t7xx_dpmaif_unmask_dlq_intr(dpmaif_ctrl); + t7xx_dpmaif_start_hw(&dpmaif_ctrl->hw_info); + wake_up(&dpmaif_ctrl->tx_wq); + return 0; +} + +static int t7xx_dpmaif_pm_entity_init(struct dpmaif_ctrl *dpmaif_ctrl) +{ + struct md_pm_entity *dpmaif_pm_entity = &dpmaif_ctrl->dpmaif_pm_entity; + int ret; + + INIT_LIST_HEAD(&dpmaif_pm_entity->entity); + dpmaif_pm_entity->suspend = &t7xx_dpmaif_suspend; + dpmaif_pm_entity->suspend_late = NULL; + dpmaif_pm_entity->resume_early = NULL; + dpmaif_pm_entity->resume = &t7xx_dpmaif_resume; + dpmaif_pm_entity->id = PM_ENTITY_ID_DATA; + dpmaif_pm_entity->entity_param = dpmaif_ctrl; + + ret = t7xx_pci_pm_entity_register(dpmaif_ctrl->t7xx_dev, dpmaif_pm_entity); + if (ret) + dev_err(dpmaif_ctrl->dev, "dpmaif register pm_entity fail\n"); + + return ret; +} + +static int t7xx_dpmaif_pm_entity_release(struct dpmaif_ctrl *dpmaif_ctrl) +{ + struct md_pm_entity *dpmaif_pm_entity = &dpmaif_ctrl->dpmaif_pm_entity; + int ret; + + ret = t7xx_pci_pm_entity_unregister(dpmaif_ctrl->t7xx_dev, dpmaif_pm_entity); + if (ret < 0) + dev_err(dpmaif_ctrl->dev, "dpmaif register pm_entity fail\n"); + + return ret; +} + +int t7xx_dpmaif_md_state_callback(struct dpmaif_ctrl *dpmaif_ctrl, enum md_state state) +{ + int ret = 0; + + switch (state) { + case MD_STATE_WAITING_FOR_HS1: + ret = t7xx_dpmaif_start(dpmaif_ctrl); + break; + + case MD_STATE_EXCEPTION: + ret = t7xx_dpmaif_stop(dpmaif_ctrl); + break; + + case MD_STATE_STOPPED: + ret = t7xx_dpmaif_stop(dpmaif_ctrl); + break; + + case MD_STATE_WAITING_TO_STOP: + t7xx_dpmaif_stop_hw(dpmaif_ctrl); + break; + + default: + break; + } + + return ret; +} + +/** + * t7xx_dpmaif_hif_init() - Initialize data path. + * @t7xx_dev: MTK context structure. + * @callbacks: Callbacks implemented by the network layer to handle RX skb and + * event notifications. + * + * Allocate and initialize datapath control block. + * Register datapath ISR, TX and RX resources. + * + * Return: + * * dpmaif_ctrl pointer - Pointer to DPMAIF context structure. + * * NULL - In case of error. 
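+ * + * The returned context is expected to be released with t7xx_dpmaif_hif_exit() when the data path is torn down.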
+ */ +struct dpmaif_ctrl *t7xx_dpmaif_hif_init(struct t7xx_pci_dev *t7xx_dev, + struct dpmaif_callbacks *callbacks) +{ + struct device *dev = &t7xx_dev->pdev->dev; + struct dpmaif_ctrl *dpmaif_ctrl; + int ret; + + if (!callbacks) + return NULL; + + dpmaif_ctrl = devm_kzalloc(dev, sizeof(*dpmaif_ctrl), GFP_KERNEL); + if (!dpmaif_ctrl) + return NULL; + + dpmaif_ctrl->t7xx_dev = t7xx_dev; + dpmaif_ctrl->callbacks = callbacks; + dpmaif_ctrl->dev = dev; + dpmaif_ctrl->dpmaif_sw_init_done = false; + dpmaif_ctrl->hw_info.dev = dev; + dpmaif_ctrl->hw_info.pcie_base = t7xx_dev->base_addr.pcie_ext_reg_base - + t7xx_dev->base_addr.pcie_dev_reg_trsl_addr; + + ret = t7xx_dpmaif_pm_entity_init(dpmaif_ctrl); + if (ret) + return NULL; + + t7xx_dpmaif_register_pcie_irq(dpmaif_ctrl); + t7xx_dpmaif_disable_irq(dpmaif_ctrl); + + ret = t7xx_dpmaif_rxtx_sw_allocs(dpmaif_ctrl); + if (ret) { + t7xx_dpmaif_pm_entity_release(dpmaif_ctrl); + dev_err(dev, "Failed to allocate RX/TX SW resources: %d\n", ret); + return NULL; + } + + dpmaif_ctrl->dpmaif_sw_init_done = true; + return dpmaif_ctrl; +} + +void t7xx_dpmaif_hif_exit(struct dpmaif_ctrl *dpmaif_ctrl) +{ + if (dpmaif_ctrl->dpmaif_sw_init_done) { + t7xx_dpmaif_stop(dpmaif_ctrl); + t7xx_dpmaif_pm_entity_release(dpmaif_ctrl); + t7xx_dpmaif_sw_release(dpmaif_ctrl); + dpmaif_ctrl->dpmaif_sw_init_done = false; + } +} diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h new file mode 100644 index 000000000..1225ca0ed --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h @@ -0,0 +1,206 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Amir Hanania + * Haijun Liu + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Chiranjeevi Rapolu + * Eliot Lee + * Sreehari Kancharla + */ + +#ifndef __T7XX_HIF_DPMAIF_H__ +#define __T7XX_HIF_DPMAIF_H__ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_dpmaif.h" +#include "t7xx_pci.h" +#include "t7xx_state_monitor.h" + +/* SKB control buffer */ +struct t7xx_skb_cb { + u8 netif_idx; + u8 txq_number; + u8 rx_pkt_type; +}; + +#define T7XX_SKB_CB(__skb) ((struct t7xx_skb_cb *)(__skb)->cb) + +enum dpmaif_rdwr { + DPMAIF_READ, + DPMAIF_WRITE, +}; + +/* Structure of DL BAT */ +struct dpmaif_cur_rx_skb_info { + bool msg_pit_received; + struct sk_buff *cur_skb; + unsigned int cur_chn_idx; + unsigned int check_sum; + unsigned int pit_dp; + unsigned int pkt_type; + int err_payload; +}; + +struct dpmaif_bat { + unsigned int p_buffer_addr; + unsigned int buffer_addr_ext; +}; + +struct dpmaif_bat_skb { + struct sk_buff *skb; + dma_addr_t data_bus_addr; + unsigned int data_len; +}; + +struct dpmaif_bat_page { + struct page *page; + dma_addr_t data_bus_addr; + unsigned int offset; + unsigned int data_len; +}; + +enum bat_type { + BAT_TYPE_NORMAL, + BAT_TYPE_FRAG, +}; + +struct dpmaif_bat_request { + void *bat_base; + dma_addr_t bat_bus_addr; + unsigned int bat_size_cnt; + unsigned int bat_wr_idx; + unsigned int bat_release_rd_idx; + void *bat_skb; + unsigned int pkt_buf_sz; + unsigned long *bat_bitmap; + atomic_t refcnt; + spinlock_t mask_lock; /* Protects BAT mask */ + enum bat_type type; +}; + +struct dpmaif_rx_queue { + unsigned int index; + bool que_started; + unsigned int budget; + + void *pit_base; + dma_addr_t pit_bus_addr; + unsigned int pit_size_cnt; + + unsigned int pit_rd_idx; + unsigned int pit_wr_idx; + unsigned int 
pit_release_rd_idx; + + struct dpmaif_bat_request *bat_req; + struct dpmaif_bat_request *bat_frag; + + wait_queue_head_t rx_wq; + struct task_struct *rx_thread; + struct sk_buff_head skb_list; + unsigned int skb_list_max_len; + + struct workqueue_struct *worker; + struct work_struct dpmaif_rxq_work; + + atomic_t rx_processing; + + struct dpmaif_ctrl *dpmaif_ctrl; + unsigned int expect_pit_seq; + unsigned int pit_remain_release_cnt; + struct dpmaif_cur_rx_skb_info rx_data_info; +}; + +struct dpmaif_tx_queue { + unsigned int index; + bool que_started; + atomic_t tx_budget; + void *drb_base; + dma_addr_t drb_bus_addr; + unsigned int drb_size_cnt; + unsigned int drb_wr_idx; + unsigned int drb_rd_idx; + unsigned int drb_release_rd_idx; + void *drb_skb_base; + wait_queue_head_t req_wq; + struct workqueue_struct *worker; + struct work_struct dpmaif_tx_work; + spinlock_t tx_lock; /* Protects txq DRB */ + atomic_t tx_processing; + + struct dpmaif_ctrl *dpmaif_ctrl; + struct sk_buff_head tx_skb_head; +}; + +struct dpmaif_isr_para { + struct dpmaif_ctrl *dpmaif_ctrl; + unsigned char pcie_int; + unsigned char dlq_id; +}; + +enum dpmaif_state { + DPMAIF_STATE_MIN, + DPMAIF_STATE_PWROFF, + DPMAIF_STATE_PWRON, + DPMAIF_STATE_EXCEPTION, + DPMAIF_STATE_MAX +}; + +enum dpmaif_txq_state { + DMPAIF_TXQ_STATE_IRQ, + DMPAIF_TXQ_STATE_FULL, +}; + +struct dpmaif_callbacks { + void (*state_notify)(struct t7xx_pci_dev *t7xx_dev, + enum dpmaif_txq_state state, int txq_number); + void (*recv_skb)(struct t7xx_pci_dev *t7xx_dev, struct sk_buff *skb); +}; + +struct dpmaif_ctrl { + struct device *dev; + struct t7xx_pci_dev *t7xx_dev; + struct md_pm_entity dpmaif_pm_entity; + enum dpmaif_state state; + bool dpmaif_sw_init_done; + struct dpmaif_hw_info hw_info; + struct dpmaif_tx_queue txq[DPMAIF_TXQ_NUM]; + struct dpmaif_rx_queue rxq[DPMAIF_RXQ_NUM]; + + unsigned char rxq_int_mapping[DPMAIF_RXQ_NUM]; + struct dpmaif_isr_para isr_para[DPMAIF_RXQ_NUM]; + + struct dpmaif_bat_request bat_req; + struct dpmaif_bat_request bat_frag; + struct workqueue_struct *bat_release_wq; + struct work_struct bat_release_work; + + wait_queue_head_t tx_wq; + struct task_struct *tx_thread; + + struct dpmaif_callbacks *callbacks; +}; + +struct dpmaif_ctrl *t7xx_dpmaif_hif_init(struct t7xx_pci_dev *t7xx_dev, + struct dpmaif_callbacks *callbacks); +void t7xx_dpmaif_hif_exit(struct dpmaif_ctrl *dpmaif_ctrl); +int t7xx_dpmaif_md_state_callback(struct dpmaif_ctrl *dpmaif_ctrl, enum md_state state); +unsigned int t7xx_ring_buf_get_next_wr_idx(unsigned int buf_len, unsigned int buf_idx); +unsigned int t7xx_ring_buf_rd_wr_count(unsigned int total_cnt, unsigned int rd_idx, + unsigned int wr_idx, enum dpmaif_rdwr); + +#endif /* __T7XX_HIF_DPMAIF_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c new file mode 100644 index 000000000..91a0eb19e --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c @@ -0,0 +1,1243 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. 
+ * + * Authors: + * Amir Hanania + * Haijun Liu + * Eliot Lee + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Andy Shevchenko + * Chiranjeevi Rapolu + * Sreehari Kancharla + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_dpmaif.h" +#include "t7xx_hif_dpmaif.h" +#include "t7xx_hif_dpmaif_rx.h" +#include "t7xx_pci.h" + +#define DPMAIF_BAT_COUNT 8192 +#define DPMAIF_FRG_COUNT 4814 +#define DPMAIF_PIT_COUNT (DPMAIF_BAT_COUNT * 2) + +#define DPMAIF_BAT_CNT_THRESHOLD 30 +#define DPMAIF_PIT_CNT_THRESHOLD 60 +#define DPMAIF_RX_PUSH_THRESHOLD_MASK GENMASK(2, 0) +#define DPMAIF_NOTIFY_RELEASE_COUNT 128 +#define DPMAIF_POLL_PIT_TIME_US 20 +#define DPMAIF_POLL_PIT_MAX_TIME_US 2000 +#define DPMAIF_WQ_TIME_LIMIT_MS 2 +#define DPMAIF_CS_RESULT_PASS 0 + +/* Packet type */ +#define DES_PT_PD 0 +#define DES_PT_MSG 1 +/* Buffer type */ +#define PKT_BUF_FRAG 1 + +static unsigned int t7xx_normal_pit_bid(const struct dpmaif_pit *pit_info) +{ + u32 value; + + value = FIELD_GET(PD_PIT_H_BID, le32_to_cpu(pit_info->pd.footer)); + value <<= 13; + value += FIELD_GET(PD_PIT_BUFFER_ID, le32_to_cpu(pit_info->header)); + return value; +} + +static int t7xx_dpmaif_net_rx_push_thread(void *arg) +{ + struct dpmaif_rx_queue *q = arg; + struct dpmaif_ctrl *hif_ctrl; + struct dpmaif_callbacks *cb; + + hif_ctrl = q->dpmaif_ctrl; + cb = hif_ctrl->callbacks; + + while (!kthread_should_stop()) { + struct sk_buff *skb; + unsigned long flags; + + if (skb_queue_empty(&q->skb_list)) { + if (wait_event_interruptible(q->rx_wq, + !skb_queue_empty(&q->skb_list) || + kthread_should_stop())) + continue; + + if (kthread_should_stop()) + break; + } + + spin_lock_irqsave(&q->skb_list.lock, flags); + skb = __skb_dequeue(&q->skb_list); + spin_unlock_irqrestore(&q->skb_list.lock, flags); + + if (!skb) + continue; + + cb->recv_skb(hif_ctrl->t7xx_dev, skb); + cond_resched(); + } + + return 0; +} + +static int t7xx_dpmaif_update_bat_wr_idx(struct dpmaif_ctrl *dpmaif_ctrl, + const unsigned int q_num, const unsigned int bat_cnt) +{ + struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[q_num]; + struct dpmaif_bat_request *bat_req = rxq->bat_req; + unsigned int old_rl_idx, new_wr_idx, old_wr_idx; + + if (!rxq->que_started) { + dev_err(dpmaif_ctrl->dev, "RX queue %d has not been started\n", rxq->index); + return -EINVAL; + } + + old_rl_idx = bat_req->bat_release_rd_idx; + old_wr_idx = bat_req->bat_wr_idx; + new_wr_idx = old_wr_idx + bat_cnt; + + if (old_rl_idx > old_wr_idx && new_wr_idx >= old_rl_idx) + goto err_flow; + + if (new_wr_idx >= bat_req->bat_size_cnt) { + new_wr_idx -= bat_req->bat_size_cnt; + if (new_wr_idx >= old_rl_idx) + goto err_flow; + } + + bat_req->bat_wr_idx = new_wr_idx; + return 0; + +err_flow: + dev_err(dpmaif_ctrl->dev, "RX BAT flow check fail\n"); + return -EINVAL; +} + +static bool t7xx_alloc_and_map_skb_info(const struct dpmaif_ctrl *dpmaif_ctrl, + const unsigned int size, struct dpmaif_bat_skb *cur_skb) +{ + dma_addr_t data_bus_addr; + struct sk_buff *skb; + + skb = __dev_alloc_skb(size, GFP_KERNEL); + if (!skb) + return false; + + data_bus_addr = dma_map_single(dpmaif_ctrl->dev, skb->data, size, DMA_FROM_DEVICE); + if (dma_mapping_error(dpmaif_ctrl->dev, data_bus_addr)) { + dev_err_ratelimited(dpmaif_ctrl->dev, "DMA mapping error\n"); + dev_kfree_skb_any(skb); + return false; + } + + 
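/* Mapping succeeded: record the skb, its DMA address and length so the BAT entry can be filled in and the buffer unmapped later. */ +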
cur_skb->skb = skb; + cur_skb->data_bus_addr = data_bus_addr; + cur_skb->data_len = size; + + return true; +} + +static void t7xx_unmap_bat_skb(struct device *dev, struct dpmaif_bat_skb *bat_skb_base, + unsigned int index) +{ + struct dpmaif_bat_skb *bat_skb = bat_skb_base + index; + + if (bat_skb->skb) { + dma_unmap_single(dev, bat_skb->data_bus_addr, bat_skb->data_len, DMA_FROM_DEVICE); + dev_kfree_skb(bat_skb->skb); + bat_skb->skb = NULL; + } +} + +/** + * t7xx_dpmaif_rx_buf_alloc() - Allocate buffers for the BAT ring. + * @dpmaif_ctrl: Pointer to DPMAIF context structure. + * @bat_req: Pointer to BAT request structure. + * @q_num: Queue number. + * @buf_cnt: Number of buffers to allocate. + * @initial: Indicates if the ring is being populated for the first time. + * + * Allocate skb and store the start address of the data buffer into the BAT ring. + * If this is not the initial call, notify the HW about the new entries. + * + * Return: + * * 0 - Success. + * * -ERROR - Error code. + */ +int t7xx_dpmaif_rx_buf_alloc(struct dpmaif_ctrl *dpmaif_ctrl, + const struct dpmaif_bat_request *bat_req, + const unsigned int q_num, const unsigned int buf_cnt, + const bool initial) +{ + unsigned int i, bat_cnt, bat_max_cnt, bat_start_idx; + int ret; + + if (!buf_cnt || buf_cnt > bat_req->bat_size_cnt) + return -EINVAL; + + /* Check BAT buffer space */ + bat_max_cnt = bat_req->bat_size_cnt; + + bat_cnt = t7xx_ring_buf_rd_wr_count(bat_max_cnt, bat_req->bat_release_rd_idx, + bat_req->bat_wr_idx, DPMAIF_WRITE); + if (buf_cnt > bat_cnt) + return -ENOMEM; + + bat_start_idx = bat_req->bat_wr_idx; + + for (i = 0; i < buf_cnt; i++) { + unsigned int cur_bat_idx = bat_start_idx + i; + struct dpmaif_bat_skb *cur_skb; + struct dpmaif_bat *cur_bat; + + if (cur_bat_idx >= bat_max_cnt) + cur_bat_idx -= bat_max_cnt; + + cur_skb = (struct dpmaif_bat_skb *)bat_req->bat_skb + cur_bat_idx; + if (!cur_skb->skb && + !t7xx_alloc_and_map_skb_info(dpmaif_ctrl, bat_req->pkt_buf_sz, cur_skb)) + break; + + cur_bat = (struct dpmaif_bat *)bat_req->bat_base + cur_bat_idx; + cur_bat->buffer_addr_ext = upper_32_bits(cur_skb->data_bus_addr); + cur_bat->p_buffer_addr = lower_32_bits(cur_skb->data_bus_addr); + } + + if (!i) + return -ENOMEM; + + ret = t7xx_dpmaif_update_bat_wr_idx(dpmaif_ctrl, q_num, i); + if (ret) + goto err_unmap_skbs; + + if (!initial) { + unsigned int hw_wr_idx; + + ret = t7xx_dpmaif_dl_snd_hw_bat_cnt(&dpmaif_ctrl->hw_info, i); + if (ret) + goto err_unmap_skbs; + + hw_wr_idx = t7xx_dpmaif_dl_get_bat_wr_idx(&dpmaif_ctrl->hw_info, + DPF_RX_QNO_DFT); + if (hw_wr_idx != bat_req->bat_wr_idx) { + ret = -EFAULT; + dev_err(dpmaif_ctrl->dev, "Write index mismatch in RX ring\n"); + goto err_unmap_skbs; + } + } + + return 0; + +err_unmap_skbs: + while (--i > 0) + t7xx_unmap_bat_skb(dpmaif_ctrl->dev, bat_req->bat_skb, i); + + return ret; +} + +static int t7xx_dpmaifq_release_pit_entry(struct dpmaif_rx_queue *rxq, + const unsigned int rel_entry_num) +{ + struct dpmaif_hw_info *hw_info = &rxq->dpmaif_ctrl->hw_info; + unsigned int old_rel_idx, new_rel_idx, hw_wr_idx; + int ret; + + if (!rxq->que_started) + return 0; + + if (rel_entry_num >= rxq->pit_size_cnt) { + dev_err(rxq->dpmaif_ctrl->dev, "Invalid PIT release index\n"); + return -EINVAL; + } + + old_rel_idx = rxq->pit_release_rd_idx; + new_rel_idx = old_rel_idx + rel_entry_num; + hw_wr_idx = rxq->pit_wr_idx; + if (hw_wr_idx < old_rel_idx && new_rel_idx >= rxq->pit_size_cnt) + new_rel_idx -= rxq->pit_size_cnt; + + ret = t7xx_dpmaif_dlq_add_pit_remain_cnt(hw_info, rxq->index, 
rel_entry_num); + if (ret) { + dev_err(rxq->dpmaif_ctrl->dev, "PIT release failure: %d\n", ret); + return ret; + } + + rxq->pit_release_rd_idx = new_rel_idx; + return 0; +} + +static void t7xx_dpmaif_set_bat_mask(struct dpmaif_bat_request *bat_req, unsigned int idx) +{ + unsigned long flags; + + spin_lock_irqsave(&bat_req->mask_lock, flags); + set_bit(idx, bat_req->bat_bitmap); + spin_unlock_irqrestore(&bat_req->mask_lock, flags); +} + +static int t7xx_frag_bat_cur_bid_check(struct dpmaif_rx_queue *rxq, + const unsigned int cur_bid) +{ + struct dpmaif_bat_request *bat_frag = rxq->bat_frag; + struct dpmaif_bat_page *bat_page; + + if (cur_bid >= DPMAIF_FRG_COUNT) + return -EINVAL; + + bat_page = bat_frag->bat_skb + cur_bid; + if (!bat_page->page) + return -EINVAL; + + return 0; +} + +static void t7xx_unmap_bat_page(struct device *dev, struct dpmaif_bat_page *bat_page_base, + unsigned int index) +{ + struct dpmaif_bat_page *bat_page = bat_page_base + index; + + if (bat_page->page) { + dma_unmap_page(dev, bat_page->data_bus_addr, bat_page->data_len, DMA_FROM_DEVICE); + put_page(bat_page->page); + bat_page->page = NULL; + } +} + +/** + * t7xx_dpmaif_rx_frag_alloc() - Allocates buffers for the Fragment BAT ring. + * @dpmaif_ctrl: Pointer to DPMAIF context structure. + * @bat_req: Pointer to BAT request structure. + * @buf_cnt: Number of buffers to allocate. + * @initial: Indicates if the ring is being populated for the first time. + * + * Fragment BAT is used when the received packet does not fit in a normal BAT entry. + * This function allocates a page fragment and stores the start address of the page + * into the Fragment BAT ring. + * If this is not the initial call, notify the HW about the new entries. + * + * Return: + * * 0 - Success. + * * -ERROR - Error code. 
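+ * + * Note: if allocation stops early, pages mapped during an initial population are unmapped again, while on a refill the entries already added are still reported to the HW.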
+ */ +int t7xx_dpmaif_rx_frag_alloc(struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req, + const unsigned int buf_cnt, const bool initial) +{ + unsigned int buf_space, cur_bat_idx = bat_req->bat_wr_idx; + struct dpmaif_bat_page *bat_skb = bat_req->bat_skb; + int ret = 0, i; + + if (!buf_cnt || buf_cnt > bat_req->bat_size_cnt) + return -EINVAL; + + buf_space = t7xx_ring_buf_rd_wr_count(bat_req->bat_size_cnt, + bat_req->bat_release_rd_idx, bat_req->bat_wr_idx, + DPMAIF_WRITE); + if (buf_cnt > buf_space) { + dev_err(dpmaif_ctrl->dev, + "Requested more buffers than the space available in RX frag ring\n"); + return -EINVAL; + } + + for (i = 0; i < buf_cnt; i++) { + struct dpmaif_bat_page *cur_page = bat_skb + cur_bat_idx; + struct dpmaif_bat *cur_bat; + dma_addr_t data_base_addr; + + if (!cur_page->page) { + unsigned long offset; + struct page *page; + void *data; + + data = netdev_alloc_frag(bat_req->pkt_buf_sz); + if (!data) + break; + + page = virt_to_head_page(data); + offset = data - page_address(page); + + data_base_addr = dma_map_page(dpmaif_ctrl->dev, page, offset, + bat_req->pkt_buf_sz, DMA_FROM_DEVICE); + if (dma_mapping_error(dpmaif_ctrl->dev, data_base_addr)) { + put_page(virt_to_head_page(data)); + dev_err(dpmaif_ctrl->dev, "DMA mapping fail\n"); + break; + } + + cur_page->page = page; + cur_page->data_bus_addr = data_base_addr; + cur_page->offset = offset; + cur_page->data_len = bat_req->pkt_buf_sz; + } + + data_base_addr = cur_page->data_bus_addr; + cur_bat = (struct dpmaif_bat *)bat_req->bat_base + cur_bat_idx; + cur_bat->buffer_addr_ext = upper_32_bits(data_base_addr); + cur_bat->p_buffer_addr = lower_32_bits(data_base_addr); + cur_bat_idx = t7xx_ring_buf_get_next_wr_idx(bat_req->bat_size_cnt, cur_bat_idx); + } + + bat_req->bat_wr_idx = cur_bat_idx; + + if (!initial) + t7xx_dpmaif_dl_snd_hw_frg_cnt(&dpmaif_ctrl->hw_info, i); + + if (i < buf_cnt) { + ret = -ENOMEM; + if (initial) { + while (--i > 0) + t7xx_unmap_bat_page(dpmaif_ctrl->dev, bat_req->bat_skb, i); + } + } + + return ret; +} + +static int t7xx_dpmaif_set_frag_to_skb(const struct dpmaif_rx_queue *rxq, + const struct dpmaif_pit *pkt_info, + struct sk_buff *skb) +{ + unsigned long long data_bus_addr, data_base_addr; + struct device *dev = rxq->dpmaif_ctrl->dev; + struct dpmaif_bat_page *page_info; + unsigned int data_len; + int data_offset; + + page_info = rxq->bat_frag->bat_skb; + page_info += t7xx_normal_pit_bid(pkt_info); + dma_unmap_page(dev, page_info->data_bus_addr, page_info->data_len, DMA_FROM_DEVICE); + + if (!page_info->page) + return -EINVAL; + + data_bus_addr = le32_to_cpu(pkt_info->pd.data_addr_h); + data_bus_addr = (data_bus_addr << 32) + le32_to_cpu(pkt_info->pd.data_addr_l); + data_base_addr = page_info->data_bus_addr; + data_offset = data_bus_addr - data_base_addr; + data_offset += page_info->offset; + data_len = FIELD_GET(PD_PIT_DATA_LEN, le32_to_cpu(pkt_info->header)); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page_info->page, + data_offset, data_len, page_info->data_len); + + page_info->page = NULL; + page_info->offset = 0; + page_info->data_len = 0; + return 0; +} + +static int t7xx_dpmaif_get_frag(struct dpmaif_rx_queue *rxq, + const struct dpmaif_pit *pkt_info, + const struct dpmaif_cur_rx_skb_info *skb_info) +{ + unsigned int cur_bid = t7xx_normal_pit_bid(pkt_info); + int ret; + + ret = t7xx_frag_bat_cur_bid_check(rxq, cur_bid); + if (ret < 0) + return ret; + + ret = t7xx_dpmaif_set_frag_to_skb(rxq, pkt_info, skb_info->cur_skb); + if (ret < 0) { + 
dev_err(rxq->dpmaif_ctrl->dev, "Failed to set frag data to skb: %d\n", ret); + return ret; + } + + t7xx_dpmaif_set_bat_mask(rxq->bat_frag, cur_bid); + return 0; +} + +static int t7xx_bat_cur_bid_check(struct dpmaif_rx_queue *rxq, const unsigned int cur_bid) +{ + struct dpmaif_bat_skb *bat_skb = rxq->bat_req->bat_skb; + + bat_skb += cur_bid; + if (cur_bid >= DPMAIF_BAT_COUNT || !bat_skb->skb) + return -EINVAL; + + return 0; +} + +static int t7xx_dpmaif_read_pit_seq(const struct dpmaif_pit *pit) +{ + return FIELD_GET(PD_PIT_PIT_SEQ, le32_to_cpu(pit->pd.footer)); +} + +static int t7xx_dpmaif_check_pit_seq(struct dpmaif_rx_queue *rxq, + const struct dpmaif_pit *pit) +{ + unsigned int cur_pit_seq, expect_pit_seq = rxq->expect_pit_seq; + + if (read_poll_timeout_atomic(t7xx_dpmaif_read_pit_seq, cur_pit_seq, + cur_pit_seq == expect_pit_seq, DPMAIF_POLL_PIT_TIME_US, + DPMAIF_POLL_PIT_MAX_TIME_US, false, pit)) + return -EFAULT; + + rxq->expect_pit_seq++; + if (rxq->expect_pit_seq >= DPMAIF_DL_PIT_SEQ_VALUE) + rxq->expect_pit_seq = 0; + + return 0; +} + +static unsigned int t7xx_dpmaif_avail_pkt_bat_cnt(struct dpmaif_bat_request *bat_req) +{ + unsigned int zero_index; + unsigned long flags; + + spin_lock_irqsave(&bat_req->mask_lock, flags); + + zero_index = find_next_zero_bit(bat_req->bat_bitmap, bat_req->bat_size_cnt, + bat_req->bat_release_rd_idx); + + if (zero_index < bat_req->bat_size_cnt) { + spin_unlock_irqrestore(&bat_req->mask_lock, flags); + return zero_index - bat_req->bat_release_rd_idx; + } + + /* limiting the search till bat_release_rd_idx */ + zero_index = find_first_zero_bit(bat_req->bat_bitmap, bat_req->bat_release_rd_idx); + spin_unlock_irqrestore(&bat_req->mask_lock, flags); + return bat_req->bat_size_cnt - bat_req->bat_release_rd_idx + zero_index; +} + +static int t7xx_dpmaif_release_bat_entry(const struct dpmaif_rx_queue *rxq, + const unsigned int rel_entry_num, + const enum bat_type buf_type) +{ + struct dpmaif_hw_info *hw_info = &rxq->dpmaif_ctrl->hw_info; + unsigned int old_rel_idx, new_rel_idx, hw_rd_idx, i; + struct dpmaif_bat_request *bat; + unsigned long flags; + + if (!rxq->que_started || !rel_entry_num) + return -EINVAL; + + if (buf_type == BAT_TYPE_FRAG) { + bat = rxq->bat_frag; + hw_rd_idx = t7xx_dpmaif_dl_get_frg_rd_idx(hw_info, rxq->index); + } else { + bat = rxq->bat_req; + hw_rd_idx = t7xx_dpmaif_dl_get_bat_rd_idx(hw_info, rxq->index); + } + + if (rel_entry_num >= bat->bat_size_cnt) + return -EINVAL; + + old_rel_idx = bat->bat_release_rd_idx; + new_rel_idx = old_rel_idx + rel_entry_num; + + /* Do not need to release if the queue is empty */ + if (bat->bat_wr_idx == old_rel_idx) + return 0; + + if (hw_rd_idx >= old_rel_idx) { + if (new_rel_idx > hw_rd_idx) + return -EINVAL; + } + + if (new_rel_idx >= bat->bat_size_cnt) { + new_rel_idx -= bat->bat_size_cnt; + if (new_rel_idx > hw_rd_idx) + return -EINVAL; + } + + spin_lock_irqsave(&bat->mask_lock, flags); + for (i = 0; i < rel_entry_num; i++) { + unsigned int index = bat->bat_release_rd_idx + i; + + if (index >= bat->bat_size_cnt) + index -= bat->bat_size_cnt; + + clear_bit(index, bat->bat_bitmap); + } + spin_unlock_irqrestore(&bat->mask_lock, flags); + + bat->bat_release_rd_idx = new_rel_idx; + return rel_entry_num; +} + +static int t7xx_dpmaif_pit_release_and_add(struct dpmaif_rx_queue *rxq) +{ + int ret; + + if (rxq->pit_remain_release_cnt < DPMAIF_PIT_CNT_THRESHOLD) + return 0; + + ret = t7xx_dpmaifq_release_pit_entry(rxq, rxq->pit_remain_release_cnt); + if (ret) + return ret; + + rxq->pit_remain_release_cnt = 0; 
+ return 0; +} + +static int t7xx_dpmaif_bat_release_and_add(const struct dpmaif_rx_queue *rxq) +{ + unsigned int bid_cnt; + int ret; + + bid_cnt = t7xx_dpmaif_avail_pkt_bat_cnt(rxq->bat_req); + if (bid_cnt < DPMAIF_BAT_CNT_THRESHOLD) + return 0; + + ret = t7xx_dpmaif_release_bat_entry(rxq, bid_cnt, BAT_TYPE_NORMAL); + if (ret <= 0) { + dev_err(rxq->dpmaif_ctrl->dev, "Release PKT BAT failed: %d\n", ret); + return ret; + } + + ret = t7xx_dpmaif_rx_buf_alloc(rxq->dpmaif_ctrl, rxq->bat_req, rxq->index, bid_cnt, false); + if (ret < 0) + dev_err(rxq->dpmaif_ctrl->dev, "Allocate new RX buffer failed: %d\n", ret); + + return ret; +} + +static int t7xx_dpmaif_frag_bat_release_and_add(const struct dpmaif_rx_queue *rxq) +{ + unsigned int bid_cnt; + int ret; + + bid_cnt = t7xx_dpmaif_avail_pkt_bat_cnt(rxq->bat_frag); + if (bid_cnt < DPMAIF_BAT_CNT_THRESHOLD) + return 0; + + ret = t7xx_dpmaif_release_bat_entry(rxq, bid_cnt, BAT_TYPE_FRAG); + if (ret <= 0) { + dev_err(rxq->dpmaif_ctrl->dev, "Release BAT entry failed: %d\n", ret); + return ret; + } + + return t7xx_dpmaif_rx_frag_alloc(rxq->dpmaif_ctrl, rxq->bat_frag, bid_cnt, false); +} + +static void t7xx_dpmaif_parse_msg_pit(const struct dpmaif_rx_queue *rxq, + const struct dpmaif_pit *msg_pit, + struct dpmaif_cur_rx_skb_info *skb_info) +{ + int header = le32_to_cpu(msg_pit->header); + + skb_info->cur_chn_idx = FIELD_GET(MSG_PIT_CHANNEL_ID, header); + skb_info->check_sum = FIELD_GET(MSG_PIT_CHECKSUM, header); + skb_info->pit_dp = FIELD_GET(MSG_PIT_DP, header); + skb_info->pkt_type = FIELD_GET(MSG_PIT_IP, le32_to_cpu(msg_pit->msg.params_3)); +} + +static int t7xx_dpmaif_set_data_to_skb(const struct dpmaif_rx_queue *rxq, + const struct dpmaif_pit *pkt_info, + struct dpmaif_cur_rx_skb_info *skb_info) +{ + unsigned long long data_bus_addr, data_base_addr; + struct device *dev = rxq->dpmaif_ctrl->dev; + struct dpmaif_bat_skb *bat_skb; + unsigned int data_len; + struct sk_buff *skb; + int data_offset; + + bat_skb = rxq->bat_req->bat_skb; + bat_skb += t7xx_normal_pit_bid(pkt_info); + dma_unmap_single(dev, bat_skb->data_bus_addr, bat_skb->data_len, DMA_FROM_DEVICE); + + data_bus_addr = le32_to_cpu(pkt_info->pd.data_addr_h); + data_bus_addr = (data_bus_addr << 32) + le32_to_cpu(pkt_info->pd.data_addr_l); + data_base_addr = bat_skb->data_bus_addr; + data_offset = data_bus_addr - data_base_addr; + data_len = FIELD_GET(PD_PIT_DATA_LEN, le32_to_cpu(pkt_info->header)); + skb = bat_skb->skb; + skb->len = 0; + skb_reset_tail_pointer(skb); + skb_reserve(skb, data_offset); + + if (skb->tail + data_len > skb->end) { + dev_err(dev, "No buffer space available\n"); + return -ENOBUFS; + } + + skb_put(skb, data_len); + skb_info->cur_skb = skb; + bat_skb->skb = NULL; + return 0; +} + +static int t7xx_dpmaif_get_rx_pkt(struct dpmaif_rx_queue *rxq, + const struct dpmaif_pit *pkt_info, + struct dpmaif_cur_rx_skb_info *skb_info) +{ + unsigned int cur_bid = t7xx_normal_pit_bid(pkt_info); + int ret; + + ret = t7xx_bat_cur_bid_check(rxq, cur_bid); + if (ret < 0) + return ret; + + ret = t7xx_dpmaif_set_data_to_skb(rxq, pkt_info, skb_info); + if (ret < 0) { + dev_err(rxq->dpmaif_ctrl->dev, "RX set data to skb failed: %d\n", ret); + return ret; + } + + t7xx_dpmaif_set_bat_mask(rxq->bat_req, cur_bid); + return 0; +} + +static int t7xx_dpmaifq_rx_notify_hw(struct dpmaif_rx_queue *rxq) +{ + struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl; + int ret; + + queue_work(dpmaif_ctrl->bat_release_wq, &dpmaif_ctrl->bat_release_work); + + ret = t7xx_dpmaif_pit_release_and_add(rxq); + if (ret < 
0) + dev_err(dpmaif_ctrl->dev, "RXQ%u update PIT failed: %d\n", rxq->index, ret); + + return ret; +} + +static void t7xx_dpmaif_rx_skb_enqueue(struct dpmaif_rx_queue *rxq, struct sk_buff *skb) +{ + unsigned long flags; + + spin_lock_irqsave(&rxq->skb_list.lock, flags); + if (rxq->skb_list.qlen < rxq->skb_list_max_len) + __skb_queue_tail(&rxq->skb_list, skb); + else + dev_kfree_skb_any(skb); + spin_unlock_irqrestore(&rxq->skb_list.lock, flags); +} + +static void t7xx_dpmaif_rx_skb(struct dpmaif_rx_queue *rxq, + struct dpmaif_cur_rx_skb_info *skb_info) +{ + struct sk_buff *skb = skb_info->cur_skb; + struct t7xx_skb_cb *skb_cb; + u8 netif_id; + + skb_info->cur_skb = NULL; + + if (skb_info->pit_dp) { + dev_kfree_skb_any(skb); + return; + } + + skb->ip_summed = skb_info->check_sum == DPMAIF_CS_RESULT_PASS ? CHECKSUM_UNNECESSARY : + CHECKSUM_NONE; + netif_id = FIELD_GET(NETIF_MASK, skb_info->cur_chn_idx); + skb_cb = T7XX_SKB_CB(skb); + skb_cb->netif_idx = netif_id; + skb_cb->rx_pkt_type = skb_info->pkt_type; + t7xx_dpmaif_rx_skb_enqueue(rxq, skb); +} + +static int t7xx_dpmaif_rx_start(struct dpmaif_rx_queue *rxq, const unsigned int pit_cnt, + const unsigned long timeout) +{ + unsigned int cur_pit, pit_len, rx_cnt, recv_skb_cnt = 0; + struct device *dev = rxq->dpmaif_ctrl->dev; + struct dpmaif_cur_rx_skb_info *skb_info; + int ret = 0; + + pit_len = rxq->pit_size_cnt; + skb_info = &rxq->rx_data_info; + cur_pit = rxq->pit_rd_idx; + + for (rx_cnt = 0; rx_cnt < pit_cnt; rx_cnt++) { + struct dpmaif_pit *pkt_info; + u32 val; + + if (!skb_info->msg_pit_received && time_after_eq(jiffies, timeout)) + break; + + pkt_info = (struct dpmaif_pit *)rxq->pit_base + cur_pit; + if (t7xx_dpmaif_check_pit_seq(rxq, pkt_info)) { + dev_err_ratelimited(dev, "RXQ%u checks PIT SEQ fail\n", rxq->index); + return -EAGAIN; + } + + val = FIELD_GET(PD_PIT_PACKET_TYPE, le32_to_cpu(pkt_info->header)); + if (val == DES_PT_MSG) { + if (skb_info->msg_pit_received) + dev_err(dev, "RXQ%u received repeated PIT\n", rxq->index); + + skb_info->msg_pit_received = true; + t7xx_dpmaif_parse_msg_pit(rxq, pkt_info, skb_info); + } else { /* DES_PT_PD */ + val = FIELD_GET(PD_PIT_BUFFER_TYPE, le32_to_cpu(pkt_info->header)); + if (val != PKT_BUF_FRAG) + ret = t7xx_dpmaif_get_rx_pkt(rxq, pkt_info, skb_info); + else if (!skb_info->cur_skb) + ret = -EINVAL; + else + ret = t7xx_dpmaif_get_frag(rxq, pkt_info, skb_info); + + if (ret < 0) { + skb_info->err_payload = 1; + dev_err_ratelimited(dev, "RXQ%u error payload\n", rxq->index); + } + + val = FIELD_GET(PD_PIT_CONT, le32_to_cpu(pkt_info->header)); + if (!val) { + if (!skb_info->err_payload) { + t7xx_dpmaif_rx_skb(rxq, skb_info); + } else if (skb_info->cur_skb) { + dev_kfree_skb_any(skb_info->cur_skb); + skb_info->cur_skb = NULL; + } + + memset(skb_info, 0, sizeof(*skb_info)); + + recv_skb_cnt++; + if (!(recv_skb_cnt & DPMAIF_RX_PUSH_THRESHOLD_MASK)) { + wake_up_all(&rxq->rx_wq); + recv_skb_cnt = 0; + } + } + } + + cur_pit = t7xx_ring_buf_get_next_wr_idx(pit_len, cur_pit); + rxq->pit_rd_idx = cur_pit; + rxq->pit_remain_release_cnt++; + + if (rx_cnt > 0 && !(rx_cnt % DPMAIF_NOTIFY_RELEASE_COUNT)) { + ret = t7xx_dpmaifq_rx_notify_hw(rxq); + if (ret < 0) + break; + } + } + + if (recv_skb_cnt) + wake_up_all(&rxq->rx_wq); + + if (!ret) + ret = t7xx_dpmaifq_rx_notify_hw(rxq); + + if (ret) + return ret; + + return rx_cnt; +} + +static unsigned int t7xx_dpmaifq_poll_pit(struct dpmaif_rx_queue *rxq) +{ + unsigned int hw_wr_idx, pit_cnt; + + if (!rxq->que_started) + return 0; + + hw_wr_idx = 
t7xx_dpmaif_dl_dlq_pit_get_wr_idx(&rxq->dpmaif_ctrl->hw_info, rxq->index); + pit_cnt = t7xx_ring_buf_rd_wr_count(rxq->pit_size_cnt, rxq->pit_rd_idx, hw_wr_idx, + DPMAIF_READ); + rxq->pit_wr_idx = hw_wr_idx; + return pit_cnt; +} + +static int t7xx_dpmaif_rx_data_collect(struct dpmaif_ctrl *dpmaif_ctrl, + const unsigned int q_num, const unsigned int budget) +{ + struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[q_num]; + unsigned long time_limit; + unsigned int cnt; + + time_limit = jiffies + msecs_to_jiffies(DPMAIF_WQ_TIME_LIMIT_MS); + + while ((cnt = t7xx_dpmaifq_poll_pit(rxq))) { + unsigned int rd_cnt; + int real_cnt; + + rd_cnt = min(cnt, budget); + + real_cnt = t7xx_dpmaif_rx_start(rxq, rd_cnt, time_limit); + if (real_cnt < 0) + return real_cnt; + + if (real_cnt < cnt) + return -EAGAIN; + } + + return 0; +} + +static void t7xx_dpmaif_do_rx(struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_rx_queue *rxq) +{ + struct dpmaif_hw_info *hw_info = &dpmaif_ctrl->hw_info; + int ret; + + ret = t7xx_dpmaif_rx_data_collect(dpmaif_ctrl, rxq->index, rxq->budget); + if (ret < 0) { + /* Try one more time */ + queue_work(rxq->worker, &rxq->dpmaif_rxq_work); + t7xx_dpmaif_clr_ip_busy_sts(hw_info); + } else { + t7xx_dpmaif_clr_ip_busy_sts(hw_info); + t7xx_dpmaif_dlq_unmask_rx_done(hw_info, rxq->index); + } +} + +static void t7xx_dpmaif_rxq_work(struct work_struct *work) +{ + struct dpmaif_rx_queue *rxq = container_of(work, struct dpmaif_rx_queue, dpmaif_rxq_work); + struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl; + int ret; + + atomic_set(&rxq->rx_processing, 1); + /* Ensure rx_processing is changed to 1 before actually begin RX flow */ + smp_mb(); + + if (!rxq->que_started) { + atomic_set(&rxq->rx_processing, 0); + dev_err(dpmaif_ctrl->dev, "Work RXQ: %d has not been started\n", rxq->index); + return; + } + + ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev); + if (ret < 0 && ret != -EACCES) + return; + + t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev); + if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) + t7xx_dpmaif_do_rx(dpmaif_ctrl, rxq); + + t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev); + pm_runtime_mark_last_busy(dpmaif_ctrl->dev); + pm_runtime_put_autosuspend(dpmaif_ctrl->dev); + atomic_set(&rxq->rx_processing, 0); +} + +void t7xx_dpmaif_irq_rx_done(struct dpmaif_ctrl *dpmaif_ctrl, const unsigned int que_mask) +{ + struct dpmaif_rx_queue *rxq; + int qno; + + qno = ffs(que_mask) - 1; + if (qno < 0 || qno > DPMAIF_RXQ_NUM - 1) { + dev_err(dpmaif_ctrl->dev, "Invalid RXQ number: %u\n", qno); + return; + } + + rxq = &dpmaif_ctrl->rxq[qno]; + queue_work(rxq->worker, &rxq->dpmaif_rxq_work); +} + +static void t7xx_dpmaif_base_free(const struct dpmaif_ctrl *dpmaif_ctrl, + const struct dpmaif_bat_request *bat_req) +{ + if (bat_req->bat_base) + dma_free_coherent(dpmaif_ctrl->dev, + bat_req->bat_size_cnt * sizeof(struct dpmaif_bat), + bat_req->bat_base, bat_req->bat_bus_addr); +} + +/** + * t7xx_dpmaif_bat_alloc() - Allocate the BAT ring buffer. + * @dpmaif_ctrl: Pointer to DPMAIF context structure. + * @bat_req: Pointer to BAT request structure. + * @buf_type: BAT ring type. + * + * This function allocates the BAT ring buffer shared with the HW device, also allocates + * a buffer used to store information about the BAT skbs for further release. + * + * Return: + * * 0 - Success. + * * -ERROR - Error code. 
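+ * + * The ring is reference counted and is freed by t7xx_dpmaif_bat_free() once the last user drops its reference.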
+ */ +int t7xx_dpmaif_bat_alloc(const struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req, + const enum bat_type buf_type) +{ + int sw_buf_size; + + if (buf_type == BAT_TYPE_FRAG) { + sw_buf_size = sizeof(struct dpmaif_bat_page); + bat_req->bat_size_cnt = DPMAIF_FRG_COUNT; + bat_req->pkt_buf_sz = DPMAIF_HW_FRG_PKTBUF; + } else { + sw_buf_size = sizeof(struct dpmaif_bat_skb); + bat_req->bat_size_cnt = DPMAIF_BAT_COUNT; + bat_req->pkt_buf_sz = DPMAIF_HW_BAT_PKTBUF; + } + + bat_req->type = buf_type; + bat_req->bat_wr_idx = 0; + bat_req->bat_release_rd_idx = 0; + + bat_req->bat_base = dma_alloc_coherent(dpmaif_ctrl->dev, + bat_req->bat_size_cnt * sizeof(struct dpmaif_bat), + &bat_req->bat_bus_addr, GFP_KERNEL | __GFP_ZERO); + if (!bat_req->bat_base) + return -ENOMEM; + + /* For AP SW to record skb information */ + bat_req->bat_skb = devm_kzalloc(dpmaif_ctrl->dev, bat_req->bat_size_cnt * sw_buf_size, + GFP_KERNEL); + if (!bat_req->bat_skb) + goto err_free_dma_mem; + + bat_req->bat_bitmap = bitmap_zalloc(bat_req->bat_size_cnt, GFP_KERNEL); + if (!bat_req->bat_bitmap) + goto err_free_dma_mem; + + spin_lock_init(&bat_req->mask_lock); + atomic_set(&bat_req->refcnt, 0); + return 0; + +err_free_dma_mem: + t7xx_dpmaif_base_free(dpmaif_ctrl, bat_req); + + return -ENOMEM; +} + +void t7xx_dpmaif_bat_free(const struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req) +{ + if (!bat_req || !atomic_dec_and_test(&bat_req->refcnt)) + return; + + bitmap_free(bat_req->bat_bitmap); + bat_req->bat_bitmap = NULL; + + if (bat_req->bat_skb) { + unsigned int i; + + for (i = 0; i < bat_req->bat_size_cnt; i++) { + if (bat_req->type == BAT_TYPE_FRAG) + t7xx_unmap_bat_page(dpmaif_ctrl->dev, bat_req->bat_skb, i); + else + t7xx_unmap_bat_skb(dpmaif_ctrl->dev, bat_req->bat_skb, i); + } + } + + t7xx_dpmaif_base_free(dpmaif_ctrl, bat_req); +} + +static int t7xx_dpmaif_rx_alloc(struct dpmaif_rx_queue *rxq) +{ + rxq->pit_size_cnt = DPMAIF_PIT_COUNT; + rxq->pit_rd_idx = 0; + rxq->pit_wr_idx = 0; + rxq->pit_release_rd_idx = 0; + rxq->expect_pit_seq = 0; + rxq->pit_remain_release_cnt = 0; + memset(&rxq->rx_data_info, 0, sizeof(rxq->rx_data_info)); + + rxq->pit_base = dma_alloc_coherent(rxq->dpmaif_ctrl->dev, + rxq->pit_size_cnt * sizeof(struct dpmaif_pit), + &rxq->pit_bus_addr, GFP_KERNEL | __GFP_ZERO); + if (!rxq->pit_base) + return -ENOMEM; + + rxq->bat_req = &rxq->dpmaif_ctrl->bat_req; + atomic_inc(&rxq->bat_req->refcnt); + + rxq->bat_frag = &rxq->dpmaif_ctrl->bat_frag; + atomic_inc(&rxq->bat_frag->refcnt); + return 0; +} + +static void t7xx_dpmaif_rx_buf_free(const struct dpmaif_rx_queue *rxq) +{ + if (!rxq->dpmaif_ctrl) + return; + + t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_req); + t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_frag); + + if (rxq->pit_base) + dma_free_coherent(rxq->dpmaif_ctrl->dev, + rxq->pit_size_cnt * sizeof(struct dpmaif_pit), + rxq->pit_base, rxq->pit_bus_addr); +} + +int t7xx_dpmaif_rxq_init(struct dpmaif_rx_queue *queue) +{ + int ret; + + ret = t7xx_dpmaif_rx_alloc(queue); + if (ret < 0) { + dev_err(queue->dpmaif_ctrl->dev, "Failed to allocate RX buffers: %d\n", ret); + return ret; + } + + INIT_WORK(&queue->dpmaif_rxq_work, t7xx_dpmaif_rxq_work); + + queue->worker = alloc_workqueue("dpmaif_rx%d_worker", + WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, 1, queue->index); + if (!queue->worker) { + ret = -ENOMEM; + goto err_free_rx_buffer; + } + + init_waitqueue_head(&queue->rx_wq); + skb_queue_head_init(&queue->skb_list); + queue->skb_list_max_len = 
queue->bat_req->pkt_buf_sz; + queue->rx_thread = kthread_run(t7xx_dpmaif_net_rx_push_thread, + queue, "dpmaif_rx%d_push", queue->index); + + ret = PTR_ERR_OR_ZERO(queue->rx_thread); + if (ret) + goto err_free_workqueue; + + return 0; + +err_free_workqueue: + destroy_workqueue(queue->worker); + +err_free_rx_buffer: + t7xx_dpmaif_rx_buf_free(queue); + + return ret; +} + +void t7xx_dpmaif_rxq_free(struct dpmaif_rx_queue *queue) +{ + if (queue->worker) + destroy_workqueue(queue->worker); + + if (queue->rx_thread) + kthread_stop(queue->rx_thread); + + skb_queue_purge(&queue->skb_list); + t7xx_dpmaif_rx_buf_free(queue); +} + +static void t7xx_dpmaif_bat_release_work(struct work_struct *work) +{ + struct dpmaif_ctrl *dpmaif_ctrl = container_of(work, struct dpmaif_ctrl, bat_release_work); + struct dpmaif_rx_queue *rxq; + int ret; + + ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev); + if (ret < 0 && ret != -EACCES) + return; + + t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev); + + /* ALL RXQ use one BAT table, so choose DPF_RX_QNO_DFT */ + rxq = &dpmaif_ctrl->rxq[DPF_RX_QNO_DFT]; + if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) { + t7xx_dpmaif_bat_release_and_add(rxq); + t7xx_dpmaif_frag_bat_release_and_add(rxq); + } + + t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev); + pm_runtime_mark_last_busy(dpmaif_ctrl->dev); + pm_runtime_put_autosuspend(dpmaif_ctrl->dev); +} + +int t7xx_dpmaif_bat_rel_wq_alloc(struct dpmaif_ctrl *dpmaif_ctrl) +{ + dpmaif_ctrl->bat_release_wq = alloc_workqueue("dpmaif_bat_release_work_queue", + WQ_MEM_RECLAIM, 1); + if (!dpmaif_ctrl->bat_release_wq) + return -ENOMEM; + + INIT_WORK(&dpmaif_ctrl->bat_release_work, t7xx_dpmaif_bat_release_work); + return 0; +} + +void t7xx_dpmaif_bat_wq_rel(struct dpmaif_ctrl *dpmaif_ctrl) +{ + flush_work(&dpmaif_ctrl->bat_release_work); + + if (dpmaif_ctrl->bat_release_wq) { + destroy_workqueue(dpmaif_ctrl->bat_release_wq); + dpmaif_ctrl->bat_release_wq = NULL; + } +} + +/** + * t7xx_dpmaif_rx_stop() - Suspend RX flow. + * @dpmaif_ctrl: Pointer to data path control struct dpmaif_ctrl. + * + * Wait for all the RX work to finish executing and mark the RX queue as paused. 
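+ * Both the suspend path and t7xx_dpmaif_stop() rely on this; the queues are marked as started again by t7xx_dpmaif_start() or on resume.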
+ */ +void t7xx_dpmaif_rx_stop(struct dpmaif_ctrl *dpmaif_ctrl) +{ + unsigned int i; + + for (i = 0; i < DPMAIF_RXQ_NUM; i++) { + struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[i]; + int timeout, value; + + flush_work(&rxq->dpmaif_rxq_work); + + timeout = readx_poll_timeout_atomic(atomic_read, &rxq->rx_processing, value, + !value, 0, DPMAIF_CHECK_INIT_TIMEOUT_US); + if (timeout) + dev_err(dpmaif_ctrl->dev, "Stop RX SW failed\n"); + + /* Ensure RX processing has stopped before we set rxq->que_started to false */ + smp_mb(); + rxq->que_started = false; + } +} + +static void t7xx_dpmaif_stop_rxq(struct dpmaif_rx_queue *rxq) +{ + int cnt, j = 0; + + flush_work(&rxq->dpmaif_rxq_work); + rxq->que_started = false; + + do { + cnt = t7xx_ring_buf_rd_wr_count(rxq->pit_size_cnt, rxq->pit_rd_idx, + rxq->pit_wr_idx, DPMAIF_READ); + + if (++j >= DPMAIF_MAX_CHECK_COUNT) { + dev_err(rxq->dpmaif_ctrl->dev, "Stop RX SW failed, %d\n", cnt); + break; + } + } while (cnt); + + memset(rxq->pit_base, 0, rxq->pit_size_cnt * sizeof(struct dpmaif_pit)); + memset(rxq->bat_req->bat_base, 0, rxq->bat_req->bat_size_cnt * sizeof(struct dpmaif_bat)); + bitmap_zero(rxq->bat_req->bat_bitmap, rxq->bat_req->bat_size_cnt); + memset(&rxq->rx_data_info, 0, sizeof(rxq->rx_data_info)); + + rxq->pit_rd_idx = 0; + rxq->pit_wr_idx = 0; + rxq->pit_release_rd_idx = 0; + rxq->expect_pit_seq = 0; + rxq->pit_remain_release_cnt = 0; + rxq->bat_req->bat_release_rd_idx = 0; + rxq->bat_req->bat_wr_idx = 0; + rxq->bat_frag->bat_release_rd_idx = 0; + rxq->bat_frag->bat_wr_idx = 0; +} + +void t7xx_dpmaif_rx_clear(struct dpmaif_ctrl *dpmaif_ctrl) +{ + int i; + + for (i = 0; i < DPMAIF_RXQ_NUM; i++) + t7xx_dpmaif_stop_rxq(&dpmaif_ctrl->rxq[i]); +} diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h new file mode 100644 index 000000000..182f62dfe --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. 
+ * + * Authors: + * Haijun Liu + * Eliot Lee + * Ricardo Martinez + * + * Contributors: + * Amir Hanania + * Moises Veleta + * Sreehari Kancharla + */ + +#ifndef __T7XX_HIF_DPMA_RX_H__ +#define __T7XX_HIF_DPMA_RX_H__ + +#include +#include + +#include "t7xx_hif_dpmaif.h" + +#define NETIF_MASK GENMASK(4, 0) + +#define PKT_TYPE_IP4 0 +#define PKT_TYPE_IP6 1 + +/* Structure of DL PIT */ +struct dpmaif_pit { + __le32 header; + union { + struct { + __le32 data_addr_l; + __le32 data_addr_h; + __le32 footer; + } pd; + struct { + __le32 params_1; + __le32 params_2; + __le32 params_3; + } msg; + }; +}; + +/* PIT header fields */ +#define PD_PIT_DATA_LEN GENMASK(31, 16) +#define PD_PIT_BUFFER_ID GENMASK(15, 3) +#define PD_PIT_BUFFER_TYPE BIT(2) +#define PD_PIT_CONT BIT(1) +#define PD_PIT_PACKET_TYPE BIT(0) +/* PIT footer fields */ +#define PD_PIT_DLQ_DONE GENMASK(31, 30) +#define PD_PIT_ULQ_DONE GENMASK(29, 24) +#define PD_PIT_HEADER_OFFSET GENMASK(23, 19) +#define PD_PIT_BI_F GENMASK(18, 17) +#define PD_PIT_IG BIT(16) +#define PD_PIT_RES GENMASK(15, 11) +#define PD_PIT_H_BID GENMASK(10, 8) +#define PD_PIT_PIT_SEQ GENMASK(7, 0) + +#define MSG_PIT_DP BIT(31) +#define MSG_PIT_RES GENMASK(30, 27) +#define MSG_PIT_NETWORK_TYPE GENMASK(26, 24) +#define MSG_PIT_CHANNEL_ID GENMASK(23, 16) +#define MSG_PIT_RES2 GENMASK(15, 12) +#define MSG_PIT_HPC_IDX GENMASK(11, 8) +#define MSG_PIT_SRC_QID GENMASK(7, 5) +#define MSG_PIT_ERROR_BIT BIT(4) +#define MSG_PIT_CHECKSUM GENMASK(3, 2) +#define MSG_PIT_CONT BIT(1) +#define MSG_PIT_PACKET_TYPE BIT(0) + +#define MSG_PIT_HP_IDX GENMASK(31, 27) +#define MSG_PIT_CMD GENMASK(26, 24) +#define MSG_PIT_RES3 GENMASK(23, 21) +#define MSG_PIT_FLOW GENMASK(20, 16) +#define MSG_PIT_COUNT GENMASK(15, 0) + +#define MSG_PIT_HASH GENMASK(31, 24) +#define MSG_PIT_RES4 GENMASK(23, 18) +#define MSG_PIT_PRO GENMASK(17, 16) +#define MSG_PIT_VBID GENMASK(15, 3) +#define MSG_PIT_RES5 GENMASK(2, 0) + +#define MSG_PIT_DLQ_DONE GENMASK(31, 30) +#define MSG_PIT_ULQ_DONE GENMASK(29, 24) +#define MSG_PIT_IP BIT(23) +#define MSG_PIT_RES6 BIT(22) +#define MSG_PIT_MR GENMASK(21, 20) +#define MSG_PIT_RES7 GENMASK(19, 17) +#define MSG_PIT_IG BIT(16) +#define MSG_PIT_RES8 GENMASK(15, 11) +#define MSG_PIT_H_BID GENMASK(10, 8) +#define MSG_PIT_PIT_SEQ GENMASK(7, 0) + +int t7xx_dpmaif_rxq_init(struct dpmaif_rx_queue *queue); +void t7xx_dpmaif_rx_clear(struct dpmaif_ctrl *dpmaif_ctrl); +int t7xx_dpmaif_bat_rel_wq_alloc(struct dpmaif_ctrl *dpmaif_ctrl); +int t7xx_dpmaif_rx_buf_alloc(struct dpmaif_ctrl *dpmaif_ctrl, + const struct dpmaif_bat_request *bat_req, + const unsigned int q_num, const unsigned int buf_cnt, + const bool initial); +int t7xx_dpmaif_rx_frag_alloc(struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req, + const unsigned int buf_cnt, const bool first_time); +void t7xx_dpmaif_rx_stop(struct dpmaif_ctrl *dpmaif_ctrl); +void t7xx_dpmaif_irq_rx_done(struct dpmaif_ctrl *dpmaif_ctrl, const unsigned int que_mask); +void t7xx_dpmaif_rxq_free(struct dpmaif_rx_queue *queue); +void t7xx_dpmaif_bat_wq_rel(struct dpmaif_ctrl *dpmaif_ctrl); +int t7xx_dpmaif_bat_alloc(const struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req, + const enum bat_type buf_type); +void t7xx_dpmaif_bat_free(const struct dpmaif_ctrl *dpmaif_ctrl, + struct dpmaif_bat_request *bat_req); + +#endif /* __T7XX_HIF_DPMA_RX_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c new file mode 100644 index 000000000..46514208d --- /dev/null +++ 
b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c @@ -0,0 +1,683 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Amir Hanania + * Haijun Liu + * Eliot Lee + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Chiranjeevi Rapolu + * Sreehari Kancharla + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_dpmaif.h" +#include "t7xx_hif_dpmaif.h" +#include "t7xx_hif_dpmaif_tx.h" +#include "t7xx_pci.h" + +#define DPMAIF_SKB_TX_BURST_CNT 5 +#define DPMAIF_DRB_LIST_LEN 6144 + +/* DRB dtype */ +#define DES_DTYP_PD 0 +#define DES_DTYP_MSG 1 + +static unsigned int t7xx_dpmaif_update_drb_rd_idx(struct dpmaif_ctrl *dpmaif_ctrl, + unsigned int q_num) +{ + struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num]; + unsigned int old_sw_rd_idx, new_hw_rd_idx, drb_cnt; + unsigned long flags; + + if (!txq->que_started) + return 0; + + old_sw_rd_idx = txq->drb_rd_idx; + new_hw_rd_idx = t7xx_dpmaif_ul_get_rd_idx(&dpmaif_ctrl->hw_info, q_num); + if (new_hw_rd_idx >= DPMAIF_DRB_LIST_LEN) { + dev_err(dpmaif_ctrl->dev, "Out of range read index: %u\n", new_hw_rd_idx); + return 0; + } + + if (old_sw_rd_idx <= new_hw_rd_idx) + drb_cnt = new_hw_rd_idx - old_sw_rd_idx; + else + drb_cnt = txq->drb_size_cnt - old_sw_rd_idx + new_hw_rd_idx; + + spin_lock_irqsave(&txq->tx_lock, flags); + txq->drb_rd_idx = new_hw_rd_idx; + spin_unlock_irqrestore(&txq->tx_lock, flags); + + return drb_cnt; +} + +static unsigned int t7xx_dpmaif_release_tx_buffer(struct dpmaif_ctrl *dpmaif_ctrl, + unsigned int q_num, unsigned int release_cnt) +{ + struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num]; + struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks; + struct dpmaif_drb_skb *cur_drb_skb, *drb_skb_base; + struct dpmaif_drb *cur_drb, *drb_base; + unsigned int drb_cnt, i, cur_idx; + unsigned long flags; + + drb_skb_base = txq->drb_skb_base; + drb_base = txq->drb_base; + + spin_lock_irqsave(&txq->tx_lock, flags); + drb_cnt = txq->drb_size_cnt; + cur_idx = txq->drb_release_rd_idx; + spin_unlock_irqrestore(&txq->tx_lock, flags); + + for (i = 0; i < release_cnt; i++) { + cur_drb = drb_base + cur_idx; + if (FIELD_GET(DRB_HDR_DTYP, le32_to_cpu(cur_drb->header)) == DES_DTYP_PD) { + cur_drb_skb = drb_skb_base + cur_idx; + if (!cur_drb_skb->is_msg) + dma_unmap_single(dpmaif_ctrl->dev, cur_drb_skb->bus_addr, + cur_drb_skb->data_len, DMA_TO_DEVICE); + + if (!FIELD_GET(DRB_HDR_CONT, le32_to_cpu(cur_drb->header))) { + if (!cur_drb_skb->skb) { + dev_err(dpmaif_ctrl->dev, + "txq%u: DRB check fail, invalid skb\n", q_num); + continue; + } + + dev_kfree_skb_any(cur_drb_skb->skb); + } + + cur_drb_skb->skb = NULL; + } + + spin_lock_irqsave(&txq->tx_lock, flags); + cur_idx = t7xx_ring_buf_get_next_wr_idx(drb_cnt, cur_idx); + txq->drb_release_rd_idx = cur_idx; + spin_unlock_irqrestore(&txq->tx_lock, flags); + + if (atomic_inc_return(&txq->tx_budget) > txq->drb_size_cnt / 8) + cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_IRQ, txq->index); + } + + if (FIELD_GET(DRB_HDR_CONT, le32_to_cpu(cur_drb->header))) + dev_err(dpmaif_ctrl->dev, "txq%u: DRB not marked as the last one\n", q_num); + + return i; +} + +static int t7xx_dpmaif_tx_release(struct dpmaif_ctrl *dpmaif_ctrl, + unsigned int q_num, unsigned int budget) +{ + struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num]; + 
unsigned int rel_cnt, real_rel_cnt; + + /* Update read index from HW */ + t7xx_dpmaif_update_drb_rd_idx(dpmaif_ctrl, q_num); + + rel_cnt = t7xx_ring_buf_rd_wr_count(txq->drb_size_cnt, txq->drb_release_rd_idx, + txq->drb_rd_idx, DPMAIF_READ); + + real_rel_cnt = min_not_zero(budget, rel_cnt); + if (real_rel_cnt) + real_rel_cnt = t7xx_dpmaif_release_tx_buffer(dpmaif_ctrl, q_num, real_rel_cnt); + + return real_rel_cnt < rel_cnt ? -EAGAIN : 0; +} + +static bool t7xx_dpmaif_drb_ring_not_empty(struct dpmaif_tx_queue *txq) +{ + return !!t7xx_dpmaif_update_drb_rd_idx(txq->dpmaif_ctrl, txq->index); +} + +static void t7xx_dpmaif_tx_done(struct work_struct *work) +{ + struct dpmaif_tx_queue *txq = container_of(work, struct dpmaif_tx_queue, dpmaif_tx_work); + struct dpmaif_ctrl *dpmaif_ctrl = txq->dpmaif_ctrl; + struct dpmaif_hw_info *hw_info; + int ret; + + ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev); + if (ret < 0 && ret != -EACCES) + return; + + /* The device may be in low power state. Disable sleep if needed */ + t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev); + if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) { + hw_info = &dpmaif_ctrl->hw_info; + ret = t7xx_dpmaif_tx_release(dpmaif_ctrl, txq->index, txq->drb_size_cnt); + if (ret == -EAGAIN || + (t7xx_dpmaif_ul_clr_done(hw_info, txq->index) && + t7xx_dpmaif_drb_ring_not_empty(txq))) { + queue_work(dpmaif_ctrl->txq[txq->index].worker, + &dpmaif_ctrl->txq[txq->index].dpmaif_tx_work); + /* Give the device time to enter the low power state */ + t7xx_dpmaif_clr_ip_busy_sts(hw_info); + } else { + t7xx_dpmaif_clr_ip_busy_sts(hw_info); + t7xx_dpmaif_unmask_ulq_intr(hw_info, txq->index); + } + } + + t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev); + pm_runtime_mark_last_busy(dpmaif_ctrl->dev); + pm_runtime_put_autosuspend(dpmaif_ctrl->dev); +} + +static void t7xx_setup_msg_drb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num, + unsigned int cur_idx, unsigned int pkt_len, unsigned int count_l, + unsigned int channel_id) +{ + struct dpmaif_drb *drb_base = dpmaif_ctrl->txq[q_num].drb_base; + struct dpmaif_drb *drb = drb_base + cur_idx; + + drb->header = cpu_to_le32(FIELD_PREP(DRB_HDR_DTYP, DES_DTYP_MSG) | + FIELD_PREP(DRB_HDR_CONT, 1) | + FIELD_PREP(DRB_HDR_DATA_LEN, pkt_len)); + + drb->msg.msg_hdr = cpu_to_le32(FIELD_PREP(DRB_MSG_COUNT_L, count_l) | + FIELD_PREP(DRB_MSG_CHANNEL_ID, channel_id) | + FIELD_PREP(DRB_MSG_L4_CHK, 1)); +} + +static void t7xx_setup_payload_drb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num, + unsigned int cur_idx, dma_addr_t data_addr, + unsigned int pkt_size, bool last_one) +{ + struct dpmaif_drb *drb_base = dpmaif_ctrl->txq[q_num].drb_base; + struct dpmaif_drb *drb = drb_base + cur_idx; + u32 header; + + header = FIELD_PREP(DRB_HDR_DTYP, DES_DTYP_PD) | FIELD_PREP(DRB_HDR_DATA_LEN, pkt_size); + if (!last_one) + header |= FIELD_PREP(DRB_HDR_CONT, 1); + + drb->header = cpu_to_le32(header); + drb->pd.data_addr_l = cpu_to_le32(lower_32_bits(data_addr)); + drb->pd.data_addr_h = cpu_to_le32(upper_32_bits(data_addr)); +} + +static void t7xx_record_drb_skb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num, + unsigned int cur_idx, struct sk_buff *skb, bool is_msg, + bool is_frag, bool is_last_one, dma_addr_t bus_addr, + unsigned int data_len) +{ + struct dpmaif_drb_skb *drb_skb_base = dpmaif_ctrl->txq[q_num].drb_skb_base; + struct dpmaif_drb_skb *drb_skb = drb_skb_base + cur_idx; + + drb_skb->skb = skb; + drb_skb->bus_addr = bus_addr; + drb_skb->data_len = data_len; + drb_skb->index = cur_idx; + drb_skb->is_msg = 
is_msg; + drb_skb->is_frag = is_frag; + drb_skb->is_last = is_last_one; +} + +static int t7xx_dpmaif_add_skb_to_ring(struct dpmaif_ctrl *dpmaif_ctrl, struct sk_buff *skb) +{ + struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks; + unsigned int wr_cnt, send_cnt, payload_cnt; + unsigned int cur_idx, drb_wr_idx_backup; + struct skb_shared_info *shinfo; + struct dpmaif_tx_queue *txq; + struct t7xx_skb_cb *skb_cb; + unsigned long flags; + + skb_cb = T7XX_SKB_CB(skb); + txq = &dpmaif_ctrl->txq[skb_cb->txq_number]; + if (!txq->que_started || dpmaif_ctrl->state != DPMAIF_STATE_PWRON) + return -ENODEV; + + atomic_set(&txq->tx_processing, 1); + /* Ensure tx_processing is changed to 1 before actually begin TX flow */ + smp_mb(); + + shinfo = skb_shinfo(skb); + if (shinfo->frag_list) + dev_warn_ratelimited(dpmaif_ctrl->dev, "frag_list not supported\n"); + + payload_cnt = shinfo->nr_frags + 1; + /* nr_frags: frag cnt, 1: skb->data, 1: msg DRB */ + send_cnt = payload_cnt + 1; + + spin_lock_irqsave(&txq->tx_lock, flags); + cur_idx = txq->drb_wr_idx; + drb_wr_idx_backup = cur_idx; + txq->drb_wr_idx += send_cnt; + if (txq->drb_wr_idx >= txq->drb_size_cnt) + txq->drb_wr_idx -= txq->drb_size_cnt; + t7xx_setup_msg_drb(dpmaif_ctrl, txq->index, cur_idx, skb->len, 0, skb_cb->netif_idx); + t7xx_record_drb_skb(dpmaif_ctrl, txq->index, cur_idx, skb, true, 0, 0, 0, 0); + spin_unlock_irqrestore(&txq->tx_lock, flags); + + for (wr_cnt = 0; wr_cnt < payload_cnt; wr_cnt++) { + bool is_frag, is_last_one = wr_cnt == payload_cnt - 1; + unsigned int data_len; + dma_addr_t bus_addr; + void *data_addr; + + if (!wr_cnt) { + data_len = skb_headlen(skb); + data_addr = skb->data; + is_frag = false; + } else { + skb_frag_t *frag = shinfo->frags + wr_cnt - 1; + + data_len = skb_frag_size(frag); + data_addr = skb_frag_address(frag); + is_frag = true; + } + + bus_addr = dma_map_single(dpmaif_ctrl->dev, data_addr, data_len, DMA_TO_DEVICE); + if (dma_mapping_error(dpmaif_ctrl->dev, bus_addr)) + goto unmap_buffers; + + cur_idx = t7xx_ring_buf_get_next_wr_idx(txq->drb_size_cnt, cur_idx); + + spin_lock_irqsave(&txq->tx_lock, flags); + t7xx_setup_payload_drb(dpmaif_ctrl, txq->index, cur_idx, bus_addr, data_len, + is_last_one); + t7xx_record_drb_skb(dpmaif_ctrl, txq->index, cur_idx, skb, false, is_frag, + is_last_one, bus_addr, data_len); + spin_unlock_irqrestore(&txq->tx_lock, flags); + } + + if (atomic_sub_return(send_cnt, &txq->tx_budget) <= (MAX_SKB_FRAGS + 2)) + cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_FULL, txq->index); + + atomic_set(&txq->tx_processing, 0); + + return 0; + +unmap_buffers: + while (wr_cnt--) { + struct dpmaif_drb_skb *drb_skb = txq->drb_skb_base; + + cur_idx = cur_idx ? 
cur_idx - 1 : txq->drb_size_cnt - 1; + drb_skb += cur_idx; + dma_unmap_single(dpmaif_ctrl->dev, drb_skb->bus_addr, + drb_skb->data_len, DMA_TO_DEVICE); + } + + txq->drb_wr_idx = drb_wr_idx_backup; + atomic_set(&txq->tx_processing, 0); + + return -ENOMEM; +} + +static bool t7xx_tx_lists_are_all_empty(const struct dpmaif_ctrl *dpmaif_ctrl) +{ + int i; + + for (i = 0; i < DPMAIF_TXQ_NUM; i++) { + if (!skb_queue_empty(&dpmaif_ctrl->txq[i].tx_skb_head)) + return false; + } + + return true; +} + +/* Currently, only the default TX queue is used */ +static struct dpmaif_tx_queue *t7xx_select_tx_queue(struct dpmaif_ctrl *dpmaif_ctrl) +{ + struct dpmaif_tx_queue *txq; + + txq = &dpmaif_ctrl->txq[DPMAIF_TX_DEFAULT_QUEUE]; + if (!txq->que_started) + return NULL; + + return txq; +} + +static unsigned int t7xx_txq_drb_wr_available(struct dpmaif_tx_queue *txq) +{ + return t7xx_ring_buf_rd_wr_count(txq->drb_size_cnt, txq->drb_release_rd_idx, + txq->drb_wr_idx, DPMAIF_WRITE); +} + +static unsigned int t7xx_skb_drb_cnt(struct sk_buff *skb) +{ + /* Normal DRB (frags data + skb linear data) + msg DRB */ + return skb_shinfo(skb)->nr_frags + 2; +} + +static int t7xx_txq_burst_send_skb(struct dpmaif_tx_queue *txq) +{ + unsigned int drb_remain_cnt, i; + unsigned int send_drb_cnt; + int drb_cnt = 0; + int ret = 0; + + drb_remain_cnt = t7xx_txq_drb_wr_available(txq); + + for (i = 0; i < DPMAIF_SKB_TX_BURST_CNT; i++) { + struct sk_buff *skb; + + skb = skb_peek(&txq->tx_skb_head); + if (!skb) + break; + + send_drb_cnt = t7xx_skb_drb_cnt(skb); + if (drb_remain_cnt < send_drb_cnt) { + drb_remain_cnt = t7xx_txq_drb_wr_available(txq); + continue; + } + + drb_remain_cnt -= send_drb_cnt; + + ret = t7xx_dpmaif_add_skb_to_ring(txq->dpmaif_ctrl, skb); + if (ret < 0) { + dev_err(txq->dpmaif_ctrl->dev, + "Failed to add skb to device's ring: %d\n", ret); + break; + } + + drb_cnt += send_drb_cnt; + skb_unlink(skb, &txq->tx_skb_head); + } + + if (drb_cnt > 0) + return drb_cnt; + + return ret; +} + +static void t7xx_do_tx_hw_push(struct dpmaif_ctrl *dpmaif_ctrl) +{ + bool wait_disable_sleep = true; + + do { + struct dpmaif_tx_queue *txq; + int drb_send_cnt; + + txq = t7xx_select_tx_queue(dpmaif_ctrl); + if (!txq) + return; + + drb_send_cnt = t7xx_txq_burst_send_skb(txq); + if (drb_send_cnt <= 0) { + usleep_range(10, 20); + cond_resched(); + continue; + } + + /* Wait for the PCIe resource to unlock */ + if (wait_disable_sleep) { + if (!t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) + return; + + wait_disable_sleep = false; + } + + t7xx_dpmaif_ul_update_hw_drb_cnt(&dpmaif_ctrl->hw_info, txq->index, + drb_send_cnt * DPMAIF_UL_DRB_SIZE_WORD); + + cond_resched(); + } while (!t7xx_tx_lists_are_all_empty(dpmaif_ctrl) && !kthread_should_stop() && + (dpmaif_ctrl->state == DPMAIF_STATE_PWRON)); +} + +static int t7xx_dpmaif_tx_hw_push_thread(void *arg) +{ + struct dpmaif_ctrl *dpmaif_ctrl = arg; + int ret; + + while (!kthread_should_stop()) { + if (t7xx_tx_lists_are_all_empty(dpmaif_ctrl) || + dpmaif_ctrl->state != DPMAIF_STATE_PWRON) { + if (wait_event_interruptible(dpmaif_ctrl->tx_wq, + (!t7xx_tx_lists_are_all_empty(dpmaif_ctrl) && + dpmaif_ctrl->state == DPMAIF_STATE_PWRON) || + kthread_should_stop())) + continue; + + if (kthread_should_stop()) + break; + } + + ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev); + if (ret < 0 && ret != -EACCES) + return ret; + + t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev); + t7xx_do_tx_hw_push(dpmaif_ctrl); + t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev); + 
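/* TX burst pushed: refresh the autosuspend timer and drop the runtime PM reference taken at the top of the loop */ +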
pm_runtime_mark_last_busy(dpmaif_ctrl->dev); + pm_runtime_put_autosuspend(dpmaif_ctrl->dev); + } + + return 0; +} + +int t7xx_dpmaif_tx_thread_init(struct dpmaif_ctrl *dpmaif_ctrl) +{ + init_waitqueue_head(&dpmaif_ctrl->tx_wq); + dpmaif_ctrl->tx_thread = kthread_run(t7xx_dpmaif_tx_hw_push_thread, + dpmaif_ctrl, "dpmaif_tx_hw_push"); + return PTR_ERR_OR_ZERO(dpmaif_ctrl->tx_thread); +} + +void t7xx_dpmaif_tx_thread_rel(struct dpmaif_ctrl *dpmaif_ctrl) +{ + if (dpmaif_ctrl->tx_thread) + kthread_stop(dpmaif_ctrl->tx_thread); +} + +/** + * t7xx_dpmaif_tx_send_skb() - Add skb to the transmit queue. + * @dpmaif_ctrl: Pointer to struct dpmaif_ctrl. + * @txq_number: Queue number to xmit on. + * @skb: Pointer to the skb to transmit. + * + * Add the skb to the queue of skbs to be transmitted. + * Wake up the thread that pushes the skbs from the queue to the HW. + * + * Return: + * * 0 - Success. + * * -EBUSY - Tx budget exhausted. + * In normal circumstances t7xx_dpmaif_add_skb_to_ring() must report the txq full + * state to prevent this error condition. + */ +int t7xx_dpmaif_tx_send_skb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int txq_number, + struct sk_buff *skb) +{ + struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[txq_number]; + struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks; + struct t7xx_skb_cb *skb_cb; + + if (atomic_read(&txq->tx_budget) <= t7xx_skb_drb_cnt(skb)) { + cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_FULL, txq_number); + return -EBUSY; + } + + skb_cb = T7XX_SKB_CB(skb); + skb_cb->txq_number = txq_number; + skb_queue_tail(&txq->tx_skb_head, skb); + wake_up(&dpmaif_ctrl->tx_wq); + + return 0; +} + +void t7xx_dpmaif_irq_tx_done(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int que_mask) +{ + int i; + + for (i = 0; i < DPMAIF_TXQ_NUM; i++) { + if (que_mask & BIT(i)) + queue_work(dpmaif_ctrl->txq[i].worker, &dpmaif_ctrl->txq[i].dpmaif_tx_work); + } +} + +static int t7xx_dpmaif_tx_drb_buf_init(struct dpmaif_tx_queue *txq) +{ + size_t brb_skb_size, brb_pd_size; + + brb_pd_size = DPMAIF_DRB_LIST_LEN * sizeof(struct dpmaif_drb); + brb_skb_size = DPMAIF_DRB_LIST_LEN * sizeof(struct dpmaif_drb_skb); + + txq->drb_size_cnt = DPMAIF_DRB_LIST_LEN; + + /* For HW && AP SW */ + txq->drb_base = dma_alloc_coherent(txq->dpmaif_ctrl->dev, brb_pd_size, + &txq->drb_bus_addr, GFP_KERNEL | __GFP_ZERO); + if (!txq->drb_base) + return -ENOMEM; + + /* For AP SW to record the skb information */ + txq->drb_skb_base = devm_kzalloc(txq->dpmaif_ctrl->dev, brb_skb_size, GFP_KERNEL); + if (!txq->drb_skb_base) { + dma_free_coherent(txq->dpmaif_ctrl->dev, brb_pd_size, + txq->drb_base, txq->drb_bus_addr); + return -ENOMEM; + } + + return 0; +} + +static void t7xx_dpmaif_tx_free_drb_skb(struct dpmaif_tx_queue *txq) +{ + struct dpmaif_drb_skb *drb_skb, *drb_skb_base = txq->drb_skb_base; + unsigned int i; + + if (!drb_skb_base) + return; + + for (i = 0; i < txq->drb_size_cnt; i++) { + drb_skb = drb_skb_base + i; + if (!drb_skb->skb) + continue; + + if (!drb_skb->is_msg) + dma_unmap_single(txq->dpmaif_ctrl->dev, drb_skb->bus_addr, + drb_skb->data_len, DMA_TO_DEVICE); + + if (drb_skb->is_last) { + dev_kfree_skb(drb_skb->skb); + drb_skb->skb = NULL; + } + } +} + +static void t7xx_dpmaif_tx_drb_buf_rel(struct dpmaif_tx_queue *txq) +{ + if (txq->drb_base) + dma_free_coherent(txq->dpmaif_ctrl->dev, + txq->drb_size_cnt * sizeof(struct dpmaif_drb), + txq->drb_base, txq->drb_bus_addr); + + t7xx_dpmaif_tx_free_drb_skb(txq); +} + +/** + * t7xx_dpmaif_txq_init() - Initialize TX queue. 
+ * @txq: Pointer to struct dpmaif_tx_queue. + * + * Initialize the TX queue data structure and allocate memory for it to use. + * + * Return: + * * 0 - Success. + * * -ERROR - Error code from failure sub-initializations. + */ +int t7xx_dpmaif_txq_init(struct dpmaif_tx_queue *txq) +{ + int ret; + + skb_queue_head_init(&txq->tx_skb_head); + init_waitqueue_head(&txq->req_wq); + atomic_set(&txq->tx_budget, DPMAIF_DRB_LIST_LEN); + + ret = t7xx_dpmaif_tx_drb_buf_init(txq); + if (ret) { + dev_err(txq->dpmaif_ctrl->dev, "Failed to initialize DRB buffers: %d\n", ret); + return ret; + } + + txq->worker = alloc_workqueue("md_dpmaif_tx%d_worker", WQ_UNBOUND | WQ_MEM_RECLAIM | + (txq->index ? 0 : WQ_HIGHPRI), 1, txq->index); + if (!txq->worker) + return -ENOMEM; + + INIT_WORK(&txq->dpmaif_tx_work, t7xx_dpmaif_tx_done); + spin_lock_init(&txq->tx_lock); + + return 0; +} + +void t7xx_dpmaif_txq_free(struct dpmaif_tx_queue *txq) +{ + if (txq->worker) + destroy_workqueue(txq->worker); + + skb_queue_purge(&txq->tx_skb_head); + t7xx_dpmaif_tx_drb_buf_rel(txq); +} + +void t7xx_dpmaif_tx_stop(struct dpmaif_ctrl *dpmaif_ctrl) +{ + int i; + + for (i = 0; i < DPMAIF_TXQ_NUM; i++) { + struct dpmaif_tx_queue *txq; + int count = 0; + + txq = &dpmaif_ctrl->txq[i]; + txq->que_started = false; + /* Make sure TXQ is disabled */ + smp_mb(); + + /* Wait for active Tx to be done */ + while (atomic_read(&txq->tx_processing)) { + if (++count >= DPMAIF_MAX_CHECK_COUNT) { + dev_err(dpmaif_ctrl->dev, "TX queue stop failed\n"); + break; + } + } + } +} + +static void t7xx_dpmaif_txq_flush_rel(struct dpmaif_tx_queue *txq) +{ + txq->que_started = false; + + cancel_work_sync(&txq->dpmaif_tx_work); + flush_work(&txq->dpmaif_tx_work); + t7xx_dpmaif_tx_free_drb_skb(txq); + + txq->drb_rd_idx = 0; + txq->drb_wr_idx = 0; + txq->drb_release_rd_idx = 0; +} + +void t7xx_dpmaif_tx_clear(struct dpmaif_ctrl *dpmaif_ctrl) +{ + int i; + + for (i = 0; i < DPMAIF_TXQ_NUM; i++) + t7xx_dpmaif_txq_flush_rel(&dpmaif_ctrl->txq[i]); +} diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.h b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.h new file mode 100644 index 000000000..ca9b9ea2d --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. 
+ * + * Authors: + * Haijun Liu + * Eliot Lee + * Ricardo Martinez + * + * Contributors: + * Amir Hanania + * Chiranjeevi Rapolu + * Moises Veleta + * Sreehari Kancharla + */ + +#ifndef __T7XX_HIF_DPMA_TX_H__ +#define __T7XX_HIF_DPMA_TX_H__ + +#include +#include +#include + +#include "t7xx_hif_dpmaif.h" + +#define DPMAIF_TX_DEFAULT_QUEUE 0 + +struct dpmaif_drb { + __le32 header; + union { + struct { + __le32 data_addr_l; + __le32 data_addr_h; + } pd; + struct { + __le32 msg_hdr; + __le32 reserved1; + } msg; + }; + __le32 reserved2; +}; + +/* Header fields */ +#define DRB_HDR_DATA_LEN GENMASK(31, 16) +#define DRB_HDR_RESERVED GENMASK(15, 3) +#define DRB_HDR_CONT BIT(2) +#define DRB_HDR_DTYP GENMASK(1, 0) + +#define DRB_MSG_DW2_RES GENMASK(31, 30) +#define DRB_MSG_L4_CHK BIT(29) +#define DRB_MSG_IP_CHK BIT(28) +#define DRB_MSG_RESERVED BIT(27) +#define DRB_MSG_NETWORK_TYPE GENMASK(26, 24) +#define DRB_MSG_CHANNEL_ID GENMASK(23, 16) +#define DRB_MSG_COUNT_L GENMASK(15, 0) + +struct dpmaif_drb_skb { + struct sk_buff *skb; + dma_addr_t bus_addr; + unsigned int data_len; + u16 index:13; + u16 is_msg:1; + u16 is_frag:1; + u16 is_last:1; +}; + +int t7xx_dpmaif_tx_send_skb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int txq_number, + struct sk_buff *skb); +void t7xx_dpmaif_tx_thread_rel(struct dpmaif_ctrl *dpmaif_ctrl); +int t7xx_dpmaif_tx_thread_init(struct dpmaif_ctrl *dpmaif_ctrl); +void t7xx_dpmaif_txq_free(struct dpmaif_tx_queue *txq); +void t7xx_dpmaif_irq_tx_done(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int que_mask); +int t7xx_dpmaif_txq_init(struct dpmaif_tx_queue *txq); +void t7xx_dpmaif_tx_stop(struct dpmaif_ctrl *dpmaif_ctrl); +void t7xx_dpmaif_tx_clear(struct dpmaif_ctrl *dpmaif_ctrl); + +#endif /* __T7XX_HIF_DPMA_TX_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_mhccif.c b/drivers/net/wwan/t7xx/t7xx_mhccif.c new file mode 100644 index 000000000..3ee18d46f --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_mhccif.c @@ -0,0 +1,122 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. 
+ * + * Authors: + * Haijun Liu + * Sreehari Kancharla + * + * Contributors: + * Amir Hanania + * Ricardo Martinez + */ + +#include +#include +#include +#include +#include + +#include "t7xx_mhccif.h" +#include "t7xx_modem_ops.h" +#include "t7xx_pci.h" +#include "t7xx_pcie_mac.h" +#include "t7xx_reg.h" + +#define D2H_INT_SR_ACK (D2H_INT_SUSPEND_ACK | \ + D2H_INT_RESUME_ACK | \ + D2H_INT_SUSPEND_ACK_AP | \ + D2H_INT_RESUME_ACK_AP) + +static void t7xx_mhccif_clear_interrupts(struct t7xx_pci_dev *t7xx_dev, u32 mask) +{ + void __iomem *mhccif_pbase = t7xx_dev->base_addr.mhccif_rc_base; + + /* Clear level 2 interrupt */ + iowrite32(mask, mhccif_pbase + REG_EP2RC_SW_INT_ACK); + /* Ensure write is complete */ + t7xx_mhccif_read_sw_int_sts(t7xx_dev); + /* Clear level 1 interrupt */ + t7xx_pcie_mac_clear_int_status(t7xx_dev, MHCCIF_INT); +} + +static irqreturn_t t7xx_mhccif_isr_thread(int irq, void *data) +{ + struct t7xx_pci_dev *t7xx_dev = data; + u32 int_status, val; + + val = T7XX_L1_1_BIT(1) | T7XX_L1_2_BIT(1); + iowrite32(val, IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR); + + int_status = t7xx_mhccif_read_sw_int_sts(t7xx_dev); + if (int_status & D2H_SW_INT_MASK) { + int ret = t7xx_pci_mhccif_isr(t7xx_dev); + + if (ret) + dev_err(&t7xx_dev->pdev->dev, "PCI MHCCIF ISR failure: %d", ret); + } + + t7xx_mhccif_clear_interrupts(t7xx_dev, int_status); + + if (int_status & D2H_INT_DS_LOCK_ACK) + complete_all(&t7xx_dev->sleep_lock_acquire); + + if (int_status & D2H_INT_SR_ACK) + complete(&t7xx_dev->pm_sr_ack); + + iowrite32(T7XX_L1_BIT(1), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); + + int_status = t7xx_mhccif_read_sw_int_sts(t7xx_dev); + if (!int_status) { + val = T7XX_L1_1_BIT(1) | T7XX_L1_2_BIT(1); + iowrite32(val, IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); + } + + t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT); + return IRQ_HANDLED; +} + +u32 t7xx_mhccif_read_sw_int_sts(struct t7xx_pci_dev *t7xx_dev) +{ + return ioread32(t7xx_dev->base_addr.mhccif_rc_base + REG_EP2RC_SW_INT_STS); +} + +void t7xx_mhccif_mask_set(struct t7xx_pci_dev *t7xx_dev, u32 val) +{ + iowrite32(val, t7xx_dev->base_addr.mhccif_rc_base + REG_EP2RC_SW_INT_EAP_MASK_SET); +} + +void t7xx_mhccif_mask_clr(struct t7xx_pci_dev *t7xx_dev, u32 val) +{ + iowrite32(val, t7xx_dev->base_addr.mhccif_rc_base + REG_EP2RC_SW_INT_EAP_MASK_CLR); +} + +u32 t7xx_mhccif_mask_get(struct t7xx_pci_dev *t7xx_dev) +{ + return ioread32(t7xx_dev->base_addr.mhccif_rc_base + REG_EP2RC_SW_INT_EAP_MASK); +} + +static irqreturn_t t7xx_mhccif_isr_handler(int irq, void *data) +{ + return IRQ_WAKE_THREAD; +} + +void t7xx_mhccif_init(struct t7xx_pci_dev *t7xx_dev) +{ + t7xx_dev->base_addr.mhccif_rc_base = t7xx_dev->base_addr.pcie_ext_reg_base + + MHCCIF_RC_DEV_BASE - + t7xx_dev->base_addr.pcie_dev_reg_trsl_addr; + + t7xx_dev->intr_handler[MHCCIF_INT] = t7xx_mhccif_isr_handler; + t7xx_dev->intr_thread[MHCCIF_INT] = t7xx_mhccif_isr_thread; + t7xx_dev->callback_param[MHCCIF_INT] = t7xx_dev; +} + +void t7xx_mhccif_h2d_swint_trigger(struct t7xx_pci_dev *t7xx_dev, u32 channel) +{ + void __iomem *mhccif_pbase = t7xx_dev->base_addr.mhccif_rc_base; + + iowrite32(BIT(channel), mhccif_pbase + REG_RC2EP_SW_BSY); + iowrite32(channel, mhccif_pbase + REG_RC2EP_SW_TCHNUM); +} diff --git a/drivers/net/wwan/t7xx/t7xx_mhccif.h b/drivers/net/wwan/t7xx/t7xx_mhccif.h new file mode 100644 index 000000000..209b386bc --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_mhccif.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. 
+ * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Haijun Liu + * Sreehari Kancharla + * + * Contributors: + * Amir Hanania + * Ricardo Martinez + */ + +#ifndef __T7XX_MHCCIF_H__ +#define __T7XX_MHCCIF_H__ + +#include + +#include "t7xx_pci.h" +#include "t7xx_reg.h" + +#define D2H_SW_INT_MASK (D2H_INT_EXCEPTION_INIT | \ + D2H_INT_EXCEPTION_INIT_DONE | \ + D2H_INT_EXCEPTION_CLEARQ_DONE | \ + D2H_INT_EXCEPTION_ALLQ_RESET | \ + D2H_INT_PORT_ENUM | \ + D2H_INT_ASYNC_MD_HK) + +void t7xx_mhccif_mask_set(struct t7xx_pci_dev *t7xx_dev, u32 val); +void t7xx_mhccif_mask_clr(struct t7xx_pci_dev *t7xx_dev, u32 val); +u32 t7xx_mhccif_mask_get(struct t7xx_pci_dev *t7xx_dev); +void t7xx_mhccif_init(struct t7xx_pci_dev *t7xx_dev); +u32 t7xx_mhccif_read_sw_int_sts(struct t7xx_pci_dev *t7xx_dev); +void t7xx_mhccif_h2d_swint_trigger(struct t7xx_pci_dev *t7xx_dev, u32 channel); + +#endif /*__T7XX_MHCCIF_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.c b/drivers/net/wwan/t7xx/t7xx_modem_ops.c new file mode 100644 index 000000000..7d0f5e4f0 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.c @@ -0,0 +1,729 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Haijun Liu + * Eliot Lee + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Amir Hanania + * Chiranjeevi Rapolu + * Sreehari Kancharla + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_cldma.h" +#include "t7xx_hif_cldma.h" +#include "t7xx_mhccif.h" +#include "t7xx_modem_ops.h" +#include "t7xx_netdev.h" +#include "t7xx_pci.h" +#include "t7xx_pcie_mac.h" +#include "t7xx_port.h" +#include "t7xx_port_proxy.h" +#include "t7xx_reg.h" +#include "t7xx_state_monitor.h" + +#define RT_ID_MD_PORT_ENUM 0 +/* Modem feature query identification code - "ICCC" */ +#define MD_FEATURE_QUERY_ID 0x49434343 + +#define FEATURE_VER GENMASK(7, 4) +#define FEATURE_MSK GENMASK(3, 0) + +#define RGU_RESET_DELAY_MS 10 +#define PORT_RESET_DELAY_MS 2000 +#define EX_HS_TIMEOUT_MS 5000 +#define EX_HS_POLL_DELAY_MS 10 + +enum mtk_feature_support_type { + MTK_FEATURE_DOES_NOT_EXIST, + MTK_FEATURE_NOT_SUPPORTED, + MTK_FEATURE_MUST_BE_SUPPORTED, +}; + +static unsigned int t7xx_get_interrupt_status(struct t7xx_pci_dev *t7xx_dev) +{ + return t7xx_mhccif_read_sw_int_sts(t7xx_dev) & D2H_SW_INT_MASK; +} + +/** + * t7xx_pci_mhccif_isr() - Process MHCCIF interrupts. + * @t7xx_dev: MTK device. + * + * Check the interrupt status and queue commands accordingly. + * + * Returns: + ** 0 - Success. + ** -EINVAL - Failure to get FSM control. 
+ */ +int t7xx_pci_mhccif_isr(struct t7xx_pci_dev *t7xx_dev) +{ + struct t7xx_modem *md = t7xx_dev->md; + struct t7xx_fsm_ctl *ctl; + unsigned int int_sta; + int ret = 0; + u32 mask; + + ctl = md->fsm_ctl; + if (!ctl) { + dev_err_ratelimited(&t7xx_dev->pdev->dev, + "MHCCIF interrupt received before initializing MD monitor\n"); + return -EINVAL; + } + + spin_lock_bh(&md->exp_lock); + int_sta = t7xx_get_interrupt_status(t7xx_dev); + md->exp_id |= int_sta; + if (md->exp_id & D2H_INT_EXCEPTION_INIT) { + if (ctl->md_state == MD_STATE_INVALID || + ctl->md_state == MD_STATE_WAITING_FOR_HS1 || + ctl->md_state == MD_STATE_WAITING_FOR_HS2 || + ctl->md_state == MD_STATE_READY) { + md->exp_id &= ~D2H_INT_EXCEPTION_INIT; + ret = t7xx_fsm_recv_md_intr(ctl, MD_IRQ_CCIF_EX); + } + } else if (md->exp_id & D2H_INT_PORT_ENUM) { + md->exp_id &= ~D2H_INT_PORT_ENUM; + + if (ctl->curr_state == FSM_STATE_INIT || ctl->curr_state == FSM_STATE_PRE_START || + ctl->curr_state == FSM_STATE_STOPPED) + ret = t7xx_fsm_recv_md_intr(ctl, MD_IRQ_PORT_ENUM); + } else if (ctl->md_state == MD_STATE_WAITING_FOR_HS1) { + mask = t7xx_mhccif_mask_get(t7xx_dev); + if ((md->exp_id & D2H_INT_ASYNC_MD_HK) && !(mask & D2H_INT_ASYNC_MD_HK)) { + md->exp_id &= ~D2H_INT_ASYNC_MD_HK; + queue_work(md->handshake_wq, &md->handshake_work); + } + } + spin_unlock_bh(&md->exp_lock); + + return ret; +} + +static void t7xx_clr_device_irq_via_pcie(struct t7xx_pci_dev *t7xx_dev) +{ + struct t7xx_addr_base *pbase_addr = &t7xx_dev->base_addr; + void __iomem *reset_pcie_reg; + u32 val; + + reset_pcie_reg = pbase_addr->pcie_ext_reg_base + TOPRGU_CH_PCIE_IRQ_STA - + pbase_addr->pcie_dev_reg_trsl_addr; + val = ioread32(reset_pcie_reg); + iowrite32(val, reset_pcie_reg); +} + +void t7xx_clear_rgu_irq(struct t7xx_pci_dev *t7xx_dev) +{ + /* Clear L2 */ + t7xx_clr_device_irq_via_pcie(t7xx_dev); + /* Clear L1 */ + t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT); +} + +static int t7xx_acpi_reset(struct t7xx_pci_dev *t7xx_dev, char *fn_name) +{ +#ifdef CONFIG_ACPI + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + struct device *dev = &t7xx_dev->pdev->dev; + acpi_status acpi_ret; + acpi_handle handle; + + handle = ACPI_HANDLE(dev); + if (!handle) { + dev_err(dev, "ACPI handle not found\n"); + return -EFAULT; + } + + if (!acpi_has_method(handle, fn_name)) { + dev_err(dev, "%s method not found\n", fn_name); + return -EFAULT; + } + + acpi_ret = acpi_evaluate_object(handle, fn_name, NULL, &buffer); + if (ACPI_FAILURE(acpi_ret)) { + dev_err(dev, "%s method fail: %s\n", fn_name, acpi_format_exception(acpi_ret)); + return -EFAULT; + } + + kfree(buffer.pointer); + +#endif + return 0; +} + +int t7xx_acpi_fldr_func(struct t7xx_pci_dev *t7xx_dev) +{ + return t7xx_acpi_reset(t7xx_dev, "_RST"); +} + +static void t7xx_reset_device_via_pmic(struct t7xx_pci_dev *t7xx_dev) +{ + u32 val; + + val = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS); + if (val & MISC_RESET_TYPE_PLDR) + t7xx_acpi_reset(t7xx_dev, "MRST._RST"); + else if (val & MISC_RESET_TYPE_FLDR) + t7xx_acpi_fldr_func(t7xx_dev); +} + +static irqreturn_t t7xx_rgu_isr_thread(int irq, void *data) +{ + struct t7xx_pci_dev *t7xx_dev = data; + + msleep(RGU_RESET_DELAY_MS); + t7xx_reset_device_via_pmic(t7xx_dev); + return IRQ_HANDLED; +} + +static irqreturn_t t7xx_rgu_isr_handler(int irq, void *data) +{ + struct t7xx_pci_dev *t7xx_dev = data; + struct t7xx_modem *modem; + + t7xx_clear_rgu_irq(t7xx_dev); + if (!t7xx_dev->rgu_pci_irq_en) + return IRQ_HANDLED; + + modem = t7xx_dev->md; + 
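/* Note the RGU assertion for the rest of the driver, then defer the actual device reset to the threaded handler */ +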
modem->rgu_irq_asserted = true; + t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT); + return IRQ_WAKE_THREAD; +} + +static void t7xx_pcie_register_rgu_isr(struct t7xx_pci_dev *t7xx_dev) +{ + /* Registers RGU callback ISR with PCIe driver */ + t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT); + t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT); + + t7xx_dev->intr_handler[SAP_RGU_INT] = t7xx_rgu_isr_handler; + t7xx_dev->intr_thread[SAP_RGU_INT] = t7xx_rgu_isr_thread; + t7xx_dev->callback_param[SAP_RGU_INT] = t7xx_dev; + t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT); +} + +/** + * t7xx_cldma_exception() - CLDMA exception handler. + * @md_ctrl: modem control struct. + * @stage: exception stage. + * + * Part of the modem exception recovery. + * Stages are one after the other as describe below: + * HIF_EX_INIT: Disable and clear TXQ. + * HIF_EX_CLEARQ_DONE: Disable RX, flush TX/RX workqueues and clear RX. + * HIF_EX_ALLQ_RESET: HW is back in safe mode for re-initialization and restart. + */ + +/* Modem Exception Handshake Flow + * + * Modem HW Exception interrupt received + * (MD_IRQ_CCIF_EX) + * | + * +---------v--------+ + * | HIF_EX_INIT | : Disable and clear TXQ + * +------------------+ + * | + * +---------v--------+ + * | HIF_EX_INIT_DONE | : Wait for the init to be done + * +------------------+ + * | + * +---------v--------+ + * |HIF_EX_CLEARQ_DONE| : Disable and clear RXQ + * +------------------+ : Flush TX/RX workqueues + * | + * +---------v--------+ + * |HIF_EX_ALLQ_RESET | : Restart HW and CLDMA + * +------------------+ + */ +static void t7xx_cldma_exception(struct cldma_ctrl *md_ctrl, enum hif_ex_stage stage) +{ + switch (stage) { + case HIF_EX_INIT: + t7xx_cldma_stop_all_qs(md_ctrl, MTK_TX); + t7xx_cldma_clear_all_qs(md_ctrl, MTK_TX); + break; + + case HIF_EX_CLEARQ_DONE: + /* We do not want to get CLDMA IRQ when MD is + * resetting CLDMA after it got clearq_ack. + */ + t7xx_cldma_stop_all_qs(md_ctrl, MTK_RX); + t7xx_cldma_stop(md_ctrl); + + if (md_ctrl->hif_id == CLDMA_ID_MD) + t7xx_cldma_hw_reset(md_ctrl->t7xx_dev->base_addr.infracfg_ao_base); + + t7xx_cldma_clear_all_qs(md_ctrl, MTK_RX); + break; + + case HIF_EX_ALLQ_RESET: + t7xx_cldma_hw_init(&md_ctrl->hw_info); + t7xx_cldma_start(md_ctrl); + break; + + default: + break; + } +} + +static void t7xx_md_exception(struct t7xx_modem *md, enum hif_ex_stage stage) +{ + struct t7xx_pci_dev *t7xx_dev = md->t7xx_dev; + + if (stage == HIF_EX_CLEARQ_DONE) { + /* Give DHL time to flush data */ + msleep(PORT_RESET_DELAY_MS); + t7xx_port_proxy_reset(md->port_prox); + } + + t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_MD], stage); + + if (stage == HIF_EX_INIT) + t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_EXCEPTION_ACK); + else if (stage == HIF_EX_CLEARQ_DONE) + t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_EXCEPTION_CLEARQ_ACK); +} + +static int t7xx_wait_hif_ex_hk_event(struct t7xx_modem *md, int event_id) +{ + unsigned int waited_time_ms = 0; + + do { + if (md->exp_id & event_id) + return 0; + + waited_time_ms += EX_HS_POLL_DELAY_MS; + msleep(EX_HS_POLL_DELAY_MS); + } while (waited_time_ms < EX_HS_TIMEOUT_MS); + + return -EFAULT; +} + +static void t7xx_md_sys_sw_init(struct t7xx_pci_dev *t7xx_dev) +{ + /* Register the MHCCIF ISR for MD exception, port enum and + * async handshake notifications. 
+ */ + t7xx_mhccif_mask_set(t7xx_dev, D2H_SW_INT_MASK); + t7xx_mhccif_mask_clr(t7xx_dev, D2H_INT_PORT_ENUM); + + /* Register RGU IRQ handler for sAP exception notification */ + t7xx_dev->rgu_pci_irq_en = true; + t7xx_pcie_register_rgu_isr(t7xx_dev); +} + +struct feature_query { + __le32 head_pattern; + u8 feature_set[FEATURE_COUNT]; + __le32 tail_pattern; +}; + +static void t7xx_prepare_host_rt_data_query(struct t7xx_sys_info *core) +{ + struct feature_query *ft_query; + struct sk_buff *skb; + + skb = t7xx_ctrl_alloc_skb(sizeof(*ft_query)); + if (!skb) + return; + + ft_query = skb_put(skb, sizeof(*ft_query)); + ft_query->head_pattern = cpu_to_le32(MD_FEATURE_QUERY_ID); + memcpy(ft_query->feature_set, core->feature_set, FEATURE_COUNT); + ft_query->tail_pattern = cpu_to_le32(MD_FEATURE_QUERY_ID); + + /* Send HS1 message to device */ + t7xx_port_send_ctl_skb(core->ctl_port, skb, CTL_ID_HS1_MSG, 0); +} + +static int t7xx_prepare_device_rt_data(struct t7xx_sys_info *core, struct device *dev, + void *data) +{ + struct feature_query *md_feature = data; + struct mtk_runtime_feature *rt_feature; + unsigned int i, rt_data_len = 0; + struct sk_buff *skb; + + /* Parse MD runtime data query */ + if (le32_to_cpu(md_feature->head_pattern) != MD_FEATURE_QUERY_ID || + le32_to_cpu(md_feature->tail_pattern) != MD_FEATURE_QUERY_ID) { + dev_err(dev, "Invalid feature pattern: head 0x%x, tail 0x%x\n", + le32_to_cpu(md_feature->head_pattern), + le32_to_cpu(md_feature->tail_pattern)); + return -EINVAL; + } + + for (i = 0; i < FEATURE_COUNT; i++) { + if (FIELD_GET(FEATURE_MSK, md_feature->feature_set[i]) != + MTK_FEATURE_MUST_BE_SUPPORTED) + rt_data_len += sizeof(*rt_feature); + } + + skb = t7xx_ctrl_alloc_skb(rt_data_len); + if (!skb) + return -ENOMEM; + + rt_feature = skb_put(skb, rt_data_len); + memset(rt_feature, 0, rt_data_len); + + /* Fill runtime feature */ + for (i = 0; i < FEATURE_COUNT; i++) { + u8 md_feature_mask = FIELD_GET(FEATURE_MSK, md_feature->feature_set[i]); + + if (md_feature_mask == MTK_FEATURE_MUST_BE_SUPPORTED) + continue; + + rt_feature->feature_id = i; + if (md_feature_mask == MTK_FEATURE_DOES_NOT_EXIST) + rt_feature->support_info = md_feature->feature_set[i]; + + rt_feature++; + } + + /* Send HS3 message to device */ + t7xx_port_send_ctl_skb(core->ctl_port, skb, CTL_ID_HS3_MSG, 0); + return 0; +} + +static int t7xx_parse_host_rt_data(struct t7xx_fsm_ctl *ctl, struct t7xx_sys_info *core, + struct device *dev, void *data, int data_length) +{ + enum mtk_feature_support_type ft_spt_st, ft_spt_cfg; + struct mtk_runtime_feature *rt_feature; + int i, offset; + + offset = sizeof(struct feature_query); + for (i = 0; i < FEATURE_COUNT && offset < data_length; i++) { + rt_feature = data + offset; + offset += sizeof(*rt_feature) + le32_to_cpu(rt_feature->data_len); + + ft_spt_cfg = FIELD_GET(FEATURE_MSK, core->feature_set[i]); + if (ft_spt_cfg != MTK_FEATURE_MUST_BE_SUPPORTED) + continue; + + ft_spt_st = FIELD_GET(FEATURE_MSK, rt_feature->support_info); + if (ft_spt_st != MTK_FEATURE_MUST_BE_SUPPORTED) + return -EINVAL; + + if (i == RT_ID_MD_PORT_ENUM) + t7xx_port_enum_msg_handler(ctl->md, rt_feature->data); + } + + return 0; +} + +static int t7xx_core_reset(struct t7xx_modem *md) +{ + struct device *dev = &md->t7xx_dev->pdev->dev; + struct t7xx_fsm_ctl *ctl = md->fsm_ctl; + + md->core_md.ready = false; + + if (!ctl) { + dev_err(dev, "FSM is not initialized\n"); + return -EINVAL; + } + + if (md->core_md.handshake_ongoing) { + int ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2_EXIT, NULL, 0); + + if 
(ret) + return ret; + } + + md->core_md.handshake_ongoing = false; + return 0; +} + +static void t7xx_core_hk_handler(struct t7xx_modem *md, struct t7xx_fsm_ctl *ctl, + enum t7xx_fsm_event_state event_id, + enum t7xx_fsm_event_state err_detect) +{ + struct t7xx_fsm_event *event = NULL, *event_next; + struct t7xx_sys_info *core_info = &md->core_md; + struct device *dev = &md->t7xx_dev->pdev->dev; + unsigned long flags; + int ret; + + t7xx_prepare_host_rt_data_query(core_info); + + while (!kthread_should_stop()) { + bool event_received = false; + + spin_lock_irqsave(&ctl->event_lock, flags); + list_for_each_entry_safe(event, event_next, &ctl->event_queue, entry) { + if (event->event_id == err_detect) { + list_del(&event->entry); + spin_unlock_irqrestore(&ctl->event_lock, flags); + dev_err(dev, "Core handshake error event received\n"); + goto err_free_event; + } else if (event->event_id == event_id) { + list_del(&event->entry); + event_received = true; + break; + } + } + spin_unlock_irqrestore(&ctl->event_lock, flags); + + if (event_received) + break; + + wait_event_interruptible(ctl->event_wq, !list_empty(&ctl->event_queue) || + kthread_should_stop()); + if (kthread_should_stop()) + goto err_free_event; + } + + if (!event || ctl->exp_flg) + goto err_free_event; + + ret = t7xx_parse_host_rt_data(ctl, core_info, dev, event->data, event->length); + if (ret) { + dev_err(dev, "Host failure parsing runtime data: %d\n", ret); + goto err_free_event; + } + + if (ctl->exp_flg) + goto err_free_event; + + ret = t7xx_prepare_device_rt_data(core_info, dev, event->data); + if (ret) { + dev_err(dev, "Device failure parsing runtime data: %d", ret); + goto err_free_event; + } + + core_info->ready = true; + core_info->handshake_ongoing = false; + wake_up(&ctl->async_hk_wq); +err_free_event: + kfree(event); +} + +static void t7xx_md_hk_wq(struct work_struct *work) +{ + struct t7xx_modem *md = container_of(work, struct t7xx_modem, handshake_work); + struct t7xx_fsm_ctl *ctl = md->fsm_ctl; + + /* Clear the HS2 EXIT event appended in core_reset() */ + t7xx_fsm_clr_event(ctl, FSM_EVENT_MD_HS2_EXIT); + t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_MD]); + t7xx_cldma_start(md->md_ctrl[CLDMA_ID_MD]); + t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS2); + md->core_md.handshake_ongoing = true; + t7xx_core_hk_handler(md, ctl, FSM_EVENT_MD_HS2, FSM_EVENT_MD_HS2_EXIT); +} + +void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id) +{ + struct t7xx_fsm_ctl *ctl = md->fsm_ctl; + void __iomem *mhccif_base; + unsigned int int_sta; + unsigned long flags; + + switch (evt_id) { + case FSM_PRE_START: + t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM); + break; + + case FSM_START: + t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_PORT_ENUM); + + spin_lock_irqsave(&md->exp_lock, flags); + int_sta = t7xx_get_interrupt_status(md->t7xx_dev); + md->exp_id |= int_sta; + if (md->exp_id & D2H_INT_EXCEPTION_INIT) { + ctl->exp_flg = true; + md->exp_id &= ~D2H_INT_EXCEPTION_INIT; + md->exp_id &= ~D2H_INT_ASYNC_MD_HK; + } else if (ctl->exp_flg) { + md->exp_id &= ~D2H_INT_ASYNC_MD_HK; + } else if (md->exp_id & D2H_INT_ASYNC_MD_HK) { + queue_work(md->handshake_wq, &md->handshake_work); + md->exp_id &= ~D2H_INT_ASYNC_MD_HK; + mhccif_base = md->t7xx_dev->base_addr.mhccif_rc_base; + iowrite32(D2H_INT_ASYNC_MD_HK, mhccif_base + REG_EP2RC_SW_INT_ACK); + t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK); + } else { + t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_ASYNC_MD_HK); + } + spin_unlock_irqrestore(&md->exp_lock, flags); + 
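/* Pending exception/handshake status has been folded into exp_id above; now unmask the exception interrupts for this boot stage */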
+ t7xx_mhccif_mask_clr(md->t7xx_dev, + D2H_INT_EXCEPTION_INIT | + D2H_INT_EXCEPTION_INIT_DONE | + D2H_INT_EXCEPTION_CLEARQ_DONE | + D2H_INT_EXCEPTION_ALLQ_RESET); + break; + + case FSM_READY: + t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK); + break; + + default: + break; + } +} + +void t7xx_md_exception_handshake(struct t7xx_modem *md) +{ + struct device *dev = &md->t7xx_dev->pdev->dev; + int ret; + + t7xx_md_exception(md, HIF_EX_INIT); + ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_INIT_DONE); + if (ret) + dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_INIT_DONE); + + t7xx_md_exception(md, HIF_EX_INIT_DONE); + ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_CLEARQ_DONE); + if (ret) + dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_CLEARQ_DONE); + + t7xx_md_exception(md, HIF_EX_CLEARQ_DONE); + ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_ALLQ_RESET); + if (ret) + dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_ALLQ_RESET); + + t7xx_md_exception(md, HIF_EX_ALLQ_RESET); +} + +static struct t7xx_modem *t7xx_md_alloc(struct t7xx_pci_dev *t7xx_dev) +{ + struct device *dev = &t7xx_dev->pdev->dev; + struct t7xx_modem *md; + + md = devm_kzalloc(dev, sizeof(*md), GFP_KERNEL); + if (!md) + return NULL; + + md->t7xx_dev = t7xx_dev; + t7xx_dev->md = md; + spin_lock_init(&md->exp_lock); + md->handshake_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, + 0, "md_hk_wq"); + if (!md->handshake_wq) + return NULL; + + INIT_WORK(&md->handshake_work, t7xx_md_hk_wq); + md->core_md.feature_set[RT_ID_MD_PORT_ENUM] &= ~FEATURE_MSK; + md->core_md.feature_set[RT_ID_MD_PORT_ENUM] |= + FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED); + return md; +} + +int t7xx_md_reset(struct t7xx_pci_dev *t7xx_dev) +{ + struct t7xx_modem *md = t7xx_dev->md; + + md->md_init_finish = false; + md->exp_id = 0; + t7xx_fsm_reset(md); + t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_MD]); + t7xx_port_proxy_reset(md->port_prox); + md->md_init_finish = true; + return t7xx_core_reset(md); +} + +/** + * t7xx_md_init() - Initialize modem. + * @t7xx_dev: MTK device. + * + * Allocate and initialize MD control block, and initialize data path. + * Register MHCCIF ISR and RGU ISR, and start the state machine. + * + * Return: + ** 0 - Success. + ** -ENOMEM - Allocation failure. 
+ */ +int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev) +{ + struct t7xx_modem *md; + int ret; + + md = t7xx_md_alloc(t7xx_dev); + if (!md) + return -ENOMEM; + + ret = t7xx_cldma_alloc(CLDMA_ID_MD, t7xx_dev); + if (ret) + goto err_destroy_hswq; + + ret = t7xx_fsm_init(md); + if (ret) + goto err_destroy_hswq; + + ret = t7xx_ccmni_init(t7xx_dev); + if (ret) + goto err_uninit_fsm; + + ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_MD]); + if (ret) + goto err_uninit_ccmni; + + ret = t7xx_port_proxy_init(md); + if (ret) + goto err_uninit_md_cldma; + + ret = t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_START, 0); + if (ret) /* fsm_uninit flushes cmd queue */ + goto err_uninit_proxy; + + t7xx_md_sys_sw_init(t7xx_dev); + md->md_init_finish = true; + return 0; + +err_uninit_proxy: + t7xx_port_proxy_uninit(md->port_prox); + +err_uninit_md_cldma: + t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]); + +err_uninit_ccmni: + t7xx_ccmni_exit(t7xx_dev); + +err_uninit_fsm: + t7xx_fsm_uninit(md); + +err_destroy_hswq: + destroy_workqueue(md->handshake_wq); + dev_err(&t7xx_dev->pdev->dev, "Modem init failed\n"); + return ret; +} + +void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev) +{ + struct t7xx_modem *md = t7xx_dev->md; + + t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT); + + if (!md->md_init_finish) + return; + + t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION); + t7xx_port_proxy_uninit(md->port_prox); + t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]); + t7xx_ccmni_exit(t7xx_dev); + t7xx_fsm_uninit(md); + destroy_workqueue(md->handshake_wq); +} diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.h b/drivers/net/wwan/t7xx/t7xx_modem_ops.h new file mode 100644 index 000000000..7469ed636 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Haijun Liu + * Eliot Lee + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Amir Hanania + * Chiranjeevi Rapolu + * Sreehari Kancharla + */ + +#ifndef __T7XX_MODEM_OPS_H__ +#define __T7XX_MODEM_OPS_H__ + +#include +#include +#include + +#include "t7xx_hif_cldma.h" +#include "t7xx_pci.h" + +#define FEATURE_COUNT 64 + +/** + * enum hif_ex_stage - HIF exception handshake stages with the HW. + * @HIF_EX_INIT: Disable and clear TXQ. + * @HIF_EX_INIT_DONE: Polling for initialization to be done. + * @HIF_EX_CLEARQ_DONE: Disable RX, flush TX/RX workqueues and clear RX. + * @HIF_EX_ALLQ_RESET: HW is back in safe mode for re-initialization and restart. 
+ */ +enum hif_ex_stage { + HIF_EX_INIT, + HIF_EX_INIT_DONE, + HIF_EX_CLEARQ_DONE, + HIF_EX_ALLQ_RESET, +}; + +struct mtk_runtime_feature { + u8 feature_id; + u8 support_info; + u8 reserved[2]; + __le32 data_len; + __le32 data[]; +}; + +enum md_event_id { + FSM_PRE_START, + FSM_START, + FSM_READY, +}; + +struct t7xx_sys_info { + bool ready; + bool handshake_ongoing; + u8 feature_set[FEATURE_COUNT]; + struct t7xx_port *ctl_port; +}; + +struct t7xx_modem { + struct cldma_ctrl *md_ctrl[CLDMA_NUM]; + struct t7xx_pci_dev *t7xx_dev; + struct t7xx_sys_info core_md; + bool md_init_finish; + bool rgu_irq_asserted; + struct workqueue_struct *handshake_wq; + struct work_struct handshake_work; + struct t7xx_fsm_ctl *fsm_ctl; + struct port_proxy *port_prox; + unsigned int exp_id; + spinlock_t exp_lock; /* Protects exception events */ +}; + +void t7xx_md_exception_handshake(struct t7xx_modem *md); +void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id); +int t7xx_md_reset(struct t7xx_pci_dev *t7xx_dev); +int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev); +void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev); +void t7xx_clear_rgu_irq(struct t7xx_pci_dev *t7xx_dev); +int t7xx_acpi_fldr_func(struct t7xx_pci_dev *t7xx_dev); +int t7xx_pci_mhccif_isr(struct t7xx_pci_dev *t7xx_dev); + +#endif /* __T7XX_MODEM_OPS_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_netdev.c b/drivers/net/wwan/t7xx/t7xx_netdev.c new file mode 100644 index 000000000..f71d3bc3b --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_netdev.c @@ -0,0 +1,423 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Chandrashekar Devegowda + * Haijun Liu + * Ricardo Martinez + * + * Contributors: + * Amir Hanania + * Andy Shevchenko + * Chiranjeevi Rapolu + * Eliot Lee + * Moises Veleta + * Sreehari Kancharla + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_hif_dpmaif_rx.h" +#include "t7xx_hif_dpmaif_tx.h" +#include "t7xx_netdev.h" +#include "t7xx_pci.h" +#include "t7xx_port_proxy.h" +#include "t7xx_state_monitor.h" + +#define IP_MUX_SESSION_DEFAULT 0 + +static int t7xx_ccmni_open(struct net_device *dev) +{ + struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev); + + netif_carrier_on(dev); + netif_tx_start_all_queues(dev); + atomic_inc(&ccmni->usage); + return 0; +} + +static int t7xx_ccmni_close(struct net_device *dev) +{ + struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev); + + atomic_dec(&ccmni->usage); + netif_carrier_off(dev); + netif_tx_disable(dev); + return 0; +} + +static int t7xx_ccmni_send_packet(struct t7xx_ccmni *ccmni, struct sk_buff *skb, + unsigned int txq_number) +{ + struct t7xx_ccmni_ctrl *ctlb = ccmni->ctlb; + struct t7xx_skb_cb *skb_cb = T7XX_SKB_CB(skb); + + skb_cb->netif_idx = ccmni->index; + + if (t7xx_dpmaif_tx_send_skb(ctlb->hif_ctrl, txq_number, skb)) + return NETDEV_TX_BUSY; + + return 0; +} + +static netdev_tx_t t7xx_ccmni_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev); + int skb_len = skb->len; + + /* If MTU is changed or there is no headroom, drop the packet */ + if (skb->len > dev->mtu || skb_headroom(skb) < sizeof(struct ccci_header)) { + dev_kfree_skb(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } + + if (t7xx_ccmni_send_packet(ccmni, skb, DPMAIF_TX_DEFAULT_QUEUE)) + return NETDEV_TX_BUSY; + + dev->stats.tx_packets++; + 
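/* Use the length cached before the skb was handed to DPMAIF, as the skb may already have been consumed by the TX path */ +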
dev->stats.tx_bytes += skb_len; + + return NETDEV_TX_OK; +} + +static void t7xx_ccmni_tx_timeout(struct net_device *dev, unsigned int __always_unused txqueue) +{ + struct t7xx_ccmni *ccmni = netdev_priv(dev); + + dev->stats.tx_errors++; + + if (atomic_read(&ccmni->usage) > 0) + netif_tx_wake_all_queues(dev); +} + +static const struct net_device_ops ccmni_netdev_ops = { + .ndo_open = t7xx_ccmni_open, + .ndo_stop = t7xx_ccmni_close, + .ndo_start_xmit = t7xx_ccmni_start_xmit, + .ndo_tx_timeout = t7xx_ccmni_tx_timeout, +}; + +static void t7xx_ccmni_start(struct t7xx_ccmni_ctrl *ctlb) +{ + struct t7xx_ccmni *ccmni; + int i; + + for (i = 0; i < ctlb->nic_dev_num; i++) { + ccmni = ctlb->ccmni_inst[i]; + if (!ccmni) + continue; + + if (atomic_read(&ccmni->usage) > 0) { + netif_tx_start_all_queues(ccmni->dev); + netif_carrier_on(ccmni->dev); + } + } +} + +static void t7xx_ccmni_pre_stop(struct t7xx_ccmni_ctrl *ctlb) +{ + struct t7xx_ccmni *ccmni; + int i; + + for (i = 0; i < ctlb->nic_dev_num; i++) { + ccmni = ctlb->ccmni_inst[i]; + if (!ccmni) + continue; + + if (atomic_read(&ccmni->usage) > 0) + netif_tx_disable(ccmni->dev); + } +} + +static void t7xx_ccmni_post_stop(struct t7xx_ccmni_ctrl *ctlb) +{ + struct t7xx_ccmni *ccmni; + int i; + + for (i = 0; i < ctlb->nic_dev_num; i++) { + ccmni = ctlb->ccmni_inst[i]; + if (!ccmni) + continue; + + if (atomic_read(&ccmni->usage) > 0) + netif_carrier_off(ccmni->dev); + } +} + +static void t7xx_ccmni_wwan_setup(struct net_device *dev) +{ + dev->hard_header_len += sizeof(struct ccci_header); + + dev->mtu = ETH_DATA_LEN; + dev->max_mtu = CCMNI_MTU_MAX; + BUILD_BUG_ON(CCMNI_MTU_MAX > DPMAIF_HW_MTU_SIZE); + + dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN; + dev->watchdog_timeo = CCMNI_NETDEV_WDT_TO; + + dev->flags = IFF_POINTOPOINT | IFF_NOARP; + + dev->features = NETIF_F_VLAN_CHALLENGED; + + dev->features |= NETIF_F_SG; + dev->hw_features |= NETIF_F_SG; + + dev->features |= NETIF_F_HW_CSUM; + dev->hw_features |= NETIF_F_HW_CSUM; + + dev->features |= NETIF_F_RXCSUM; + dev->hw_features |= NETIF_F_RXCSUM; + + dev->needs_free_netdev = true; + + dev->type = ARPHRD_NONE; + + dev->netdev_ops = &ccmni_netdev_ops; +} + +static int t7xx_ccmni_wwan_newlink(void *ctxt, struct net_device *dev, u32 if_id, + struct netlink_ext_ack *extack) +{ + struct t7xx_ccmni_ctrl *ctlb = ctxt; + struct t7xx_ccmni *ccmni; + int ret; + + if (if_id >= ARRAY_SIZE(ctlb->ccmni_inst)) + return -EINVAL; + + ccmni = wwan_netdev_drvpriv(dev); + ccmni->index = if_id; + ccmni->ctlb = ctlb; + ccmni->dev = dev; + atomic_set(&ccmni->usage, 0); + ctlb->ccmni_inst[if_id] = ccmni; + + ret = register_netdevice(dev); + if (ret) + return ret; + + netif_device_attach(dev); + return 0; +} + +static void t7xx_ccmni_wwan_dellink(void *ctxt, struct net_device *dev, struct list_head *head) +{ + struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev); + struct t7xx_ccmni_ctrl *ctlb = ctxt; + u8 if_id = ccmni->index; + + if (if_id >= ARRAY_SIZE(ctlb->ccmni_inst)) + return; + + if (WARN_ON(ctlb->ccmni_inst[if_id] != ccmni)) + return; + + unregister_netdevice(dev); +} + +static const struct wwan_ops ccmni_wwan_ops = { + .priv_size = sizeof(struct t7xx_ccmni), + .setup = t7xx_ccmni_wwan_setup, + .newlink = t7xx_ccmni_wwan_newlink, + .dellink = t7xx_ccmni_wwan_dellink, +}; + +static int t7xx_ccmni_register_wwan(struct t7xx_ccmni_ctrl *ctlb) +{ + struct device *dev = ctlb->hif_ctrl->dev; + int ret; + + if (ctlb->wwan_is_registered) + return 0; + + /* WWAN core will create a netdev for the default IP MUX channel */ + ret = 
wwan_register_ops(dev, &ccmni_wwan_ops, ctlb, IP_MUX_SESSION_DEFAULT); + if (ret < 0) { + dev_err(dev, "Unable to register WWAN ops, %d\n", ret); + return ret; + } + + ctlb->wwan_is_registered = true; + return 0; +} + +static int t7xx_ccmni_md_state_callback(enum md_state state, void *para) +{ + struct t7xx_ccmni_ctrl *ctlb = para; + struct device *dev; + int ret = 0; + + dev = ctlb->hif_ctrl->dev; + ctlb->md_sta = state; + + switch (state) { + case MD_STATE_READY: + ret = t7xx_ccmni_register_wwan(ctlb); + if (!ret) + t7xx_ccmni_start(ctlb); + break; + + case MD_STATE_EXCEPTION: + case MD_STATE_STOPPED: + t7xx_ccmni_pre_stop(ctlb); + + ret = t7xx_dpmaif_md_state_callback(ctlb->hif_ctrl, state); + if (ret < 0) + dev_err(dev, "DPMAIF md state callback err, state=%d\n", state); + + t7xx_ccmni_post_stop(ctlb); + break; + + case MD_STATE_WAITING_FOR_HS1: + case MD_STATE_WAITING_TO_STOP: + ret = t7xx_dpmaif_md_state_callback(ctlb->hif_ctrl, state); + if (ret < 0) + dev_err(dev, "DPMAIF md state callback err, state=%d\n", state); + + break; + + default: + break; + } + + return ret; +} + +static void init_md_status_notifier(struct t7xx_pci_dev *t7xx_dev) +{ + struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb; + struct t7xx_fsm_notifier *md_status_notifier; + + md_status_notifier = &ctlb->md_status_notify; + INIT_LIST_HEAD(&md_status_notifier->entry); + md_status_notifier->notifier_fn = t7xx_ccmni_md_state_callback; + md_status_notifier->data = ctlb; + + t7xx_fsm_notifier_register(t7xx_dev->md, md_status_notifier); +} + +static void t7xx_ccmni_recv_skb(struct t7xx_pci_dev *t7xx_dev, struct sk_buff *skb) +{ + struct t7xx_skb_cb *skb_cb; + struct net_device *net_dev; + struct t7xx_ccmni *ccmni; + int pkt_type, skb_len; + u8 netif_id; + + skb_cb = T7XX_SKB_CB(skb); + netif_id = skb_cb->netif_idx; + ccmni = t7xx_dev->ccmni_ctlb->ccmni_inst[netif_id]; + if (!ccmni) { + dev_kfree_skb(skb); + return; + } + + net_dev = ccmni->dev; + skb->dev = net_dev; + + pkt_type = skb_cb->rx_pkt_type; + if (pkt_type == PKT_TYPE_IP6) + skb->protocol = htons(ETH_P_IPV6); + else + skb->protocol = htons(ETH_P_IP); + + skb_len = skb->len; + netif_rx(skb); + net_dev->stats.rx_packets++; + net_dev->stats.rx_bytes += skb_len; +} + +static void t7xx_ccmni_queue_tx_irq_notify(struct t7xx_ccmni_ctrl *ctlb, int qno) +{ + struct t7xx_ccmni *ccmni = ctlb->ccmni_inst[0]; + struct netdev_queue *net_queue; + + if (netif_running(ccmni->dev) && atomic_read(&ccmni->usage) > 0) { + net_queue = netdev_get_tx_queue(ccmni->dev, qno); + if (netif_tx_queue_stopped(net_queue)) + netif_tx_wake_queue(net_queue); + } +} + +static void t7xx_ccmni_queue_tx_full_notify(struct t7xx_ccmni_ctrl *ctlb, int qno) +{ + struct t7xx_ccmni *ccmni = ctlb->ccmni_inst[0]; + struct netdev_queue *net_queue; + + if (atomic_read(&ccmni->usage) > 0) { + netdev_err(ccmni->dev, "TX queue %d is full\n", qno); + net_queue = netdev_get_tx_queue(ccmni->dev, qno); + netif_tx_stop_queue(net_queue); + } +} + +static void t7xx_ccmni_queue_state_notify(struct t7xx_pci_dev *t7xx_dev, + enum dpmaif_txq_state state, int qno) +{ + struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb; + + if (ctlb->md_sta != MD_STATE_READY) + return; + + if (!ctlb->ccmni_inst[0]) { + dev_warn(&t7xx_dev->pdev->dev, "No netdev registered yet\n"); + return; + } + + if (state == DMPAIF_TXQ_STATE_IRQ) + t7xx_ccmni_queue_tx_irq_notify(ctlb, qno); + else if (state == DMPAIF_TXQ_STATE_FULL) + t7xx_ccmni_queue_tx_full_notify(ctlb, qno); +} + +int t7xx_ccmni_init(struct t7xx_pci_dev *t7xx_dev) +{ + struct 
device *dev = &t7xx_dev->pdev->dev; + struct t7xx_ccmni_ctrl *ctlb; + + ctlb = devm_kzalloc(dev, sizeof(*ctlb), GFP_KERNEL); + if (!ctlb) + return -ENOMEM; + + t7xx_dev->ccmni_ctlb = ctlb; + ctlb->t7xx_dev = t7xx_dev; + ctlb->callbacks.state_notify = t7xx_ccmni_queue_state_notify; + ctlb->callbacks.recv_skb = t7xx_ccmni_recv_skb; + ctlb->nic_dev_num = NIC_DEV_DEFAULT; + + ctlb->hif_ctrl = t7xx_dpmaif_hif_init(t7xx_dev, &ctlb->callbacks); + if (!ctlb->hif_ctrl) + return -ENOMEM; + + init_md_status_notifier(t7xx_dev); + return 0; +} + +void t7xx_ccmni_exit(struct t7xx_pci_dev *t7xx_dev) +{ + struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb; + + t7xx_fsm_notifier_unregister(t7xx_dev->md, &ctlb->md_status_notify); + + if (ctlb->wwan_is_registered) { + wwan_unregister_ops(&t7xx_dev->pdev->dev); + ctlb->wwan_is_registered = false; + } + + t7xx_dpmaif_hif_exit(ctlb->hif_ctrl); +} diff --git a/drivers/net/wwan/t7xx/t7xx_netdev.h b/drivers/net/wwan/t7xx/t7xx_netdev.h new file mode 100644 index 000000000..f5ad49ca1 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_netdev.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Haijun Liu + * Moises Veleta + * + * Contributors: + * Amir Hanania + * Chiranjeevi Rapolu + * Ricardo Martinez + */ + +#ifndef __T7XX_NETDEV_H__ +#define __T7XX_NETDEV_H__ + +#include +#include +#include + +#include "t7xx_hif_dpmaif.h" +#include "t7xx_pci.h" +#include "t7xx_state_monitor.h" + +#define RXQ_NUM DPMAIF_RXQ_NUM +#define NIC_DEV_MAX 21 +#define NIC_DEV_DEFAULT 2 + +#define CCMNI_NETDEV_WDT_TO (1 * HZ) +#define CCMNI_MTU_MAX 3000 + +struct t7xx_ccmni { + u8 index; + atomic_t usage; + struct net_device *dev; + struct t7xx_ccmni_ctrl *ctlb; +}; + +struct t7xx_ccmni_ctrl { + struct t7xx_pci_dev *t7xx_dev; + struct dpmaif_ctrl *hif_ctrl; + struct t7xx_ccmni *ccmni_inst[NIC_DEV_MAX]; + struct dpmaif_callbacks callbacks; + unsigned int nic_dev_num; + unsigned int md_sta; + struct t7xx_fsm_notifier md_status_notify; + bool wwan_is_registered; +}; + +int t7xx_ccmni_init(struct t7xx_pci_dev *t7xx_dev); +void t7xx_ccmni_exit(struct t7xx_pci_dev *t7xx_dev); + +#endif /* __T7XX_NETDEV_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_pci.c b/drivers/net/wwan/t7xx/t7xx_pci.c new file mode 100644 index 000000000..91256e005 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_pci.c @@ -0,0 +1,781 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. 
+ * + * Authors: + * Haijun Liu + * Ricardo Martinez + * Sreehari Kancharla + * + * Contributors: + * Amir Hanania + * Andy Shevchenko + * Chiranjeevi Rapolu + * Eliot Lee + * Moises Veleta + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_mhccif.h" +#include "t7xx_modem_ops.h" +#include "t7xx_pci.h" +#include "t7xx_pcie_mac.h" +#include "t7xx_reg.h" +#include "t7xx_state_monitor.h" + +#define T7XX_PCI_IREG_BASE 0 +#define T7XX_PCI_EREG_BASE 2 + +#define T7XX_INIT_TIMEOUT 20 +#define PM_SLEEP_DIS_TIMEOUT_MS 20 +#define PM_ACK_TIMEOUT_MS 1500 +#define PM_AUTOSUSPEND_MS 20000 +#define PM_RESOURCE_POLL_TIMEOUT_US 10000 +#define PM_RESOURCE_POLL_STEP_US 100 + +enum t7xx_pm_state { + MTK_PM_EXCEPTION, + MTK_PM_INIT, /* Device initialized, but handshake not completed */ + MTK_PM_SUSPENDED, + MTK_PM_RESUMED, +}; + +static void t7xx_dev_set_sleep_capability(struct t7xx_pci_dev *t7xx_dev, bool enable) +{ + void __iomem *ctrl_reg = IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_CTRL; + u32 value; + + value = ioread32(ctrl_reg); + + if (enable) + value &= ~T7XX_PCIE_MISC_MAC_SLEEP_DIS; + else + value |= T7XX_PCIE_MISC_MAC_SLEEP_DIS; + + iowrite32(value, ctrl_reg); +} + +static int t7xx_wait_pm_config(struct t7xx_pci_dev *t7xx_dev) +{ + int ret, val; + + ret = read_poll_timeout(ioread32, val, + (val & T7XX_PCIE_RESOURCE_STS_MSK) == T7XX_PCIE_RESOURCE_STS_MSK, + PM_RESOURCE_POLL_STEP_US, PM_RESOURCE_POLL_TIMEOUT_US, true, + IREG_BASE(t7xx_dev) + T7XX_PCIE_RESOURCE_STATUS); + if (ret == -ETIMEDOUT) + dev_err(&t7xx_dev->pdev->dev, "PM configuration timed out\n"); + + return ret; +} + +static int t7xx_pci_pm_init(struct t7xx_pci_dev *t7xx_dev) +{ + struct pci_dev *pdev = t7xx_dev->pdev; + + INIT_LIST_HEAD(&t7xx_dev->md_pm_entities); + mutex_init(&t7xx_dev->md_pm_entity_mtx); + spin_lock_init(&t7xx_dev->md_pm_lock); + init_completion(&t7xx_dev->sleep_lock_acquire); + init_completion(&t7xx_dev->pm_sr_ack); + init_completion(&t7xx_dev->init_done); + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT); + + device_init_wakeup(&pdev->dev, true); + dev_pm_set_driver_flags(&pdev->dev, pdev->dev.power.driver_flags | + DPM_FLAG_NO_DIRECT_COMPLETE); + + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR); + pm_runtime_set_autosuspend_delay(&pdev->dev, PM_AUTOSUSPEND_MS); + pm_runtime_use_autosuspend(&pdev->dev); + + return t7xx_wait_pm_config(t7xx_dev); +} + +void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev) +{ + /* Enable the PCIe resource lock only after MD deep sleep is done */ + t7xx_mhccif_mask_clr(t7xx_dev, + D2H_INT_DS_LOCK_ACK | + D2H_INT_SUSPEND_ACK | + D2H_INT_RESUME_ACK | + D2H_INT_SUSPEND_ACK_AP | + D2H_INT_RESUME_ACK_AP); + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED); + + pm_runtime_mark_last_busy(&t7xx_dev->pdev->dev); + pm_runtime_allow(&t7xx_dev->pdev->dev); + pm_runtime_put_noidle(&t7xx_dev->pdev->dev); + complete_all(&t7xx_dev->init_done); +} + +static int t7xx_pci_pm_reinit(struct t7xx_pci_dev *t7xx_dev) +{ + /* The device is kept in FSM re-init flow + * so just roll back PM setting to the init setting. 
+ */ + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT); + + pm_runtime_get_noresume(&t7xx_dev->pdev->dev); + + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR); + return t7xx_wait_pm_config(t7xx_dev); +} + +void t7xx_pci_pm_exp_detected(struct t7xx_pci_dev *t7xx_dev) +{ + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR); + t7xx_wait_pm_config(t7xx_dev); + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_EXCEPTION); +} + +int t7xx_pci_pm_entity_register(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity) +{ + struct md_pm_entity *entity; + + mutex_lock(&t7xx_dev->md_pm_entity_mtx); + list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { + if (entity->id == pm_entity->id) { + mutex_unlock(&t7xx_dev->md_pm_entity_mtx); + return -EEXIST; + } + } + + list_add_tail(&pm_entity->entity, &t7xx_dev->md_pm_entities); + mutex_unlock(&t7xx_dev->md_pm_entity_mtx); + return 0; +} + +int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity) +{ + struct md_pm_entity *entity, *tmp_entity; + + mutex_lock(&t7xx_dev->md_pm_entity_mtx); + list_for_each_entry_safe(entity, tmp_entity, &t7xx_dev->md_pm_entities, entity) { + if (entity->id == pm_entity->id) { + list_del(&pm_entity->entity); + mutex_unlock(&t7xx_dev->md_pm_entity_mtx); + return 0; + } + } + + mutex_unlock(&t7xx_dev->md_pm_entity_mtx); + + return -ENXIO; +} + +int t7xx_pci_sleep_disable_complete(struct t7xx_pci_dev *t7xx_dev) +{ + struct device *dev = &t7xx_dev->pdev->dev; + int ret; + + ret = wait_for_completion_timeout(&t7xx_dev->sleep_lock_acquire, + msecs_to_jiffies(PM_SLEEP_DIS_TIMEOUT_MS)); + if (!ret) + dev_err_ratelimited(dev, "Resource wait complete timed out\n"); + + return ret; +} + +/** + * t7xx_pci_disable_sleep() - Disable deep sleep capability. + * @t7xx_dev: MTK device. + * + * Lock the deep sleep capability, note that the device can still go into deep sleep + * state while device is in D0 state, from the host's point-of-view. + * + * If device is in deep sleep state, wake up the device and disable deep sleep capability. + */ +void t7xx_pci_disable_sleep(struct t7xx_pci_dev *t7xx_dev) +{ + unsigned long flags; + + spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags); + t7xx_dev->sleep_disable_count++; + if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED) + goto unlock_and_complete; + + if (t7xx_dev->sleep_disable_count == 1) { + u32 status; + + reinit_completion(&t7xx_dev->sleep_lock_acquire); + t7xx_dev_set_sleep_capability(t7xx_dev, false); + + status = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_RESOURCE_STATUS); + if (status & T7XX_PCIE_RESOURCE_STS_MSK) + goto unlock_and_complete; + + t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DS_LOCK); + } + spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags); + return; + +unlock_and_complete: + spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags); + complete_all(&t7xx_dev->sleep_lock_acquire); +} + +/** + * t7xx_pci_enable_sleep() - Enable deep sleep capability. + * @t7xx_dev: MTK device. + * + * After enabling deep sleep, device can enter into deep sleep state. 
+ */ +void t7xx_pci_enable_sleep(struct t7xx_pci_dev *t7xx_dev) +{ + unsigned long flags; + + spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags); + t7xx_dev->sleep_disable_count--; + if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED) + goto unlock; + + if (t7xx_dev->sleep_disable_count == 0) + t7xx_dev_set_sleep_capability(t7xx_dev, true); + +unlock: + spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags); +} + +static int t7xx_send_pm_request(struct t7xx_pci_dev *t7xx_dev, u32 request) +{ + unsigned long wait_ret; + + reinit_completion(&t7xx_dev->pm_sr_ack); + t7xx_mhccif_h2d_swint_trigger(t7xx_dev, request); + wait_ret = wait_for_completion_timeout(&t7xx_dev->pm_sr_ack, + msecs_to_jiffies(PM_ACK_TIMEOUT_MS)); + if (!wait_ret) + return -ETIMEDOUT; + + return 0; +} + +static int __t7xx_pci_pm_suspend(struct pci_dev *pdev) +{ + enum t7xx_pm_id entity_id = PM_ENTITY_ID_INVALID; + struct t7xx_pci_dev *t7xx_dev; + struct md_pm_entity *entity; + int ret; + + t7xx_dev = pci_get_drvdata(pdev); + if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT) { + dev_err(&pdev->dev, "[PM] Exiting suspend, modem in invalid state\n"); + return -EFAULT; + } + + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR); + ret = t7xx_wait_pm_config(t7xx_dev); + if (ret) { + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); + return ret; + } + + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED); + t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT); + t7xx_dev->rgu_pci_irq_en = false; + + list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { + if (!entity->suspend) + continue; + + ret = entity->suspend(t7xx_dev, entity->entity_param); + if (ret) { + entity_id = entity->id; + dev_err(&pdev->dev, "[PM] Suspend error: %d, id: %d\n", ret, entity_id); + goto abort_suspend; + } + } + + ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_SUSPEND_REQ); + if (ret) { + dev_err(&pdev->dev, "[PM] MD suspend error: %d\n", ret); + goto abort_suspend; + } + + ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_SUSPEND_REQ_AP); + if (ret) { + t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ); + dev_err(&pdev->dev, "[PM] SAP suspend error: %d\n", ret); + goto abort_suspend; + } + + list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { + if (entity->suspend_late) + entity->suspend_late(t7xx_dev, entity->entity_param); + } + + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); + return 0; + +abort_suspend: + list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { + if (entity_id == entity->id) + break; + + if (entity->resume) + entity->resume(t7xx_dev, entity->entity_param); + } + + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED); + t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT); + return ret; +} + +static void t7xx_pcie_interrupt_reinit(struct t7xx_pci_dev *t7xx_dev) +{ + t7xx_pcie_set_mac_msix_cfg(t7xx_dev, EXT_INT_NUM); + + /* Disable interrupt first and let the IPs enable them */ + iowrite32(MSIX_MSK_SET_ALL, IREG_BASE(t7xx_dev) + IMASK_HOST_MSIX_CLR_GRP0_0); + + /* Device disables PCIe interrupts during resume and + * following function will re-enable PCIe interrupts. 
+ */ + t7xx_pcie_mac_interrupts_en(t7xx_dev); + t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT); +} + +static int t7xx_pcie_reinit(struct t7xx_pci_dev *t7xx_dev, bool is_d3) +{ + int ret; + + ret = pcim_enable_device(t7xx_dev->pdev); + if (ret) + return ret; + + t7xx_pcie_mac_atr_init(t7xx_dev); + t7xx_pcie_interrupt_reinit(t7xx_dev); + + if (is_d3) { + t7xx_mhccif_init(t7xx_dev); + return t7xx_pci_pm_reinit(t7xx_dev); + } + + return 0; +} + +static int t7xx_send_fsm_command(struct t7xx_pci_dev *t7xx_dev, u32 event) +{ + struct t7xx_fsm_ctl *fsm_ctl = t7xx_dev->md->fsm_ctl; + struct device *dev = &t7xx_dev->pdev->dev; + int ret = -EINVAL; + + switch (event) { + case FSM_CMD_STOP: + ret = t7xx_fsm_append_cmd(fsm_ctl, FSM_CMD_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION); + break; + + case FSM_CMD_START: + t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT); + t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT); + t7xx_dev->rgu_pci_irq_en = true; + t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT); + ret = t7xx_fsm_append_cmd(fsm_ctl, FSM_CMD_START, 0); + break; + + default: + break; + } + + if (ret) + dev_err(dev, "Failure handling FSM command %u, %d\n", event, ret); + + return ret; +} + +static int __t7xx_pci_pm_resume(struct pci_dev *pdev, bool state_check) +{ + struct t7xx_pci_dev *t7xx_dev; + struct md_pm_entity *entity; + u32 prev_state; + int ret = 0; + + t7xx_dev = pci_get_drvdata(pdev); + if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT) { + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); + return 0; + } + + t7xx_pcie_mac_interrupts_en(t7xx_dev); + prev_state = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_PM_RESUME_STATE); + + if (state_check) { + /* For D3/L3 resume, the device could boot so quickly that the + * initial value of the dummy register might be overwritten. + * Identify new boots if the ATR source address register is not initialized. 
+ */ + u32 atr_reg_val = ioread32(IREG_BASE(t7xx_dev) + + ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR); + if (prev_state == PM_RESUME_REG_STATE_L3 || + (prev_state == PM_RESUME_REG_STATE_INIT && + atr_reg_val == ATR_SRC_ADDR_INVALID)) { + ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP); + if (ret) + return ret; + + ret = t7xx_pcie_reinit(t7xx_dev, true); + if (ret) + return ret; + + t7xx_clear_rgu_irq(t7xx_dev); + return t7xx_send_fsm_command(t7xx_dev, FSM_CMD_START); + } + + if (prev_state == PM_RESUME_REG_STATE_EXP || + prev_state == PM_RESUME_REG_STATE_L2_EXP) { + if (prev_state == PM_RESUME_REG_STATE_L2_EXP) { + ret = t7xx_pcie_reinit(t7xx_dev, false); + if (ret) + return ret; + } + + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED); + t7xx_dev->rgu_pci_irq_en = true; + t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT); + + t7xx_mhccif_mask_clr(t7xx_dev, + D2H_INT_EXCEPTION_INIT | + D2H_INT_EXCEPTION_INIT_DONE | + D2H_INT_EXCEPTION_CLEARQ_DONE | + D2H_INT_EXCEPTION_ALLQ_RESET | + D2H_INT_PORT_ENUM); + + return ret; + } + + if (prev_state == PM_RESUME_REG_STATE_L2) { + ret = t7xx_pcie_reinit(t7xx_dev, false); + if (ret) + return ret; + + } else if (prev_state != PM_RESUME_REG_STATE_L1 && + prev_state != PM_RESUME_REG_STATE_INIT) { + ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP); + if (ret) + return ret; + + t7xx_clear_rgu_irq(t7xx_dev); + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED); + return 0; + } + } + + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR); + t7xx_wait_pm_config(t7xx_dev); + + list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { + if (entity->resume_early) + entity->resume_early(t7xx_dev, entity->entity_param); + } + + ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ); + if (ret) + dev_err(&pdev->dev, "[PM] MD resume error: %d\n", ret); + + ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ_AP); + if (ret) + dev_err(&pdev->dev, "[PM] SAP resume error: %d\n", ret); + + list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { + if (entity->resume) { + ret = entity->resume(t7xx_dev, entity->entity_param); + if (ret) + dev_err(&pdev->dev, "[PM] Resume entry ID: %d error: %d\n", + entity->id, ret); + } + } + + t7xx_dev->rgu_pci_irq_en = true; + t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT); + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); + pm_runtime_mark_last_busy(&pdev->dev); + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED); + + return ret; +} + +static int t7xx_pci_pm_resume_noirq(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct t7xx_pci_dev *t7xx_dev; + + t7xx_dev = pci_get_drvdata(pdev); + t7xx_pcie_mac_interrupts_dis(t7xx_dev); + + return 0; +} + +static void t7xx_pci_shutdown(struct pci_dev *pdev) +{ + __t7xx_pci_pm_suspend(pdev); +} + +static int t7xx_pci_pm_prepare(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct t7xx_pci_dev *t7xx_dev; + + t7xx_dev = pci_get_drvdata(pdev); + if (!wait_for_completion_timeout(&t7xx_dev->init_done, T7XX_INIT_TIMEOUT * HZ)) { + dev_warn(dev, "Not ready for system sleep.\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static int t7xx_pci_pm_suspend(struct device *dev) +{ + return __t7xx_pci_pm_suspend(to_pci_dev(dev)); +} + +static int t7xx_pci_pm_resume(struct device *dev) +{ + return __t7xx_pci_pm_resume(to_pci_dev(dev), true); +} + +static int t7xx_pci_pm_thaw(struct device *dev) +{ + return __t7xx_pci_pm_resume(to_pci_dev(dev), false); +} + +static int t7xx_pci_pm_runtime_suspend(struct device *dev) 
+{ + return __t7xx_pci_pm_suspend(to_pci_dev(dev)); +} + +static int t7xx_pci_pm_runtime_resume(struct device *dev) +{ + return __t7xx_pci_pm_resume(to_pci_dev(dev), true); +} + +static const struct dev_pm_ops t7xx_pci_pm_ops = { + .prepare = t7xx_pci_pm_prepare, + .suspend = t7xx_pci_pm_suspend, + .resume = t7xx_pci_pm_resume, + .resume_noirq = t7xx_pci_pm_resume_noirq, + .freeze = t7xx_pci_pm_suspend, + .thaw = t7xx_pci_pm_thaw, + .poweroff = t7xx_pci_pm_suspend, + .restore = t7xx_pci_pm_resume, + .restore_noirq = t7xx_pci_pm_resume_noirq, + .runtime_suspend = t7xx_pci_pm_runtime_suspend, + .runtime_resume = t7xx_pci_pm_runtime_resume +}; + +static int t7xx_request_irq(struct pci_dev *pdev) +{ + struct t7xx_pci_dev *t7xx_dev; + int ret = 0, i; + + t7xx_dev = pci_get_drvdata(pdev); + + for (i = 0; i < EXT_INT_NUM; i++) { + const char *irq_descr; + int irq_vec; + + if (!t7xx_dev->intr_handler[i]) + continue; + + irq_descr = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_%d", + dev_driver_string(&pdev->dev), i); + if (!irq_descr) { + ret = -ENOMEM; + break; + } + + irq_vec = pci_irq_vector(pdev, i); + ret = request_threaded_irq(irq_vec, t7xx_dev->intr_handler[i], + t7xx_dev->intr_thread[i], 0, irq_descr, + t7xx_dev->callback_param[i]); + if (ret) { + dev_err(&pdev->dev, "Failed to request IRQ: %d\n", ret); + break; + } + } + + if (ret) { + while (i--) { + if (!t7xx_dev->intr_handler[i]) + continue; + + free_irq(pci_irq_vector(pdev, i), t7xx_dev->callback_param[i]); + } + } + + return ret; +} + +static int t7xx_setup_msix(struct t7xx_pci_dev *t7xx_dev) +{ + struct pci_dev *pdev = t7xx_dev->pdev; + int ret; + + /* Only using 6 interrupts, but HW-design requires power-of-2 IRQs allocation */ + ret = pci_alloc_irq_vectors(pdev, EXT_INT_NUM, EXT_INT_NUM, PCI_IRQ_MSIX); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to allocate MSI-X entry: %d\n", ret); + return ret; + } + + ret = t7xx_request_irq(pdev); + if (ret) { + pci_free_irq_vectors(pdev); + return ret; + } + + t7xx_pcie_set_mac_msix_cfg(t7xx_dev, EXT_INT_NUM); + return 0; +} + +static int t7xx_interrupt_init(struct t7xx_pci_dev *t7xx_dev) +{ + int ret, i; + + if (!t7xx_dev->pdev->msix_cap) + return -EINVAL; + + ret = t7xx_setup_msix(t7xx_dev); + if (ret) + return ret; + + /* IPs enable interrupts when ready */ + for (i = 0; i < EXT_INT_NUM; i++) + t7xx_pcie_mac_set_int(t7xx_dev, i); + + return 0; +} + +static void t7xx_pci_infracfg_ao_calc(struct t7xx_pci_dev *t7xx_dev) +{ + t7xx_dev->base_addr.infracfg_ao_base = t7xx_dev->base_addr.pcie_ext_reg_base + + INFRACFG_AO_DEV_CHIP - + t7xx_dev->base_addr.pcie_dev_reg_trsl_addr; +} + +static int t7xx_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct t7xx_pci_dev *t7xx_dev; + int ret; + + t7xx_dev = devm_kzalloc(&pdev->dev, sizeof(*t7xx_dev), GFP_KERNEL); + if (!t7xx_dev) + return -ENOMEM; + + pci_set_drvdata(pdev, t7xx_dev); + t7xx_dev->pdev = pdev; + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + pci_set_master(pdev); + + ret = pcim_iomap_regions(pdev, BIT(T7XX_PCI_IREG_BASE) | BIT(T7XX_PCI_EREG_BASE), + pci_name(pdev)); + if (ret) { + dev_err(&pdev->dev, "Could not request BARs: %d\n", ret); + return -ENOMEM; + } + + ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); + if (ret) { + dev_err(&pdev->dev, "Could not set PCI DMA mask: %d\n", ret); + return ret; + } + + ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); + if (ret) { + dev_err(&pdev->dev, "Could not set consistent PCI DMA mask: %d\n", ret); + return ret; + } + + IREG_BASE(t7xx_dev) = 
pcim_iomap_table(pdev)[T7XX_PCI_IREG_BASE]; + t7xx_dev->base_addr.pcie_ext_reg_base = pcim_iomap_table(pdev)[T7XX_PCI_EREG_BASE]; + + ret = t7xx_pci_pm_init(t7xx_dev); + if (ret) + return ret; + + t7xx_pcie_mac_atr_init(t7xx_dev); + t7xx_pci_infracfg_ao_calc(t7xx_dev); + t7xx_mhccif_init(t7xx_dev); + + ret = t7xx_md_init(t7xx_dev); + if (ret) + return ret; + + t7xx_pcie_mac_interrupts_dis(t7xx_dev); + + ret = t7xx_interrupt_init(t7xx_dev); + if (ret) { + t7xx_md_exit(t7xx_dev); + return ret; + } + + t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT); + t7xx_pcie_mac_interrupts_en(t7xx_dev); + + return 0; +} + +static void t7xx_pci_remove(struct pci_dev *pdev) +{ + struct t7xx_pci_dev *t7xx_dev; + int i; + + t7xx_dev = pci_get_drvdata(pdev); + t7xx_md_exit(t7xx_dev); + + for (i = 0; i < EXT_INT_NUM; i++) { + if (!t7xx_dev->intr_handler[i]) + continue; + + free_irq(pci_irq_vector(pdev, i), t7xx_dev->callback_param[i]); + } + + pci_free_irq_vectors(t7xx_dev->pdev); +} + +static const struct pci_device_id t7xx_pci_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x4d75) }, + { } +}; +MODULE_DEVICE_TABLE(pci, t7xx_pci_table); + +static struct pci_driver t7xx_pci_driver = { + .name = "mtk_t7xx", + .id_table = t7xx_pci_table, + .probe = t7xx_pci_probe, + .remove = t7xx_pci_remove, + .driver.pm = &t7xx_pci_pm_ops, + .shutdown = t7xx_pci_shutdown, +}; + +module_pci_driver(t7xx_pci_driver); + +MODULE_AUTHOR("MediaTek Inc"); +MODULE_DESCRIPTION("MediaTek PCIe 5G WWAN modem T7xx driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/wwan/t7xx/t7xx_pci.h b/drivers/net/wwan/t7xx/t7xx_pci.h new file mode 100644 index 000000000..5dffe24ef --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_pci.h @@ -0,0 +1,121 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. 
+ * + * Authors: + * Haijun Liu + * Ricardo Martinez + * Sreehari Kancharla + * + * Contributors: + * Amir Hanania + * Chiranjeevi Rapolu + * Moises Veleta + */ + +#ifndef __T7XX_PCI_H__ +#define __T7XX_PCI_H__ + +#include +#include +#include +#include +#include +#include + +#include "t7xx_reg.h" + +/* struct t7xx_addr_base - holds base addresses + * @pcie_mac_ireg_base: PCIe MAC register base + * @pcie_ext_reg_base: used to calculate base addresses for CLDMA, DPMA and MHCCIF registers + * @pcie_dev_reg_trsl_addr: used to calculate the register base address + * @infracfg_ao_base: base address used in CLDMA reset operations + * @mhccif_rc_base: host view of MHCCIF rc base addr + */ +struct t7xx_addr_base { + void __iomem *pcie_mac_ireg_base; + void __iomem *pcie_ext_reg_base; + u32 pcie_dev_reg_trsl_addr; + void __iomem *infracfg_ao_base; + void __iomem *mhccif_rc_base; +}; + +typedef irqreturn_t (*t7xx_intr_callback)(int irq, void *param); + +/* struct t7xx_pci_dev - MTK device context structure + * @intr_handler: array of handler function for request_threaded_irq + * @intr_thread: array of thread_fn for request_threaded_irq + * @callback_param: array of cookie passed back to interrupt functions + * @pdev: PCI device + * @base_addr: memory base addresses of HW components + * @md: modem interface + * @ccmni_ctlb: context structure used to control the network data path + * @rgu_pci_irq_en: RGU callback ISR registered and active + * @md_pm_entities: list of pm entities + * @md_pm_entity_mtx: protects md_pm_entities list + * @pm_sr_ack: ack from the device when went to sleep or woke up + * @md_pm_state: state for resume/suspend + * @md_pm_lock: protects PCIe sleep lock + * @sleep_disable_count: PCIe L1.2 lock counter + * @sleep_lock_acquire: indicates that sleep has been disabled + */ +struct t7xx_pci_dev { + t7xx_intr_callback intr_handler[EXT_INT_NUM]; + t7xx_intr_callback intr_thread[EXT_INT_NUM]; + void *callback_param[EXT_INT_NUM]; + struct pci_dev *pdev; + struct t7xx_addr_base base_addr; + struct t7xx_modem *md; + struct t7xx_ccmni_ctrl *ccmni_ctlb; + bool rgu_pci_irq_en; + struct completion init_done; + + /* Low Power Items */ + struct list_head md_pm_entities; + struct mutex md_pm_entity_mtx; /* Protects MD PM entities list */ + struct completion pm_sr_ack; + atomic_t md_pm_state; + spinlock_t md_pm_lock; /* Protects PCI resource lock */ + unsigned int sleep_disable_count; + struct completion sleep_lock_acquire; +}; + +enum t7xx_pm_id { + PM_ENTITY_ID_CTRL1, + PM_ENTITY_ID_CTRL2, + PM_ENTITY_ID_DATA, + PM_ENTITY_ID_INVALID +}; + +/* struct md_pm_entity - device power management entity + * @entity: list of PM Entities + * @suspend: callback invoked before sending D3 request to device + * @suspend_late: callback invoked after getting D3 ACK from device + * @resume_early: callback invoked before sending the resume request to device + * @resume: callback invoked after getting resume ACK from device + * @id: unique PM entity identifier + * @entity_param: parameter passed to the registered callbacks + * + * This structure is used to indicate PM operations required by internal + * HW modules such as CLDMA and DPMA. 
+ */ +struct md_pm_entity { + struct list_head entity; + int (*suspend)(struct t7xx_pci_dev *t7xx_dev, void *entity_param); + void (*suspend_late)(struct t7xx_pci_dev *t7xx_dev, void *entity_param); + void (*resume_early)(struct t7xx_pci_dev *t7xx_dev, void *entity_param); + int (*resume)(struct t7xx_pci_dev *t7xx_dev, void *entity_param); + enum t7xx_pm_id id; + void *entity_param; +}; + +void t7xx_pci_disable_sleep(struct t7xx_pci_dev *t7xx_dev); +void t7xx_pci_enable_sleep(struct t7xx_pci_dev *t7xx_dev); +int t7xx_pci_sleep_disable_complete(struct t7xx_pci_dev *t7xx_dev); +int t7xx_pci_pm_entity_register(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity); +int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity); +void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev); +void t7xx_pci_pm_exp_detected(struct t7xx_pci_dev *t7xx_dev); + +#endif /* __T7XX_PCI_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_pcie_mac.c b/drivers/net/wwan/t7xx/t7xx_pcie_mac.c new file mode 100644 index 000000000..76da4c15e --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_pcie_mac.c @@ -0,0 +1,262 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Haijun Liu + * Moises Veleta + * Sreehari Kancharla + * + * Contributors: + * Amir Hanania + * Chiranjeevi Rapolu + * Ricardo Martinez + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_pci.h" +#include "t7xx_pcie_mac.h" +#include "t7xx_reg.h" + +#define T7XX_PCIE_REG_BAR 2 +#define T7XX_PCIE_REG_PORT ATR_SRC_PCI_WIN0 +#define T7XX_PCIE_REG_TABLE_NUM 0 +#define T7XX_PCIE_REG_TRSL_PORT ATR_DST_AXIM_0 + +#define T7XX_PCIE_DEV_DMA_PORT_START ATR_SRC_AXIS_0 +#define T7XX_PCIE_DEV_DMA_PORT_END ATR_SRC_AXIS_2 +#define T7XX_PCIE_DEV_DMA_TABLE_NUM 0 +#define T7XX_PCIE_DEV_DMA_TRSL_ADDR 0 +#define T7XX_PCIE_DEV_DMA_SRC_ADDR 0 +#define T7XX_PCIE_DEV_DMA_TRANSPARENT 1 +#define T7XX_PCIE_DEV_DMA_SIZE 0 + +enum t7xx_atr_src_port { + ATR_SRC_PCI_WIN0, + ATR_SRC_PCI_WIN1, + ATR_SRC_AXIS_0, + ATR_SRC_AXIS_1, + ATR_SRC_AXIS_2, + ATR_SRC_AXIS_3, +}; + +enum t7xx_atr_dst_port { + ATR_DST_PCI_TRX, + ATR_DST_PCI_CONFIG, + ATR_DST_AXIM_0 = 4, + ATR_DST_AXIM_1, + ATR_DST_AXIM_2, + ATR_DST_AXIM_3, +}; + +struct t7xx_atr_config { + u64 src_addr; + u64 trsl_addr; + u64 size; + u32 port; + u32 table; + enum t7xx_atr_dst_port trsl_id; + u32 transparent; +}; + +static void t7xx_pcie_mac_atr_tables_dis(void __iomem *pbase, enum t7xx_atr_src_port port) +{ + void __iomem *reg; + int i, offset; + + for (i = 0; i < ATR_TABLE_NUM_PER_ATR; i++) { + offset = ATR_PORT_OFFSET * port + ATR_TABLE_OFFSET * i; + reg = pbase + ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR + offset; + iowrite64(0, reg); + } +} + +static int t7xx_pcie_mac_atr_cfg(struct t7xx_pci_dev *t7xx_dev, struct t7xx_atr_config *cfg) +{ + struct device *dev = &t7xx_dev->pdev->dev; + void __iomem *pbase = IREG_BASE(t7xx_dev); + int atr_size, pos, offset; + void __iomem *reg; + u64 value; + + if (cfg->transparent) { + /* No address conversion is performed */ + atr_size = ATR_TRANSPARENT_SIZE; + } else { + if (cfg->src_addr & (cfg->size - 1)) { + dev_err(dev, "Source address is not aligned to size\n"); + return -EINVAL; + } + + if (cfg->trsl_addr & (cfg->size - 1)) { + dev_err(dev, "Translation address %llx is not aligned to size %llx\n", + cfg->trsl_addr, cfg->size - 1); + return -EINVAL; + } + + pos = __ffs64(cfg->size); + + /* HW calculates the address 
translation space as 2^(atr_size + 1) */ + atr_size = pos - 1; + } + + offset = ATR_PORT_OFFSET * cfg->port + ATR_TABLE_OFFSET * cfg->table; + + reg = pbase + ATR_PCIE_WIN0_T0_TRSL_ADDR + offset; + value = cfg->trsl_addr & ATR_PCIE_WIN0_ADDR_ALGMT; + iowrite64(value, reg); + + reg = pbase + ATR_PCIE_WIN0_T0_TRSL_PARAM + offset; + iowrite32(cfg->trsl_id, reg); + + reg = pbase + ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR + offset; + value = (cfg->src_addr & ATR_PCIE_WIN0_ADDR_ALGMT) | (atr_size << 1) | BIT(0); + iowrite64(value, reg); + + /* Ensure ATR is set */ + ioread64(reg); + return 0; +} + +/** + * t7xx_pcie_mac_atr_init() - Initialize address translation. + * @t7xx_dev: MTK device. + * + * Setup ATR for ports & device. + */ +void t7xx_pcie_mac_atr_init(struct t7xx_pci_dev *t7xx_dev) +{ + struct t7xx_atr_config cfg; + u32 i; + + /* Disable for all ports */ + for (i = ATR_SRC_PCI_WIN0; i <= ATR_SRC_AXIS_3; i++) + t7xx_pcie_mac_atr_tables_dis(IREG_BASE(t7xx_dev), i); + + memset(&cfg, 0, sizeof(cfg)); + /* Config ATR for RC to access device's register */ + cfg.src_addr = pci_resource_start(t7xx_dev->pdev, T7XX_PCIE_REG_BAR); + cfg.size = T7XX_PCIE_REG_SIZE_CHIP; + cfg.trsl_addr = T7XX_PCIE_REG_TRSL_ADDR_CHIP; + cfg.port = T7XX_PCIE_REG_PORT; + cfg.table = T7XX_PCIE_REG_TABLE_NUM; + cfg.trsl_id = T7XX_PCIE_REG_TRSL_PORT; + t7xx_pcie_mac_atr_tables_dis(IREG_BASE(t7xx_dev), cfg.port); + t7xx_pcie_mac_atr_cfg(t7xx_dev, &cfg); + + t7xx_dev->base_addr.pcie_dev_reg_trsl_addr = T7XX_PCIE_REG_TRSL_ADDR_CHIP; + + /* Config ATR for EP to access RC's memory */ + for (i = T7XX_PCIE_DEV_DMA_PORT_START; i <= T7XX_PCIE_DEV_DMA_PORT_END; i++) { + cfg.src_addr = T7XX_PCIE_DEV_DMA_SRC_ADDR; + cfg.size = T7XX_PCIE_DEV_DMA_SIZE; + cfg.trsl_addr = T7XX_PCIE_DEV_DMA_TRSL_ADDR; + cfg.port = i; + cfg.table = T7XX_PCIE_DEV_DMA_TABLE_NUM; + cfg.trsl_id = ATR_DST_PCI_TRX; + cfg.transparent = T7XX_PCIE_DEV_DMA_TRANSPARENT; + t7xx_pcie_mac_atr_tables_dis(IREG_BASE(t7xx_dev), cfg.port); + t7xx_pcie_mac_atr_cfg(t7xx_dev, &cfg); + } +} + +/** + * t7xx_pcie_mac_enable_disable_int() - Enable/disable interrupts. + * @t7xx_dev: MTK device. + * @enable: Enable/disable. + * + * Enable or disable device interrupts. + */ +static void t7xx_pcie_mac_enable_disable_int(struct t7xx_pci_dev *t7xx_dev, bool enable) +{ + u32 value; + + value = ioread32(IREG_BASE(t7xx_dev) + ISTAT_HST_CTRL); + + if (enable) + value &= ~ISTAT_HST_CTRL_DIS; + else + value |= ISTAT_HST_CTRL_DIS; + + iowrite32(value, IREG_BASE(t7xx_dev) + ISTAT_HST_CTRL); +} + +void t7xx_pcie_mac_interrupts_en(struct t7xx_pci_dev *t7xx_dev) +{ + t7xx_pcie_mac_enable_disable_int(t7xx_dev, true); +} + +void t7xx_pcie_mac_interrupts_dis(struct t7xx_pci_dev *t7xx_dev) +{ + t7xx_pcie_mac_enable_disable_int(t7xx_dev, false); +} + +/** + * t7xx_pcie_mac_clear_set_int() - Clear/set interrupt by type. + * @t7xx_dev: MTK device. + * @int_type: Interrupt type. + * @clear: Clear/set. + * + * Clear or set device interrupt by type. 
+ */ +static void t7xx_pcie_mac_clear_set_int(struct t7xx_pci_dev *t7xx_dev, + enum t7xx_int int_type, bool clear) +{ + void __iomem *reg; + u32 val; + + if (clear) + reg = IREG_BASE(t7xx_dev) + IMASK_HOST_MSIX_CLR_GRP0_0; + else + reg = IREG_BASE(t7xx_dev) + IMASK_HOST_MSIX_SET_GRP0_0; + + val = BIT(EXT_INT_START + int_type); + iowrite32(val, reg); +} + +void t7xx_pcie_mac_clear_int(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type) +{ + t7xx_pcie_mac_clear_set_int(t7xx_dev, int_type, true); +} + +void t7xx_pcie_mac_set_int(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type) +{ + t7xx_pcie_mac_clear_set_int(t7xx_dev, int_type, false); +} + +/** + * t7xx_pcie_mac_clear_int_status() - Clear interrupt status by type. + * @t7xx_dev: MTK device. + * @int_type: Interrupt type. + * + * Clear the device interrupt status bit for the given interrupt type. + */ +void t7xx_pcie_mac_clear_int_status(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type) +{ + void __iomem *reg = IREG_BASE(t7xx_dev) + MSIX_ISTAT_HST_GRP0_0; + u32 val = BIT(EXT_INT_START + int_type); + + iowrite32(val, reg); +} + +/** + * t7xx_pcie_set_mac_msix_cfg() - Write MSIX control configuration. + * @t7xx_dev: MTK device. + * @irq_count: Number of MSIX IRQ vectors. + * + * Write IRQ count to device. + */ +void t7xx_pcie_set_mac_msix_cfg(struct t7xx_pci_dev *t7xx_dev, unsigned int irq_count) +{ + u32 val = ffs(irq_count) * 2 - 1; + + iowrite32(val, IREG_BASE(t7xx_dev) + T7XX_PCIE_CFG_MSIX); +} diff --git a/drivers/net/wwan/t7xx/t7xx_pcie_mac.h b/drivers/net/wwan/t7xx/t7xx_pcie_mac.h new file mode 100644 index 000000000..50336fa7e --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_pcie_mac.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Haijun Liu + * Sreehari Kancharla + * + * Contributors: + * Moises Veleta + * Ricardo Martinez + */ + +#ifndef __T7XX_PCIE_MAC_H__ +#define __T7XX_PCIE_MAC_H__ + +#include "t7xx_pci.h" +#include "t7xx_reg.h" + +#define IREG_BASE(t7xx_dev) ((t7xx_dev)->base_addr.pcie_mac_ireg_base) + +void t7xx_pcie_mac_interrupts_en(struct t7xx_pci_dev *t7xx_dev); +void t7xx_pcie_mac_interrupts_dis(struct t7xx_pci_dev *t7xx_dev); +void t7xx_pcie_mac_atr_init(struct t7xx_pci_dev *t7xx_dev); +void t7xx_pcie_mac_clear_int(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type); +void t7xx_pcie_mac_set_int(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type); +void t7xx_pcie_mac_clear_int_status(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type); +void t7xx_pcie_set_mac_msix_cfg(struct t7xx_pci_dev *t7xx_dev, unsigned int irq_count); + +#endif /* __T7XX_PCIE_MAC_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_port.h b/drivers/net/wwan/t7xx/t7xx_port.h new file mode 100644 index 000000000..dc4133eb4 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_port.h @@ -0,0 +1,135 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Haijun Liu + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Amir Hanania + * Andy Shevchenko + * Chandrashekar Devegowda + * Eliot Lee + */ + +#ifndef __T7XX_PORT_H__ +#define __T7XX_PORT_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_hif_cldma.h" +#include "t7xx_pci.h" + +#define PORT_CH_ID_MASK GENMASK(7, 0) + +/* Channel ID and Message ID definitions.
+ * The channel number consists of peer_id(15:12) , channel_id(11:0) + * peer_id: + * 0:reserved, 1: to sAP, 2: to MD + */ +enum port_ch { + /* to MD */ + PORT_CH_CONTROL_RX = 0x2000, + PORT_CH_CONTROL_TX = 0x2001, + PORT_CH_UART1_RX = 0x2006, /* META */ + PORT_CH_UART1_TX = 0x2008, + PORT_CH_UART2_RX = 0x200a, /* AT */ + PORT_CH_UART2_TX = 0x200c, + PORT_CH_MD_LOG_RX = 0x202a, /* MD logging */ + PORT_CH_MD_LOG_TX = 0x202b, + PORT_CH_LB_IT_RX = 0x203e, /* Loop back test */ + PORT_CH_LB_IT_TX = 0x203f, + PORT_CH_STATUS_RX = 0x2043, /* Status events */ + PORT_CH_MIPC_RX = 0x20ce, /* MIPC */ + PORT_CH_MIPC_TX = 0x20cf, + PORT_CH_MBIM_RX = 0x20d0, + PORT_CH_MBIM_TX = 0x20d1, + PORT_CH_DSS0_RX = 0x20d2, + PORT_CH_DSS0_TX = 0x20d3, + PORT_CH_DSS1_RX = 0x20d4, + PORT_CH_DSS1_TX = 0x20d5, + PORT_CH_DSS2_RX = 0x20d6, + PORT_CH_DSS2_TX = 0x20d7, + PORT_CH_DSS3_RX = 0x20d8, + PORT_CH_DSS3_TX = 0x20d9, + PORT_CH_DSS4_RX = 0x20da, + PORT_CH_DSS4_TX = 0x20db, + PORT_CH_DSS5_RX = 0x20dc, + PORT_CH_DSS5_TX = 0x20dd, + PORT_CH_DSS6_RX = 0x20de, + PORT_CH_DSS6_TX = 0x20df, + PORT_CH_DSS7_RX = 0x20e0, + PORT_CH_DSS7_TX = 0x20e1, +}; + +struct t7xx_port; +struct port_ops { + int (*init)(struct t7xx_port *port); + int (*recv_skb)(struct t7xx_port *port, struct sk_buff *skb); + void (*md_state_notify)(struct t7xx_port *port, unsigned int md_state); + void (*uninit)(struct t7xx_port *port); + int (*enable_chl)(struct t7xx_port *port); + int (*disable_chl)(struct t7xx_port *port); +}; + +struct t7xx_port_conf { + enum port_ch tx_ch; + enum port_ch rx_ch; + unsigned char txq_index; + unsigned char rxq_index; + unsigned char txq_exp_index; + unsigned char rxq_exp_index; + enum cldma_id path_id; + struct port_ops *ops; + char *name; + enum wwan_port_type port_type; +}; + +struct t7xx_port { + /* Members not initialized in definition */ + const struct t7xx_port_conf *port_conf; + struct wwan_port *wwan_port; + struct t7xx_pci_dev *t7xx_dev; + struct device *dev; + u16 seq_nums[2]; /* TX/RX sequence numbers */ + atomic_t usage_cnt; + struct list_head entry; + struct list_head queue_entry; + /* TX and RX flows are asymmetric since ports are multiplexed on + * queues. + * + * TX: data blocks are sent directly to a queue. Each port + * does not maintain a TX list; instead, they only provide + * a wait_queue_head for blocking writes. + * + * RX: Each port uses a RX list to hold packets, + * allowing the modem to dispatch RX packet as quickly as possible. + */ + struct sk_buff_head rx_skb_list; + spinlock_t port_update_lock; /* Protects port configuration */ + wait_queue_head_t rx_wq; + int rx_length_th; + bool chan_enable; + struct task_struct *thread; +}; + +struct sk_buff *t7xx_port_alloc_skb(int payload); +struct sk_buff *t7xx_ctrl_alloc_skb(int payload); +int t7xx_port_enqueue_skb(struct t7xx_port *port, struct sk_buff *skb); +int t7xx_port_send_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int pkt_header, + unsigned int ex_msg); +int t7xx_port_send_ctl_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int msg, + unsigned int ex_msg); + +#endif /* __T7XX_PORT_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c b/drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c new file mode 100644 index 000000000..68430b130 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c @@ -0,0 +1,273 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. 
+ * + * Authors: + * Haijun Liu + * Ricardo Martinez + * Moises Veleta + * + * Contributors: + * Amir Hanania + * Chiranjeevi Rapolu + * Eliot Lee + * Sreehari Kancharla + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_port.h" +#include "t7xx_port_proxy.h" +#include "t7xx_state_monitor.h" + +#define PORT_MSG_VERSION GENMASK(31, 16) +#define PORT_MSG_PRT_CNT GENMASK(15, 0) + +struct port_msg { + __le32 head_pattern; + __le32 info; + __le32 tail_pattern; + __le32 data[]; +}; + +static int port_ctl_send_msg_to_md(struct t7xx_port *port, unsigned int msg, unsigned int ex_msg) +{ + struct sk_buff *skb; + int ret; + + skb = t7xx_ctrl_alloc_skb(0); + if (!skb) + return -ENOMEM; + + ret = t7xx_port_send_ctl_skb(port, skb, msg, ex_msg); + if (ret) + dev_kfree_skb_any(skb); + + return ret; +} + +static int fsm_ee_message_handler(struct t7xx_port *port, struct t7xx_fsm_ctl *ctl, + struct sk_buff *skb) +{ + struct ctrl_msg_header *ctrl_msg_h = (struct ctrl_msg_header *)skb->data; + struct device *dev = &ctl->md->t7xx_dev->pdev->dev; + enum md_state md_state; + int ret = -EINVAL; + + md_state = t7xx_fsm_get_md_state(ctl); + if (md_state != MD_STATE_EXCEPTION) { + dev_err(dev, "Receive invalid MD_EX %x when MD state is %d\n", + ctrl_msg_h->ex_msg, md_state); + return -EINVAL; + } + + switch (le32_to_cpu(ctrl_msg_h->ctrl_msg_id)) { + case CTL_ID_MD_EX: + if (le32_to_cpu(ctrl_msg_h->ex_msg) != MD_EX_CHK_ID) { + dev_err(dev, "Receive invalid MD_EX %x\n", ctrl_msg_h->ex_msg); + break; + } + + ret = port_ctl_send_msg_to_md(port, CTL_ID_MD_EX, MD_EX_CHK_ID); + if (ret) { + dev_err(dev, "Failed to send exception message to modem\n"); + break; + } + + ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_EX, NULL, 0); + if (ret) + dev_err(dev, "Failed to append Modem Exception event"); + + break; + + case CTL_ID_MD_EX_ACK: + if (le32_to_cpu(ctrl_msg_h->ex_msg) != MD_EX_CHK_ACK_ID) { + dev_err(dev, "Receive invalid MD_EX_ACK %x\n", ctrl_msg_h->ex_msg); + break; + } + + ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_EX_REC_OK, NULL, 0); + if (ret) + dev_err(dev, "Failed to append Modem Exception Received event"); + + break; + + case CTL_ID_MD_EX_PASS: + ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_EX_PASS, NULL, 0); + if (ret) + dev_err(dev, "Failed to append Modem Exception Passed event"); + + break; + + case CTL_ID_DRV_VER_ERROR: + dev_err(dev, "AP/MD driver version mismatch\n"); + } + + return ret; +} + +/** + * t7xx_port_enum_msg_handler() - Parse the port enumeration message to create/remove nodes. + * @md: Modem context. + * @msg: Message. + * + * Used to control create/remove device node. + * + * Return: + * * 0 - Success. + * * -EFAULT - Message check failure. 
+ */ +int t7xx_port_enum_msg_handler(struct t7xx_modem *md, void *msg) +{ + struct device *dev = &md->t7xx_dev->pdev->dev; + unsigned int version, port_count, i; + struct port_msg *port_msg = msg; + + version = FIELD_GET(PORT_MSG_VERSION, le32_to_cpu(port_msg->info)); + if (version != PORT_ENUM_VER || + le32_to_cpu(port_msg->head_pattern) != PORT_ENUM_HEAD_PATTERN || + le32_to_cpu(port_msg->tail_pattern) != PORT_ENUM_TAIL_PATTERN) { + dev_err(dev, "Invalid port control message %x:%x:%x\n", + version, le32_to_cpu(port_msg->head_pattern), + le32_to_cpu(port_msg->tail_pattern)); + return -EFAULT; + } + + port_count = FIELD_GET(PORT_MSG_PRT_CNT, le32_to_cpu(port_msg->info)); + for (i = 0; i < port_count; i++) { + u32 port_info = le32_to_cpu(port_msg->data[i]); + unsigned int ch_id; + bool en_flag; + + ch_id = FIELD_GET(PORT_INFO_CH_ID, port_info); + en_flag = port_info & PORT_INFO_ENFLG; + if (t7xx_port_proxy_chl_enable_disable(md->port_prox, ch_id, en_flag)) + dev_dbg(dev, "Port:%x not found\n", ch_id); + } + + return 0; +} + +static int control_msg_handler(struct t7xx_port *port, struct sk_buff *skb) +{ + const struct t7xx_port_conf *port_conf = port->port_conf; + struct t7xx_fsm_ctl *ctl = port->t7xx_dev->md->fsm_ctl; + struct ctrl_msg_header *ctrl_msg_h; + int ret = 0; + + ctrl_msg_h = (struct ctrl_msg_header *)skb->data; + switch (le32_to_cpu(ctrl_msg_h->ctrl_msg_id)) { + case CTL_ID_HS2_MSG: + skb_pull(skb, sizeof(*ctrl_msg_h)); + + if (port_conf->rx_ch == PORT_CH_CONTROL_RX) { + ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2, skb->data, + le32_to_cpu(ctrl_msg_h->data_length)); + if (ret) + dev_err(port->dev, "Failed to append Handshake 2 event"); + } + + dev_kfree_skb_any(skb); + break; + + case CTL_ID_MD_EX: + case CTL_ID_MD_EX_ACK: + case CTL_ID_MD_EX_PASS: + case CTL_ID_DRV_VER_ERROR: + ret = fsm_ee_message_handler(port, ctl, skb); + dev_kfree_skb_any(skb); + break; + + case CTL_ID_PORT_ENUM: + skb_pull(skb, sizeof(*ctrl_msg_h)); + ret = t7xx_port_enum_msg_handler(ctl->md, (struct port_msg *)skb->data); + if (!ret) + ret = port_ctl_send_msg_to_md(port, CTL_ID_PORT_ENUM, 0); + else + ret = port_ctl_send_msg_to_md(port, CTL_ID_PORT_ENUM, + PORT_ENUM_VER_MISMATCH); + + break; + + default: + ret = -EINVAL; + dev_err(port->dev, "Unknown control message ID to FSM %x\n", + le32_to_cpu(ctrl_msg_h->ctrl_msg_id)); + break; + } + + if (ret) + dev_err(port->dev, "%s control message handle error: %d\n", port_conf->name, ret); + + return ret; +} + +static int port_ctl_rx_thread(void *arg) +{ + while (!kthread_should_stop()) { + struct t7xx_port *port = arg; + struct sk_buff *skb; + unsigned long flags; + + spin_lock_irqsave(&port->rx_wq.lock, flags); + if (skb_queue_empty(&port->rx_skb_list) && + wait_event_interruptible_locked_irq(port->rx_wq, + !skb_queue_empty(&port->rx_skb_list) || + kthread_should_stop())) { + spin_unlock_irqrestore(&port->rx_wq.lock, flags); + continue; + } + if (kthread_should_stop()) { + spin_unlock_irqrestore(&port->rx_wq.lock, flags); + break; + } + skb = __skb_dequeue(&port->rx_skb_list); + spin_unlock_irqrestore(&port->rx_wq.lock, flags); + + control_msg_handler(port, skb); + } + + return 0; +} + +static int port_ctl_init(struct t7xx_port *port) +{ + const struct t7xx_port_conf *port_conf = port->port_conf; + + port->thread = kthread_run(port_ctl_rx_thread, port, "%s", port_conf->name); + if (IS_ERR(port->thread)) { + dev_err(port->dev, "Failed to start port control thread\n"); + return PTR_ERR(port->thread); + } + + port->rx_length_th = CTRL_QUEUE_MAXLEN; + return 
0; +} + +static void port_ctl_uninit(struct t7xx_port *port) +{ + unsigned long flags; + struct sk_buff *skb; + + if (port->thread) + kthread_stop(port->thread); + + spin_lock_irqsave(&port->rx_wq.lock, flags); + port->rx_length_th = 0; + while ((skb = __skb_dequeue(&port->rx_skb_list)) != NULL) + dev_kfree_skb_any(skb); + spin_unlock_irqrestore(&port->rx_wq.lock, flags); +} + +struct port_ops ctl_port_ops = { + .init = port_ctl_init, + .recv_skb = t7xx_port_enqueue_skb, + .uninit = port_ctl_uninit, +}; diff --git a/drivers/net/wwan/t7xx/t7xx_port_proxy.c b/drivers/net/wwan/t7xx/t7xx_port_proxy.c new file mode 100644 index 000000000..d4de047ff --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.c @@ -0,0 +1,509 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Amir Hanania + * Haijun Liu + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Andy Shevchenko + * Chandrashekar Devegowda + * Chiranjeevi Rapolu + * Eliot Lee + * Sreehari Kancharla + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_hif_cldma.h" +#include "t7xx_modem_ops.h" +#include "t7xx_port.h" +#include "t7xx_port_proxy.h" +#include "t7xx_state_monitor.h" + +#define Q_IDX_CTRL 0 +#define Q_IDX_MBIM 2 +#define Q_IDX_AT_CMD 5 + +#define INVALID_SEQ_NUM GENMASK(15, 0) + +#define for_each_proxy_port(i, p, proxy) \ + for (i = 0, (p) = &(proxy)->ports[i]; \ + i < (proxy)->port_count; \ + i++, (p) = &(proxy)->ports[i]) + +static const struct t7xx_port_conf t7xx_md_port_conf[] = { + { + .tx_ch = PORT_CH_UART2_TX, + .rx_ch = PORT_CH_UART2_RX, + .txq_index = Q_IDX_AT_CMD, + .rxq_index = Q_IDX_AT_CMD, + .txq_exp_index = 0xff, + .rxq_exp_index = 0xff, + .path_id = CLDMA_ID_MD, + .ops = &wwan_sub_port_ops, + .name = "AT", + .port_type = WWAN_PORT_AT, + }, { + .tx_ch = PORT_CH_MBIM_TX, + .rx_ch = PORT_CH_MBIM_RX, + .txq_index = Q_IDX_MBIM, + .rxq_index = Q_IDX_MBIM, + .path_id = CLDMA_ID_MD, + .ops = &wwan_sub_port_ops, + .name = "MBIM", + .port_type = WWAN_PORT_MBIM, + }, { + .tx_ch = PORT_CH_CONTROL_TX, + .rx_ch = PORT_CH_CONTROL_RX, + .txq_index = Q_IDX_CTRL, + .rxq_index = Q_IDX_CTRL, + .path_id = CLDMA_ID_MD, + .ops = &ctl_port_ops, + .name = "t7xx_ctrl", + }, +}; + +static struct t7xx_port *t7xx_proxy_get_port_by_ch(struct port_proxy *port_prox, enum port_ch ch) +{ + const struct t7xx_port_conf *port_conf; + struct t7xx_port *port; + int i; + + for_each_proxy_port(i, port, port_prox) { + port_conf = port->port_conf; + if (port_conf->rx_ch == ch || port_conf->tx_ch == ch) + return port; + } + + return NULL; +} + +static u16 t7xx_port_next_rx_seq_num(struct t7xx_port *port, struct ccci_header *ccci_h) +{ + u32 status = le32_to_cpu(ccci_h->status); + u16 seq_num, next_seq_num; + bool assert_bit; + + seq_num = FIELD_GET(CCCI_H_SEQ_FLD, status); + next_seq_num = (seq_num + 1) & FIELD_MAX(CCCI_H_SEQ_FLD); + assert_bit = status & CCCI_H_AST_BIT; + if (!assert_bit || port->seq_nums[MTK_RX] == INVALID_SEQ_NUM) + return next_seq_num; + + if (seq_num != port->seq_nums[MTK_RX]) + dev_warn_ratelimited(port->dev, + "seq num out-of-order %u != %u (header %X, len %X)\n", + seq_num, port->seq_nums[MTK_RX], + le32_to_cpu(ccci_h->packet_header), + le32_to_cpu(ccci_h->packet_len)); + + return next_seq_num; +} + +void t7xx_port_proxy_reset(struct port_proxy *port_prox) +{ + struct t7xx_port *port; + int i; + + 
for_each_proxy_port(i, port, port_prox) { + port->seq_nums[MTK_RX] = INVALID_SEQ_NUM; + port->seq_nums[MTK_TX] = 0; + } +} + +static int t7xx_port_get_queue_no(struct t7xx_port *port) +{ + const struct t7xx_port_conf *port_conf = port->port_conf; + struct t7xx_fsm_ctl *ctl = port->t7xx_dev->md->fsm_ctl; + + return t7xx_fsm_get_md_state(ctl) == MD_STATE_EXCEPTION ? + port_conf->txq_exp_index : port_conf->txq_index; +} + +static void t7xx_port_struct_init(struct t7xx_port *port) +{ + INIT_LIST_HEAD(&port->entry); + INIT_LIST_HEAD(&port->queue_entry); + skb_queue_head_init(&port->rx_skb_list); + init_waitqueue_head(&port->rx_wq); + port->seq_nums[MTK_RX] = INVALID_SEQ_NUM; + port->seq_nums[MTK_TX] = 0; + atomic_set(&port->usage_cnt, 0); +} + +struct sk_buff *t7xx_port_alloc_skb(int payload) +{ + struct sk_buff *skb = __dev_alloc_skb(payload + sizeof(struct ccci_header), GFP_KERNEL); + + if (skb) + skb_reserve(skb, sizeof(struct ccci_header)); + + return skb; +} + +struct sk_buff *t7xx_ctrl_alloc_skb(int payload) +{ + struct sk_buff *skb = t7xx_port_alloc_skb(payload + sizeof(struct ctrl_msg_header)); + + if (skb) + skb_reserve(skb, sizeof(struct ctrl_msg_header)); + + return skb; +} + +/** + * t7xx_port_enqueue_skb() - Enqueue the received skb into the port's rx_skb_list. + * @port: port context. + * @skb: received skb. + * + * Return: + * * 0 - Success. + * * -ENOBUFS - Not enough buffer space. Caller will try again later, skb is not consumed. + */ +int t7xx_port_enqueue_skb(struct t7xx_port *port, struct sk_buff *skb) +{ + unsigned long flags; + + spin_lock_irqsave(&port->rx_wq.lock, flags); + if (port->rx_skb_list.qlen >= port->rx_length_th) { + spin_unlock_irqrestore(&port->rx_wq.lock, flags); + + return -ENOBUFS; + } + __skb_queue_tail(&port->rx_skb_list, skb); + spin_unlock_irqrestore(&port->rx_wq.lock, flags); + + wake_up_all(&port->rx_wq); + return 0; +} + +static int t7xx_port_send_raw_skb(struct t7xx_port *port, struct sk_buff *skb) +{ + enum cldma_id path_id = port->port_conf->path_id; + struct cldma_ctrl *md_ctrl; + int ret, tx_qno; + + md_ctrl = port->t7xx_dev->md->md_ctrl[path_id]; + tx_qno = t7xx_port_get_queue_no(port); + ret = t7xx_cldma_send_skb(md_ctrl, tx_qno, skb); + if (ret) + dev_err(port->dev, "Failed to send skb: %d\n", ret); + + return ret; +} + +static int t7xx_port_send_ccci_skb(struct t7xx_port *port, struct sk_buff *skb, + unsigned int pkt_header, unsigned int ex_msg) +{ + const struct t7xx_port_conf *port_conf = port->port_conf; + struct ccci_header *ccci_h; + u32 status; + int ret; + + ccci_h = skb_push(skb, sizeof(*ccci_h)); + status = FIELD_PREP(CCCI_H_CHN_FLD, port_conf->tx_ch) | + FIELD_PREP(CCCI_H_SEQ_FLD, port->seq_nums[MTK_TX]) | CCCI_H_AST_BIT; + ccci_h->status = cpu_to_le32(status); + ccci_h->packet_header = cpu_to_le32(pkt_header); + ccci_h->packet_len = cpu_to_le32(skb->len); + ccci_h->ex_msg = cpu_to_le32(ex_msg); + + ret = t7xx_port_send_raw_skb(port, skb); + if (ret) + return ret; + + port->seq_nums[MTK_TX]++; + return 0; +} + +int t7xx_port_send_ctl_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int msg, + unsigned int ex_msg) +{ + struct ctrl_msg_header *ctrl_msg_h; + unsigned int msg_len = skb->len; + u32 pkt_header = 0; + + ctrl_msg_h = skb_push(skb, sizeof(*ctrl_msg_h)); + ctrl_msg_h->ctrl_msg_id = cpu_to_le32(msg); + ctrl_msg_h->ex_msg = cpu_to_le32(ex_msg); + ctrl_msg_h->data_length = cpu_to_le32(msg_len); + + if (!msg_len) + pkt_header = CCCI_HEADER_NO_DATA; + + return t7xx_port_send_ccci_skb(port, skb, pkt_header, ex_msg); +} + 
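As an illustration of how the two helpers above are meant to be used: t7xx_port_send_ctl_skb() fills in a struct ctrl_msg_header and t7xx_port_send_ccci_skb() then prepends a struct ccci_header carrying the TX channel, sequence number and assert bit, so a control message leaves the host as [ccci_header][ctrl_msg_header][payload]. Below is a minimal sketch of a caller, modelled on port_ctl_send_msg_to_md() in t7xx_port_ctrl_msg.c from this same patch; the function name and the msg/ex_msg values are illustrative only and not part of the patch.

/* Illustrative sketch -- mirrors port_ctl_send_msg_to_md() shown earlier. */
static int example_send_ctl_msg(struct t7xx_port *port, unsigned int msg, unsigned int ex_msg)
{
	struct sk_buff *skb;
	int ret;

	skb = t7xx_ctrl_alloc_skb(0);	/* Reserves headroom for ccci_header + ctrl_msg_header. */
	if (!skb)
		return -ENOMEM;

	/*
	 * t7xx_port_send_ctl_skb() fills the ctrl_msg_header, then
	 * t7xx_port_send_ccci_skb() adds the ccci_header, hands the skb to
	 * CLDMA and, on success, advances the port's TX sequence number.
	 */
	ret = t7xx_port_send_ctl_skb(port, skb, msg, ex_msg);
	if (ret)
		dev_kfree_skb_any(skb);	/* On failure the caller still owns the skb. */

	return ret;
}

On success the skb is consumed by the CLDMA TX path and must not be touched afterwards; freeing it only on error matches the ownership convention the ports in this patch follow.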
+int t7xx_port_send_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int pkt_header, + unsigned int ex_msg) +{ + struct t7xx_fsm_ctl *ctl = port->t7xx_dev->md->fsm_ctl; + unsigned int fsm_state; + + fsm_state = t7xx_fsm_get_ctl_state(ctl); + if (fsm_state != FSM_STATE_PRE_START) { + const struct t7xx_port_conf *port_conf = port->port_conf; + enum md_state md_state = t7xx_fsm_get_md_state(ctl); + + switch (md_state) { + case MD_STATE_EXCEPTION: + if (port_conf->tx_ch != PORT_CH_MD_LOG_TX) + return -EBUSY; + break; + + case MD_STATE_WAITING_FOR_HS1: + case MD_STATE_WAITING_FOR_HS2: + case MD_STATE_STOPPED: + case MD_STATE_WAITING_TO_STOP: + case MD_STATE_INVALID: + return -ENODEV; + + default: + break; + } + } + + return t7xx_port_send_ccci_skb(port, skb, pkt_header, ex_msg); +} + +static void t7xx_proxy_setup_ch_mapping(struct port_proxy *port_prox) +{ + struct t7xx_port *port; + + int i, j; + + for (i = 0; i < ARRAY_SIZE(port_prox->rx_ch_ports); i++) + INIT_LIST_HEAD(&port_prox->rx_ch_ports[i]); + + for (j = 0; j < ARRAY_SIZE(port_prox->queue_ports); j++) { + for (i = 0; i < ARRAY_SIZE(port_prox->queue_ports[j]); i++) + INIT_LIST_HEAD(&port_prox->queue_ports[j][i]); + } + + for_each_proxy_port(i, port, port_prox) { + const struct t7xx_port_conf *port_conf = port->port_conf; + enum cldma_id path_id = port_conf->path_id; + u8 ch_id; + + ch_id = FIELD_GET(PORT_CH_ID_MASK, port_conf->rx_ch); + list_add_tail(&port->entry, &port_prox->rx_ch_ports[ch_id]); + list_add_tail(&port->queue_entry, + &port_prox->queue_ports[path_id][port_conf->rxq_index]); + } +} + +static struct t7xx_port *t7xx_port_proxy_find_port(struct t7xx_pci_dev *t7xx_dev, + struct cldma_queue *queue, u16 channel) +{ + struct port_proxy *port_prox = t7xx_dev->md->port_prox; + struct list_head *port_list; + struct t7xx_port *port; + u8 ch_id; + + ch_id = FIELD_GET(PORT_CH_ID_MASK, channel); + port_list = &port_prox->rx_ch_ports[ch_id]; + list_for_each_entry(port, port_list, entry) { + const struct t7xx_port_conf *port_conf = port->port_conf; + + if (queue->md_ctrl->hif_id == port_conf->path_id && + channel == port_conf->rx_ch) + return port; + } + + return NULL; +} + +/** + * t7xx_port_proxy_recv_skb() - Dispatch received skb. + * @queue: CLDMA queue. + * @skb: Socket buffer. + * + * Return: + ** 0 - Packet consumed. + ** -ERROR - Failed to process skb. 
+ */ +static int t7xx_port_proxy_recv_skb(struct cldma_queue *queue, struct sk_buff *skb) +{ + struct ccci_header *ccci_h = (struct ccci_header *)skb->data; + struct t7xx_pci_dev *t7xx_dev = queue->md_ctrl->t7xx_dev; + struct t7xx_fsm_ctl *ctl = t7xx_dev->md->fsm_ctl; + struct device *dev = queue->md_ctrl->dev; + const struct t7xx_port_conf *port_conf; + struct t7xx_port *port; + u16 seq_num, channel; + int ret; + + channel = FIELD_GET(CCCI_H_CHN_FLD, le32_to_cpu(ccci_h->status)); + if (t7xx_fsm_get_md_state(ctl) == MD_STATE_INVALID) { + dev_err_ratelimited(dev, "Packet drop on channel 0x%x, modem not ready\n", channel); + goto drop_skb; + } + + port = t7xx_port_proxy_find_port(t7xx_dev, queue, channel); + if (!port) { + dev_err_ratelimited(dev, "Packet drop on channel 0x%x, port not found\n", channel); + goto drop_skb; + } + + seq_num = t7xx_port_next_rx_seq_num(port, ccci_h); + port_conf = port->port_conf; + skb_pull(skb, sizeof(*ccci_h)); + + ret = port_conf->ops->recv_skb(port, skb); + /* Error indicates to try again later */ + if (ret) { + skb_push(skb, sizeof(*ccci_h)); + return ret; + } + + port->seq_nums[MTK_RX] = seq_num; + return 0; + +drop_skb: + dev_kfree_skb_any(skb); + return 0; +} + +/** + * t7xx_port_proxy_md_status_notify() - Notify all ports of state. + *@port_prox: The port_proxy pointer. + *@state: State. + * + * Called by t7xx_fsm. Used to dispatch modem status for all ports, + * which want to know MD state transition. + */ +void t7xx_port_proxy_md_status_notify(struct port_proxy *port_prox, unsigned int state) +{ + struct t7xx_port *port; + int i; + + for_each_proxy_port(i, port, port_prox) { + const struct t7xx_port_conf *port_conf = port->port_conf; + + if (port_conf->ops->md_state_notify) + port_conf->ops->md_state_notify(port, state); + } +} + +static void t7xx_proxy_init_all_ports(struct t7xx_modem *md) +{ + struct port_proxy *port_prox = md->port_prox; + struct t7xx_port *port; + int i; + + for_each_proxy_port(i, port, port_prox) { + const struct t7xx_port_conf *port_conf = port->port_conf; + + t7xx_port_struct_init(port); + + if (port_conf->tx_ch == PORT_CH_CONTROL_TX) + md->core_md.ctl_port = port; + + port->t7xx_dev = md->t7xx_dev; + port->dev = &md->t7xx_dev->pdev->dev; + spin_lock_init(&port->port_update_lock); + port->chan_enable = false; + + if (port_conf->ops->init) + port_conf->ops->init(port); + } + + t7xx_proxy_setup_ch_mapping(port_prox); +} + +static int t7xx_proxy_alloc(struct t7xx_modem *md) +{ + unsigned int port_count = ARRAY_SIZE(t7xx_md_port_conf); + struct device *dev = &md->t7xx_dev->pdev->dev; + struct port_proxy *port_prox; + int i; + + port_prox = devm_kzalloc(dev, sizeof(*port_prox) + sizeof(struct t7xx_port) * port_count, + GFP_KERNEL); + if (!port_prox) + return -ENOMEM; + + md->port_prox = port_prox; + port_prox->dev = dev; + + for (i = 0; i < port_count; i++) + port_prox->ports[i].port_conf = &t7xx_md_port_conf[i]; + + port_prox->port_count = port_count; + t7xx_proxy_init_all_ports(md); + return 0; +} + +/** + * t7xx_port_proxy_init() - Initialize ports. + * @md: Modem. + * + * Create all port instances. + * + * Return: + * * 0 - Success. + * * -ERROR - Error code from failure sub-initializations. 
+ */ +int t7xx_port_proxy_init(struct t7xx_modem *md) +{ + int ret; + + ret = t7xx_proxy_alloc(md); + if (ret) + return ret; + + t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_MD], t7xx_port_proxy_recv_skb); + return 0; +} + +void t7xx_port_proxy_uninit(struct port_proxy *port_prox) +{ + struct t7xx_port *port; + int i; + + for_each_proxy_port(i, port, port_prox) { + const struct t7xx_port_conf *port_conf = port->port_conf; + + if (port_conf->ops->uninit) + port_conf->ops->uninit(port); + } +} + +int t7xx_port_proxy_chl_enable_disable(struct port_proxy *port_prox, unsigned int ch_id, + bool en_flag) +{ + struct t7xx_port *port = t7xx_proxy_get_port_by_ch(port_prox, ch_id); + const struct t7xx_port_conf *port_conf; + + if (!port) + return -EINVAL; + + port_conf = port->port_conf; + + if (en_flag) { + if (port_conf->ops->enable_chl) + port_conf->ops->enable_chl(port); + } else { + if (port_conf->ops->disable_chl) + port_conf->ops->disable_chl(port); + } + + return 0; +} diff --git a/drivers/net/wwan/t7xx/t7xx_port_proxy.h b/drivers/net/wwan/t7xx/t7xx_port_proxy.h new file mode 100644 index 000000000..bc1ff5c6c --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.h @@ -0,0 +1,98 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Amir Hanania + * Haijun Liu + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Chiranjeevi Rapolu + * Eliot Lee + * Sreehari Kancharla + */ + +#ifndef __T7XX_PORT_PROXY_H__ +#define __T7XX_PORT_PROXY_H__ + +#include +#include +#include +#include + +#include "t7xx_hif_cldma.h" +#include "t7xx_modem_ops.h" +#include "t7xx_port.h" + +#define MTK_QUEUES 16 +#define RX_QUEUE_MAXLEN 32 +#define CTRL_QUEUE_MAXLEN 16 + +struct port_proxy { + int port_count; + struct list_head rx_ch_ports[PORT_CH_ID_MASK + 1]; + struct list_head queue_ports[CLDMA_NUM][MTK_QUEUES]; + struct device *dev; + struct t7xx_port ports[]; +}; + +struct ccci_header { + __le32 packet_header; + __le32 packet_len; + __le32 status; + __le32 ex_msg; +}; + +/* Coupled with HW - indicates if there is data following the CCCI header or not */ +#define CCCI_HEADER_NO_DATA 0xffffffff + +#define CCCI_H_AST_BIT BIT(31) +#define CCCI_H_SEQ_FLD GENMASK(30, 16) +#define CCCI_H_CHN_FLD GENMASK(15, 0) + +struct ctrl_msg_header { + __le32 ctrl_msg_id; + __le32 ex_msg; + __le32 data_length; +}; + +/* Control identification numbers for AP<->MD messages */ +#define CTL_ID_HS1_MSG 0x0 +#define CTL_ID_HS2_MSG 0x1 +#define CTL_ID_HS3_MSG 0x2 +#define CTL_ID_MD_EX 0x4 +#define CTL_ID_DRV_VER_ERROR 0x5 +#define CTL_ID_MD_EX_ACK 0x6 +#define CTL_ID_MD_EX_PASS 0x8 +#define CTL_ID_PORT_ENUM 0x9 + +/* Modem exception check identification code - "EXCP" */ +#define MD_EX_CHK_ID 0x45584350 +/* Modem exception check acknowledge identification code - "EREC" */ +#define MD_EX_CHK_ACK_ID 0x45524543 + +#define PORT_INFO_RSRVD GENMASK(31, 16) +#define PORT_INFO_ENFLG BIT(15) +#define PORT_INFO_CH_ID GENMASK(14, 0) + +#define PORT_ENUM_VER 0 +#define PORT_ENUM_HEAD_PATTERN 0x5a5a5a5a +#define PORT_ENUM_TAIL_PATTERN 0xa5a5a5a5 +#define PORT_ENUM_VER_MISMATCH 0x00657272 + +/* Port operations mapping */ +extern struct port_ops wwan_sub_port_ops; +extern struct port_ops ctl_port_ops; + +void t7xx_port_proxy_reset(struct port_proxy *port_prox); +void t7xx_port_proxy_uninit(struct port_proxy *port_prox); +int t7xx_port_proxy_init(struct t7xx_modem *md); +void t7xx_port_proxy_md_status_notify(struct port_proxy *port_prox, unsigned 
int state); +int t7xx_port_enum_msg_handler(struct t7xx_modem *md, void *msg); +int t7xx_port_proxy_chl_enable_disable(struct port_proxy *port_prox, unsigned int ch_id, + bool en_flag); + +#endif /* __T7XX_PORT_PROXY_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_port_wwan.c b/drivers/net/wwan/t7xx/t7xx_port_wwan.c new file mode 100644 index 000000000..33931bfd7 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_port_wwan.c @@ -0,0 +1,176 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Amir Hanania + * Chandrashekar Devegowda + * Haijun Liu + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Andy Shevchenko + * Chiranjeevi Rapolu + * Eliot Lee + * Sreehari Kancharla + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_port.h" +#include "t7xx_port_proxy.h" +#include "t7xx_state_monitor.h" + +static int t7xx_port_ctrl_start(struct wwan_port *port) +{ + struct t7xx_port *port_mtk = wwan_port_get_drvdata(port); + + if (atomic_read(&port_mtk->usage_cnt)) + return -EBUSY; + + atomic_inc(&port_mtk->usage_cnt); + return 0; +} + +static void t7xx_port_ctrl_stop(struct wwan_port *port) +{ + struct t7xx_port *port_mtk = wwan_port_get_drvdata(port); + + atomic_dec(&port_mtk->usage_cnt); +} + +static int t7xx_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb) +{ + struct t7xx_port *port_private = wwan_port_get_drvdata(port); + size_t len, offset, chunk_len = 0, txq_mtu = CLDMA_MTU; + const struct t7xx_port_conf *port_conf; + struct t7xx_fsm_ctl *ctl; + enum md_state md_state; + + len = skb->len; + if (!len || !port_private->chan_enable) + return -EINVAL; + + port_conf = port_private->port_conf; + ctl = port_private->t7xx_dev->md->fsm_ctl; + md_state = t7xx_fsm_get_md_state(ctl); + if (md_state == MD_STATE_WAITING_FOR_HS1 || md_state == MD_STATE_WAITING_FOR_HS2) { + dev_warn(port_private->dev, "Cannot write to %s port when md_state=%d\n", + port_conf->name, md_state); + return -ENODEV; + } + + for (offset = 0; offset < len; offset += chunk_len) { + struct sk_buff *skb_ccci; + int ret; + + chunk_len = min(len - offset, txq_mtu - sizeof(struct ccci_header)); + skb_ccci = t7xx_port_alloc_skb(chunk_len); + if (!skb_ccci) + return -ENOMEM; + + skb_put_data(skb_ccci, skb->data + offset, chunk_len); + ret = t7xx_port_send_skb(port_private, skb_ccci, 0, 0); + if (ret) { + dev_kfree_skb_any(skb_ccci); + dev_err(port_private->dev, "Write error on %s port, %d\n", + port_conf->name, ret); + return ret; + } + } + + dev_kfree_skb(skb); + return 0; +} + +static const struct wwan_port_ops wwan_ops = { + .start = t7xx_port_ctrl_start, + .stop = t7xx_port_ctrl_stop, + .tx = t7xx_port_ctrl_tx, +}; + +static int t7xx_port_wwan_init(struct t7xx_port *port) +{ + port->rx_length_th = RX_QUEUE_MAXLEN; + return 0; +} + +static void t7xx_port_wwan_uninit(struct t7xx_port *port) +{ + if (!port->wwan_port) + return; + + port->rx_length_th = 0; + wwan_remove_port(port->wwan_port); + port->wwan_port = NULL; +} + +static int t7xx_port_wwan_recv_skb(struct t7xx_port *port, struct sk_buff *skb) +{ + if (!atomic_read(&port->usage_cnt) || !port->chan_enable) { + const struct t7xx_port_conf *port_conf = port->port_conf; + + dev_kfree_skb_any(skb); + dev_err_ratelimited(port->dev, "Port %s is not opened, drop packets\n", + port_conf->name); + /* Dropping skb, caller should not access skb.*/ + return 0; + } + + 
wwan_port_rx(port->wwan_port, skb); + return 0; +} + +static int t7xx_port_wwan_enable_chl(struct t7xx_port *port) +{ + spin_lock(&port->port_update_lock); + port->chan_enable = true; + spin_unlock(&port->port_update_lock); + + return 0; +} + +static int t7xx_port_wwan_disable_chl(struct t7xx_port *port) +{ + spin_lock(&port->port_update_lock); + port->chan_enable = false; + spin_unlock(&port->port_update_lock); + + return 0; +} + +static void t7xx_port_wwan_md_state_notify(struct t7xx_port *port, unsigned int state) +{ + const struct t7xx_port_conf *port_conf = port->port_conf; + + if (state != MD_STATE_READY) + return; + + if (!port->wwan_port) { + port->wwan_port = wwan_create_port(port->dev, port_conf->port_type, + &wwan_ops, port); + if (IS_ERR(port->wwan_port)) + dev_err(port->dev, "Unable to create WWAN port %s", port_conf->name); + } +} + +struct port_ops wwan_sub_port_ops = { + .init = t7xx_port_wwan_init, + .recv_skb = t7xx_port_wwan_recv_skb, + .uninit = t7xx_port_wwan_uninit, + .enable_chl = t7xx_port_wwan_enable_chl, + .disable_chl = t7xx_port_wwan_disable_chl, + .md_state_notify = t7xx_port_wwan_md_state_notify, +}; diff --git a/drivers/net/wwan/t7xx/t7xx_reg.h b/drivers/net/wwan/t7xx/t7xx_reg.h new file mode 100644 index 000000000..7c1b81091 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_reg.h @@ -0,0 +1,350 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Haijun Liu + * Chiranjeevi Rapolu + * + * Contributors: + * Amir Hanania + * Andy Shevchenko + * Eliot Lee + * Moises Veleta + * Ricardo Martinez + * Sreehari Kancharla + */ + +#ifndef __T7XX_REG_H__ +#define __T7XX_REG_H__ + +#include + +/* Device base address offset */ +#define MHCCIF_RC_DEV_BASE 0x10024000 + +#define REG_RC2EP_SW_BSY 0x04 +#define REG_RC2EP_SW_INT_START 0x08 + +#define REG_RC2EP_SW_TCHNUM 0x0c +#define H2D_CH_EXCEPTION_ACK 1 +#define H2D_CH_EXCEPTION_CLEARQ_ACK 2 +#define H2D_CH_DS_LOCK 3 +/* Channels 4-8 are reserved */ +#define H2D_CH_SUSPEND_REQ 9 +#define H2D_CH_RESUME_REQ 10 +#define H2D_CH_SUSPEND_REQ_AP 11 +#define H2D_CH_RESUME_REQ_AP 12 +#define H2D_CH_DEVICE_RESET 13 +#define H2D_CH_DRM_DISABLE_AP 14 + +#define REG_EP2RC_SW_INT_STS 0x10 +#define REG_EP2RC_SW_INT_ACK 0x14 +#define REG_EP2RC_SW_INT_EAP_MASK 0x20 +#define REG_EP2RC_SW_INT_EAP_MASK_SET 0x30 +#define REG_EP2RC_SW_INT_EAP_MASK_CLR 0x40 + +#define D2H_INT_DS_LOCK_ACK BIT(0) +#define D2H_INT_EXCEPTION_INIT BIT(1) +#define D2H_INT_EXCEPTION_INIT_DONE BIT(2) +#define D2H_INT_EXCEPTION_CLEARQ_DONE BIT(3) +#define D2H_INT_EXCEPTION_ALLQ_RESET BIT(4) +#define D2H_INT_PORT_ENUM BIT(5) +/* Bits 6-10 are reserved */ +#define D2H_INT_SUSPEND_ACK BIT(11) +#define D2H_INT_RESUME_ACK BIT(12) +#define D2H_INT_SUSPEND_ACK_AP BIT(13) +#define D2H_INT_RESUME_ACK_AP BIT(14) +#define D2H_INT_ASYNC_SAP_HK BIT(15) +#define D2H_INT_ASYNC_MD_HK BIT(16) + +/* Register base */ +#define INFRACFG_AO_DEV_CHIP 0x10001000 + +/* ATR setting */ +#define T7XX_PCIE_REG_TRSL_ADDR_CHIP 0x10000000 +#define T7XX_PCIE_REG_SIZE_CHIP 0x00400000 + +/* Reset Generic Unit (RGU) */ +#define TOPRGU_CH_PCIE_IRQ_STA 0x1000790c + +#define ATR_PORT_OFFSET 0x100 +#define ATR_TABLE_OFFSET 0x20 +#define ATR_TABLE_NUM_PER_ATR 8 +#define ATR_TRANSPARENT_SIZE 0x3f + +/* PCIE_MAC_IREG Register Definition */ + +#define ISTAT_HST_CTRL 0x01ac +#define ISTAT_HST_CTRL_DIS BIT(0) + +#define T7XX_PCIE_MISC_CTRL 0x0348 +#define T7XX_PCIE_MISC_MAC_SLEEP_DIS BIT(7) + +#define
T7XX_PCIE_CFG_MSIX 0x03ec +#define ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR 0x0600 +#define ATR_PCIE_WIN0_T0_TRSL_ADDR 0x0608 +#define ATR_PCIE_WIN0_T0_TRSL_PARAM 0x0610 +#define ATR_PCIE_WIN0_ADDR_ALGMT GENMASK_ULL(63, 12) + +#define ATR_SRC_ADDR_INVALID 0x007f + +#define T7XX_PCIE_PM_RESUME_STATE 0x0d0c + +enum t7xx_pm_resume_state { + PM_RESUME_REG_STATE_L3, + PM_RESUME_REG_STATE_L1, + PM_RESUME_REG_STATE_INIT, + PM_RESUME_REG_STATE_EXP, + PM_RESUME_REG_STATE_L2, + PM_RESUME_REG_STATE_L2_EXP, +}; + +#define T7XX_PCIE_MISC_DEV_STATUS 0x0d1c +#define MISC_STAGE_MASK GENMASK(2, 0) +#define MISC_RESET_TYPE_PLDR BIT(26) +#define MISC_RESET_TYPE_FLDR BIT(27) +#define LINUX_STAGE 4 + +#define T7XX_PCIE_RESOURCE_STATUS 0x0d28 +#define T7XX_PCIE_RESOURCE_STS_MSK GENMASK(4, 0) + +#define DISABLE_ASPM_LOWPWR 0x0e50 +#define ENABLE_ASPM_LOWPWR 0x0e54 +#define T7XX_L1_BIT(i) BIT((i) * 4 + 1) +#define T7XX_L1_1_BIT(i) BIT((i) * 4 + 2) +#define T7XX_L1_2_BIT(i) BIT((i) * 4 + 3) + +#define MSIX_ISTAT_HST_GRP0_0 0x0f00 +#define IMASK_HOST_MSIX_SET_GRP0_0 0x3000 +#define IMASK_HOST_MSIX_CLR_GRP0_0 0x3080 +#define EXT_INT_START 24 +#define EXT_INT_NUM 8 +#define MSIX_MSK_SET_ALL GENMASK(31, 24) + +enum t7xx_int { + DPMAIF_INT, + CLDMA0_INT, + CLDMA1_INT, + CLDMA2_INT, + MHCCIF_INT, + DPMAIF2_INT, + SAP_RGU_INT, + CLDMA3_INT, +}; + +/* DPMA definitions */ + +#define DPMAIF_PD_BASE 0x1022d000 +#define BASE_DPMAIF_UL DPMAIF_PD_BASE +#define BASE_DPMAIF_DL (DPMAIF_PD_BASE + 0x100) +#define BASE_DPMAIF_AP_MISC (DPMAIF_PD_BASE + 0x400) +#define BASE_DPMAIF_MMW_HPC (DPMAIF_PD_BASE + 0x600) +#define BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX (DPMAIF_PD_BASE + 0x900) +#define BASE_DPMAIF_PD_SRAM_DL (DPMAIF_PD_BASE + 0xc00) +#define BASE_DPMAIF_PD_SRAM_UL (DPMAIF_PD_BASE + 0xd00) + +#define DPMAIF_AO_BASE 0x10014000 +#define BASE_DPMAIF_AO_UL DPMAIF_AO_BASE +#define BASE_DPMAIF_AO_DL (DPMAIF_AO_BASE + 0x400) + +#define DPMAIF_UL_ADD_DESC (BASE_DPMAIF_UL + 0x00) +#define DPMAIF_UL_CHK_BUSY (BASE_DPMAIF_UL + 0x88) +#define DPMAIF_UL_RESERVE_AO_RW (BASE_DPMAIF_UL + 0xac) +#define DPMAIF_UL_ADD_DESC_CH0 (BASE_DPMAIF_UL + 0xb0) + +#define DPMAIF_DL_BAT_INIT (BASE_DPMAIF_DL + 0x00) +#define DPMAIF_DL_BAT_ADD (BASE_DPMAIF_DL + 0x04) +#define DPMAIF_DL_BAT_INIT_CON0 (BASE_DPMAIF_DL + 0x08) +#define DPMAIF_DL_BAT_INIT_CON1 (BASE_DPMAIF_DL + 0x0c) +#define DPMAIF_DL_BAT_INIT_CON2 (BASE_DPMAIF_DL + 0x10) +#define DPMAIF_DL_BAT_INIT_CON3 (BASE_DPMAIF_DL + 0x50) +#define DPMAIF_DL_CHK_BUSY (BASE_DPMAIF_DL + 0xb4) + +#define DPMAIF_AP_L2TISAR0 (BASE_DPMAIF_AP_MISC + 0x00) +#define DPMAIF_AP_APDL_L2TISAR0 (BASE_DPMAIF_AP_MISC + 0x50) +#define DPMAIF_AP_IP_BUSY (BASE_DPMAIF_AP_MISC + 0x60) +#define DPMAIF_AP_CG_EN (BASE_DPMAIF_AP_MISC + 0x68) +#define DPMAIF_AP_OVERWRITE_CFG (BASE_DPMAIF_AP_MISC + 0x90) +#define DPMAIF_AP_MEM_CLR (BASE_DPMAIF_AP_MISC + 0x94) +#define DPMAIF_AP_ALL_L2TISAR0_MASK GENMASK(31, 0) +#define DPMAIF_AP_APDL_ALL_L2TISAR0_MASK GENMASK(31, 0) +#define DPMAIF_AP_IP_BUSY_MASK GENMASK(31, 0) + +#define DPMAIF_AO_UL_INIT_SET (BASE_DPMAIF_AO_UL + 0x0) +#define DPMAIF_AO_UL_CHNL_ARB0 (BASE_DPMAIF_AO_UL + 0x1c) +#define DPMAIF_AO_UL_AP_L2TIMR0 (BASE_DPMAIF_AO_UL + 0x80) +#define DPMAIF_AO_UL_AP_L2TIMCR0 (BASE_DPMAIF_AO_UL + 0x84) +#define DPMAIF_AO_UL_AP_L2TIMSR0 (BASE_DPMAIF_AO_UL + 0x88) +#define DPMAIF_AO_UL_AP_L1TIMR0 (BASE_DPMAIF_AO_UL + 0x8c) +#define DPMAIF_AO_UL_APDL_L2TIMR0 (BASE_DPMAIF_AO_UL + 0x90) +#define DPMAIF_AO_UL_APDL_L2TIMCR0 (BASE_DPMAIF_AO_UL + 0x94) +#define DPMAIF_AO_UL_APDL_L2TIMSR0 (BASE_DPMAIF_AO_UL + 
0x98) +#define DPMAIF_AO_AP_DLUL_IP_BUSY_MASK (BASE_DPMAIF_AO_UL + 0x9c) + +#define DPMAIF_AO_UL_CHNL0_CON0 (BASE_DPMAIF_PD_SRAM_UL + 0x10) +#define DPMAIF_AO_UL_CHNL0_CON1 (BASE_DPMAIF_PD_SRAM_UL + 0x14) +#define DPMAIF_AO_UL_CHNL0_CON2 (BASE_DPMAIF_PD_SRAM_UL + 0x18) +#define DPMAIF_AO_UL_CH0_STA (BASE_DPMAIF_PD_SRAM_UL + 0x70) + +#define DPMAIF_AO_DL_INIT_SET (BASE_DPMAIF_AO_DL + 0x00) +#define DPMAIF_AO_DL_IRQ_MASK (BASE_DPMAIF_AO_DL + 0x0c) +#define DPMAIF_AO_DL_DLQPIT_INIT_CON5 (BASE_DPMAIF_AO_DL + 0x28) +#define DPMAIF_AO_DL_DLQPIT_TRIG_THRES (BASE_DPMAIF_AO_DL + 0x34) + +#define DPMAIF_AO_DL_PKTINFO_CON0 (BASE_DPMAIF_PD_SRAM_DL + 0x00) +#define DPMAIF_AO_DL_PKTINFO_CON1 (BASE_DPMAIF_PD_SRAM_DL + 0x04) +#define DPMAIF_AO_DL_PKTINFO_CON2 (BASE_DPMAIF_PD_SRAM_DL + 0x08) +#define DPMAIF_AO_DL_RDY_CHK_THRES (BASE_DPMAIF_PD_SRAM_DL + 0x0c) +#define DPMAIF_AO_DL_RDY_CHK_FRG_THRES (BASE_DPMAIF_PD_SRAM_DL + 0x10) + +#define DPMAIF_AO_DL_DLQ_AGG_CFG (BASE_DPMAIF_PD_SRAM_DL + 0x20) +#define DPMAIF_AO_DL_DLQPIT_TIMEOUT0 (BASE_DPMAIF_PD_SRAM_DL + 0x24) +#define DPMAIF_AO_DL_DLQPIT_TIMEOUT1 (BASE_DPMAIF_PD_SRAM_DL + 0x28) +#define DPMAIF_AO_DL_HPC_CNTL (BASE_DPMAIF_PD_SRAM_DL + 0x38) +#define DPMAIF_AO_DL_PIT_SEQ_END (BASE_DPMAIF_PD_SRAM_DL + 0x40) + +#define DPMAIF_AO_DL_BAT_RD_IDX (BASE_DPMAIF_PD_SRAM_DL + 0xd8) +#define DPMAIF_AO_DL_BAT_WR_IDX (BASE_DPMAIF_PD_SRAM_DL + 0xdc) +#define DPMAIF_AO_DL_PIT_RD_IDX (BASE_DPMAIF_PD_SRAM_DL + 0xec) +#define DPMAIF_AO_DL_PIT_WR_IDX (BASE_DPMAIF_PD_SRAM_DL + 0x60) +#define DPMAIF_AO_DL_FRGBAT_RD_IDX (BASE_DPMAIF_PD_SRAM_DL + 0x78) +#define DPMAIF_AO_DL_DLQ_WR_IDX (BASE_DPMAIF_PD_SRAM_DL + 0xa4) + +#define DPMAIF_HPC_INTR_MASK (BASE_DPMAIF_MMW_HPC + 0x0f4) +#define DPMA_HPC_ALL_INT_MASK GENMASK(15, 0) + +#define DPMAIF_HPC_DLQ_PATH_MODE 3 +#define DPMAIF_HPC_ADD_MODE_DF 0 +#define DPMAIF_HPC_TOTAL_NUM 8 +#define DPMAIF_HPC_MAX_TOTAL_NUM 8 + +#define DPMAIF_DL_DLQPIT_INIT (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x00) +#define DPMAIF_DL_DLQPIT_ADD (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x10) +#define DPMAIF_DL_DLQPIT_INIT_CON0 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x14) +#define DPMAIF_DL_DLQPIT_INIT_CON1 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x18) +#define DPMAIF_DL_DLQPIT_INIT_CON2 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x1c) +#define DPMAIF_DL_DLQPIT_INIT_CON3 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x20) +#define DPMAIF_DL_DLQPIT_INIT_CON4 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x24) +#define DPMAIF_DL_DLQPIT_INIT_CON5 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x28) +#define DPMAIF_DL_DLQPIT_INIT_CON6 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x2c) + +#define DPMAIF_ULQSAR_n(q) (DPMAIF_AO_UL_CHNL0_CON0 + 0x10 * (q)) +#define DPMAIF_UL_DRBSIZE_ADDRH_n(q) (DPMAIF_AO_UL_CHNL0_CON1 + 0x10 * (q)) +#define DPMAIF_UL_DRB_ADDRH_n(q) (DPMAIF_AO_UL_CHNL0_CON2 + 0x10 * (q)) +#define DPMAIF_ULQ_STA0_n(q) (DPMAIF_AO_UL_CH0_STA + 0x04 * (q)) +#define DPMAIF_ULQ_ADD_DESC_CH_n(q) (DPMAIF_UL_ADD_DESC_CH0 + 0x04 * (q)) + +#define DPMAIF_UL_DRB_RIDX_MSK GENMASK(31, 16) + +#define DPMAIF_AP_RGU_ASSERT 0x10001150 +#define DPMAIF_AP_RGU_DEASSERT 0x10001154 +#define DPMAIF_AP_RST_BIT BIT(2) + +#define DPMAIF_AP_AO_RGU_ASSERT 0x10001140 +#define DPMAIF_AP_AO_RGU_DEASSERT 0x10001144 +#define DPMAIF_AP_AO_RST_BIT BIT(6) + +/* DPMAIF init/restore */ +#define DPMAIF_UL_ADD_NOT_READY BIT(31) +#define DPMAIF_UL_ADD_UPDATE BIT(31) +#define DPMAIF_UL_ADD_COUNT_MASK GENMASK(15, 0) +#define DPMAIF_UL_ALL_QUE_ARB_EN GENMASK(11, 8) + +#define DPMAIF_DL_ADD_UPDATE BIT(31) +#define DPMAIF_DL_ADD_NOT_READY BIT(31) +#define 
DPMAIF_DL_FRG_ADD_UPDATE BIT(16) +#define DPMAIF_DL_ADD_COUNT_MASK GENMASK(15, 0) + +#define DPMAIF_DL_BAT_INIT_ALLSET BIT(0) +#define DPMAIF_DL_BAT_FRG_INIT BIT(16) +#define DPMAIF_DL_BAT_INIT_EN BIT(31) +#define DPMAIF_DL_BAT_INIT_NOT_READY BIT(31) +#define DPMAIF_DL_BAT_INIT_ONLY_ENABLE_BIT 0 + +#define DPMAIF_DL_PIT_INIT_ALLSET BIT(0) +#define DPMAIF_DL_PIT_INIT_EN BIT(31) +#define DPMAIF_DL_PIT_INIT_NOT_READY BIT(31) + +#define DPMAIF_BAT_REMAIN_SZ_BASE 16 +#define DPMAIF_BAT_BUFFER_SZ_BASE 128 +#define DPMAIF_FRG_BUFFER_SZ_BASE 128 + +#define DLQ_PIT_IDX_SIZE 0x20 + +#define DPMAIF_PIT_SIZE_MSK GENMASK(17, 0) + +#define DPMAIF_PIT_REM_CNT_MSK GENMASK(17, 0) + +#define DPMAIF_BAT_EN_MSK BIT(16) +#define DPMAIF_FRG_EN_MSK BIT(28) +#define DPMAIF_BAT_SIZE_MSK GENMASK(15, 0) + +#define DPMAIF_BAT_BID_MAXCNT_MSK GENMASK(31, 16) +#define DPMAIF_BAT_REMAIN_MINSZ_MSK GENMASK(15, 8) +#define DPMAIF_PIT_CHK_NUM_MSK GENMASK(31, 24) +#define DPMAIF_BAT_BUF_SZ_MSK GENMASK(16, 8) +#define DPMAIF_FRG_BUF_SZ_MSK GENMASK(16, 8) +#define DPMAIF_BAT_RSV_LEN_MSK GENMASK(7, 0) +#define DPMAIF_PKT_ALIGN_MSK GENMASK(23, 22) + +#define DPMAIF_BAT_CHECK_THRES_MSK GENMASK(21, 16) +#define DPMAIF_FRG_CHECK_THRES_MSK GENMASK(7, 0) + +#define DPMAIF_PKT_ALIGN_EN BIT(23) + +#define DPMAIF_DRB_SIZE_MSK GENMASK(15, 0) + +#define DPMAIF_DL_RD_WR_IDX_MSK GENMASK(17, 0) + +/* DPMAIF_UL_CHK_BUSY */ +#define DPMAIF_UL_IDLE_STS BIT(11) +/* DPMAIF_DL_CHK_BUSY */ +#define DPMAIF_DL_IDLE_STS BIT(23) +/* DPMAIF_AO_DL_RDY_CHK_THRES */ +#define DPMAIF_DL_PKT_CHECKSUM_EN BIT(31) +#define DPMAIF_PORT_MODE_PCIE BIT(30) +#define DPMAIF_DL_BURST_PIT_EN BIT(13) +/* DPMAIF_DL_BAT_INIT_CON1 */ +#define DPMAIF_DL_BAT_CACHE_PRI BIT(22) +/* DPMAIF_AP_MEM_CLR */ +#define DPMAIF_MEM_CLR BIT(0) +/* DPMAIF_AP_OVERWRITE_CFG */ +#define DPMAIF_SRAM_SYNC BIT(0) +/* DPMAIF_AO_UL_INIT_SET */ +#define DPMAIF_UL_INIT_DONE BIT(0) +/* DPMAIF_AO_DL_INIT_SET */ +#define DPMAIF_DL_INIT_DONE BIT(0) +/* DPMAIF_AO_DL_PIT_SEQ_END */ +#define DPMAIF_DL_PIT_SEQ_MSK GENMASK(7, 0) +/* DPMAIF_UL_RESERVE_AO_RW */ +#define DPMAIF_PCIE_MODE_SET_VALUE 0x55 +/* DPMAIF_AP_CG_EN */ +#define DPMAIF_CG_EN 0x7f + +#define DPMAIF_UDL_IP_BUSY BIT(0) +#define DPMAIF_DL_INT_DLQ0_QDONE BIT(8) +#define DPMAIF_DL_INT_DLQ1_QDONE BIT(9) +#define DPMAIF_DL_INT_DLQ0_PITCNT_LEN BIT(10) +#define DPMAIF_DL_INT_DLQ1_PITCNT_LEN BIT(11) +#define DPMAIF_DL_INT_Q2TOQ1 BIT(24) +#define DPMAIF_DL_INT_Q2APTOP BIT(25) + +#define DPMAIF_DLQ_LOW_TIMEOUT_THRES_MKS GENMASK(15, 0) +#define DPMAIF_DLQ_HIGH_TIMEOUT_THRES_MSK GENMASK(31, 16) + +/* DPMAIF DLQ HW configure */ +#define DPMAIF_AGG_MAX_LEN_DF 65535 +#define DPMAIF_AGG_TBL_ENT_NUM_DF 50 +#define DPMAIF_HASH_PRIME_DF 13 +#define DPMAIF_MID_TIMEOUT_THRES_DF 100 +#define DPMAIF_DLQ_TIMEOUT_THRES_DF 100 +#define DPMAIF_DLQ_PRS_THRES_DF 10 +#define DPMAIF_DLQ_HASH_BIT_CHOOSE_DF 0 + +#define DPMAIF_DLQPIT_EN_MSK BIT(20) +#define DPMAIF_DLQPIT_CHAN_OFS 16 +#define DPMAIF_ADD_DLQ_PIT_CHAN_OFS 20 + +#endif /* __T7XX_REG_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.c b/drivers/net/wwan/t7xx/t7xx_state_monitor.c new file mode 100644 index 000000000..0bcca08ff --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.c @@ -0,0 +1,550 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. 
+ * + * Authors: + * Haijun Liu + * Eliot Lee + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Amir Hanania + * Sreehari Kancharla + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_hif_cldma.h" +#include "t7xx_mhccif.h" +#include "t7xx_modem_ops.h" +#include "t7xx_pci.h" +#include "t7xx_pcie_mac.h" +#include "t7xx_port_proxy.h" +#include "t7xx_reg.h" +#include "t7xx_state_monitor.h" + +#define FSM_DRM_DISABLE_DELAY_MS 200 +#define FSM_EVENT_POLL_INTERVAL_MS 20 +#define FSM_MD_EX_REC_OK_TIMEOUT_MS 10000 +#define FSM_MD_EX_PASS_TIMEOUT_MS 45000 +#define FSM_CMD_TIMEOUT_MS 2000 + +void t7xx_fsm_notifier_register(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier) +{ + struct t7xx_fsm_ctl *ctl = md->fsm_ctl; + unsigned long flags; + + spin_lock_irqsave(&ctl->notifier_lock, flags); + list_add_tail(¬ifier->entry, &ctl->notifier_list); + spin_unlock_irqrestore(&ctl->notifier_lock, flags); +} + +void t7xx_fsm_notifier_unregister(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier) +{ + struct t7xx_fsm_notifier *notifier_cur, *notifier_next; + struct t7xx_fsm_ctl *ctl = md->fsm_ctl; + unsigned long flags; + + spin_lock_irqsave(&ctl->notifier_lock, flags); + list_for_each_entry_safe(notifier_cur, notifier_next, &ctl->notifier_list, entry) { + if (notifier_cur == notifier) + list_del(¬ifier->entry); + } + spin_unlock_irqrestore(&ctl->notifier_lock, flags); +} + +static void fsm_state_notify(struct t7xx_modem *md, enum md_state state) +{ + struct t7xx_fsm_ctl *ctl = md->fsm_ctl; + struct t7xx_fsm_notifier *notifier; + unsigned long flags; + + spin_lock_irqsave(&ctl->notifier_lock, flags); + list_for_each_entry(notifier, &ctl->notifier_list, entry) { + spin_unlock_irqrestore(&ctl->notifier_lock, flags); + if (notifier->notifier_fn) + notifier->notifier_fn(state, notifier->data); + + spin_lock_irqsave(&ctl->notifier_lock, flags); + } + spin_unlock_irqrestore(&ctl->notifier_lock, flags); +} + +void t7xx_fsm_broadcast_state(struct t7xx_fsm_ctl *ctl, enum md_state state) +{ + ctl->md_state = state; + + /* Update to port first, otherwise sending message on HS2 may fail */ + t7xx_port_proxy_md_status_notify(ctl->md->port_prox, state); + fsm_state_notify(ctl->md, state); +} + +static void fsm_finish_command(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd, int result) +{ + if (cmd->flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) { + *cmd->ret = result; + complete_all(cmd->done); + } + + kfree(cmd); +} + +static void fsm_del_kf_event(struct t7xx_fsm_event *event) +{ + list_del(&event->entry); + kfree(event); +} + +static void fsm_flush_event_cmd_qs(struct t7xx_fsm_ctl *ctl) +{ + struct device *dev = &ctl->md->t7xx_dev->pdev->dev; + struct t7xx_fsm_event *event, *evt_next; + struct t7xx_fsm_command *cmd, *cmd_next; + unsigned long flags; + + spin_lock_irqsave(&ctl->command_lock, flags); + list_for_each_entry_safe(cmd, cmd_next, &ctl->command_queue, entry) { + dev_warn(dev, "Unhandled command %d\n", cmd->cmd_id); + list_del(&cmd->entry); + fsm_finish_command(ctl, cmd, -EINVAL); + } + spin_unlock_irqrestore(&ctl->command_lock, flags); + + spin_lock_irqsave(&ctl->event_lock, flags); + list_for_each_entry_safe(event, evt_next, &ctl->event_queue, entry) { + dev_warn(dev, "Unhandled event %d\n", event->event_id); + fsm_del_kf_event(event); + } + spin_unlock_irqrestore(&ctl->event_lock, flags); +} + +static void fsm_wait_for_event(struct 
t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_expected, + enum t7xx_fsm_event_state event_ignore, int retries) +{ + struct t7xx_fsm_event *event; + bool event_received = false; + unsigned long flags; + int cnt = 0; + + while (cnt++ < retries && !event_received) { + bool sleep_required = true; + + if (kthread_should_stop()) + return; + + spin_lock_irqsave(&ctl->event_lock, flags); + event = list_first_entry_or_null(&ctl->event_queue, struct t7xx_fsm_event, entry); + if (event) { + event_received = event->event_id == event_expected; + if (event_received || event->event_id == event_ignore) { + fsm_del_kf_event(event); + sleep_required = false; + } + } + spin_unlock_irqrestore(&ctl->event_lock, flags); + + if (sleep_required) + msleep(FSM_EVENT_POLL_INTERVAL_MS); + } +} + +static void fsm_routine_exception(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd, + enum t7xx_ex_reason reason) +{ + struct device *dev = &ctl->md->t7xx_dev->pdev->dev; + + if (ctl->curr_state != FSM_STATE_READY && ctl->curr_state != FSM_STATE_STARTING) { + if (cmd) + fsm_finish_command(ctl, cmd, -EINVAL); + + return; + } + + ctl->curr_state = FSM_STATE_EXCEPTION; + + switch (reason) { + case EXCEPTION_HS_TIMEOUT: + dev_err(dev, "Boot Handshake failure\n"); + break; + + case EXCEPTION_EVENT: + dev_err(dev, "Exception event\n"); + t7xx_fsm_broadcast_state(ctl, MD_STATE_EXCEPTION); + t7xx_pci_pm_exp_detected(ctl->md->t7xx_dev); + t7xx_md_exception_handshake(ctl->md); + + fsm_wait_for_event(ctl, FSM_EVENT_MD_EX_REC_OK, FSM_EVENT_MD_EX, + FSM_MD_EX_REC_OK_TIMEOUT_MS / FSM_EVENT_POLL_INTERVAL_MS); + fsm_wait_for_event(ctl, FSM_EVENT_MD_EX_PASS, FSM_EVENT_INVALID, + FSM_MD_EX_PASS_TIMEOUT_MS / FSM_EVENT_POLL_INTERVAL_MS); + break; + + default: + dev_err(dev, "Exception %d\n", reason); + break; + } + + if (cmd) + fsm_finish_command(ctl, cmd, 0); +} + +static int fsm_stopped_handler(struct t7xx_fsm_ctl *ctl) +{ + ctl->curr_state = FSM_STATE_STOPPED; + + t7xx_fsm_broadcast_state(ctl, MD_STATE_STOPPED); + return t7xx_md_reset(ctl->md->t7xx_dev); +} + +static void fsm_routine_stopped(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd) +{ + if (ctl->curr_state == FSM_STATE_STOPPED) { + fsm_finish_command(ctl, cmd, -EINVAL); + return; + } + + fsm_finish_command(ctl, cmd, fsm_stopped_handler(ctl)); +} + +static void fsm_routine_stopping(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd) +{ + struct t7xx_pci_dev *t7xx_dev; + struct cldma_ctrl *md_ctrl; + int err; + + if (ctl->curr_state == FSM_STATE_STOPPED || ctl->curr_state == FSM_STATE_STOPPING) { + fsm_finish_command(ctl, cmd, -EINVAL); + return; + } + + md_ctrl = ctl->md->md_ctrl[CLDMA_ID_MD]; + t7xx_dev = ctl->md->t7xx_dev; + + ctl->curr_state = FSM_STATE_STOPPING; + t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_TO_STOP); + t7xx_cldma_stop(md_ctrl); + + if (!ctl->md->rgu_irq_asserted) { + t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DRM_DISABLE_AP); + /* Wait for the DRM disable to take effect */ + msleep(FSM_DRM_DISABLE_DELAY_MS); + + err = t7xx_acpi_fldr_func(t7xx_dev); + if (err) + t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET); + } + + fsm_finish_command(ctl, cmd, fsm_stopped_handler(ctl)); +} + +static void t7xx_fsm_broadcast_ready_state(struct t7xx_fsm_ctl *ctl) +{ + if (ctl->md_state != MD_STATE_WAITING_FOR_HS2) + return; + + ctl->md_state = MD_STATE_READY; + + fsm_state_notify(ctl->md, MD_STATE_READY); + t7xx_port_proxy_md_status_notify(ctl->md->port_prox, MD_STATE_READY); +} + +static void fsm_routine_ready(struct t7xx_fsm_ctl *ctl) +{ 
+ struct t7xx_modem *md = ctl->md; + + ctl->curr_state = FSM_STATE_READY; + t7xx_fsm_broadcast_ready_state(ctl); + t7xx_md_event_notify(md, FSM_READY); +} + +static int fsm_routine_starting(struct t7xx_fsm_ctl *ctl) +{ + struct t7xx_modem *md = ctl->md; + struct device *dev; + + ctl->curr_state = FSM_STATE_STARTING; + + t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS1); + t7xx_md_event_notify(md, FSM_START); + + wait_event_interruptible_timeout(ctl->async_hk_wq, md->core_md.ready || ctl->exp_flg, + HZ * 60); + dev = &md->t7xx_dev->pdev->dev; + + if (ctl->exp_flg) + dev_err(dev, "MD exception is captured during handshake\n"); + + if (!md->core_md.ready) { + dev_err(dev, "MD handshake timeout\n"); + if (md->core_md.handshake_ongoing) + t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2_EXIT, NULL, 0); + + fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT); + return -ETIMEDOUT; + } + + t7xx_pci_pm_init_late(md->t7xx_dev); + fsm_routine_ready(ctl); + return 0; +} + +static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd) +{ + struct t7xx_modem *md = ctl->md; + u32 dev_status; + int ret; + + if (!md) + return; + + if (ctl->curr_state != FSM_STATE_INIT && ctl->curr_state != FSM_STATE_PRE_START && + ctl->curr_state != FSM_STATE_STOPPED) { + fsm_finish_command(ctl, cmd, -EINVAL); + return; + } + + ctl->curr_state = FSM_STATE_PRE_START; + t7xx_md_event_notify(md, FSM_PRE_START); + + ret = read_poll_timeout(ioread32, dev_status, + (dev_status & MISC_STAGE_MASK) == LINUX_STAGE, 20000, 2000000, + false, IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS); + if (ret) { + struct device *dev = &md->t7xx_dev->pdev->dev; + + fsm_finish_command(ctl, cmd, -ETIMEDOUT); + dev_err(dev, "Invalid device status 0x%lx\n", dev_status & MISC_STAGE_MASK); + return; + } + + t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_MD]); + fsm_finish_command(ctl, cmd, fsm_routine_starting(ctl)); +} + +static int fsm_main_thread(void *data) +{ + struct t7xx_fsm_ctl *ctl = data; + struct t7xx_fsm_command *cmd; + unsigned long flags; + + while (!kthread_should_stop()) { + if (wait_event_interruptible(ctl->command_wq, !list_empty(&ctl->command_queue) || + kthread_should_stop())) + continue; + + if (kthread_should_stop()) + break; + + spin_lock_irqsave(&ctl->command_lock, flags); + cmd = list_first_entry(&ctl->command_queue, struct t7xx_fsm_command, entry); + list_del(&cmd->entry); + spin_unlock_irqrestore(&ctl->command_lock, flags); + + switch (cmd->cmd_id) { + case FSM_CMD_START: + fsm_routine_start(ctl, cmd); + break; + + case FSM_CMD_EXCEPTION: + fsm_routine_exception(ctl, cmd, FIELD_GET(FSM_CMD_EX_REASON, cmd->flag)); + break; + + case FSM_CMD_PRE_STOP: + fsm_routine_stopping(ctl, cmd); + break; + + case FSM_CMD_STOP: + fsm_routine_stopped(ctl, cmd); + break; + + default: + fsm_finish_command(ctl, cmd, -EINVAL); + fsm_flush_event_cmd_qs(ctl); + break; + } + } + + return 0; +} + +int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id, unsigned int flag) +{ + DECLARE_COMPLETION_ONSTACK(done); + struct t7xx_fsm_command *cmd; + unsigned long flags; + int ret; + + cmd = kzalloc(sizeof(*cmd), flag & FSM_CMD_FLAG_IN_INTERRUPT ? 
GFP_ATOMIC : GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + INIT_LIST_HEAD(&cmd->entry); + cmd->cmd_id = cmd_id; + cmd->flag = flag; + if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) { + cmd->done = &done; + cmd->ret = &ret; + } + + spin_lock_irqsave(&ctl->command_lock, flags); + list_add_tail(&cmd->entry, &ctl->command_queue); + spin_unlock_irqrestore(&ctl->command_lock, flags); + + wake_up(&ctl->command_wq); + + if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) { + unsigned long wait_ret; + + wait_ret = wait_for_completion_timeout(&done, + msecs_to_jiffies(FSM_CMD_TIMEOUT_MS)); + if (!wait_ret) + return -ETIMEDOUT; + + return ret; + } + + return 0; +} + +int t7xx_fsm_append_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id, + unsigned char *data, unsigned int length) +{ + struct device *dev = &ctl->md->t7xx_dev->pdev->dev; + struct t7xx_fsm_event *event; + unsigned long flags; + + if (event_id <= FSM_EVENT_INVALID || event_id >= FSM_EVENT_MAX) { + dev_err(dev, "Invalid event %d\n", event_id); + return -EINVAL; + } + + event = kmalloc(sizeof(*event) + length, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL); + if (!event) + return -ENOMEM; + + INIT_LIST_HEAD(&event->entry); + event->event_id = event_id; + event->length = length; + + if (data && length) + memcpy(event->data, data, length); + + spin_lock_irqsave(&ctl->event_lock, flags); + list_add_tail(&event->entry, &ctl->event_queue); + spin_unlock_irqrestore(&ctl->event_lock, flags); + + wake_up_all(&ctl->event_wq); + return 0; +} + +void t7xx_fsm_clr_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id) +{ + struct t7xx_fsm_event *event, *evt_next; + unsigned long flags; + + spin_lock_irqsave(&ctl->event_lock, flags); + list_for_each_entry_safe(event, evt_next, &ctl->event_queue, entry) { + if (event->event_id == event_id) + fsm_del_kf_event(event); + } + spin_unlock_irqrestore(&ctl->event_lock, flags); +} + +enum md_state t7xx_fsm_get_md_state(struct t7xx_fsm_ctl *ctl) +{ + if (ctl) + return ctl->md_state; + + return MD_STATE_INVALID; +} + +unsigned int t7xx_fsm_get_ctl_state(struct t7xx_fsm_ctl *ctl) +{ + if (ctl) + return ctl->curr_state; + + return FSM_STATE_STOPPED; +} + +int t7xx_fsm_recv_md_intr(struct t7xx_fsm_ctl *ctl, enum t7xx_md_irq_type type) +{ + unsigned int cmd_flags = FSM_CMD_FLAG_IN_INTERRUPT; + + if (type == MD_IRQ_PORT_ENUM) { + return t7xx_fsm_append_cmd(ctl, FSM_CMD_START, cmd_flags); + } else if (type == MD_IRQ_CCIF_EX) { + ctl->exp_flg = true; + wake_up(&ctl->async_hk_wq); + cmd_flags |= FIELD_PREP(FSM_CMD_EX_REASON, EXCEPTION_EVENT); + return t7xx_fsm_append_cmd(ctl, FSM_CMD_EXCEPTION, cmd_flags); + } + + return -EINVAL; +} + +void t7xx_fsm_reset(struct t7xx_modem *md) +{ + struct t7xx_fsm_ctl *ctl = md->fsm_ctl; + + fsm_flush_event_cmd_qs(ctl); + ctl->curr_state = FSM_STATE_STOPPED; + ctl->exp_flg = false; +} + +int t7xx_fsm_init(struct t7xx_modem *md) +{ + struct device *dev = &md->t7xx_dev->pdev->dev; + struct t7xx_fsm_ctl *ctl; + + ctl = devm_kzalloc(dev, sizeof(*ctl), GFP_KERNEL); + if (!ctl) + return -ENOMEM; + + md->fsm_ctl = ctl; + ctl->md = md; + ctl->curr_state = FSM_STATE_INIT; + INIT_LIST_HEAD(&ctl->command_queue); + INIT_LIST_HEAD(&ctl->event_queue); + init_waitqueue_head(&ctl->async_hk_wq); + init_waitqueue_head(&ctl->event_wq); + INIT_LIST_HEAD(&ctl->notifier_list); + init_waitqueue_head(&ctl->command_wq); + spin_lock_init(&ctl->event_lock); + spin_lock_init(&ctl->command_lock); + ctl->exp_flg = false; + spin_lock_init(&ctl->notifier_lock); + + ctl->fsm_thread = 
kthread_run(fsm_main_thread, ctl, "t7xx_fsm"); + return PTR_ERR_OR_ZERO(ctl->fsm_thread); +} + +void t7xx_fsm_uninit(struct t7xx_modem *md) +{ + struct t7xx_fsm_ctl *ctl = md->fsm_ctl; + + if (!ctl) + return; + + if (ctl->fsm_thread) + kthread_stop(ctl->fsm_thread); + + fsm_flush_event_cmd_qs(ctl); +} diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.h b/drivers/net/wwan/t7xx/t7xx_state_monitor.h new file mode 100644 index 000000000..b1af0259d --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.h @@ -0,0 +1,135 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Amir Hanania + * Haijun Liu + * Moises Veleta + * + * Contributors: + * Eliot Lee + * Ricardo Martinez + * Sreehari Kancharla + */ + +#ifndef __T7XX_MONITOR_H__ +#define __T7XX_MONITOR_H__ + +#include +#include +#include +#include +#include + +#include "t7xx_modem_ops.h" + +enum t7xx_fsm_state { + FSM_STATE_INIT, + FSM_STATE_PRE_START, + FSM_STATE_STARTING, + FSM_STATE_READY, + FSM_STATE_EXCEPTION, + FSM_STATE_STOPPING, + FSM_STATE_STOPPED, +}; + +enum t7xx_fsm_event_state { + FSM_EVENT_INVALID, + FSM_EVENT_MD_HS2, + FSM_EVENT_MD_EX, + FSM_EVENT_MD_EX_REC_OK, + FSM_EVENT_MD_EX_PASS, + FSM_EVENT_MD_HS2_EXIT, + FSM_EVENT_MAX +}; + +enum t7xx_fsm_cmd_state { + FSM_CMD_INVALID, + FSM_CMD_START, + FSM_CMD_EXCEPTION, + FSM_CMD_PRE_STOP, + FSM_CMD_STOP, +}; + +enum t7xx_ex_reason { + EXCEPTION_HS_TIMEOUT, + EXCEPTION_EVENT, +}; + +enum t7xx_md_irq_type { + MD_IRQ_WDT, + MD_IRQ_CCIF_EX, + MD_IRQ_PORT_ENUM, +}; + +enum md_state { + MD_STATE_INVALID, + MD_STATE_WAITING_FOR_HS1, + MD_STATE_WAITING_FOR_HS2, + MD_STATE_READY, + MD_STATE_EXCEPTION, + MD_STATE_WAITING_TO_STOP, + MD_STATE_STOPPED, +}; + +#define FSM_CMD_FLAG_WAIT_FOR_COMPLETION BIT(0) +#define FSM_CMD_FLAG_FLIGHT_MODE BIT(1) +#define FSM_CMD_FLAG_IN_INTERRUPT BIT(2) +#define FSM_CMD_EX_REASON GENMASK(23, 16) + +struct t7xx_fsm_ctl { + struct t7xx_modem *md; + enum md_state md_state; + unsigned int curr_state; + struct list_head command_queue; + struct list_head event_queue; + wait_queue_head_t command_wq; + wait_queue_head_t event_wq; + wait_queue_head_t async_hk_wq; + spinlock_t event_lock; /* Protects event queue */ + spinlock_t command_lock; /* Protects command queue */ + struct task_struct *fsm_thread; + bool exp_flg; + spinlock_t notifier_lock; /* Protects notifier list */ + struct list_head notifier_list; +}; + +struct t7xx_fsm_event { + struct list_head entry; + enum t7xx_fsm_event_state event_id; + unsigned int length; + unsigned char data[]; +}; + +struct t7xx_fsm_command { + struct list_head entry; + enum t7xx_fsm_cmd_state cmd_id; + unsigned int flag; + struct completion *done; + int *ret; +}; + +struct t7xx_fsm_notifier { + struct list_head entry; + int (*notifier_fn)(enum md_state state, void *data); + void *data; +}; + +int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id, + unsigned int flag); +int t7xx_fsm_append_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id, + unsigned char *data, unsigned int length); +void t7xx_fsm_clr_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id); +void t7xx_fsm_broadcast_state(struct t7xx_fsm_ctl *ctl, enum md_state state); +void t7xx_fsm_reset(struct t7xx_modem *md); +int t7xx_fsm_init(struct t7xx_modem *md); +void t7xx_fsm_uninit(struct t7xx_modem *md); +int t7xx_fsm_recv_md_intr(struct t7xx_fsm_ctl *ctl, enum t7xx_md_irq_type type); +enum md_state 
t7xx_fsm_get_md_state(struct t7xx_fsm_ctl *ctl); +unsigned int t7xx_fsm_get_ctl_state(struct t7xx_fsm_ctl *ctl); +void t7xx_fsm_notifier_register(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier); +void t7xx_fsm_notifier_unregister(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier); + +#endif /* __T7XX_MONITOR_H__ */
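
For readers tracing the uplink path in t7xx_port_wwan.c above: t7xx_port_ctrl_tx() never hands the lower layer more than one MTU at a time; it copies each write into skbs no larger than the CLDMA MTU less sizeof(struct ccci_header), leaving room for the CCCI header added on the send path. What follows is a minimal standalone sketch of that chunking arithmetic only, with stand-in values for the MTU and header size (the real constants live in the driver headers, not here); it is illustrative, not driver code.

/*
 * Standalone illustration of the chunking loop in t7xx_port_ctrl_tx().
 * EXAMPLE_CLDMA_MTU and EXAMPLE_CCCI_HDR_LEN are stand-in values for this
 * sketch only; the driver uses CLDMA_MTU and sizeof(struct ccci_header).
 */
#include <stddef.h>
#include <stdio.h>

#define EXAMPLE_CLDMA_MTU	3584	/* stand-in for CLDMA_MTU */
#define EXAMPLE_CCCI_HDR_LEN	16	/* stand-in for sizeof(struct ccci_header) */

static size_t min_size(size_t a, size_t b)
{
	return a < b ? a : b;
}

int main(void)
{
	size_t len = 10000;		/* hypothetical skb->len for a large write */
	size_t txq_mtu = EXAMPLE_CLDMA_MTU;
	size_t offset, chunk_len = 0;

	/* Same shape as the for loop in t7xx_port_ctrl_tx(): each chunk is at
	 * most the queue MTU minus the header prepended on transmit.
	 */
	for (offset = 0; offset < len; offset += chunk_len) {
		chunk_len = min_size(len - offset, txq_mtu - EXAMPLE_CCCI_HDR_LEN);
		printf("chunk at offset %zu, length %zu\n", offset, chunk_len);
	}

	return 0;
}

Built on its own (e.g. with gcc), this prints the offsets and lengths the driver's loop would walk for a 10000-byte write under the assumed MTU and header size.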
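
The state machine in t7xx_state_monitor.c is built around one worker kthread and a locked command list: t7xx_fsm_append_cmd() allocates a command, appends it under command_lock, wakes fsm_main_thread() through command_wq, and, when FSM_CMD_FLAG_WAIT_FOR_COMPLETION is set, blocks on a completion until the routine reports a result or FSM_CMD_TIMEOUT_MS expires. Below is a rough user-space analogue of that queue/wake/wait shape, using pthreads instead of the kernel primitives; all names are illustrative, the timeout handling is omitted, and none of it is driver code.

/*
 * User-space analogue (pthreads) of the command-queue pattern in
 * t7xx_state_monitor.c. The driver uses an IRQ-safe spinlock, a wait queue,
 * complete_all() and wait_for_completion_timeout(); this sketch collapses
 * them into one mutex/condvar pair.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_cmd {
	int cmd_id;
	bool wait;		/* like FSM_CMD_FLAG_WAIT_FOR_COMPLETION */
	bool done;
	int result;
	struct demo_cmd *next;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static struct demo_cmd *queue;
static bool stop;

static void *worker(void *arg)		/* plays the role of fsm_main_thread() */
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!stop) {
		while (!queue && !stop)
			pthread_cond_wait(&cond, &lock);
		while (queue) {
			struct demo_cmd *cmd = queue;

			queue = cmd->next;
			pthread_mutex_unlock(&lock);
			/* The driver would switch on cmd_id here. */
			printf("handling command %d\n", cmd->cmd_id);
			pthread_mutex_lock(&lock);
			cmd->result = 0;
			cmd->done = true;
			pthread_cond_broadcast(&cond);	/* complete_all() analogue */
			if (!cmd->wait)
				free(cmd);
		}
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

static int append_cmd(int cmd_id, bool wait)	/* t7xx_fsm_append_cmd() analogue */
{
	struct demo_cmd *cmd = calloc(1, sizeof(*cmd));
	int ret = 0;

	if (!cmd)
		return -1;
	cmd->cmd_id = cmd_id;
	cmd->wait = wait;

	pthread_mutex_lock(&lock);
	cmd->next = queue;		/* simple LIFO for brevity; the driver adds at the tail */
	queue = cmd;
	pthread_cond_broadcast(&cond);	/* wake_up(&ctl->command_wq) analogue */
	if (wait) {
		while (!cmd->done)	/* no timeout in this sketch */
			pthread_cond_wait(&cond, &lock);
		ret = cmd->result;
		free(cmd);
	}
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	pthread_t thr;

	pthread_create(&thr, NULL, worker, NULL);
	append_cmd(1, true);		/* e.g. a start command issued with wait-for-completion */

	pthread_mutex_lock(&lock);
	stop = true;
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
	pthread_join(thr, NULL);
	return 0;
}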