From 2c3c1048746a4622d8c89a29670120dc8fab93c4 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sun, 7 Apr 2024 20:49:45 +0200
Subject: Adding upstream version 6.1.76.

Signed-off-by: Daniel Baumann
---
 drivers/net/ethernet/socionext/Kconfig   |   38 +
 drivers/net/ethernet/socionext/Makefile  |    6 +
 drivers/net/ethernet/socionext/netsec.c  | 2224 ++++++++++++++++++++++++++++++
 drivers/net/ethernet/socionext/sni_ave.c | 1995 +++++++++++++++++++++++++++
 4 files changed, 4263 insertions(+)
 create mode 100644 drivers/net/ethernet/socionext/Kconfig
 create mode 100644 drivers/net/ethernet/socionext/Makefile
 create mode 100644 drivers/net/ethernet/socionext/netsec.c
 create mode 100644 drivers/net/ethernet/socionext/sni_ave.c

(limited to 'drivers/net/ethernet/socionext')

diff --git a/drivers/net/ethernet/socionext/Kconfig b/drivers/net/ethernet/socionext/Kconfig
new file mode 100644
index 000000000..482983898
--- /dev/null
+++ b/drivers/net/ethernet/socionext/Kconfig
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config NET_VENDOR_SOCIONEXT
+	bool "Socionext ethernet drivers"
+	default y
+	help
+	  Option to select ethernet drivers for Socionext platforms.
+
+	  Note that the answer to this question doesn't directly affect the
+	  kernel: saying N will just cause the configurator to skip all
+	  the questions about Socionext devices. If you say Y, you will be asked
+	  for your specific card in the following questions.
+
+if NET_VENDOR_SOCIONEXT
+
+config SNI_AVE
+	tristate "Socionext AVE ethernet support"
+	depends on (ARCH_UNIPHIER || COMPILE_TEST) && OF
+	depends on HAS_IOMEM
+	select MFD_SYSCON
+	select PHYLIB
+	help
+	  Driver for gigabit ethernet MACs, called AVE, in the
+	  Socionext UniPhier family.
+
+config SNI_NETSEC
+	tristate "Socionext NETSEC ethernet support"
+	depends on (ARCH_SYNQUACER || COMPILE_TEST) && OF
+	select PHYLIB
+	select PAGE_POOL
+	select MII
+	help
+	  Enable to add support for the SocioNext NetSec Gigabit Ethernet
+	  controller + PHY, as found on the Synquacer SC2A11 SoC
+
+	  To compile this driver as a module, choose M here: the module will be
+	  called netsec. If unsure, say N.
+ +endif #NET_VENDOR_SOCIONEXT diff --git a/drivers/net/ethernet/socionext/Makefile b/drivers/net/ethernet/socionext/Makefile new file mode 100644 index 000000000..7fd837a99 --- /dev/null +++ b/drivers/net/ethernet/socionext/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for all ethernet ip drivers on Socionext platforms +# +obj-$(CONFIG_SNI_AVE) += sni_ave.o +obj-$(CONFIG_SNI_NETSEC) += netsec.o diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c new file mode 100644 index 000000000..b130e9783 --- /dev/null +++ b/drivers/net/ethernet/socionext/netsec.c @@ -0,0 +1,2224 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define NETSEC_REG_SOFT_RST 0x104 +#define NETSEC_REG_COM_INIT 0x120 + +#define NETSEC_REG_TOP_STATUS 0x200 +#define NETSEC_IRQ_RX BIT(1) +#define NETSEC_IRQ_TX BIT(0) + +#define NETSEC_REG_TOP_INTEN 0x204 +#define NETSEC_REG_INTEN_SET 0x234 +#define NETSEC_REG_INTEN_CLR 0x238 + +#define NETSEC_REG_NRM_TX_STATUS 0x400 +#define NETSEC_REG_NRM_TX_INTEN 0x404 +#define NETSEC_REG_NRM_TX_INTEN_SET 0x428 +#define NETSEC_REG_NRM_TX_INTEN_CLR 0x42c +#define NRM_TX_ST_NTOWNR BIT(17) +#define NRM_TX_ST_TR_ERR BIT(16) +#define NRM_TX_ST_TXDONE BIT(15) +#define NRM_TX_ST_TMREXP BIT(14) + +#define NETSEC_REG_NRM_RX_STATUS 0x440 +#define NETSEC_REG_NRM_RX_INTEN 0x444 +#define NETSEC_REG_NRM_RX_INTEN_SET 0x468 +#define NETSEC_REG_NRM_RX_INTEN_CLR 0x46c +#define NRM_RX_ST_RC_ERR BIT(16) +#define NRM_RX_ST_PKTCNT BIT(15) +#define NRM_RX_ST_TMREXP BIT(14) + +#define NETSEC_REG_PKT_CMD_BUF 0xd0 + +#define NETSEC_REG_CLK_EN 0x100 + +#define NETSEC_REG_PKT_CTRL 0x140 + +#define NETSEC_REG_DMA_TMR_CTRL 0x20c +#define NETSEC_REG_F_TAIKI_MC_VER 0x22c +#define NETSEC_REG_F_TAIKI_VER 0x230 +#define NETSEC_REG_DMA_HM_CTRL 0x214 +#define NETSEC_REG_DMA_MH_CTRL 0x220 +#define NETSEC_REG_ADDR_DIS_CORE 0x218 +#define NETSEC_REG_DMAC_HM_CMD_BUF 0x210 +#define NETSEC_REG_DMAC_MH_CMD_BUF 0x21c + +#define NETSEC_REG_NRM_TX_PKTCNT 0x410 + +#define NETSEC_REG_NRM_TX_DONE_PKTCNT 0x414 +#define NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT 0x418 + +#define NETSEC_REG_NRM_TX_TMR 0x41c + +#define NETSEC_REG_NRM_RX_PKTCNT 0x454 +#define NETSEC_REG_NRM_RX_RXINT_PKTCNT 0x458 +#define NETSEC_REG_NRM_TX_TXINT_TMR 0x420 +#define NETSEC_REG_NRM_RX_RXINT_TMR 0x460 + +#define NETSEC_REG_NRM_RX_TMR 0x45c + +#define NETSEC_REG_NRM_TX_DESC_START_UP 0x434 +#define NETSEC_REG_NRM_TX_DESC_START_LW 0x408 +#define NETSEC_REG_NRM_RX_DESC_START_UP 0x474 +#define NETSEC_REG_NRM_RX_DESC_START_LW 0x448 + +#define NETSEC_REG_NRM_TX_CONFIG 0x430 +#define NETSEC_REG_NRM_RX_CONFIG 0x470 + +#define MAC_REG_STATUS 0x1024 +#define MAC_REG_DATA 0x11c0 +#define MAC_REG_CMD 0x11c4 +#define MAC_REG_FLOW_TH 0x11cc +#define MAC_REG_INTF_SEL 0x11d4 +#define MAC_REG_DESC_INIT 0x11fc +#define MAC_REG_DESC_SOFT_RST 0x1204 +#define NETSEC_REG_MODE_TRANS_COMP_STATUS 0x500 + +#define GMAC_REG_MCR 0x0000 +#define GMAC_REG_MFFR 0x0004 +#define GMAC_REG_GAR 0x0010 +#define GMAC_REG_GDR 0x0014 +#define GMAC_REG_FCR 0x0018 +#define GMAC_REG_BMR 0x1000 +#define GMAC_REG_RDLAR 0x100c +#define GMAC_REG_TDLAR 0x1010 +#define GMAC_REG_OMR 0x1018 + +#define MHZ(n) ((n) * 1000 * 1000) + +#define NETSEC_TX_SHIFT_OWN_FIELD 31 +#define NETSEC_TX_SHIFT_LD_FIELD 30 +#define NETSEC_TX_SHIFT_DRID_FIELD 24 +#define NETSEC_TX_SHIFT_PT_FIELD 21 +#define 
NETSEC_TX_SHIFT_TDRID_FIELD 16 +#define NETSEC_TX_SHIFT_CC_FIELD 15 +#define NETSEC_TX_SHIFT_FS_FIELD 9 +#define NETSEC_TX_LAST 8 +#define NETSEC_TX_SHIFT_CO 7 +#define NETSEC_TX_SHIFT_SO 6 +#define NETSEC_TX_SHIFT_TRS_FIELD 4 + +#define NETSEC_RX_PKT_OWN_FIELD 31 +#define NETSEC_RX_PKT_LD_FIELD 30 +#define NETSEC_RX_PKT_SDRID_FIELD 24 +#define NETSEC_RX_PKT_FR_FIELD 23 +#define NETSEC_RX_PKT_ER_FIELD 21 +#define NETSEC_RX_PKT_ERR_FIELD 16 +#define NETSEC_RX_PKT_TDRID_FIELD 12 +#define NETSEC_RX_PKT_FS_FIELD 9 +#define NETSEC_RX_PKT_LS_FIELD 8 +#define NETSEC_RX_PKT_CO_FIELD 6 + +#define NETSEC_RX_PKT_ERR_MASK 3 + +#define NETSEC_MAX_TX_PKT_LEN 1518 +#define NETSEC_MAX_TX_JUMBO_PKT_LEN 9018 + +#define NETSEC_RING_GMAC 15 +#define NETSEC_RING_MAX 2 + +#define NETSEC_TCP_SEG_LEN_MAX 1460 +#define NETSEC_TCP_JUMBO_SEG_LEN_MAX 8960 + +#define NETSEC_RX_CKSUM_NOTAVAIL 0 +#define NETSEC_RX_CKSUM_OK 1 +#define NETSEC_RX_CKSUM_NG 2 + +#define NETSEC_TOP_IRQ_REG_CODE_LOAD_END BIT(20) +#define NETSEC_IRQ_TRANSITION_COMPLETE BIT(4) + +#define NETSEC_MODE_TRANS_COMP_IRQ_N2T BIT(20) +#define NETSEC_MODE_TRANS_COMP_IRQ_T2N BIT(19) + +#define NETSEC_INT_PKTCNT_MAX 2047 + +#define NETSEC_FLOW_START_TH_MAX 95 +#define NETSEC_FLOW_STOP_TH_MAX 95 +#define NETSEC_FLOW_PAUSE_TIME_MIN 5 + +#define NETSEC_CLK_EN_REG_DOM_ALL 0x3f + +#define NETSEC_PKT_CTRL_REG_MODE_NRM BIT(28) +#define NETSEC_PKT_CTRL_REG_EN_JUMBO BIT(27) +#define NETSEC_PKT_CTRL_REG_LOG_CHKSUM_ER BIT(3) +#define NETSEC_PKT_CTRL_REG_LOG_HD_INCOMPLETE BIT(2) +#define NETSEC_PKT_CTRL_REG_LOG_HD_ER BIT(1) +#define NETSEC_PKT_CTRL_REG_DRP_NO_MATCH BIT(0) + +#define NETSEC_CLK_EN_REG_DOM_G BIT(5) +#define NETSEC_CLK_EN_REG_DOM_C BIT(1) +#define NETSEC_CLK_EN_REG_DOM_D BIT(0) + +#define NETSEC_COM_INIT_REG_DB BIT(2) +#define NETSEC_COM_INIT_REG_CLS BIT(1) +#define NETSEC_COM_INIT_REG_ALL (NETSEC_COM_INIT_REG_CLS | \ + NETSEC_COM_INIT_REG_DB) + +#define NETSEC_SOFT_RST_REG_RESET 0 +#define NETSEC_SOFT_RST_REG_RUN BIT(31) + +#define NETSEC_DMA_CTRL_REG_STOP 1 +#define MH_CTRL__MODE_TRANS BIT(20) + +#define NETSEC_GMAC_CMD_ST_READ 0 +#define NETSEC_GMAC_CMD_ST_WRITE BIT(28) +#define NETSEC_GMAC_CMD_ST_BUSY BIT(31) + +#define NETSEC_GMAC_BMR_REG_COMMON 0x00412080 +#define NETSEC_GMAC_BMR_REG_RESET 0x00020181 +#define NETSEC_GMAC_BMR_REG_SWR 0x00000001 + +#define NETSEC_GMAC_OMR_REG_ST BIT(13) +#define NETSEC_GMAC_OMR_REG_SR BIT(1) + +#define NETSEC_GMAC_MCR_REG_IBN BIT(30) +#define NETSEC_GMAC_MCR_REG_CST BIT(25) +#define NETSEC_GMAC_MCR_REG_JE BIT(20) +#define NETSEC_MCR_PS BIT(15) +#define NETSEC_GMAC_MCR_REG_FES BIT(14) +#define NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON 0x0000280c +#define NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON 0x0001a00c + +#define NETSEC_FCR_RFE BIT(2) +#define NETSEC_FCR_TFE BIT(1) + +#define NETSEC_GMAC_GAR_REG_GW BIT(1) +#define NETSEC_GMAC_GAR_REG_GB BIT(0) + +#define NETSEC_GMAC_GAR_REG_SHIFT_PA 11 +#define NETSEC_GMAC_GAR_REG_SHIFT_GR 6 +#define GMAC_REG_SHIFT_CR_GAR 2 + +#define NETSEC_GMAC_GAR_REG_CR_25_35_MHZ 2 +#define NETSEC_GMAC_GAR_REG_CR_35_60_MHZ 3 +#define NETSEC_GMAC_GAR_REG_CR_60_100_MHZ 0 +#define NETSEC_GMAC_GAR_REG_CR_100_150_MHZ 1 +#define NETSEC_GMAC_GAR_REG_CR_150_250_MHZ 4 +#define NETSEC_GMAC_GAR_REG_CR_250_300_MHZ 5 + +#define NETSEC_GMAC_RDLAR_REG_COMMON 0x18000 +#define NETSEC_GMAC_TDLAR_REG_COMMON 0x1c000 + +#define NETSEC_REG_NETSEC_VER_F_TAIKI 0x50000 + +#define NETSEC_REG_DESC_RING_CONFIG_CFG_UP BIT(31) +#define NETSEC_REG_DESC_RING_CONFIG_CH_RST BIT(30) +#define NETSEC_REG_DESC_TMR_MODE 4 +#define 
NETSEC_REG_DESC_ENDIAN 0 + +#define NETSEC_MAC_DESC_SOFT_RST_SOFT_RST 1 +#define NETSEC_MAC_DESC_INIT_REG_INIT 1 + +#define NETSEC_EEPROM_MAC_ADDRESS 0x00 +#define NETSEC_EEPROM_HM_ME_ADDRESS_H 0x08 +#define NETSEC_EEPROM_HM_ME_ADDRESS_L 0x0C +#define NETSEC_EEPROM_HM_ME_SIZE 0x10 +#define NETSEC_EEPROM_MH_ME_ADDRESS_H 0x14 +#define NETSEC_EEPROM_MH_ME_ADDRESS_L 0x18 +#define NETSEC_EEPROM_MH_ME_SIZE 0x1C +#define NETSEC_EEPROM_PKT_ME_ADDRESS 0x20 +#define NETSEC_EEPROM_PKT_ME_SIZE 0x24 + +#define DESC_NUM 256 + +#define NETSEC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) +#define NETSEC_RXBUF_HEADROOM (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + \ + NET_IP_ALIGN) +#define NETSEC_RX_BUF_NON_DATA (NETSEC_RXBUF_HEADROOM + \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +#define NETSEC_RX_BUF_SIZE (PAGE_SIZE - NETSEC_RX_BUF_NON_DATA) + +#define DESC_SZ sizeof(struct netsec_de) + +#define NETSEC_F_NETSEC_VER_MAJOR_NUM(x) ((x) & 0xffff0000) + +#define NETSEC_XDP_PASS 0 +#define NETSEC_XDP_CONSUMED BIT(0) +#define NETSEC_XDP_TX BIT(1) +#define NETSEC_XDP_REDIR BIT(2) + +enum ring_id { + NETSEC_RING_TX = 0, + NETSEC_RING_RX +}; + +enum buf_type { + TYPE_NETSEC_SKB = 0, + TYPE_NETSEC_XDP_TX, + TYPE_NETSEC_XDP_NDO, +}; + +struct netsec_desc { + union { + struct sk_buff *skb; + struct xdp_frame *xdpf; + }; + dma_addr_t dma_addr; + void *addr; + u16 len; + u8 buf_type; +}; + +struct netsec_desc_ring { + dma_addr_t desc_dma; + struct netsec_desc *desc; + void *vaddr; + u16 head, tail; + u16 xdp_xmit; /* netsec_xdp_xmit packets */ + struct page_pool *page_pool; + struct xdp_rxq_info xdp_rxq; + spinlock_t lock; /* XDP tx queue locking */ +}; + +struct netsec_priv { + struct netsec_desc_ring desc_ring[NETSEC_RING_MAX]; + struct ethtool_coalesce et_coalesce; + struct bpf_prog *xdp_prog; + spinlock_t reglock; /* protect reg access */ + struct napi_struct napi; + phy_interface_t phy_interface; + struct net_device *ndev; + struct device_node *phy_np; + struct phy_device *phydev; + struct mii_bus *mii_bus; + void __iomem *ioaddr; + void __iomem *eeprom_base; + struct device *dev; + struct clk *clk; + u32 msg_enable; + u32 freq; + u32 phy_addr; + bool rx_cksum_offload_flag; +}; + +struct netsec_de { /* Netsec Descriptor layout */ + u32 attr; + u32 data_buf_addr_up; + u32 data_buf_addr_lw; + u32 buf_len_info; +}; + +struct netsec_tx_pkt_ctrl { + u16 tcp_seg_len; + bool tcp_seg_offload_flag; + bool cksum_offload_flag; +}; + +struct netsec_rx_pkt_info { + int rx_cksum_result; + int err_code; + bool err_flag; +}; + +static void netsec_write(struct netsec_priv *priv, u32 reg_addr, u32 val) +{ + writel(val, priv->ioaddr + reg_addr); +} + +static u32 netsec_read(struct netsec_priv *priv, u32 reg_addr) +{ + return readl(priv->ioaddr + reg_addr); +} + +/************* MDIO BUS OPS FOLLOW *************/ + +#define TIMEOUT_SPINS_MAC 1000 +#define TIMEOUT_SECONDARY_MS_MAC 100 + +static u32 netsec_clk_type(u32 freq) +{ + if (freq < MHZ(35)) + return NETSEC_GMAC_GAR_REG_CR_25_35_MHZ; + if (freq < MHZ(60)) + return NETSEC_GMAC_GAR_REG_CR_35_60_MHZ; + if (freq < MHZ(100)) + return NETSEC_GMAC_GAR_REG_CR_60_100_MHZ; + if (freq < MHZ(150)) + return NETSEC_GMAC_GAR_REG_CR_100_150_MHZ; + if (freq < MHZ(250)) + return NETSEC_GMAC_GAR_REG_CR_150_250_MHZ; + + return NETSEC_GMAC_GAR_REG_CR_250_300_MHZ; +} + +static int netsec_wait_while_busy(struct netsec_priv *priv, u32 addr, u32 mask) +{ + u32 timeout = TIMEOUT_SPINS_MAC; + + while (--timeout && netsec_read(priv, addr) & mask) + cpu_relax(); + if (timeout) + return 0; + + timeout = 
TIMEOUT_SECONDARY_MS_MAC; + while (--timeout && netsec_read(priv, addr) & mask) + usleep_range(1000, 2000); + + if (timeout) + return 0; + + netdev_WARN(priv->ndev, "%s: timeout\n", __func__); + + return -ETIMEDOUT; +} + +static int netsec_mac_write(struct netsec_priv *priv, u32 addr, u32 value) +{ + netsec_write(priv, MAC_REG_DATA, value); + netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_WRITE); + return netsec_wait_while_busy(priv, + MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY); +} + +static int netsec_mac_read(struct netsec_priv *priv, u32 addr, u32 *read) +{ + int ret; + + netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_READ); + ret = netsec_wait_while_busy(priv, + MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY); + if (ret) + return ret; + + *read = netsec_read(priv, MAC_REG_DATA); + + return 0; +} + +static int netsec_mac_wait_while_busy(struct netsec_priv *priv, + u32 addr, u32 mask) +{ + u32 timeout = TIMEOUT_SPINS_MAC; + int ret, data; + + do { + ret = netsec_mac_read(priv, addr, &data); + if (ret) + break; + cpu_relax(); + } while (--timeout && (data & mask)); + + if (timeout) + return 0; + + timeout = TIMEOUT_SECONDARY_MS_MAC; + do { + usleep_range(1000, 2000); + + ret = netsec_mac_read(priv, addr, &data); + if (ret) + break; + cpu_relax(); + } while (--timeout && (data & mask)); + + if (timeout && !ret) + return 0; + + netdev_WARN(priv->ndev, "%s: timeout\n", __func__); + + return -ETIMEDOUT; +} + +static int netsec_mac_update_to_phy_state(struct netsec_priv *priv) +{ + struct phy_device *phydev = priv->ndev->phydev; + u32 value = 0; + + value = phydev->duplex ? NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON : + NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON; + + if (phydev->speed != SPEED_1000) + value |= NETSEC_MCR_PS; + + if (priv->phy_interface != PHY_INTERFACE_MODE_GMII && + phydev->speed == SPEED_100) + value |= NETSEC_GMAC_MCR_REG_FES; + + value |= NETSEC_GMAC_MCR_REG_CST | NETSEC_GMAC_MCR_REG_JE; + + if (phy_interface_mode_is_rgmii(priv->phy_interface)) + value |= NETSEC_GMAC_MCR_REG_IBN; + + if (netsec_mac_write(priv, GMAC_REG_MCR, value)) + return -ETIMEDOUT; + + return 0; +} + +static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr); + +static int netsec_phy_write(struct mii_bus *bus, + int phy_addr, int reg, u16 val) +{ + int status; + struct netsec_priv *priv = bus->priv; + + if (netsec_mac_write(priv, GMAC_REG_GDR, val)) + return -ETIMEDOUT; + if (netsec_mac_write(priv, GMAC_REG_GAR, + phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA | + reg << NETSEC_GMAC_GAR_REG_SHIFT_GR | + NETSEC_GMAC_GAR_REG_GW | NETSEC_GMAC_GAR_REG_GB | + (netsec_clk_type(priv->freq) << + GMAC_REG_SHIFT_CR_GAR))) + return -ETIMEDOUT; + + status = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR, + NETSEC_GMAC_GAR_REG_GB); + + /* Developerbox implements RTL8211E PHY and there is + * a compatibility problem with F_GMAC4. + * RTL8211E expects MDC clock must be kept toggling for several + * clock cycle with MDIO high before entering the IDLE state. + * To meet this requirement, netsec driver needs to issue dummy + * read(e.g. read PHYID1(offset 0x2) register) right after write. 
+ */ + netsec_phy_read(bus, phy_addr, MII_PHYSID1); + + return status; +} + +static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr) +{ + struct netsec_priv *priv = bus->priv; + u32 data; + int ret; + + if (netsec_mac_write(priv, GMAC_REG_GAR, NETSEC_GMAC_GAR_REG_GB | + phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA | + reg_addr << NETSEC_GMAC_GAR_REG_SHIFT_GR | + (netsec_clk_type(priv->freq) << + GMAC_REG_SHIFT_CR_GAR))) + return -ETIMEDOUT; + + ret = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR, + NETSEC_GMAC_GAR_REG_GB); + if (ret) + return ret; + + ret = netsec_mac_read(priv, GMAC_REG_GDR, &data); + if (ret) + return ret; + + return data; +} + +/************* ETHTOOL_OPS FOLLOW *************/ + +static void netsec_et_get_drvinfo(struct net_device *net_device, + struct ethtool_drvinfo *info) +{ + strscpy(info->driver, "netsec", sizeof(info->driver)); + strscpy(info->bus_info, dev_name(net_device->dev.parent), + sizeof(info->bus_info)); +} + +static int netsec_et_get_coalesce(struct net_device *net_device, + struct ethtool_coalesce *et_coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct netsec_priv *priv = netdev_priv(net_device); + + *et_coalesce = priv->et_coalesce; + + return 0; +} + +static int netsec_et_set_coalesce(struct net_device *net_device, + struct ethtool_coalesce *et_coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct netsec_priv *priv = netdev_priv(net_device); + + priv->et_coalesce = *et_coalesce; + + if (priv->et_coalesce.tx_coalesce_usecs < 50) + priv->et_coalesce.tx_coalesce_usecs = 50; + if (priv->et_coalesce.tx_max_coalesced_frames < 1) + priv->et_coalesce.tx_max_coalesced_frames = 1; + + netsec_write(priv, NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT, + priv->et_coalesce.tx_max_coalesced_frames); + netsec_write(priv, NETSEC_REG_NRM_TX_TXINT_TMR, + priv->et_coalesce.tx_coalesce_usecs); + netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TXDONE); + netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TMREXP); + + if (priv->et_coalesce.rx_coalesce_usecs < 50) + priv->et_coalesce.rx_coalesce_usecs = 50; + if (priv->et_coalesce.rx_max_coalesced_frames < 1) + priv->et_coalesce.rx_max_coalesced_frames = 1; + + netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_PKTCNT, + priv->et_coalesce.rx_max_coalesced_frames); + netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_TMR, + priv->et_coalesce.rx_coalesce_usecs); + netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_PKTCNT); + netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_TMREXP); + + return 0; +} + +static u32 netsec_et_get_msglevel(struct net_device *dev) +{ + struct netsec_priv *priv = netdev_priv(dev); + + return priv->msg_enable; +} + +static void netsec_et_set_msglevel(struct net_device *dev, u32 datum) +{ + struct netsec_priv *priv = netdev_priv(dev); + + priv->msg_enable = datum; +} + +static const struct ethtool_ops netsec_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES, + .get_drvinfo = netsec_et_get_drvinfo, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_link = ethtool_op_get_link, + .get_coalesce = netsec_et_get_coalesce, + .set_coalesce = netsec_et_set_coalesce, + .get_msglevel = netsec_et_get_msglevel, + .set_msglevel = netsec_et_set_msglevel, +}; + +/************* NETDEV_OPS FOLLOW *************/ + + +static void netsec_set_rx_de(struct netsec_priv *priv, + struct 
netsec_desc_ring *dring, u16 idx, + const struct netsec_desc *desc) +{ + struct netsec_de *de = dring->vaddr + DESC_SZ * idx; + u32 attr = (1 << NETSEC_RX_PKT_OWN_FIELD) | + (1 << NETSEC_RX_PKT_FS_FIELD) | + (1 << NETSEC_RX_PKT_LS_FIELD); + + if (idx == DESC_NUM - 1) + attr |= (1 << NETSEC_RX_PKT_LD_FIELD); + + de->data_buf_addr_up = upper_32_bits(desc->dma_addr); + de->data_buf_addr_lw = lower_32_bits(desc->dma_addr); + de->buf_len_info = desc->len; + de->attr = attr; + dma_wmb(); + + dring->desc[idx].dma_addr = desc->dma_addr; + dring->desc[idx].addr = desc->addr; + dring->desc[idx].len = desc->len; +} + +static bool netsec_clean_tx_dring(struct netsec_priv *priv) +{ + struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX]; + struct xdp_frame_bulk bq; + struct netsec_de *entry; + int tail = dring->tail; + unsigned int bytes; + int cnt = 0; + + spin_lock(&dring->lock); + + bytes = 0; + xdp_frame_bulk_init(&bq); + entry = dring->vaddr + DESC_SZ * tail; + + rcu_read_lock(); /* need for xdp_return_frame_bulk */ + + while (!(entry->attr & (1U << NETSEC_TX_SHIFT_OWN_FIELD)) && + cnt < DESC_NUM) { + struct netsec_desc *desc; + int eop; + + desc = &dring->desc[tail]; + eop = (entry->attr >> NETSEC_TX_LAST) & 1; + dma_rmb(); + + /* if buf_type is either TYPE_NETSEC_SKB or + * TYPE_NETSEC_XDP_NDO we mapped it + */ + if (desc->buf_type != TYPE_NETSEC_XDP_TX) + dma_unmap_single(priv->dev, desc->dma_addr, desc->len, + DMA_TO_DEVICE); + + if (!eop) + goto next; + + if (desc->buf_type == TYPE_NETSEC_SKB) { + bytes += desc->skb->len; + dev_kfree_skb(desc->skb); + } else { + bytes += desc->xdpf->len; + if (desc->buf_type == TYPE_NETSEC_XDP_TX) + xdp_return_frame_rx_napi(desc->xdpf); + else + xdp_return_frame_bulk(desc->xdpf, &bq); + } +next: + /* clean up so netsec_uninit_pkt_dring() won't free the skb + * again + */ + *desc = (struct netsec_desc){}; + + /* entry->attr is not going to be accessed by the NIC until + * netsec_set_tx_de() is called. No need for a dma_wmb() here + */ + entry->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD; + /* move tail ahead */ + dring->tail = (tail + 1) % DESC_NUM; + + tail = dring->tail; + entry = dring->vaddr + DESC_SZ * tail; + cnt++; + } + xdp_flush_frame_bulk(&bq); + + rcu_read_unlock(); + + spin_unlock(&dring->lock); + + if (!cnt) + return false; + + /* reading the register clears the irq */ + netsec_read(priv, NETSEC_REG_NRM_TX_DONE_PKTCNT); + + priv->ndev->stats.tx_packets += cnt; + priv->ndev->stats.tx_bytes += bytes; + + netdev_completed_queue(priv->ndev, cnt, bytes); + + return true; +} + +static void netsec_process_tx(struct netsec_priv *priv) +{ + struct net_device *ndev = priv->ndev; + bool cleaned; + + cleaned = netsec_clean_tx_dring(priv); + + if (cleaned && netif_queue_stopped(ndev)) { + /* Make sure we update the value, anyone stopping the queue + * after this will read the proper consumer idx + */ + smp_wmb(); + netif_wake_queue(ndev); + } +} + +static void *netsec_alloc_rx_data(struct netsec_priv *priv, + dma_addr_t *dma_handle, u16 *desc_len) + +{ + + struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX]; + struct page *page; + + page = page_pool_dev_alloc_pages(dring->page_pool); + if (!page) + return NULL; + + /* We allocate the same buffer length for XDP and non-XDP cases. 
+ * page_pool API will map the whole page, skip what's needed for + * network payloads and/or XDP + */ + *dma_handle = page_pool_get_dma_addr(page) + NETSEC_RXBUF_HEADROOM; + /* Make sure the incoming payload fits in the page for XDP and non-XDP + * cases and reserve enough space for headroom + skb_shared_info + */ + *desc_len = NETSEC_RX_BUF_SIZE; + + return page_address(page); +} + +static void netsec_rx_fill(struct netsec_priv *priv, u16 from, u16 num) +{ + struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX]; + u16 idx = from; + + while (num) { + netsec_set_rx_de(priv, dring, idx, &dring->desc[idx]); + idx++; + if (idx >= DESC_NUM) + idx = 0; + num--; + } +} + +static void netsec_xdp_ring_tx_db(struct netsec_priv *priv, u16 pkts) +{ + if (likely(pkts)) + netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, pkts); +} + +static void netsec_finalize_xdp_rx(struct netsec_priv *priv, u32 xdp_res, + u16 pkts) +{ + if (xdp_res & NETSEC_XDP_REDIR) + xdp_do_flush_map(); + + if (xdp_res & NETSEC_XDP_TX) + netsec_xdp_ring_tx_db(priv, pkts); +} + +static void netsec_set_tx_de(struct netsec_priv *priv, + struct netsec_desc_ring *dring, + const struct netsec_tx_pkt_ctrl *tx_ctrl, + const struct netsec_desc *desc, void *buf) +{ + int idx = dring->head; + struct netsec_de *de; + u32 attr; + + de = dring->vaddr + (DESC_SZ * idx); + + attr = (1 << NETSEC_TX_SHIFT_OWN_FIELD) | + (1 << NETSEC_TX_SHIFT_PT_FIELD) | + (NETSEC_RING_GMAC << NETSEC_TX_SHIFT_TDRID_FIELD) | + (1 << NETSEC_TX_SHIFT_FS_FIELD) | + (1 << NETSEC_TX_LAST) | + (tx_ctrl->cksum_offload_flag << NETSEC_TX_SHIFT_CO) | + (tx_ctrl->tcp_seg_offload_flag << NETSEC_TX_SHIFT_SO) | + (1 << NETSEC_TX_SHIFT_TRS_FIELD); + if (idx == DESC_NUM - 1) + attr |= (1 << NETSEC_TX_SHIFT_LD_FIELD); + + de->data_buf_addr_up = upper_32_bits(desc->dma_addr); + de->data_buf_addr_lw = lower_32_bits(desc->dma_addr); + de->buf_len_info = (tx_ctrl->tcp_seg_len << 16) | desc->len; + de->attr = attr; + + dring->desc[idx] = *desc; + if (desc->buf_type == TYPE_NETSEC_SKB) + dring->desc[idx].skb = buf; + else if (desc->buf_type == TYPE_NETSEC_XDP_TX || + desc->buf_type == TYPE_NETSEC_XDP_NDO) + dring->desc[idx].xdpf = buf; + + /* move head ahead */ + dring->head = (dring->head + 1) % DESC_NUM; +} + +/* The current driver only supports 1 Txq, this should run under spin_lock() */ +static u32 netsec_xdp_queue_one(struct netsec_priv *priv, + struct xdp_frame *xdpf, bool is_ndo) + +{ + struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX]; + struct page *page = virt_to_page(xdpf->data); + struct netsec_tx_pkt_ctrl tx_ctrl = {}; + struct netsec_desc tx_desc; + dma_addr_t dma_handle; + u16 filled; + + if (tx_ring->head >= tx_ring->tail) + filled = tx_ring->head - tx_ring->tail; + else + filled = tx_ring->head + DESC_NUM - tx_ring->tail; + + if (DESC_NUM - filled <= 1) + return NETSEC_XDP_CONSUMED; + + if (is_ndo) { + /* this is for ndo_xdp_xmit, the buffer needs mapping before + * sending + */ + dma_handle = dma_map_single(priv->dev, xdpf->data, xdpf->len, + DMA_TO_DEVICE); + if (dma_mapping_error(priv->dev, dma_handle)) + return NETSEC_XDP_CONSUMED; + tx_desc.buf_type = TYPE_NETSEC_XDP_NDO; + } else { + /* This is the device Rx buffer from page_pool. 
No need to remap + * just sync and send it + */ + struct netsec_desc_ring *rx_ring = + &priv->desc_ring[NETSEC_RING_RX]; + enum dma_data_direction dma_dir = + page_pool_get_dma_dir(rx_ring->page_pool); + + dma_handle = page_pool_get_dma_addr(page) + xdpf->headroom + + sizeof(*xdpf); + dma_sync_single_for_device(priv->dev, dma_handle, xdpf->len, + dma_dir); + tx_desc.buf_type = TYPE_NETSEC_XDP_TX; + } + + tx_desc.dma_addr = dma_handle; + tx_desc.addr = xdpf->data; + tx_desc.len = xdpf->len; + + netdev_sent_queue(priv->ndev, xdpf->len); + netsec_set_tx_de(priv, tx_ring, &tx_ctrl, &tx_desc, xdpf); + + return NETSEC_XDP_TX; +} + +static u32 netsec_xdp_xmit_back(struct netsec_priv *priv, struct xdp_buff *xdp) +{ + struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX]; + struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); + u32 ret; + + if (unlikely(!xdpf)) + return NETSEC_XDP_CONSUMED; + + spin_lock(&tx_ring->lock); + ret = netsec_xdp_queue_one(priv, xdpf, false); + spin_unlock(&tx_ring->lock); + + return ret; +} + +static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog, + struct xdp_buff *xdp) +{ + struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX]; + unsigned int sync, len = xdp->data_end - xdp->data; + u32 ret = NETSEC_XDP_PASS; + struct page *page; + int err; + u32 act; + + act = bpf_prog_run_xdp(prog, xdp); + + /* Due xdp_adjust_tail: DMA sync for_device cover max len CPU touch */ + sync = xdp->data_end - xdp->data_hard_start - NETSEC_RXBUF_HEADROOM; + sync = max(sync, len); + + switch (act) { + case XDP_PASS: + ret = NETSEC_XDP_PASS; + break; + case XDP_TX: + ret = netsec_xdp_xmit_back(priv, xdp); + if (ret != NETSEC_XDP_TX) { + page = virt_to_head_page(xdp->data); + page_pool_put_page(dring->page_pool, page, sync, true); + } + break; + case XDP_REDIRECT: + err = xdp_do_redirect(priv->ndev, xdp, prog); + if (!err) { + ret = NETSEC_XDP_REDIR; + } else { + ret = NETSEC_XDP_CONSUMED; + page = virt_to_head_page(xdp->data); + page_pool_put_page(dring->page_pool, page, sync, true); + } + break; + default: + bpf_warn_invalid_xdp_action(priv->ndev, prog, act); + fallthrough; + case XDP_ABORTED: + trace_xdp_exception(priv->ndev, prog, act); + fallthrough; /* handle aborts by dropping packet */ + case XDP_DROP: + ret = NETSEC_XDP_CONSUMED; + page = virt_to_head_page(xdp->data); + page_pool_put_page(dring->page_pool, page, sync, true); + break; + } + + return ret; +} + +static int netsec_process_rx(struct netsec_priv *priv, int budget) +{ + struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX]; + struct net_device *ndev = priv->ndev; + struct netsec_rx_pkt_info rx_info; + enum dma_data_direction dma_dir; + struct bpf_prog *xdp_prog; + struct xdp_buff xdp; + u16 xdp_xmit = 0; + u32 xdp_act = 0; + int done = 0; + + xdp_init_buff(&xdp, PAGE_SIZE, &dring->xdp_rxq); + + xdp_prog = READ_ONCE(priv->xdp_prog); + dma_dir = page_pool_get_dma_dir(dring->page_pool); + + while (done < budget) { + u16 idx = dring->tail; + struct netsec_de *de = dring->vaddr + (DESC_SZ * idx); + struct netsec_desc *desc = &dring->desc[idx]; + struct page *page = virt_to_page(desc->addr); + u32 xdp_result = NETSEC_XDP_PASS; + struct sk_buff *skb = NULL; + u16 pkt_len, desc_len; + dma_addr_t dma_handle; + void *buf_addr; + + if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) { + /* reading the register clears the irq */ + netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT); + break; + } + + /* This barrier is needed to keep us from reading + * any other fields out of the 
netsec_de until we have + * verified the descriptor has been written back + */ + dma_rmb(); + done++; + + pkt_len = de->buf_len_info >> 16; + rx_info.err_code = (de->attr >> NETSEC_RX_PKT_ERR_FIELD) & + NETSEC_RX_PKT_ERR_MASK; + rx_info.err_flag = (de->attr >> NETSEC_RX_PKT_ER_FIELD) & 1; + if (rx_info.err_flag) { + netif_err(priv, drv, priv->ndev, + "%s: rx fail err(%d)\n", __func__, + rx_info.err_code); + ndev->stats.rx_dropped++; + dring->tail = (dring->tail + 1) % DESC_NUM; + /* reuse buffer page frag */ + netsec_rx_fill(priv, idx, 1); + continue; + } + rx_info.rx_cksum_result = + (de->attr >> NETSEC_RX_PKT_CO_FIELD) & 3; + + /* allocate a fresh buffer and map it to the hardware. + * This will eventually replace the old buffer in the hardware + */ + buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len); + + if (unlikely(!buf_addr)) + break; + + dma_sync_single_for_cpu(priv->dev, desc->dma_addr, pkt_len, + dma_dir); + prefetch(desc->addr); + + xdp_prepare_buff(&xdp, desc->addr, NETSEC_RXBUF_HEADROOM, + pkt_len, false); + + if (xdp_prog) { + xdp_result = netsec_run_xdp(priv, xdp_prog, &xdp); + if (xdp_result != NETSEC_XDP_PASS) { + xdp_act |= xdp_result; + if (xdp_result == NETSEC_XDP_TX) + xdp_xmit++; + goto next; + } + } + skb = build_skb(desc->addr, desc->len + NETSEC_RX_BUF_NON_DATA); + + if (unlikely(!skb)) { + /* If skb fails recycle_direct will either unmap and + * free the page or refill the cache depending on the + * cache state. Since we paid the allocation cost if + * building an skb fails try to put the page into cache + */ + page_pool_put_page(dring->page_pool, page, pkt_len, + true); + netif_err(priv, drv, priv->ndev, + "rx failed to build skb\n"); + break; + } + skb_mark_for_recycle(skb); + + skb_reserve(skb, xdp.data - xdp.data_hard_start); + skb_put(skb, xdp.data_end - xdp.data); + skb->protocol = eth_type_trans(skb, priv->ndev); + + if (priv->rx_cksum_offload_flag && + rx_info.rx_cksum_result == NETSEC_RX_CKSUM_OK) + skb->ip_summed = CHECKSUM_UNNECESSARY; + +next: + if (skb) + napi_gro_receive(&priv->napi, skb); + if (skb || xdp_result) { + ndev->stats.rx_packets++; + ndev->stats.rx_bytes += xdp.data_end - xdp.data; + } + + /* Update the descriptor with fresh buffers */ + desc->len = desc_len; + desc->dma_addr = dma_handle; + desc->addr = buf_addr; + + netsec_rx_fill(priv, idx, 1); + dring->tail = (dring->tail + 1) % DESC_NUM; + } + netsec_finalize_xdp_rx(priv, xdp_act, xdp_xmit); + + return done; +} + +static int netsec_napi_poll(struct napi_struct *napi, int budget) +{ + struct netsec_priv *priv; + int done; + + priv = container_of(napi, struct netsec_priv, napi); + + netsec_process_tx(priv); + done = netsec_process_rx(priv, budget); + + if (done < budget && napi_complete_done(napi, done)) { + unsigned long flags; + + spin_lock_irqsave(&priv->reglock, flags); + netsec_write(priv, NETSEC_REG_INTEN_SET, + NETSEC_IRQ_RX | NETSEC_IRQ_TX); + spin_unlock_irqrestore(&priv->reglock, flags); + } + + return done; +} + + +static int netsec_desc_used(struct netsec_desc_ring *dring) +{ + int used; + + if (dring->head >= dring->tail) + used = dring->head - dring->tail; + else + used = dring->head + DESC_NUM - dring->tail; + + return used; +} + +static int netsec_check_stop_tx(struct netsec_priv *priv, int used) +{ + struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX]; + + /* keep tail from touching the queue */ + if (DESC_NUM - used < 2) { + netif_stop_queue(priv->ndev); + + /* Make sure we read the updated value in case + * descriptors got freed + */ + 
smp_rmb(); + + used = netsec_desc_used(dring); + if (DESC_NUM - used < 2) + return NETDEV_TX_BUSY; + + netif_wake_queue(priv->ndev); + } + + return 0; +} + +static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb, + struct net_device *ndev) +{ + struct netsec_priv *priv = netdev_priv(ndev); + struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX]; + struct netsec_tx_pkt_ctrl tx_ctrl = {}; + struct netsec_desc tx_desc; + u16 tso_seg_len = 0; + int filled; + + spin_lock_bh(&dring->lock); + filled = netsec_desc_used(dring); + if (netsec_check_stop_tx(priv, filled)) { + spin_unlock_bh(&dring->lock); + net_warn_ratelimited("%s %s Tx queue full\n", + dev_name(priv->dev), ndev->name); + return NETDEV_TX_BUSY; + } + + if (skb->ip_summed == CHECKSUM_PARTIAL) + tx_ctrl.cksum_offload_flag = true; + + if (skb_is_gso(skb)) + tso_seg_len = skb_shinfo(skb)->gso_size; + + if (tso_seg_len > 0) { + if (skb->protocol == htons(ETH_P_IP)) { + ip_hdr(skb)->tot_len = 0; + tcp_hdr(skb)->check = + ~tcp_v4_check(0, ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, 0); + } else { + tcp_v6_gso_csum_prep(skb); + } + + tx_ctrl.tcp_seg_offload_flag = true; + tx_ctrl.tcp_seg_len = tso_seg_len; + } + + tx_desc.dma_addr = dma_map_single(priv->dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + if (dma_mapping_error(priv->dev, tx_desc.dma_addr)) { + spin_unlock_bh(&dring->lock); + netif_err(priv, drv, priv->ndev, + "%s: DMA mapping failed\n", __func__); + ndev->stats.tx_dropped++; + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + tx_desc.addr = skb->data; + tx_desc.len = skb_headlen(skb); + tx_desc.buf_type = TYPE_NETSEC_SKB; + + skb_tx_timestamp(skb); + netdev_sent_queue(priv->ndev, skb->len); + + netsec_set_tx_de(priv, dring, &tx_ctrl, &tx_desc, skb); + spin_unlock_bh(&dring->lock); + netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, 1); /* submit another tx */ + + return NETDEV_TX_OK; +} + +static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id) +{ + struct netsec_desc_ring *dring = &priv->desc_ring[id]; + struct netsec_desc *desc; + u16 idx; + + if (!dring->vaddr || !dring->desc) + return; + for (idx = 0; idx < DESC_NUM; idx++) { + desc = &dring->desc[idx]; + if (!desc->addr) + continue; + + if (id == NETSEC_RING_RX) { + struct page *page = virt_to_page(desc->addr); + + page_pool_put_full_page(dring->page_pool, page, false); + } else if (id == NETSEC_RING_TX) { + dma_unmap_single(priv->dev, desc->dma_addr, desc->len, + DMA_TO_DEVICE); + dev_kfree_skb(desc->skb); + } + } + + /* Rx is currently using page_pool */ + if (id == NETSEC_RING_RX) { + if (xdp_rxq_info_is_reg(&dring->xdp_rxq)) + xdp_rxq_info_unreg(&dring->xdp_rxq); + page_pool_destroy(dring->page_pool); + } + + memset(dring->desc, 0, sizeof(struct netsec_desc) * DESC_NUM); + memset(dring->vaddr, 0, DESC_SZ * DESC_NUM); + + dring->head = 0; + dring->tail = 0; + + if (id == NETSEC_RING_TX) + netdev_reset_queue(priv->ndev); +} + +static void netsec_free_dring(struct netsec_priv *priv, int id) +{ + struct netsec_desc_ring *dring = &priv->desc_ring[id]; + + if (dring->vaddr) { + dma_free_coherent(priv->dev, DESC_SZ * DESC_NUM, + dring->vaddr, dring->desc_dma); + dring->vaddr = NULL; + } + + kfree(dring->desc); + dring->desc = NULL; +} + +static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id) +{ + struct netsec_desc_ring *dring = &priv->desc_ring[id]; + + dring->vaddr = dma_alloc_coherent(priv->dev, DESC_SZ * DESC_NUM, + &dring->desc_dma, GFP_KERNEL); + if (!dring->vaddr) + goto err; + + dring->desc = kcalloc(DESC_NUM, 
sizeof(*dring->desc), GFP_KERNEL); + if (!dring->desc) + goto err; + + return 0; +err: + netsec_free_dring(priv, id); + + return -ENOMEM; +} + +static void netsec_setup_tx_dring(struct netsec_priv *priv) +{ + struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX]; + int i; + + for (i = 0; i < DESC_NUM; i++) { + struct netsec_de *de; + + de = dring->vaddr + (DESC_SZ * i); + /* de->attr is not going to be accessed by the NIC + * until netsec_set_tx_de() is called. + * No need for a dma_wmb() here + */ + de->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD; + } +} + +static int netsec_setup_rx_dring(struct netsec_priv *priv) +{ + struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX]; + struct bpf_prog *xdp_prog = READ_ONCE(priv->xdp_prog); + struct page_pool_params pp_params = { + .order = 0, + /* internal DMA mapping in page_pool */ + .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, + .pool_size = DESC_NUM, + .nid = NUMA_NO_NODE, + .dev = priv->dev, + .dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE, + .offset = NETSEC_RXBUF_HEADROOM, + .max_len = NETSEC_RX_BUF_SIZE, + }; + int i, err; + + dring->page_pool = page_pool_create(&pp_params); + if (IS_ERR(dring->page_pool)) { + err = PTR_ERR(dring->page_pool); + dring->page_pool = NULL; + goto err_out; + } + + err = xdp_rxq_info_reg(&dring->xdp_rxq, priv->ndev, 0, priv->napi.napi_id); + if (err) + goto err_out; + + err = xdp_rxq_info_reg_mem_model(&dring->xdp_rxq, MEM_TYPE_PAGE_POOL, + dring->page_pool); + if (err) + goto err_out; + + for (i = 0; i < DESC_NUM; i++) { + struct netsec_desc *desc = &dring->desc[i]; + dma_addr_t dma_handle; + void *buf; + u16 len; + + buf = netsec_alloc_rx_data(priv, &dma_handle, &len); + + if (!buf) { + err = -ENOMEM; + goto err_out; + } + desc->dma_addr = dma_handle; + desc->addr = buf; + desc->len = len; + } + + netsec_rx_fill(priv, 0, DESC_NUM); + + return 0; + +err_out: + netsec_uninit_pkt_dring(priv, NETSEC_RING_RX); + return err; +} + +static int netsec_netdev_load_ucode_region(struct netsec_priv *priv, u32 reg, + u32 addr_h, u32 addr_l, u32 size) +{ + u64 base = (u64)addr_h << 32 | addr_l; + void __iomem *ucode; + u32 i; + + ucode = ioremap(base, size * sizeof(u32)); + if (!ucode) + return -ENOMEM; + + for (i = 0; i < size; i++) + netsec_write(priv, reg, readl(ucode + i * 4)); + + iounmap(ucode); + return 0; +} + +static int netsec_netdev_load_microcode(struct netsec_priv *priv) +{ + u32 addr_h, addr_l, size; + int err; + + addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_H); + addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_L); + size = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_SIZE); + err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_HM_CMD_BUF, + addr_h, addr_l, size); + if (err) + return err; + + addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_H); + addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_L); + size = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_SIZE); + err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_MH_CMD_BUF, + addr_h, addr_l, size); + if (err) + return err; + + addr_h = 0; + addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_ADDRESS); + size = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_SIZE); + err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_PKT_CMD_BUF, + addr_h, addr_l, size); + if (err) + return err; + + return 0; +} + +static int netsec_reset_hardware(struct netsec_priv *priv, + bool load_ucode) +{ + u32 value; + int err; + + /* stop DMA engines */ + if 
(!netsec_read(priv, NETSEC_REG_ADDR_DIS_CORE)) { + netsec_write(priv, NETSEC_REG_DMA_HM_CTRL, + NETSEC_DMA_CTRL_REG_STOP); + netsec_write(priv, NETSEC_REG_DMA_MH_CTRL, + NETSEC_DMA_CTRL_REG_STOP); + + while (netsec_read(priv, NETSEC_REG_DMA_HM_CTRL) & + NETSEC_DMA_CTRL_REG_STOP) + cpu_relax(); + + while (netsec_read(priv, NETSEC_REG_DMA_MH_CTRL) & + NETSEC_DMA_CTRL_REG_STOP) + cpu_relax(); + } + + netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RESET); + netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RUN); + netsec_write(priv, NETSEC_REG_COM_INIT, NETSEC_COM_INIT_REG_ALL); + + while (netsec_read(priv, NETSEC_REG_COM_INIT) != 0) + cpu_relax(); + + /* set desc_start addr */ + netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_UP, + upper_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma)); + netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_LW, + lower_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma)); + + netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_UP, + upper_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma)); + netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_LW, + lower_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma)); + + /* set normal tx dring ring config */ + netsec_write(priv, NETSEC_REG_NRM_TX_CONFIG, + 1 << NETSEC_REG_DESC_ENDIAN); + netsec_write(priv, NETSEC_REG_NRM_RX_CONFIG, + 1 << NETSEC_REG_DESC_ENDIAN); + + if (load_ucode) { + err = netsec_netdev_load_microcode(priv); + if (err) { + netif_err(priv, probe, priv->ndev, + "%s: failed to load microcode (%d)\n", + __func__, err); + return err; + } + } + + /* start DMA engines */ + netsec_write(priv, NETSEC_REG_DMA_TMR_CTRL, priv->freq / 1000000 - 1); + netsec_write(priv, NETSEC_REG_ADDR_DIS_CORE, 0); + + usleep_range(1000, 2000); + + if (!(netsec_read(priv, NETSEC_REG_TOP_STATUS) & + NETSEC_TOP_IRQ_REG_CODE_LOAD_END)) { + netif_err(priv, probe, priv->ndev, + "microengine start failed\n"); + return -ENXIO; + } + netsec_write(priv, NETSEC_REG_TOP_STATUS, + NETSEC_TOP_IRQ_REG_CODE_LOAD_END); + + value = NETSEC_PKT_CTRL_REG_MODE_NRM; + if (priv->ndev->mtu > ETH_DATA_LEN) + value |= NETSEC_PKT_CTRL_REG_EN_JUMBO; + + /* change to normal mode */ + netsec_write(priv, NETSEC_REG_DMA_MH_CTRL, MH_CTRL__MODE_TRANS); + netsec_write(priv, NETSEC_REG_PKT_CTRL, value); + + while ((netsec_read(priv, NETSEC_REG_MODE_TRANS_COMP_STATUS) & + NETSEC_MODE_TRANS_COMP_IRQ_T2N) == 0) + cpu_relax(); + + /* clear any pending EMPTY/ERR irq status */ + netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, ~0); + + /* Disable TX & RX intr */ + netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0); + + return 0; +} + +static int netsec_start_gmac(struct netsec_priv *priv) +{ + struct phy_device *phydev = priv->ndev->phydev; + u32 value = 0; + int ret; + + if (phydev->speed != SPEED_1000) + value = (NETSEC_GMAC_MCR_REG_CST | + NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON); + + if (netsec_mac_write(priv, GMAC_REG_MCR, value)) + return -ETIMEDOUT; + if (netsec_mac_write(priv, GMAC_REG_BMR, + NETSEC_GMAC_BMR_REG_RESET)) + return -ETIMEDOUT; + + /* Wait soft reset */ + usleep_range(1000, 5000); + + ret = netsec_mac_read(priv, GMAC_REG_BMR, &value); + if (ret) + return ret; + if (value & NETSEC_GMAC_BMR_REG_SWR) + return -EAGAIN; + + netsec_write(priv, MAC_REG_DESC_SOFT_RST, 1); + if (netsec_wait_while_busy(priv, MAC_REG_DESC_SOFT_RST, 1)) + return -ETIMEDOUT; + + netsec_write(priv, MAC_REG_DESC_INIT, 1); + if (netsec_wait_while_busy(priv, MAC_REG_DESC_INIT, 1)) + return -ETIMEDOUT; + + if (netsec_mac_write(priv, GMAC_REG_BMR, + NETSEC_GMAC_BMR_REG_COMMON)) + return 
-ETIMEDOUT; + if (netsec_mac_write(priv, GMAC_REG_RDLAR, + NETSEC_GMAC_RDLAR_REG_COMMON)) + return -ETIMEDOUT; + if (netsec_mac_write(priv, GMAC_REG_TDLAR, + NETSEC_GMAC_TDLAR_REG_COMMON)) + return -ETIMEDOUT; + if (netsec_mac_write(priv, GMAC_REG_MFFR, 0x80000001)) + return -ETIMEDOUT; + + ret = netsec_mac_update_to_phy_state(priv); + if (ret) + return ret; + + ret = netsec_mac_read(priv, GMAC_REG_OMR, &value); + if (ret) + return ret; + + value |= NETSEC_GMAC_OMR_REG_SR; + value |= NETSEC_GMAC_OMR_REG_ST; + + netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0); + netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0); + + netsec_et_set_coalesce(priv->ndev, &priv->et_coalesce, NULL, NULL); + + if (netsec_mac_write(priv, GMAC_REG_OMR, value)) + return -ETIMEDOUT; + + return 0; +} + +static int netsec_stop_gmac(struct netsec_priv *priv) +{ + u32 value; + int ret; + + ret = netsec_mac_read(priv, GMAC_REG_OMR, &value); + if (ret) + return ret; + value &= ~NETSEC_GMAC_OMR_REG_SR; + value &= ~NETSEC_GMAC_OMR_REG_ST; + + /* disable all interrupts */ + netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0); + netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0); + + return netsec_mac_write(priv, GMAC_REG_OMR, value); +} + +static void netsec_phy_adjust_link(struct net_device *ndev) +{ + struct netsec_priv *priv = netdev_priv(ndev); + + if (ndev->phydev->link) + netsec_start_gmac(priv); + else + netsec_stop_gmac(priv); + + phy_print_status(ndev->phydev); +} + +static irqreturn_t netsec_irq_handler(int irq, void *dev_id) +{ + struct netsec_priv *priv = dev_id; + u32 val, status = netsec_read(priv, NETSEC_REG_TOP_STATUS); + unsigned long flags; + + /* Disable interrupts */ + if (status & NETSEC_IRQ_TX) { + val = netsec_read(priv, NETSEC_REG_NRM_TX_STATUS); + netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, val); + } + if (status & NETSEC_IRQ_RX) { + val = netsec_read(priv, NETSEC_REG_NRM_RX_STATUS); + netsec_write(priv, NETSEC_REG_NRM_RX_STATUS, val); + } + + spin_lock_irqsave(&priv->reglock, flags); + netsec_write(priv, NETSEC_REG_INTEN_CLR, NETSEC_IRQ_RX | NETSEC_IRQ_TX); + spin_unlock_irqrestore(&priv->reglock, flags); + + napi_schedule(&priv->napi); + + return IRQ_HANDLED; +} + +static int netsec_netdev_open(struct net_device *ndev) +{ + struct netsec_priv *priv = netdev_priv(ndev); + int ret; + + pm_runtime_get_sync(priv->dev); + + netsec_setup_tx_dring(priv); + ret = netsec_setup_rx_dring(priv); + if (ret) { + netif_err(priv, probe, priv->ndev, + "%s: fail setup ring\n", __func__); + goto err1; + } + + ret = request_irq(priv->ndev->irq, netsec_irq_handler, + IRQF_SHARED, "netsec", priv); + if (ret) { + netif_err(priv, drv, priv->ndev, "request_irq failed\n"); + goto err2; + } + + if (dev_of_node(priv->dev)) { + if (!of_phy_connect(priv->ndev, priv->phy_np, + netsec_phy_adjust_link, 0, + priv->phy_interface)) { + netif_err(priv, link, priv->ndev, "missing PHY\n"); + ret = -ENODEV; + goto err3; + } + } else { + ret = phy_connect_direct(priv->ndev, priv->phydev, + netsec_phy_adjust_link, + priv->phy_interface); + if (ret) { + netif_err(priv, link, priv->ndev, + "phy_connect_direct() failed (%d)\n", ret); + goto err3; + } + } + + phy_start(ndev->phydev); + + netsec_start_gmac(priv); + napi_enable(&priv->napi); + netif_start_queue(ndev); + + /* Enable TX+RX intr. 
*/ + netsec_write(priv, NETSEC_REG_INTEN_SET, NETSEC_IRQ_RX | NETSEC_IRQ_TX); + + return 0; +err3: + free_irq(priv->ndev->irq, priv); +err2: + netsec_uninit_pkt_dring(priv, NETSEC_RING_RX); +err1: + pm_runtime_put_sync(priv->dev); + return ret; +} + +static int netsec_netdev_stop(struct net_device *ndev) +{ + int ret; + struct netsec_priv *priv = netdev_priv(ndev); + + netif_stop_queue(priv->ndev); + dma_wmb(); + + napi_disable(&priv->napi); + + netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0); + netsec_stop_gmac(priv); + + free_irq(priv->ndev->irq, priv); + + netsec_uninit_pkt_dring(priv, NETSEC_RING_TX); + netsec_uninit_pkt_dring(priv, NETSEC_RING_RX); + + phy_stop(ndev->phydev); + phy_disconnect(ndev->phydev); + + ret = netsec_reset_hardware(priv, false); + + pm_runtime_put_sync(priv->dev); + + return ret; +} + +static int netsec_netdev_init(struct net_device *ndev) +{ + struct netsec_priv *priv = netdev_priv(ndev); + int ret; + u16 data; + + BUILD_BUG_ON_NOT_POWER_OF_2(DESC_NUM); + + ret = netsec_alloc_dring(priv, NETSEC_RING_TX); + if (ret) + return ret; + + ret = netsec_alloc_dring(priv, NETSEC_RING_RX); + if (ret) + goto err1; + + /* set phy power down */ + data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR); + netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, + data | BMCR_PDOWN); + + ret = netsec_reset_hardware(priv, true); + if (ret) + goto err2; + + /* Restore phy power state */ + netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data); + + spin_lock_init(&priv->desc_ring[NETSEC_RING_TX].lock); + spin_lock_init(&priv->desc_ring[NETSEC_RING_RX].lock); + + return 0; +err2: + netsec_free_dring(priv, NETSEC_RING_RX); +err1: + netsec_free_dring(priv, NETSEC_RING_TX); + return ret; +} + +static void netsec_netdev_uninit(struct net_device *ndev) +{ + struct netsec_priv *priv = netdev_priv(ndev); + + netsec_free_dring(priv, NETSEC_RING_RX); + netsec_free_dring(priv, NETSEC_RING_TX); +} + +static int netsec_netdev_set_features(struct net_device *ndev, + netdev_features_t features) +{ + struct netsec_priv *priv = netdev_priv(ndev); + + priv->rx_cksum_offload_flag = !!(features & NETIF_F_RXCSUM); + + return 0; +} + +static int netsec_xdp_xmit(struct net_device *ndev, int n, + struct xdp_frame **frames, u32 flags) +{ + struct netsec_priv *priv = netdev_priv(ndev); + struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX]; + int i, nxmit = 0; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + spin_lock(&tx_ring->lock); + for (i = 0; i < n; i++) { + struct xdp_frame *xdpf = frames[i]; + int err; + + err = netsec_xdp_queue_one(priv, xdpf, true); + if (err != NETSEC_XDP_TX) + break; + + tx_ring->xdp_xmit++; + nxmit++; + } + spin_unlock(&tx_ring->lock); + + if (unlikely(flags & XDP_XMIT_FLUSH)) { + netsec_xdp_ring_tx_db(priv, tx_ring->xdp_xmit); + tx_ring->xdp_xmit = 0; + } + + return nxmit; +} + +static int netsec_xdp_setup(struct netsec_priv *priv, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + struct net_device *dev = priv->ndev; + struct bpf_prog *old_prog; + + /* For now just support only the usual MTU sized frames */ + if (prog && dev->mtu > 1500) { + NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported on XDP"); + return -EOPNOTSUPP; + } + + if (netif_running(dev)) + netsec_netdev_stop(dev); + + /* Detach old prog, if any */ + old_prog = xchg(&priv->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + + if (netif_running(dev)) + netsec_netdev_open(dev); + + return 0; +} + +static int netsec_xdp(struct 
net_device *ndev, struct netdev_bpf *xdp) +{ + struct netsec_priv *priv = netdev_priv(ndev); + + switch (xdp->command) { + case XDP_SETUP_PROG: + return netsec_xdp_setup(priv, xdp->prog, xdp->extack); + default: + return -EINVAL; + } +} + +static const struct net_device_ops netsec_netdev_ops = { + .ndo_init = netsec_netdev_init, + .ndo_uninit = netsec_netdev_uninit, + .ndo_open = netsec_netdev_open, + .ndo_stop = netsec_netdev_stop, + .ndo_start_xmit = netsec_netdev_start_xmit, + .ndo_set_features = netsec_netdev_set_features, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_eth_ioctl = phy_do_ioctl, + .ndo_xdp_xmit = netsec_xdp_xmit, + .ndo_bpf = netsec_xdp, +}; + +static int netsec_of_probe(struct platform_device *pdev, + struct netsec_priv *priv, u32 *phy_addr) +{ + int err; + + err = of_get_phy_mode(pdev->dev.of_node, &priv->phy_interface); + if (err) { + dev_err(&pdev->dev, "missing required property 'phy-mode'\n"); + return err; + } + + /* + * SynQuacer is physically configured with TX and RX delays + * but the standard firmware claimed otherwise for a long + * time, ignore it. + */ + if (of_machine_is_compatible("socionext,developer-box") && + priv->phy_interface != PHY_INTERFACE_MODE_RGMII_ID) { + dev_warn(&pdev->dev, "Outdated firmware reports incorrect PHY mode, overriding\n"); + priv->phy_interface = PHY_INTERFACE_MODE_RGMII_ID; + } + + priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); + if (!priv->phy_np) { + dev_err(&pdev->dev, "missing required property 'phy-handle'\n"); + return -EINVAL; + } + + *phy_addr = of_mdio_parse_addr(&pdev->dev, priv->phy_np); + + priv->clk = devm_clk_get(&pdev->dev, NULL); /* get by 'phy_ref_clk' */ + if (IS_ERR(priv->clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk), + "phy_ref_clk not found\n"); + priv->freq = clk_get_rate(priv->clk); + + return 0; +} + +static int netsec_acpi_probe(struct platform_device *pdev, + struct netsec_priv *priv, u32 *phy_addr) +{ + int ret; + + if (!IS_ENABLED(CONFIG_ACPI)) + return -ENODEV; + + /* ACPI systems are assumed to configure the PHY in firmware, so + * there is really no need to discover the PHY mode from the DSDT. + * Since firmware is known to exist in the field that configures the + * PHY correctly but passes the wrong mode string in the phy-mode + * device property, we have no choice but to ignore it. 
+ */ + priv->phy_interface = PHY_INTERFACE_MODE_NA; + + ret = device_property_read_u32(&pdev->dev, "phy-channel", phy_addr); + if (ret) + return dev_err_probe(&pdev->dev, ret, + "missing required property 'phy-channel'\n"); + + ret = device_property_read_u32(&pdev->dev, + "socionext,phy-clock-frequency", + &priv->freq); + if (ret) + return dev_err_probe(&pdev->dev, ret, + "missing required property 'socionext,phy-clock-frequency'\n"); + return 0; +} + +static void netsec_unregister_mdio(struct netsec_priv *priv) +{ + struct phy_device *phydev = priv->phydev; + + if (!dev_of_node(priv->dev) && phydev) { + phy_device_remove(phydev); + phy_device_free(phydev); + } + + mdiobus_unregister(priv->mii_bus); +} + +static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr) +{ + struct mii_bus *bus; + int ret; + + bus = devm_mdiobus_alloc(priv->dev); + if (!bus) + return -ENOMEM; + + snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(priv->dev)); + bus->priv = priv; + bus->name = "SNI NETSEC MDIO"; + bus->read = netsec_phy_read; + bus->write = netsec_phy_write; + bus->parent = priv->dev; + priv->mii_bus = bus; + + if (dev_of_node(priv->dev)) { + struct device_node *mdio_node, *parent = dev_of_node(priv->dev); + + mdio_node = of_get_child_by_name(parent, "mdio"); + if (mdio_node) { + parent = mdio_node; + } else { + /* older f/w doesn't populate the mdio subnode, + * allow relaxed upgrade of f/w in due time. + */ + dev_info(priv->dev, "Upgrade f/w for mdio subnode!\n"); + } + + ret = of_mdiobus_register(bus, parent); + of_node_put(mdio_node); + + if (ret) { + dev_err(priv->dev, "mdiobus register err(%d)\n", ret); + return ret; + } + } else { + /* Mask out all PHYs from auto probing. */ + bus->phy_mask = ~0; + ret = mdiobus_register(bus); + if (ret) { + dev_err(priv->dev, "mdiobus register err(%d)\n", ret); + return ret; + } + + priv->phydev = get_phy_device(bus, phy_addr, false); + if (IS_ERR(priv->phydev)) { + ret = PTR_ERR(priv->phydev); + dev_err(priv->dev, "get_phy_device err(%d)\n", ret); + priv->phydev = NULL; + mdiobus_unregister(bus); + return -ENODEV; + } + + ret = phy_device_register(priv->phydev); + if (ret) { + phy_device_free(priv->phydev); + mdiobus_unregister(bus); + dev_err(priv->dev, + "phy_device_register err(%d)\n", ret); + } + } + + return ret; +} + +static int netsec_probe(struct platform_device *pdev) +{ + struct resource *mmio_res, *eeprom_res; + struct netsec_priv *priv; + u32 hw_ver, phy_addr = 0; + struct net_device *ndev; + int ret; + int irq; + + mmio_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mmio_res) { + dev_err(&pdev->dev, "No MMIO resource found.\n"); + return -ENODEV; + } + + eeprom_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!eeprom_res) { + dev_info(&pdev->dev, "No EEPROM resource found.\n"); + return -ENODEV; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + ndev = alloc_etherdev(sizeof(*priv)); + if (!ndev) + return -ENOMEM; + + priv = netdev_priv(ndev); + + spin_lock_init(&priv->reglock); + SET_NETDEV_DEV(ndev, &pdev->dev); + platform_set_drvdata(pdev, priv); + ndev->irq = irq; + priv->dev = &pdev->dev; + priv->ndev = ndev; + + priv->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV | + NETIF_MSG_LINK | NETIF_MSG_PROBE; + + priv->ioaddr = devm_ioremap(&pdev->dev, mmio_res->start, + resource_size(mmio_res)); + if (!priv->ioaddr) { + dev_err(&pdev->dev, "devm_ioremap() failed\n"); + ret = -ENXIO; + goto free_ndev; + } + + priv->eeprom_base = devm_ioremap(&pdev->dev, eeprom_res->start, 
+ resource_size(eeprom_res)); + if (!priv->eeprom_base) { + dev_err(&pdev->dev, "devm_ioremap() failed for EEPROM\n"); + ret = -ENXIO; + goto free_ndev; + } + + ret = device_get_ethdev_address(&pdev->dev, ndev); + if (ret && priv->eeprom_base) { + void __iomem *macp = priv->eeprom_base + + NETSEC_EEPROM_MAC_ADDRESS; + u8 addr[ETH_ALEN]; + + addr[0] = readb(macp + 3); + addr[1] = readb(macp + 2); + addr[2] = readb(macp + 1); + addr[3] = readb(macp + 0); + addr[4] = readb(macp + 7); + addr[5] = readb(macp + 6); + eth_hw_addr_set(ndev, addr); + } + + if (!is_valid_ether_addr(ndev->dev_addr)) { + dev_warn(&pdev->dev, "No MAC address found, using random\n"); + eth_hw_addr_random(ndev); + } + + if (dev_of_node(&pdev->dev)) + ret = netsec_of_probe(pdev, priv, &phy_addr); + else + ret = netsec_acpi_probe(pdev, priv, &phy_addr); + if (ret) + goto free_ndev; + + priv->phy_addr = phy_addr; + + if (!priv->freq) { + dev_err(&pdev->dev, "missing PHY reference clock frequency\n"); + ret = -ENODEV; + goto free_ndev; + } + + /* default for throughput */ + priv->et_coalesce.rx_coalesce_usecs = 500; + priv->et_coalesce.rx_max_coalesced_frames = 8; + priv->et_coalesce.tx_coalesce_usecs = 500; + priv->et_coalesce.tx_max_coalesced_frames = 8; + + ret = device_property_read_u32(&pdev->dev, "max-frame-size", + &ndev->max_mtu); + if (ret < 0) + ndev->max_mtu = ETH_DATA_LEN; + + /* runtime_pm coverage just for probe, open/close also cover it */ + pm_runtime_enable(&pdev->dev); + pm_runtime_get_sync(&pdev->dev); + + hw_ver = netsec_read(priv, NETSEC_REG_F_TAIKI_VER); + /* this driver only supports F_TAIKI style NETSEC */ + if (NETSEC_F_NETSEC_VER_MAJOR_NUM(hw_ver) != + NETSEC_F_NETSEC_VER_MAJOR_NUM(NETSEC_REG_NETSEC_VER_F_TAIKI)) { + ret = -ENODEV; + goto pm_disable; + } + + dev_info(&pdev->dev, "hardware revision %d.%d\n", + hw_ver >> 16, hw_ver & 0xffff); + + netif_napi_add(ndev, &priv->napi, netsec_napi_poll); + + ndev->netdev_ops = &netsec_netdev_ops; + ndev->ethtool_ops = &netsec_ethtool_ops; + + ndev->features |= NETIF_F_HIGHDMA | NETIF_F_RXCSUM | NETIF_F_GSO | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + ndev->hw_features = ndev->features; + + priv->rx_cksum_offload_flag = true; + + ret = netsec_register_mdio(priv, phy_addr); + if (ret) + goto unreg_napi; + + if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40))) + dev_warn(&pdev->dev, "Failed to set DMA mask\n"); + + ret = register_netdev(ndev); + if (ret) { + netif_err(priv, probe, ndev, "register_netdev() failed\n"); + goto unreg_mii; + } + + pm_runtime_put_sync(&pdev->dev); + return 0; + +unreg_mii: + netsec_unregister_mdio(priv); +unreg_napi: + netif_napi_del(&priv->napi); +pm_disable: + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); +free_ndev: + free_netdev(ndev); + dev_err(&pdev->dev, "init failed\n"); + + return ret; +} + +static int netsec_remove(struct platform_device *pdev) +{ + struct netsec_priv *priv = platform_get_drvdata(pdev); + + unregister_netdev(priv->ndev); + + netsec_unregister_mdio(priv); + + netif_napi_del(&priv->napi); + + pm_runtime_disable(&pdev->dev); + free_netdev(priv->ndev); + + return 0; +} + +#ifdef CONFIG_PM +static int netsec_runtime_suspend(struct device *dev) +{ + struct netsec_priv *priv = dev_get_drvdata(dev); + + netsec_write(priv, NETSEC_REG_CLK_EN, 0); + + clk_disable_unprepare(priv->clk); + + return 0; +} + +static int netsec_runtime_resume(struct device *dev) +{ + struct netsec_priv *priv = dev_get_drvdata(dev); + + clk_prepare_enable(priv->clk); + + netsec_write(priv, NETSEC_REG_CLK_EN, 
NETSEC_CLK_EN_REG_DOM_D | + NETSEC_CLK_EN_REG_DOM_C | + NETSEC_CLK_EN_REG_DOM_G); + return 0; +} +#endif + +static const struct dev_pm_ops netsec_pm_ops = { + SET_RUNTIME_PM_OPS(netsec_runtime_suspend, netsec_runtime_resume, NULL) +}; + +static const struct of_device_id netsec_dt_ids[] = { + { .compatible = "socionext,synquacer-netsec" }, + { } +}; +MODULE_DEVICE_TABLE(of, netsec_dt_ids); + +#ifdef CONFIG_ACPI +static const struct acpi_device_id netsec_acpi_ids[] = { + { "SCX0001" }, + { } +}; +MODULE_DEVICE_TABLE(acpi, netsec_acpi_ids); +#endif + +static struct platform_driver netsec_driver = { + .probe = netsec_probe, + .remove = netsec_remove, + .driver = { + .name = "netsec", + .pm = &netsec_pm_ops, + .of_match_table = netsec_dt_ids, + .acpi_match_table = ACPI_PTR(netsec_acpi_ids), + }, +}; +module_platform_driver(netsec_driver); + +MODULE_AUTHOR("Jassi Brar "); +MODULE_AUTHOR("Ard Biesheuvel "); +MODULE_DESCRIPTION("NETSEC Ethernet driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c new file mode 100644 index 000000000..d2c6a5dfd --- /dev/null +++ b/drivers/net/ethernet/socionext/sni_ave.c @@ -0,0 +1,1995 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * sni_ave.c - Socionext UniPhier AVE ethernet driver + * Copyright 2014 Panasonic Corporation + * Copyright 2015-2017 Socionext Inc. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* General Register Group */ +#define AVE_IDR 0x000 /* ID */ +#define AVE_VR 0x004 /* Version */ +#define AVE_GRR 0x008 /* Global Reset */ +#define AVE_CFGR 0x00c /* Configuration */ + +/* Interrupt Register Group */ +#define AVE_GIMR 0x100 /* Global Interrupt Mask */ +#define AVE_GISR 0x104 /* Global Interrupt Status */ + +/* MAC Register Group */ +#define AVE_TXCR 0x200 /* TX Setup */ +#define AVE_RXCR 0x204 /* RX Setup */ +#define AVE_RXMAC1R 0x208 /* MAC address (lower) */ +#define AVE_RXMAC2R 0x20c /* MAC address (upper) */ +#define AVE_MDIOCTR 0x214 /* MDIO Control */ +#define AVE_MDIOAR 0x218 /* MDIO Address */ +#define AVE_MDIOWDR 0x21c /* MDIO Data */ +#define AVE_MDIOSR 0x220 /* MDIO Status */ +#define AVE_MDIORDR 0x224 /* MDIO Rd Data */ + +/* Descriptor Control Register Group */ +#define AVE_DESCC 0x300 /* Descriptor Control */ +#define AVE_TXDC 0x304 /* TX Descriptor Configuration */ +#define AVE_RXDC0 0x308 /* RX Descriptor Ring0 Configuration */ +#define AVE_IIRQC 0x34c /* Interval IRQ Control */ + +/* Packet Filter Register Group */ +#define AVE_PKTF_BASE 0x800 /* PF Base Address */ +#define AVE_PFMBYTE_BASE 0xd00 /* PF Mask Byte Base Address */ +#define AVE_PFMBIT_BASE 0xe00 /* PF Mask Bit Base Address */ +#define AVE_PFSEL_BASE 0xf00 /* PF Selector Base Address */ +#define AVE_PFEN 0xffc /* Packet Filter Enable */ +#define AVE_PKTF(ent) (AVE_PKTF_BASE + (ent) * 0x40) +#define AVE_PFMBYTE(ent) (AVE_PFMBYTE_BASE + (ent) * 8) +#define AVE_PFMBIT(ent) (AVE_PFMBIT_BASE + (ent) * 4) +#define AVE_PFSEL(ent) (AVE_PFSEL_BASE + (ent) * 4) + +/* 64bit descriptor memory */ +#define AVE_DESC_SIZE_64 12 /* Descriptor Size */ + +#define AVE_TXDM_64 0x1000 /* Tx Descriptor Memory */ +#define AVE_RXDM_64 0x1c00 /* Rx Descriptor Memory */ + +#define AVE_TXDM_SIZE_64 0x0ba0 /* Tx Descriptor Memory Size 3KB */ +#define AVE_RXDM_SIZE_64 0x6000 /* Rx Descriptor Memory Size 24KB */ + +/* 32bit descriptor memory */ +#define AVE_DESC_SIZE_32 8 /* 
Descriptor Size */ + +#define AVE_TXDM_32 0x1000 /* Tx Descriptor Memory */ +#define AVE_RXDM_32 0x1800 /* Rx Descriptor Memory */ + +#define AVE_TXDM_SIZE_32 0x07c0 /* Tx Descriptor Memory Size 2KB */ +#define AVE_RXDM_SIZE_32 0x4000 /* Rx Descriptor Memory Size 16KB */ + +/* RMII Bridge Register Group */ +#define AVE_RSTCTRL 0x8028 /* Reset control */ +#define AVE_RSTCTRL_RMIIRST BIT(16) +#define AVE_LINKSEL 0x8034 /* Link speed setting */ +#define AVE_LINKSEL_100M BIT(0) + +/* AVE_GRR */ +#define AVE_GRR_RXFFR BIT(5) /* Reset RxFIFO */ +#define AVE_GRR_PHYRST BIT(4) /* Reset external PHY */ +#define AVE_GRR_GRST BIT(0) /* Reset all MAC */ + +/* AVE_CFGR */ +#define AVE_CFGR_FLE BIT(31) /* Filter Function */ +#define AVE_CFGR_CHE BIT(30) /* Checksum Function */ +#define AVE_CFGR_MII BIT(27) /* Func mode (1:MII/RMII, 0:RGMII) */ +#define AVE_CFGR_IPFCEN BIT(24) /* IP fragment sum Enable */ + +/* AVE_GISR (common with GIMR) */ +#define AVE_GI_PHY BIT(24) /* PHY interrupt */ +#define AVE_GI_TX BIT(16) /* Tx complete */ +#define AVE_GI_RXERR BIT(8) /* Receive frame more than max size */ +#define AVE_GI_RXOVF BIT(7) /* Overflow at the RxFIFO */ +#define AVE_GI_RXDROP BIT(6) /* Drop packet */ +#define AVE_GI_RXIINT BIT(5) /* Interval interrupt */ + +/* AVE_TXCR */ +#define AVE_TXCR_FLOCTR BIT(18) /* Flow control */ +#define AVE_TXCR_TXSPD_1G BIT(17) +#define AVE_TXCR_TXSPD_100 BIT(16) + +/* AVE_RXCR */ +#define AVE_RXCR_RXEN BIT(30) /* Rx enable */ +#define AVE_RXCR_FDUPEN BIT(22) /* Interface mode */ +#define AVE_RXCR_FLOCTR BIT(21) /* Flow control */ +#define AVE_RXCR_AFEN BIT(19) /* MAC address filter */ +#define AVE_RXCR_DRPEN BIT(18) /* Drop pause frame */ +#define AVE_RXCR_MPSIZ_MASK GENMASK(10, 0) + +/* AVE_MDIOCTR */ +#define AVE_MDIOCTR_RREQ BIT(3) /* Read request */ +#define AVE_MDIOCTR_WREQ BIT(2) /* Write request */ + +/* AVE_MDIOSR */ +#define AVE_MDIOSR_STS BIT(0) /* access status */ + +/* AVE_DESCC */ +#define AVE_DESCC_STATUS_MASK GENMASK(31, 16) +#define AVE_DESCC_RD0 BIT(8) /* Enable Rx descriptor Ring0 */ +#define AVE_DESCC_RDSTP BIT(4) /* Pause Rx descriptor */ +#define AVE_DESCC_TD BIT(0) /* Enable Tx descriptor */ + +/* AVE_TXDC */ +#define AVE_TXDC_SIZE GENMASK(27, 16) /* Size of Tx descriptor */ +#define AVE_TXDC_ADDR GENMASK(11, 0) /* Start address */ +#define AVE_TXDC_ADDR_START 0 + +/* AVE_RXDC0 */ +#define AVE_RXDC0_SIZE GENMASK(30, 16) /* Size of Rx descriptor */ +#define AVE_RXDC0_ADDR GENMASK(14, 0) /* Start address */ +#define AVE_RXDC0_ADDR_START 0 + +/* AVE_IIRQC */ +#define AVE_IIRQC_EN0 BIT(27) /* Enable interval interrupt Ring0 */ +#define AVE_IIRQC_BSCK GENMASK(15, 0) /* Interval count unit */ + +/* Command status for descriptor */ +#define AVE_STS_OWN BIT(31) /* Descriptor ownership */ +#define AVE_STS_INTR BIT(29) /* Request for interrupt */ +#define AVE_STS_OK BIT(27) /* Normal transmit */ +/* TX */ +#define AVE_STS_NOCSUM BIT(28) /* No use HW checksum */ +#define AVE_STS_1ST BIT(26) /* Head of buffer chain */ +#define AVE_STS_LAST BIT(25) /* Tail of buffer chain */ +#define AVE_STS_OWC BIT(21) /* Out of window,Late Collision */ +#define AVE_STS_EC BIT(20) /* Excess collision occurred */ +#define AVE_STS_PKTLEN_TX_MASK GENMASK(15, 0) +/* RX */ +#define AVE_STS_CSSV BIT(21) /* Checksum check performed */ +#define AVE_STS_CSER BIT(20) /* Checksum error detected */ +#define AVE_STS_PKTLEN_RX_MASK GENMASK(10, 0) + +/* Packet filter */ +#define AVE_PFMBYTE_MASK0 (GENMASK(31, 8) | GENMASK(5, 0)) +#define AVE_PFMBYTE_MASK1 GENMASK(25, 0) +#define 
AVE_PFMBIT_MASK GENMASK(15, 0) + +#define AVE_PF_SIZE 17 /* Number of all packet filter */ +#define AVE_PF_MULTICAST_SIZE 7 /* Number of multicast filter */ + +#define AVE_PFNUM_FILTER 0 /* No.0 */ +#define AVE_PFNUM_UNICAST 1 /* No.1 */ +#define AVE_PFNUM_BROADCAST 2 /* No.2 */ +#define AVE_PFNUM_MULTICAST 11 /* No.11-17 */ + +/* NETIF Message control */ +#define AVE_DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | \ + NETIF_MSG_PROBE | \ + NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | \ + NETIF_MSG_IFDOWN | \ + NETIF_MSG_IFUP | \ + NETIF_MSG_RX_ERR | \ + NETIF_MSG_TX_ERR) + +/* Parameter for descriptor */ +#define AVE_NR_TXDESC 64 /* Tx descriptor */ +#define AVE_NR_RXDESC 256 /* Rx descriptor */ + +#define AVE_DESC_OFS_CMDSTS 0 +#define AVE_DESC_OFS_ADDRL 4 +#define AVE_DESC_OFS_ADDRU 8 + +/* Parameter for ethernet frame */ +#define AVE_MAX_ETHFRAME 1518 +#define AVE_FRAME_HEADROOM 2 + +/* Parameter for interrupt */ +#define AVE_INTM_COUNT 20 +#define AVE_FORCE_TXINTCNT 1 + +/* SG */ +#define SG_ETPINMODE 0x540 +#define SG_ETPINMODE_EXTPHY BIT(1) /* for LD11 */ +#define SG_ETPINMODE_RMII(ins) BIT(ins) + +#define IS_DESC_64BIT(p) ((p)->data->is_desc_64bit) + +#define AVE_MAX_CLKS 4 +#define AVE_MAX_RSTS 2 + +enum desc_id { + AVE_DESCID_RX, + AVE_DESCID_TX, +}; + +enum desc_state { + AVE_DESC_RX_PERMIT, + AVE_DESC_RX_SUSPEND, + AVE_DESC_START, + AVE_DESC_STOP, +}; + +struct ave_desc { + struct sk_buff *skbs; + dma_addr_t skbs_dma; + size_t skbs_dmalen; +}; + +struct ave_desc_info { + u32 ndesc; /* number of descriptor */ + u32 daddr; /* start address of descriptor */ + u32 proc_idx; /* index of processing packet */ + u32 done_idx; /* index of processed packet */ + struct ave_desc *desc; /* skb info related descriptor */ +}; + +struct ave_stats { + struct u64_stats_sync syncp; + u64 packets; + u64 bytes; + u64 errors; + u64 dropped; + u64 collisions; + u64 fifo_errors; +}; + +struct ave_private { + void __iomem *base; + int irq; + int phy_id; + unsigned int desc_size; + u32 msg_enable; + int nclks; + struct clk *clk[AVE_MAX_CLKS]; + int nrsts; + struct reset_control *rst[AVE_MAX_RSTS]; + phy_interface_t phy_mode; + struct phy_device *phydev; + struct mii_bus *mdio; + struct regmap *regmap; + unsigned int pinmode_mask; + unsigned int pinmode_val; + u32 wolopts; + + /* stats */ + struct ave_stats stats_rx; + struct ave_stats stats_tx; + + /* NAPI support */ + struct net_device *ndev; + struct napi_struct napi_rx; + struct napi_struct napi_tx; + + /* descriptor */ + struct ave_desc_info rx; + struct ave_desc_info tx; + + /* flow control */ + int pause_auto; + int pause_rx; + int pause_tx; + + const struct ave_soc_data *data; +}; + +struct ave_soc_data { + bool is_desc_64bit; + const char *clock_names[AVE_MAX_CLKS]; + const char *reset_names[AVE_MAX_RSTS]; + int (*get_pinmode)(struct ave_private *priv, + phy_interface_t phy_mode, u32 arg); +}; + +static u32 ave_desc_read(struct net_device *ndev, enum desc_id id, int entry, + int offset) +{ + struct ave_private *priv = netdev_priv(ndev); + u32 addr; + + addr = ((id == AVE_DESCID_TX) ? priv->tx.daddr : priv->rx.daddr) + + entry * priv->desc_size + offset; + + return readl(priv->base + addr); +} + +static u32 ave_desc_read_cmdsts(struct net_device *ndev, enum desc_id id, + int entry) +{ + return ave_desc_read(ndev, id, entry, AVE_DESC_OFS_CMDSTS); +} + +static void ave_desc_write(struct net_device *ndev, enum desc_id id, + int entry, int offset, u32 val) +{ + struct ave_private *priv = netdev_priv(ndev); + u32 addr; + + addr = ((id == AVE_DESCID_TX) ? 
priv->tx.daddr : priv->rx.daddr) + + entry * priv->desc_size + offset; + + writel(val, priv->base + addr); +} + +static void ave_desc_write_cmdsts(struct net_device *ndev, enum desc_id id, + int entry, u32 val) +{ + ave_desc_write(ndev, id, entry, AVE_DESC_OFS_CMDSTS, val); +} + +static void ave_desc_write_addr(struct net_device *ndev, enum desc_id id, + int entry, dma_addr_t paddr) +{ + struct ave_private *priv = netdev_priv(ndev); + + ave_desc_write(ndev, id, entry, AVE_DESC_OFS_ADDRL, + lower_32_bits(paddr)); + if (IS_DESC_64BIT(priv)) + ave_desc_write(ndev, id, + entry, AVE_DESC_OFS_ADDRU, + upper_32_bits(paddr)); +} + +static u32 ave_irq_disable_all(struct net_device *ndev) +{ + struct ave_private *priv = netdev_priv(ndev); + u32 ret; + + ret = readl(priv->base + AVE_GIMR); + writel(0, priv->base + AVE_GIMR); + + return ret; +} + +static void ave_irq_restore(struct net_device *ndev, u32 val) +{ + struct ave_private *priv = netdev_priv(ndev); + + writel(val, priv->base + AVE_GIMR); +} + +static void ave_irq_enable(struct net_device *ndev, u32 bitflag) +{ + struct ave_private *priv = netdev_priv(ndev); + + writel(readl(priv->base + AVE_GIMR) | bitflag, priv->base + AVE_GIMR); + writel(bitflag, priv->base + AVE_GISR); +} + +static void ave_hw_write_macaddr(struct net_device *ndev, + const unsigned char *mac_addr, + int reg1, int reg2) +{ + struct ave_private *priv = netdev_priv(ndev); + + writel(mac_addr[0] | mac_addr[1] << 8 | + mac_addr[2] << 16 | mac_addr[3] << 24, priv->base + reg1); + writel(mac_addr[4] | mac_addr[5] << 8, priv->base + reg2); +} + +static void ave_hw_read_version(struct net_device *ndev, char *buf, int len) +{ + struct ave_private *priv = netdev_priv(ndev); + u32 major, minor, vr; + + vr = readl(priv->base + AVE_VR); + major = (vr & GENMASK(15, 8)) >> 8; + minor = (vr & GENMASK(7, 0)); + snprintf(buf, len, "v%u.%u", major, minor); +} + +static void ave_ethtool_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *info) +{ + struct device *dev = ndev->dev.parent; + + strscpy(info->driver, dev->driver->name, sizeof(info->driver)); + strscpy(info->bus_info, dev_name(dev), sizeof(info->bus_info)); + ave_hw_read_version(ndev, info->fw_version, sizeof(info->fw_version)); +} + +static u32 ave_ethtool_get_msglevel(struct net_device *ndev) +{ + struct ave_private *priv = netdev_priv(ndev); + + return priv->msg_enable; +} + +static void ave_ethtool_set_msglevel(struct net_device *ndev, u32 val) +{ + struct ave_private *priv = netdev_priv(ndev); + + priv->msg_enable = val; +} + +static void ave_ethtool_get_wol(struct net_device *ndev, + struct ethtool_wolinfo *wol) +{ + wol->supported = 0; + wol->wolopts = 0; + + if (ndev->phydev) + phy_ethtool_get_wol(ndev->phydev, wol); +} + +static int __ave_ethtool_set_wol(struct net_device *ndev, + struct ethtool_wolinfo *wol) +{ + if (!ndev->phydev || + (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))) + return -EOPNOTSUPP; + + return phy_ethtool_set_wol(ndev->phydev, wol); +} + +static int ave_ethtool_set_wol(struct net_device *ndev, + struct ethtool_wolinfo *wol) +{ + int ret; + + ret = __ave_ethtool_set_wol(ndev, wol); + if (!ret) + device_set_wakeup_enable(&ndev->dev, !!wol->wolopts); + + return ret; +} + +static void ave_ethtool_get_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pause) +{ + struct ave_private *priv = netdev_priv(ndev); + + pause->autoneg = priv->pause_auto; + pause->rx_pause = priv->pause_rx; + pause->tx_pause = priv->pause_tx; +} + +static int ave_ethtool_set_pauseparam(struct net_device 
*ndev, + struct ethtool_pauseparam *pause) +{ + struct ave_private *priv = netdev_priv(ndev); + struct phy_device *phydev = ndev->phydev; + + if (!phydev) + return -EINVAL; + + priv->pause_auto = pause->autoneg; + priv->pause_rx = pause->rx_pause; + priv->pause_tx = pause->tx_pause; + + phy_set_asym_pause(phydev, pause->rx_pause, pause->tx_pause); + + return 0; +} + +static const struct ethtool_ops ave_ethtool_ops = { + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_drvinfo = ave_ethtool_get_drvinfo, + .nway_reset = phy_ethtool_nway_reset, + .get_link = ethtool_op_get_link, + .get_msglevel = ave_ethtool_get_msglevel, + .set_msglevel = ave_ethtool_set_msglevel, + .get_wol = ave_ethtool_get_wol, + .set_wol = ave_ethtool_set_wol, + .get_pauseparam = ave_ethtool_get_pauseparam, + .set_pauseparam = ave_ethtool_set_pauseparam, +}; + +static int ave_mdiobus_read(struct mii_bus *bus, int phyid, int regnum) +{ + struct net_device *ndev = bus->priv; + struct ave_private *priv; + u32 mdioctl, mdiosr; + int ret; + + priv = netdev_priv(ndev); + + /* write address */ + writel((phyid << 8) | regnum, priv->base + AVE_MDIOAR); + + /* read request */ + mdioctl = readl(priv->base + AVE_MDIOCTR); + writel((mdioctl | AVE_MDIOCTR_RREQ) & ~AVE_MDIOCTR_WREQ, + priv->base + AVE_MDIOCTR); + + ret = readl_poll_timeout(priv->base + AVE_MDIOSR, mdiosr, + !(mdiosr & AVE_MDIOSR_STS), 20, 2000); + if (ret) { + netdev_err(ndev, "failed to read (phy:%d reg:%x)\n", + phyid, regnum); + return ret; + } + + return readl(priv->base + AVE_MDIORDR) & GENMASK(15, 0); +} + +static int ave_mdiobus_write(struct mii_bus *bus, int phyid, int regnum, + u16 val) +{ + struct net_device *ndev = bus->priv; + struct ave_private *priv; + u32 mdioctl, mdiosr; + int ret; + + priv = netdev_priv(ndev); + + /* write address */ + writel((phyid << 8) | regnum, priv->base + AVE_MDIOAR); + + /* write data */ + writel(val, priv->base + AVE_MDIOWDR); + + /* write request */ + mdioctl = readl(priv->base + AVE_MDIOCTR); + writel((mdioctl | AVE_MDIOCTR_WREQ) & ~AVE_MDIOCTR_RREQ, + priv->base + AVE_MDIOCTR); + + ret = readl_poll_timeout(priv->base + AVE_MDIOSR, mdiosr, + !(mdiosr & AVE_MDIOSR_STS), 20, 2000); + if (ret) + netdev_err(ndev, "failed to write (phy:%d reg:%x)\n", + phyid, regnum); + + return ret; +} + +static int ave_dma_map(struct net_device *ndev, struct ave_desc *desc, + void *ptr, size_t len, enum dma_data_direction dir, + dma_addr_t *paddr) +{ + dma_addr_t map_addr; + + map_addr = dma_map_single(ndev->dev.parent, ptr, len, dir); + if (unlikely(dma_mapping_error(ndev->dev.parent, map_addr))) + return -ENOMEM; + + desc->skbs_dma = map_addr; + desc->skbs_dmalen = len; + *paddr = map_addr; + + return 0; +} + +static void ave_dma_unmap(struct net_device *ndev, struct ave_desc *desc, + enum dma_data_direction dir) +{ + if (!desc->skbs_dma) + return; + + dma_unmap_single(ndev->dev.parent, + desc->skbs_dma, desc->skbs_dmalen, dir); + desc->skbs_dma = 0; +} + +/* Prepare Rx descriptor and memory */ +static int ave_rxdesc_prepare(struct net_device *ndev, int entry) +{ + struct ave_private *priv = netdev_priv(ndev); + struct sk_buff *skb; + dma_addr_t paddr; + int ret; + + skb = priv->rx.desc[entry].skbs; + if (!skb) { + skb = netdev_alloc_skb(ndev, AVE_MAX_ETHFRAME); + if (!skb) { + netdev_err(ndev, "can't allocate skb for Rx\n"); + return -ENOMEM; + } + skb->data += AVE_FRAME_HEADROOM; + skb->tail += AVE_FRAME_HEADROOM; + } + + /* set disable to cmdsts */ + 
ave_desc_write_cmdsts(ndev, AVE_DESCID_RX, entry, + AVE_STS_INTR | AVE_STS_OWN); + + /* map Rx buffer + * Rx buffer set to the Rx descriptor has two restrictions: + * - Rx buffer address is 4 byte aligned. + * - Rx buffer begins with 2 byte headroom, and data will be put from + * (buffer + 2). + * To satisfy this, specify the address to put back the buffer + * pointer advanced by AVE_FRAME_HEADROOM, and expand the map size + * by AVE_FRAME_HEADROOM. + */ + ret = ave_dma_map(ndev, &priv->rx.desc[entry], + skb->data - AVE_FRAME_HEADROOM, + AVE_MAX_ETHFRAME + AVE_FRAME_HEADROOM, + DMA_FROM_DEVICE, &paddr); + if (ret) { + netdev_err(ndev, "can't map skb for Rx\n"); + dev_kfree_skb_any(skb); + return ret; + } + priv->rx.desc[entry].skbs = skb; + + /* set buffer pointer */ + ave_desc_write_addr(ndev, AVE_DESCID_RX, entry, paddr); + + /* set enable to cmdsts */ + ave_desc_write_cmdsts(ndev, AVE_DESCID_RX, entry, + AVE_STS_INTR | AVE_MAX_ETHFRAME); + + return ret; +} + +/* Switch state of descriptor */ +static int ave_desc_switch(struct net_device *ndev, enum desc_state state) +{ + struct ave_private *priv = netdev_priv(ndev); + int ret = 0; + u32 val; + + switch (state) { + case AVE_DESC_START: + writel(AVE_DESCC_TD | AVE_DESCC_RD0, priv->base + AVE_DESCC); + break; + + case AVE_DESC_STOP: + writel(0, priv->base + AVE_DESCC); + if (readl_poll_timeout(priv->base + AVE_DESCC, val, !val, + 150, 15000)) { + netdev_err(ndev, "can't stop descriptor\n"); + ret = -EBUSY; + } + break; + + case AVE_DESC_RX_SUSPEND: + val = readl(priv->base + AVE_DESCC); + val |= AVE_DESCC_RDSTP; + val &= ~AVE_DESCC_STATUS_MASK; + writel(val, priv->base + AVE_DESCC); + if (readl_poll_timeout(priv->base + AVE_DESCC, val, + val & (AVE_DESCC_RDSTP << 16), + 150, 150000)) { + netdev_err(ndev, "can't suspend descriptor\n"); + ret = -EBUSY; + } + break; + + case AVE_DESC_RX_PERMIT: + val = readl(priv->base + AVE_DESCC); + val &= ~AVE_DESCC_RDSTP; + val &= ~AVE_DESCC_STATUS_MASK; + writel(val, priv->base + AVE_DESCC); + break; + + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int ave_tx_complete(struct net_device *ndev) +{ + struct ave_private *priv = netdev_priv(ndev); + u32 proc_idx, done_idx, ndesc, cmdsts; + unsigned int nr_freebuf = 0; + unsigned int tx_packets = 0; + unsigned int tx_bytes = 0; + + proc_idx = priv->tx.proc_idx; + done_idx = priv->tx.done_idx; + ndesc = priv->tx.ndesc; + + /* free pre-stored skb from done_idx to proc_idx */ + while (proc_idx != done_idx) { + cmdsts = ave_desc_read_cmdsts(ndev, AVE_DESCID_TX, done_idx); + + /* do nothing if owner is HW (==1 for Tx) */ + if (cmdsts & AVE_STS_OWN) + break; + + /* check Tx status and updates statistics */ + if (cmdsts & AVE_STS_OK) { + tx_bytes += cmdsts & AVE_STS_PKTLEN_TX_MASK; + /* success */ + if (cmdsts & AVE_STS_LAST) + tx_packets++; + } else { + /* error */ + if (cmdsts & AVE_STS_LAST) { + priv->stats_tx.errors++; + if (cmdsts & (AVE_STS_OWC | AVE_STS_EC)) + priv->stats_tx.collisions++; + } + } + + /* release skb */ + if (priv->tx.desc[done_idx].skbs) { + ave_dma_unmap(ndev, &priv->tx.desc[done_idx], + DMA_TO_DEVICE); + dev_consume_skb_any(priv->tx.desc[done_idx].skbs); + priv->tx.desc[done_idx].skbs = NULL; + nr_freebuf++; + } + done_idx = (done_idx + 1) % ndesc; + } + + priv->tx.done_idx = done_idx; + + /* update stats */ + u64_stats_update_begin(&priv->stats_tx.syncp); + priv->stats_tx.packets += tx_packets; + priv->stats_tx.bytes += tx_bytes; + u64_stats_update_end(&priv->stats_tx.syncp); + + /* wake queue for freeing buffer */ + 
if (unlikely(netif_queue_stopped(ndev)) && nr_freebuf) + netif_wake_queue(ndev); + + return nr_freebuf; +} + +static int ave_rx_receive(struct net_device *ndev, int num) +{ + struct ave_private *priv = netdev_priv(ndev); + unsigned int rx_packets = 0; + unsigned int rx_bytes = 0; + u32 proc_idx, done_idx; + struct sk_buff *skb; + unsigned int pktlen; + int restpkt, npkts; + u32 ndesc, cmdsts; + + proc_idx = priv->rx.proc_idx; + done_idx = priv->rx.done_idx; + ndesc = priv->rx.ndesc; + restpkt = ((proc_idx + ndesc - 1) - done_idx) % ndesc; + + for (npkts = 0; npkts < num; npkts++) { + /* we can't receive more packet, so fill desc quickly */ + if (--restpkt < 0) + break; + + cmdsts = ave_desc_read_cmdsts(ndev, AVE_DESCID_RX, proc_idx); + + /* do nothing if owner is HW (==0 for Rx) */ + if (!(cmdsts & AVE_STS_OWN)) + break; + + if (!(cmdsts & AVE_STS_OK)) { + priv->stats_rx.errors++; + proc_idx = (proc_idx + 1) % ndesc; + continue; + } + + pktlen = cmdsts & AVE_STS_PKTLEN_RX_MASK; + + /* get skbuff for rx */ + skb = priv->rx.desc[proc_idx].skbs; + priv->rx.desc[proc_idx].skbs = NULL; + + ave_dma_unmap(ndev, &priv->rx.desc[proc_idx], DMA_FROM_DEVICE); + + skb->dev = ndev; + skb_put(skb, pktlen); + skb->protocol = eth_type_trans(skb, ndev); + + if ((cmdsts & AVE_STS_CSSV) && (!(cmdsts & AVE_STS_CSER))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + rx_packets++; + rx_bytes += pktlen; + + netif_receive_skb(skb); + + proc_idx = (proc_idx + 1) % ndesc; + } + + priv->rx.proc_idx = proc_idx; + + /* update stats */ + u64_stats_update_begin(&priv->stats_rx.syncp); + priv->stats_rx.packets += rx_packets; + priv->stats_rx.bytes += rx_bytes; + u64_stats_update_end(&priv->stats_rx.syncp); + + /* refill the Rx buffers */ + while (proc_idx != done_idx) { + if (ave_rxdesc_prepare(ndev, done_idx)) + break; + done_idx = (done_idx + 1) % ndesc; + } + + priv->rx.done_idx = done_idx; + + return npkts; +} + +static int ave_napi_poll_rx(struct napi_struct *napi, int budget) +{ + struct ave_private *priv; + struct net_device *ndev; + int num; + + priv = container_of(napi, struct ave_private, napi_rx); + ndev = priv->ndev; + + num = ave_rx_receive(ndev, budget); + if (num < budget) { + napi_complete_done(napi, num); + + /* enable Rx interrupt when NAPI finishes */ + ave_irq_enable(ndev, AVE_GI_RXIINT); + } + + return num; +} + +static int ave_napi_poll_tx(struct napi_struct *napi, int budget) +{ + struct ave_private *priv; + struct net_device *ndev; + int num; + + priv = container_of(napi, struct ave_private, napi_tx); + ndev = priv->ndev; + + num = ave_tx_complete(ndev); + napi_complete(napi); + + /* enable Tx interrupt when NAPI finishes */ + ave_irq_enable(ndev, AVE_GI_TX); + + return num; +} + +static void ave_global_reset(struct net_device *ndev) +{ + struct ave_private *priv = netdev_priv(ndev); + u32 val; + + /* set config register */ + val = AVE_CFGR_FLE | AVE_CFGR_IPFCEN | AVE_CFGR_CHE; + if (!phy_interface_mode_is_rgmii(priv->phy_mode)) + val |= AVE_CFGR_MII; + writel(val, priv->base + AVE_CFGR); + + /* reset RMII register */ + val = readl(priv->base + AVE_RSTCTRL); + val &= ~AVE_RSTCTRL_RMIIRST; + writel(val, priv->base + AVE_RSTCTRL); + + /* assert reset */ + writel(AVE_GRR_GRST | AVE_GRR_PHYRST, priv->base + AVE_GRR); + msleep(20); + + /* 1st, negate PHY reset only */ + writel(AVE_GRR_GRST, priv->base + AVE_GRR); + msleep(40); + + /* negate reset */ + writel(0, priv->base + AVE_GRR); + msleep(40); + + /* negate RMII register */ + val = readl(priv->base + AVE_RSTCTRL); + val |= AVE_RSTCTRL_RMIIRST; + 
writel(val, priv->base + AVE_RSTCTRL); + + ave_irq_disable_all(ndev); +} + +static void ave_rxfifo_reset(struct net_device *ndev) +{ + struct ave_private *priv = netdev_priv(ndev); + u32 rxcr_org; + + /* save and disable MAC receive op */ + rxcr_org = readl(priv->base + AVE_RXCR); + writel(rxcr_org & (~AVE_RXCR_RXEN), priv->base + AVE_RXCR); + + /* suspend Rx descriptor */ + ave_desc_switch(ndev, AVE_DESC_RX_SUSPEND); + + /* receive all packets before descriptor starts */ + ave_rx_receive(ndev, priv->rx.ndesc); + + /* assert reset */ + writel(AVE_GRR_RXFFR, priv->base + AVE_GRR); + udelay(50); + + /* negate reset */ + writel(0, priv->base + AVE_GRR); + udelay(20); + + /* negate interrupt status */ + writel(AVE_GI_RXOVF, priv->base + AVE_GISR); + + /* permit descriptor */ + ave_desc_switch(ndev, AVE_DESC_RX_PERMIT); + + /* restore MAC reccieve op */ + writel(rxcr_org, priv->base + AVE_RXCR); +} + +static irqreturn_t ave_irq_handler(int irq, void *netdev) +{ + struct net_device *ndev = (struct net_device *)netdev; + struct ave_private *priv = netdev_priv(ndev); + u32 gimr_val, gisr_val; + + gimr_val = ave_irq_disable_all(ndev); + + /* get interrupt status */ + gisr_val = readl(priv->base + AVE_GISR); + + /* PHY */ + if (gisr_val & AVE_GI_PHY) + writel(AVE_GI_PHY, priv->base + AVE_GISR); + + /* check exceeding packet */ + if (gisr_val & AVE_GI_RXERR) { + writel(AVE_GI_RXERR, priv->base + AVE_GISR); + netdev_err(ndev, "receive a packet exceeding frame buffer\n"); + } + + gisr_val &= gimr_val; + if (!gisr_val) + goto exit_isr; + + /* RxFIFO overflow */ + if (gisr_val & AVE_GI_RXOVF) { + priv->stats_rx.fifo_errors++; + ave_rxfifo_reset(ndev); + goto exit_isr; + } + + /* Rx drop */ + if (gisr_val & AVE_GI_RXDROP) { + priv->stats_rx.dropped++; + writel(AVE_GI_RXDROP, priv->base + AVE_GISR); + } + + /* Rx interval */ + if (gisr_val & AVE_GI_RXIINT) { + napi_schedule(&priv->napi_rx); + /* still force to disable Rx interrupt until NAPI finishes */ + gimr_val &= ~AVE_GI_RXIINT; + } + + /* Tx completed */ + if (gisr_val & AVE_GI_TX) { + napi_schedule(&priv->napi_tx); + /* still force to disable Tx interrupt until NAPI finishes */ + gimr_val &= ~AVE_GI_TX; + } + +exit_isr: + ave_irq_restore(ndev, gimr_val); + + return IRQ_HANDLED; +} + +static int ave_pfsel_start(struct net_device *ndev, unsigned int entry) +{ + struct ave_private *priv = netdev_priv(ndev); + u32 val; + + if (WARN_ON(entry > AVE_PF_SIZE)) + return -EINVAL; + + val = readl(priv->base + AVE_PFEN); + writel(val | BIT(entry), priv->base + AVE_PFEN); + + return 0; +} + +static int ave_pfsel_stop(struct net_device *ndev, unsigned int entry) +{ + struct ave_private *priv = netdev_priv(ndev); + u32 val; + + if (WARN_ON(entry > AVE_PF_SIZE)) + return -EINVAL; + + val = readl(priv->base + AVE_PFEN); + writel(val & ~BIT(entry), priv->base + AVE_PFEN); + + return 0; +} + +static int ave_pfsel_set_macaddr(struct net_device *ndev, + unsigned int entry, + const unsigned char *mac_addr, + unsigned int set_size) +{ + struct ave_private *priv = netdev_priv(ndev); + + if (WARN_ON(entry > AVE_PF_SIZE)) + return -EINVAL; + if (WARN_ON(set_size > 6)) + return -EINVAL; + + ave_pfsel_stop(ndev, entry); + + /* set MAC address for the filter */ + ave_hw_write_macaddr(ndev, mac_addr, + AVE_PKTF(entry), AVE_PKTF(entry) + 4); + + /* set byte mask */ + writel(GENMASK(31, set_size) & AVE_PFMBYTE_MASK0, + priv->base + AVE_PFMBYTE(entry)); + writel(AVE_PFMBYTE_MASK1, priv->base + AVE_PFMBYTE(entry) + 4); + + /* set bit mask filter */ + writel(AVE_PFMBIT_MASK, 
priv->base + AVE_PFMBIT(entry)); + + /* set selector to ring 0 */ + writel(0, priv->base + AVE_PFSEL(entry)); + + /* restart filter */ + ave_pfsel_start(ndev, entry); + + return 0; +} + +static void ave_pfsel_set_promisc(struct net_device *ndev, + unsigned int entry, u32 rxring) +{ + struct ave_private *priv = netdev_priv(ndev); + + if (WARN_ON(entry > AVE_PF_SIZE)) + return; + + ave_pfsel_stop(ndev, entry); + + /* set byte mask */ + writel(AVE_PFMBYTE_MASK0, priv->base + AVE_PFMBYTE(entry)); + writel(AVE_PFMBYTE_MASK1, priv->base + AVE_PFMBYTE(entry) + 4); + + /* set bit mask filter */ + writel(AVE_PFMBIT_MASK, priv->base + AVE_PFMBIT(entry)); + + /* set selector to rxring */ + writel(rxring, priv->base + AVE_PFSEL(entry)); + + ave_pfsel_start(ndev, entry); +} + +static void ave_pfsel_init(struct net_device *ndev) +{ + unsigned char bcast_mac[ETH_ALEN]; + int i; + + eth_broadcast_addr(bcast_mac); + + for (i = 0; i < AVE_PF_SIZE; i++) + ave_pfsel_stop(ndev, i); + + /* promiscious entry, select ring 0 */ + ave_pfsel_set_promisc(ndev, AVE_PFNUM_FILTER, 0); + + /* unicast entry */ + ave_pfsel_set_macaddr(ndev, AVE_PFNUM_UNICAST, ndev->dev_addr, 6); + + /* broadcast entry */ + ave_pfsel_set_macaddr(ndev, AVE_PFNUM_BROADCAST, bcast_mac, 6); +} + +static void ave_phy_adjust_link(struct net_device *ndev) +{ + struct ave_private *priv = netdev_priv(ndev); + struct phy_device *phydev = ndev->phydev; + u32 val, txcr, rxcr, rxcr_org; + u16 rmt_adv = 0, lcl_adv = 0; + u8 cap; + + /* set RGMII speed */ + val = readl(priv->base + AVE_TXCR); + val &= ~(AVE_TXCR_TXSPD_100 | AVE_TXCR_TXSPD_1G); + + if (phy_interface_is_rgmii(phydev) && phydev->speed == SPEED_1000) + val |= AVE_TXCR_TXSPD_1G; + else if (phydev->speed == SPEED_100) + val |= AVE_TXCR_TXSPD_100; + + writel(val, priv->base + AVE_TXCR); + + /* set RMII speed (100M/10M only) */ + if (!phy_interface_is_rgmii(phydev)) { + val = readl(priv->base + AVE_LINKSEL); + if (phydev->speed == SPEED_10) + val &= ~AVE_LINKSEL_100M; + else + val |= AVE_LINKSEL_100M; + writel(val, priv->base + AVE_LINKSEL); + } + + /* check current RXCR/TXCR */ + rxcr = readl(priv->base + AVE_RXCR); + txcr = readl(priv->base + AVE_TXCR); + rxcr_org = rxcr; + + if (phydev->duplex) { + rxcr |= AVE_RXCR_FDUPEN; + + if (phydev->pause) + rmt_adv |= LPA_PAUSE_CAP; + if (phydev->asym_pause) + rmt_adv |= LPA_PAUSE_ASYM; + + lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising); + cap = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); + if (cap & FLOW_CTRL_TX) + txcr |= AVE_TXCR_FLOCTR; + else + txcr &= ~AVE_TXCR_FLOCTR; + if (cap & FLOW_CTRL_RX) + rxcr |= AVE_RXCR_FLOCTR; + else + rxcr &= ~AVE_RXCR_FLOCTR; + } else { + rxcr &= ~AVE_RXCR_FDUPEN; + rxcr &= ~AVE_RXCR_FLOCTR; + txcr &= ~AVE_TXCR_FLOCTR; + } + + if (rxcr_org != rxcr) { + /* disable Rx mac */ + writel(rxcr & ~AVE_RXCR_RXEN, priv->base + AVE_RXCR); + /* change and enable TX/Rx mac */ + writel(txcr, priv->base + AVE_TXCR); + writel(rxcr, priv->base + AVE_RXCR); + } + + phy_print_status(phydev); +} + +static void ave_macaddr_init(struct net_device *ndev) +{ + ave_hw_write_macaddr(ndev, ndev->dev_addr, AVE_RXMAC1R, AVE_RXMAC2R); + + /* pfsel unicast entry */ + ave_pfsel_set_macaddr(ndev, AVE_PFNUM_UNICAST, ndev->dev_addr, 6); +} + +static int ave_init(struct net_device *ndev) +{ + struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; + struct ave_private *priv = netdev_priv(ndev); + struct device *dev = ndev->dev.parent; + struct device_node *np = dev->of_node; + struct device_node *mdio_np; + struct phy_device *phydev; + int nc, 
nr, ret; + + /* enable clk because of hw access until ndo_open */ + for (nc = 0; nc < priv->nclks; nc++) { + ret = clk_prepare_enable(priv->clk[nc]); + if (ret) { + dev_err(dev, "can't enable clock\n"); + goto out_clk_disable; + } + } + + for (nr = 0; nr < priv->nrsts; nr++) { + ret = reset_control_deassert(priv->rst[nr]); + if (ret) { + dev_err(dev, "can't deassert reset\n"); + goto out_reset_assert; + } + } + + ret = regmap_update_bits(priv->regmap, SG_ETPINMODE, + priv->pinmode_mask, priv->pinmode_val); + if (ret) + goto out_reset_assert; + + ave_global_reset(ndev); + + mdio_np = of_get_child_by_name(np, "mdio"); + if (!mdio_np) { + dev_err(dev, "mdio node not found\n"); + ret = -EINVAL; + goto out_reset_assert; + } + ret = of_mdiobus_register(priv->mdio, mdio_np); + of_node_put(mdio_np); + if (ret) { + dev_err(dev, "failed to register mdiobus\n"); + goto out_reset_assert; + } + + phydev = of_phy_get_and_connect(ndev, np, ave_phy_adjust_link); + if (!phydev) { + dev_err(dev, "could not attach to PHY\n"); + ret = -ENODEV; + goto out_mdio_unregister; + } + + priv->phydev = phydev; + + ave_ethtool_get_wol(ndev, &wol); + device_set_wakeup_capable(&ndev->dev, !!wol.supported); + + /* set wol initial state disabled */ + wol.wolopts = 0; + __ave_ethtool_set_wol(ndev, &wol); + + if (!phy_interface_is_rgmii(phydev)) + phy_set_max_speed(phydev, SPEED_100); + + phy_support_asym_pause(phydev); + + phydev->mac_managed_pm = true; + + phy_attached_info(phydev); + + return 0; + +out_mdio_unregister: + mdiobus_unregister(priv->mdio); +out_reset_assert: + while (--nr >= 0) + reset_control_assert(priv->rst[nr]); +out_clk_disable: + while (--nc >= 0) + clk_disable_unprepare(priv->clk[nc]); + + return ret; +} + +static void ave_uninit(struct net_device *ndev) +{ + struct ave_private *priv = netdev_priv(ndev); + int i; + + phy_disconnect(priv->phydev); + mdiobus_unregister(priv->mdio); + + /* disable clk because of hw access after ndo_stop */ + for (i = 0; i < priv->nrsts; i++) + reset_control_assert(priv->rst[i]); + for (i = 0; i < priv->nclks; i++) + clk_disable_unprepare(priv->clk[i]); +} + +static int ave_open(struct net_device *ndev) +{ + struct ave_private *priv = netdev_priv(ndev); + int entry; + int ret; + u32 val; + + ret = request_irq(priv->irq, ave_irq_handler, IRQF_SHARED, ndev->name, + ndev); + if (ret) + return ret; + + priv->tx.desc = kcalloc(priv->tx.ndesc, sizeof(*priv->tx.desc), + GFP_KERNEL); + if (!priv->tx.desc) { + ret = -ENOMEM; + goto out_free_irq; + } + + priv->rx.desc = kcalloc(priv->rx.ndesc, sizeof(*priv->rx.desc), + GFP_KERNEL); + if (!priv->rx.desc) { + kfree(priv->tx.desc); + ret = -ENOMEM; + goto out_free_irq; + } + + /* initialize Tx work and descriptor */ + priv->tx.proc_idx = 0; + priv->tx.done_idx = 0; + for (entry = 0; entry < priv->tx.ndesc; entry++) { + ave_desc_write_cmdsts(ndev, AVE_DESCID_TX, entry, 0); + ave_desc_write_addr(ndev, AVE_DESCID_TX, entry, 0); + } + writel(AVE_TXDC_ADDR_START | + (((priv->tx.ndesc * priv->desc_size) << 16) & AVE_TXDC_SIZE), + priv->base + AVE_TXDC); + + /* initialize Rx work and descriptor */ + priv->rx.proc_idx = 0; + priv->rx.done_idx = 0; + for (entry = 0; entry < priv->rx.ndesc; entry++) { + if (ave_rxdesc_prepare(ndev, entry)) + break; + } + writel(AVE_RXDC0_ADDR_START | + (((priv->rx.ndesc * priv->desc_size) << 16) & AVE_RXDC0_SIZE), + priv->base + AVE_RXDC0); + + ave_desc_switch(ndev, AVE_DESC_START); + + ave_pfsel_init(ndev); + ave_macaddr_init(ndev); + + /* set Rx configuration */ + /* full duplex, enable pause drop, enalbe flow 
control */ + val = AVE_RXCR_RXEN | AVE_RXCR_FDUPEN | AVE_RXCR_DRPEN | + AVE_RXCR_FLOCTR | (AVE_MAX_ETHFRAME & AVE_RXCR_MPSIZ_MASK); + writel(val, priv->base + AVE_RXCR); + + /* set Tx configuration */ + /* enable flow control, disable loopback */ + writel(AVE_TXCR_FLOCTR, priv->base + AVE_TXCR); + + /* enable timer, clear EN,INTM, and mask interval unit(BSCK) */ + val = readl(priv->base + AVE_IIRQC) & AVE_IIRQC_BSCK; + val |= AVE_IIRQC_EN0 | (AVE_INTM_COUNT << 16); + writel(val, priv->base + AVE_IIRQC); + + val = AVE_GI_RXIINT | AVE_GI_RXOVF | AVE_GI_TX | AVE_GI_RXDROP; + ave_irq_restore(ndev, val); + + napi_enable(&priv->napi_rx); + napi_enable(&priv->napi_tx); + + phy_start(ndev->phydev); + phy_start_aneg(ndev->phydev); + netif_start_queue(ndev); + + return 0; + +out_free_irq: + disable_irq(priv->irq); + free_irq(priv->irq, ndev); + + return ret; +} + +static int ave_stop(struct net_device *ndev) +{ + struct ave_private *priv = netdev_priv(ndev); + int entry; + + ave_irq_disable_all(ndev); + disable_irq(priv->irq); + free_irq(priv->irq, ndev); + + netif_tx_disable(ndev); + phy_stop(ndev->phydev); + napi_disable(&priv->napi_tx); + napi_disable(&priv->napi_rx); + + ave_desc_switch(ndev, AVE_DESC_STOP); + + /* free Tx buffer */ + for (entry = 0; entry < priv->tx.ndesc; entry++) { + if (!priv->tx.desc[entry].skbs) + continue; + + ave_dma_unmap(ndev, &priv->tx.desc[entry], DMA_TO_DEVICE); + dev_kfree_skb_any(priv->tx.desc[entry].skbs); + priv->tx.desc[entry].skbs = NULL; + } + priv->tx.proc_idx = 0; + priv->tx.done_idx = 0; + + /* free Rx buffer */ + for (entry = 0; entry < priv->rx.ndesc; entry++) { + if (!priv->rx.desc[entry].skbs) + continue; + + ave_dma_unmap(ndev, &priv->rx.desc[entry], DMA_FROM_DEVICE); + dev_kfree_skb_any(priv->rx.desc[entry].skbs); + priv->rx.desc[entry].skbs = NULL; + } + priv->rx.proc_idx = 0; + priv->rx.done_idx = 0; + + kfree(priv->tx.desc); + kfree(priv->rx.desc); + + return 0; +} + +static netdev_tx_t ave_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct ave_private *priv = netdev_priv(ndev); + u32 proc_idx, done_idx, ndesc, cmdsts; + int ret, freepkt; + dma_addr_t paddr; + + proc_idx = priv->tx.proc_idx; + done_idx = priv->tx.done_idx; + ndesc = priv->tx.ndesc; + freepkt = ((done_idx + ndesc - 1) - proc_idx) % ndesc; + + /* stop queue when not enough entry */ + if (unlikely(freepkt < 1)) { + netif_stop_queue(ndev); + return NETDEV_TX_BUSY; + } + + /* add padding for short packet */ + if (skb_put_padto(skb, ETH_ZLEN)) { + priv->stats_tx.dropped++; + return NETDEV_TX_OK; + } + + /* map Tx buffer + * Tx buffer set to the Tx descriptor doesn't have any restriction. 
+ */ + ret = ave_dma_map(ndev, &priv->tx.desc[proc_idx], + skb->data, skb->len, DMA_TO_DEVICE, &paddr); + if (ret) { + dev_kfree_skb_any(skb); + priv->stats_tx.dropped++; + return NETDEV_TX_OK; + } + + priv->tx.desc[proc_idx].skbs = skb; + + ave_desc_write_addr(ndev, AVE_DESCID_TX, proc_idx, paddr); + + cmdsts = AVE_STS_OWN | AVE_STS_1ST | AVE_STS_LAST | + (skb->len & AVE_STS_PKTLEN_TX_MASK); + + /* set interrupt per AVE_FORCE_TXINTCNT or when queue is stopped */ + if (!(proc_idx % AVE_FORCE_TXINTCNT) || netif_queue_stopped(ndev)) + cmdsts |= AVE_STS_INTR; + + /* disable checksum calculation when skb doesn't calurate checksum */ + if (skb->ip_summed == CHECKSUM_NONE || + skb->ip_summed == CHECKSUM_UNNECESSARY) + cmdsts |= AVE_STS_NOCSUM; + + ave_desc_write_cmdsts(ndev, AVE_DESCID_TX, proc_idx, cmdsts); + + priv->tx.proc_idx = (proc_idx + 1) % ndesc; + + return NETDEV_TX_OK; +} + +static int ave_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd) +{ + return phy_mii_ioctl(ndev->phydev, ifr, cmd); +} + +static const u8 v4multi_macadr[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }; +static const u8 v6multi_macadr[] = { 0x33, 0x00, 0x00, 0x00, 0x00, 0x00 }; + +static void ave_set_rx_mode(struct net_device *ndev) +{ + struct ave_private *priv = netdev_priv(ndev); + struct netdev_hw_addr *hw_adr; + int count, mc_cnt; + u32 val; + + /* MAC addr filter enable for promiscious mode */ + mc_cnt = netdev_mc_count(ndev); + val = readl(priv->base + AVE_RXCR); + if (ndev->flags & IFF_PROMISC || !mc_cnt) + val &= ~AVE_RXCR_AFEN; + else + val |= AVE_RXCR_AFEN; + writel(val, priv->base + AVE_RXCR); + + /* set all multicast address */ + if ((ndev->flags & IFF_ALLMULTI) || mc_cnt > AVE_PF_MULTICAST_SIZE) { + ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST, + v4multi_macadr, 1); + ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST + 1, + v6multi_macadr, 1); + } else { + /* stop all multicast filter */ + for (count = 0; count < AVE_PF_MULTICAST_SIZE; count++) + ave_pfsel_stop(ndev, AVE_PFNUM_MULTICAST + count); + + /* set multicast addresses */ + count = 0; + netdev_for_each_mc_addr(hw_adr, ndev) { + if (count == mc_cnt) + break; + ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST + count, + hw_adr->addr, 6); + count++; + } + } +} + +static void ave_get_stats64(struct net_device *ndev, + struct rtnl_link_stats64 *stats) +{ + struct ave_private *priv = netdev_priv(ndev); + unsigned int start; + + do { + start = u64_stats_fetch_begin_irq(&priv->stats_rx.syncp); + stats->rx_packets = priv->stats_rx.packets; + stats->rx_bytes = priv->stats_rx.bytes; + } while (u64_stats_fetch_retry_irq(&priv->stats_rx.syncp, start)); + + do { + start = u64_stats_fetch_begin_irq(&priv->stats_tx.syncp); + stats->tx_packets = priv->stats_tx.packets; + stats->tx_bytes = priv->stats_tx.bytes; + } while (u64_stats_fetch_retry_irq(&priv->stats_tx.syncp, start)); + + stats->rx_errors = priv->stats_rx.errors; + stats->tx_errors = priv->stats_tx.errors; + stats->rx_dropped = priv->stats_rx.dropped; + stats->tx_dropped = priv->stats_tx.dropped; + stats->rx_fifo_errors = priv->stats_rx.fifo_errors; + stats->collisions = priv->stats_tx.collisions; +} + +static int ave_set_mac_address(struct net_device *ndev, void *p) +{ + int ret = eth_mac_addr(ndev, p); + + if (ret) + return ret; + + ave_macaddr_init(ndev); + + return 0; +} + +static const struct net_device_ops ave_netdev_ops = { + .ndo_init = ave_init, + .ndo_uninit = ave_uninit, + .ndo_open = ave_open, + .ndo_stop = ave_stop, + .ndo_start_xmit = ave_start_xmit, + .ndo_eth_ioctl = ave_ioctl, + 
.ndo_set_rx_mode = ave_set_rx_mode, + .ndo_get_stats64 = ave_get_stats64, + .ndo_set_mac_address = ave_set_mac_address, +}; + +static int ave_probe(struct platform_device *pdev) +{ + const struct ave_soc_data *data; + struct device *dev = &pdev->dev; + char buf[ETHTOOL_FWVERS_LEN]; + struct of_phandle_args args; + phy_interface_t phy_mode; + struct ave_private *priv; + struct net_device *ndev; + struct device_node *np; + void __iomem *base; + const char *name; + int i, irq, ret; + u64 dma_mask; + u32 ave_id; + + data = of_device_get_match_data(dev); + if (WARN_ON(!data)) + return -EINVAL; + + np = dev->of_node; + ret = of_get_phy_mode(np, &phy_mode); + if (ret) { + dev_err(dev, "phy-mode not found\n"); + return ret; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); + + ndev = devm_alloc_etherdev(dev, sizeof(struct ave_private)); + if (!ndev) { + dev_err(dev, "can't allocate ethernet device\n"); + return -ENOMEM; + } + + ndev->netdev_ops = &ave_netdev_ops; + ndev->ethtool_ops = &ave_ethtool_ops; + SET_NETDEV_DEV(ndev, dev); + + ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_RXCSUM); + ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_RXCSUM); + + ndev->max_mtu = AVE_MAX_ETHFRAME - (ETH_HLEN + ETH_FCS_LEN); + + ret = of_get_ethdev_address(np, ndev); + if (ret) { + /* if the mac address is invalid, use random mac address */ + eth_hw_addr_random(ndev); + dev_warn(dev, "Using random MAC address: %pM\n", + ndev->dev_addr); + } + + priv = netdev_priv(ndev); + priv->base = base; + priv->irq = irq; + priv->ndev = ndev; + priv->msg_enable = netif_msg_init(-1, AVE_DEFAULT_MSG_ENABLE); + priv->phy_mode = phy_mode; + priv->data = data; + + if (IS_DESC_64BIT(priv)) { + priv->desc_size = AVE_DESC_SIZE_64; + priv->tx.daddr = AVE_TXDM_64; + priv->rx.daddr = AVE_RXDM_64; + dma_mask = DMA_BIT_MASK(64); + } else { + priv->desc_size = AVE_DESC_SIZE_32; + priv->tx.daddr = AVE_TXDM_32; + priv->rx.daddr = AVE_RXDM_32; + dma_mask = DMA_BIT_MASK(32); + } + ret = dma_set_mask(dev, dma_mask); + if (ret) + return ret; + + priv->tx.ndesc = AVE_NR_TXDESC; + priv->rx.ndesc = AVE_NR_RXDESC; + + u64_stats_init(&priv->stats_tx.syncp); + u64_stats_init(&priv->stats_rx.syncp); + + for (i = 0; i < AVE_MAX_CLKS; i++) { + name = priv->data->clock_names[i]; + if (!name) + break; + priv->clk[i] = devm_clk_get(dev, name); + if (IS_ERR(priv->clk[i])) + return PTR_ERR(priv->clk[i]); + priv->nclks++; + } + + for (i = 0; i < AVE_MAX_RSTS; i++) { + name = priv->data->reset_names[i]; + if (!name) + break; + priv->rst[i] = devm_reset_control_get_shared(dev, name); + if (IS_ERR(priv->rst[i])) + return PTR_ERR(priv->rst[i]); + priv->nrsts++; + } + + ret = of_parse_phandle_with_fixed_args(np, + "socionext,syscon-phy-mode", + 1, 0, &args); + if (ret) { + dev_err(dev, "can't get syscon-phy-mode property\n"); + return ret; + } + priv->regmap = syscon_node_to_regmap(args.np); + of_node_put(args.np); + if (IS_ERR(priv->regmap)) { + dev_err(dev, "can't map syscon-phy-mode\n"); + return PTR_ERR(priv->regmap); + } + ret = priv->data->get_pinmode(priv, phy_mode, args.args[0]); + if (ret) { + dev_err(dev, "invalid phy-mode setting\n"); + return ret; + } + + priv->mdio = devm_mdiobus_alloc(dev); + if (!priv->mdio) + return -ENOMEM; + priv->mdio->priv = ndev; + priv->mdio->parent = dev; + priv->mdio->read = ave_mdiobus_read; + priv->mdio->write = ave_mdiobus_write; + priv->mdio->name = "uniphier-mdio"; + snprintf(priv->mdio->id, 
MII_BUS_ID_SIZE, "%s-%x", + pdev->name, pdev->id); + + /* Register as a NAPI supported driver */ + netif_napi_add(ndev, &priv->napi_rx, ave_napi_poll_rx); + netif_napi_add_tx(ndev, &priv->napi_tx, ave_napi_poll_tx); + + platform_set_drvdata(pdev, ndev); + + ret = register_netdev(ndev); + if (ret) { + dev_err(dev, "failed to register netdevice\n"); + goto out_del_napi; + } + + /* get ID and version */ + ave_id = readl(priv->base + AVE_IDR); + ave_hw_read_version(ndev, buf, sizeof(buf)); + + dev_info(dev, "Socionext %c%c%c%c Ethernet IP %s (irq=%d, phy=%s)\n", + (ave_id >> 24) & 0xff, (ave_id >> 16) & 0xff, + (ave_id >> 8) & 0xff, (ave_id >> 0) & 0xff, + buf, priv->irq, phy_modes(phy_mode)); + + return 0; + +out_del_napi: + netif_napi_del(&priv->napi_rx); + netif_napi_del(&priv->napi_tx); + + return ret; +} + +static int ave_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct ave_private *priv = netdev_priv(ndev); + + unregister_netdev(ndev); + netif_napi_del(&priv->napi_rx); + netif_napi_del(&priv->napi_tx); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int ave_suspend(struct device *dev) +{ + struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; + struct net_device *ndev = dev_get_drvdata(dev); + struct ave_private *priv = netdev_priv(ndev); + int ret = 0; + + if (netif_running(ndev)) { + ret = ave_stop(ndev); + netif_device_detach(ndev); + } + + ave_ethtool_get_wol(ndev, &wol); + priv->wolopts = wol.wolopts; + + return ret; +} + +static int ave_resume(struct device *dev) +{ + struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; + struct net_device *ndev = dev_get_drvdata(dev); + struct ave_private *priv = netdev_priv(ndev); + int ret = 0; + + ave_global_reset(ndev); + + ret = phy_init_hw(ndev->phydev); + if (ret) + return ret; + + ave_ethtool_get_wol(ndev, &wol); + wol.wolopts = priv->wolopts; + __ave_ethtool_set_wol(ndev, &wol); + + if (ndev->phydev) { + ret = phy_resume(ndev->phydev); + if (ret) + return ret; + } + + if (netif_running(ndev)) { + ret = ave_open(ndev); + netif_device_attach(ndev); + } + + return ret; +} + +static SIMPLE_DEV_PM_OPS(ave_pm_ops, ave_suspend, ave_resume); +#define AVE_PM_OPS (&ave_pm_ops) +#else +#define AVE_PM_OPS NULL +#endif + +static int ave_pro4_get_pinmode(struct ave_private *priv, + phy_interface_t phy_mode, u32 arg) +{ + if (arg > 0) + return -EINVAL; + + priv->pinmode_mask = SG_ETPINMODE_RMII(0); + + switch (phy_mode) { + case PHY_INTERFACE_MODE_RMII: + priv->pinmode_val = SG_ETPINMODE_RMII(0); + break; + case PHY_INTERFACE_MODE_MII: + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + priv->pinmode_val = 0; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int ave_ld11_get_pinmode(struct ave_private *priv, + phy_interface_t phy_mode, u32 arg) +{ + if (arg > 0) + return -EINVAL; + + priv->pinmode_mask = SG_ETPINMODE_EXTPHY | SG_ETPINMODE_RMII(0); + + switch (phy_mode) { + case PHY_INTERFACE_MODE_INTERNAL: + priv->pinmode_val = 0; + break; + case PHY_INTERFACE_MODE_RMII: + priv->pinmode_val = SG_ETPINMODE_EXTPHY | SG_ETPINMODE_RMII(0); + break; + default: + return -EINVAL; + } + + return 0; +} + +static int ave_ld20_get_pinmode(struct ave_private *priv, + phy_interface_t phy_mode, u32 arg) +{ + if (arg > 0) + return -EINVAL; + + priv->pinmode_mask = SG_ETPINMODE_RMII(0); + + switch (phy_mode) { + case PHY_INTERFACE_MODE_RMII: + priv->pinmode_val = SG_ETPINMODE_RMII(0); + break; + 
case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + priv->pinmode_val = 0; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int ave_pxs3_get_pinmode(struct ave_private *priv, + phy_interface_t phy_mode, u32 arg) +{ + if (arg > 1) + return -EINVAL; + + priv->pinmode_mask = SG_ETPINMODE_RMII(arg); + + switch (phy_mode) { + case PHY_INTERFACE_MODE_RMII: + priv->pinmode_val = SG_ETPINMODE_RMII(arg); + break; + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + priv->pinmode_val = 0; + break; + default: + return -EINVAL; + } + + return 0; +} + +static const struct ave_soc_data ave_pro4_data = { + .is_desc_64bit = false, + .clock_names = { + "gio", "ether", "ether-gb", "ether-phy", + }, + .reset_names = { + "gio", "ether", + }, + .get_pinmode = ave_pro4_get_pinmode, +}; + +static const struct ave_soc_data ave_pxs2_data = { + .is_desc_64bit = false, + .clock_names = { + "ether", + }, + .reset_names = { + "ether", + }, + .get_pinmode = ave_pro4_get_pinmode, +}; + +static const struct ave_soc_data ave_ld11_data = { + .is_desc_64bit = false, + .clock_names = { + "ether", + }, + .reset_names = { + "ether", + }, + .get_pinmode = ave_ld11_get_pinmode, +}; + +static const struct ave_soc_data ave_ld20_data = { + .is_desc_64bit = true, + .clock_names = { + "ether", + }, + .reset_names = { + "ether", + }, + .get_pinmode = ave_ld20_get_pinmode, +}; + +static const struct ave_soc_data ave_pxs3_data = { + .is_desc_64bit = false, + .clock_names = { + "ether", + }, + .reset_names = { + "ether", + }, + .get_pinmode = ave_pxs3_get_pinmode, +}; + +static const struct ave_soc_data ave_nx1_data = { + .is_desc_64bit = true, + .clock_names = { + "ether", + }, + .reset_names = { + "ether", + }, + .get_pinmode = ave_pxs3_get_pinmode, +}; + +static const struct of_device_id of_ave_match[] = { + { + .compatible = "socionext,uniphier-pro4-ave4", + .data = &ave_pro4_data, + }, + { + .compatible = "socionext,uniphier-pxs2-ave4", + .data = &ave_pxs2_data, + }, + { + .compatible = "socionext,uniphier-ld11-ave4", + .data = &ave_ld11_data, + }, + { + .compatible = "socionext,uniphier-ld20-ave4", + .data = &ave_ld20_data, + }, + { + .compatible = "socionext,uniphier-pxs3-ave4", + .data = &ave_pxs3_data, + }, + { + .compatible = "socionext,uniphier-nx1-ave4", + .data = &ave_nx1_data, + }, + { /* Sentinel */ } +}; +MODULE_DEVICE_TABLE(of, of_ave_match); + +static struct platform_driver ave_driver = { + .probe = ave_probe, + .remove = ave_remove, + .driver = { + .name = "ave", + .pm = AVE_PM_OPS, + .of_match_table = of_ave_match, + }, +}; +module_platform_driver(ave_driver); + +MODULE_AUTHOR("Kunihiko Hayashi "); +MODULE_DESCRIPTION("Socionext UniPhier AVE ethernet driver"); +MODULE_LICENSE("GPL v2"); -- cgit v1.2.3
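Both drivers manage their Tx/Rx descriptors as fixed-size rings driven by a producer index (proc_idx) and a consumer index (done_idx) that advance modulo the ring size; ave_start_xmit(), for instance, computes the free slots as ((done_idx + ndesc - 1) - proc_idx) % ndesc and stops the queue when none are left. The following minimal user-space sketch of that index arithmetic is illustrative only: the helper names and sample values are not taken from the driver.

#include <stdio.h>

/* Free entries as seen by the producer.  One slot is deliberately left
 * unused so that proc_idx == done_idx always means "ring empty".
 */
static unsigned int ring_free(unsigned int proc_idx, unsigned int done_idx,
			      unsigned int ndesc)
{
	return ((done_idx + ndesc - 1) - proc_idx) % ndesc;
}

/* Entries the consumer (e.g. ave_tx_complete()) still has to reap. */
static unsigned int ring_pending(unsigned int proc_idx, unsigned int done_idx,
				 unsigned int ndesc)
{
	return (proc_idx + ndesc - done_idx) % ndesc;
}

int main(void)
{
	unsigned int ndesc = 64;	/* AVE_NR_TXDESC in the driver */
	unsigned int proc_idx = 62;	/* next slot the producer fills */
	unsigned int done_idx = 3;	/* next slot the consumer reaps */

	printf("free slots    : %u\n", ring_free(proc_idx, done_idx, ndesc));
	printf("pending slots : %u\n", ring_pending(proc_idx, done_idx, ndesc));

	/* Both indices wrap the same way the driver does:
	 * proc_idx = (proc_idx + 1) % ndesc;
	 */
	return 0;
}

With ndesc = 64 this prints 4 free and 59 pending slots; the two counts always sum to ndesc - 1, which is why ave_start_xmit() stops the queue while one descriptor is still unused.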