author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-11 08:27:49 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-11 08:27:49 +0000
commit | ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree | b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/net/ethernet/wangxun
parent | Initial commit. (diff)
download | linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.tar.xz, linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.zip
Adding upstream version 6.6.15. (tag: upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/wangxun')
28 files changed, 9377 insertions, 0 deletions
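The Kconfig added below introduces NET_VENDOR_WANGXUN as the vendor gate, a hidden LIBWX common-library module, and the NGBE (GbE) and TXGBE (10GbE) driver options, built as modules named ngbe and txgbe. As a quick orientation before the patch body, a minimal configuration fragment that builds both drivers as loadable modules might look like the sketch below; it is illustrative only and not part of the commit, and LIBWX would normally be pulled in automatically via the drivers' `select LIBWX`:

    # Illustrative .config fragment (not part of this patch): build both
    # Wangxun drivers as loadable modules (ngbe.ko and txgbe.ko).
    CONFIG_NET_VENDOR_WANGXUN=y
    # LIBWX is a hidden tristate; it is selected by NGBE/TXGBE.
    CONFIG_LIBWX=m
    CONFIG_NGBE=m
    # TXGBE additionally depends on PCI and COMMON_CLK being enabled.
    CONFIG_TXGBE=m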
diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig new file mode 100644 index 0000000000..23cd610bd3 --- /dev/null +++ b/drivers/net/ethernet/wangxun/Kconfig @@ -0,0 +1,65 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Wangxun network device configuration +# + +config NET_VENDOR_WANGXUN + bool "Wangxun devices" + default y + help + If you have a network (Ethernet) card from Wangxun(R), say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Wangxun(R) cards. If you say Y, you will + be asked for your specific card in the following questions. + +if NET_VENDOR_WANGXUN + +config LIBWX + tristate + select PAGE_POOL + help + Common library for Wangxun(R) Ethernet drivers. + +config NGBE + tristate "Wangxun(R) GbE PCI Express adapters support" + depends on PCI + select LIBWX + select PHYLIB + help + This driver supports Wangxun(R) GbE PCI Express family of + adapters. + + More specific information on configuring the driver is in + <file:Documentation/networking/device_drivers/ethernet/wangxun/ngbe.rst>. + + To compile this driver as a module, choose M here. The module + will be called ngbe. + +config TXGBE + tristate "Wangxun(R) 10GbE PCI Express adapters support" + depends on PCI + depends on COMMON_CLK + select MARVELL_10G_PHY + select REGMAP + select I2C + select I2C_DESIGNWARE_PLATFORM + select PHYLINK + select HWMON if TXGBE=y + select SFP + select GPIOLIB + select GPIOLIB_IRQCHIP + select PCS_XPCS + select LIBWX + help + This driver supports Wangxun(R) 10GbE PCI Express family of + adapters. + + More specific information on configuring the driver is in + <file:Documentation/networking/device_drivers/ethernet/wangxun/txgbe.rst>. + + To compile this driver as a module, choose M here. The module + will be called txgbe. + +endif # NET_VENDOR_WANGXUN diff --git a/drivers/net/ethernet/wangxun/Makefile b/drivers/net/ethernet/wangxun/Makefile new file mode 100644 index 0000000000..ca19311dbe --- /dev/null +++ b/drivers/net/ethernet/wangxun/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Wangxun network device drivers. +# + +obj-$(CONFIG_LIBWX) += libwx/ +obj-$(CONFIG_TXGBE) += txgbe/ +obj-$(CONFIG_NGBE) += ngbe/ diff --git a/drivers/net/ethernet/wangxun/libwx/Makefile b/drivers/net/ethernet/wangxun/libwx/Makefile new file mode 100644 index 0000000000..42ccd6e405 --- /dev/null +++ b/drivers/net/ethernet/wangxun/libwx/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. +# + +obj-$(CONFIG_LIBWX) += libwx.o + +libwx-objs := wx_hw.o wx_lib.o wx_ethtool.o diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c new file mode 100644 index 0000000000..93cb6f2294 --- /dev/null +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. 
*/ + +#include <linux/pci.h> +#include <linux/phy.h> + +#include "wx_type.h" +#include "wx_ethtool.h" + +void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) +{ + struct wx *wx = netdev_priv(netdev); + + strscpy(info->driver, wx->driver_name, sizeof(info->driver)); + strscpy(info->fw_version, wx->eeprom_id, sizeof(info->fw_version)); + strscpy(info->bus_info, pci_name(wx->pdev), sizeof(info->bus_info)); +} +EXPORT_SYMBOL(wx_get_drvinfo); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h new file mode 100644 index 0000000000..e85538c694 --- /dev/null +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */ + +#ifndef _WX_ETHTOOL_H_ +#define _WX_ETHTOOL_H_ + +void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info); +#endif /* _WX_ETHTOOL_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c new file mode 100644 index 0000000000..52130df26a --- /dev/null +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c @@ -0,0 +1,1937 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */ + +#include <linux/etherdevice.h> +#include <linux/netdevice.h> +#include <linux/if_ether.h> +#include <linux/if_vlan.h> +#include <linux/iopoll.h> +#include <linux/pci.h> + +#include "wx_type.h" +#include "wx_lib.h" +#include "wx_hw.h" + +static void wx_intr_disable(struct wx *wx, u64 qmask) +{ + u32 mask; + + mask = (qmask & U32_MAX); + if (mask) + wr32(wx, WX_PX_IMS(0), mask); + + if (wx->mac.type == wx_mac_sp) { + mask = (qmask >> 32); + if (mask) + wr32(wx, WX_PX_IMS(1), mask); + } +} + +void wx_intr_enable(struct wx *wx, u64 qmask) +{ + u32 mask; + + mask = (qmask & U32_MAX); + if (mask) + wr32(wx, WX_PX_IMC(0), mask); + if (wx->mac.type == wx_mac_sp) { + mask = (qmask >> 32); + if (mask) + wr32(wx, WX_PX_IMC(1), mask); + } +} +EXPORT_SYMBOL(wx_intr_enable); + +/** + * wx_irq_disable - Mask off interrupt generation on the NIC + * @wx: board private structure + **/ +void wx_irq_disable(struct wx *wx) +{ + struct pci_dev *pdev = wx->pdev; + + wr32(wx, WX_PX_MISC_IEN, 0); + wx_intr_disable(wx, WX_INTR_ALL); + + if (pdev->msix_enabled) { + int vector; + + for (vector = 0; vector < wx->num_q_vectors; vector++) + synchronize_irq(wx->msix_entries[vector].vector); + + synchronize_irq(wx->msix_entries[vector].vector); + } else { + synchronize_irq(pdev->irq); + } +} +EXPORT_SYMBOL(wx_irq_disable); + +/* cmd_addr is used for some special command: + * 1. to be sector address, when implemented erase sector command + * 2. 
to be flash address when implemented read, write flash address + */ +static int wx_fmgr_cmd_op(struct wx *wx, u32 cmd, u32 cmd_addr) +{ + u32 cmd_val = 0, val = 0; + + cmd_val = WX_SPI_CMD_CMD(cmd) | + WX_SPI_CMD_CLK(WX_SPI_CLK_DIV) | + cmd_addr; + wr32(wx, WX_SPI_CMD, cmd_val); + + return read_poll_timeout(rd32, val, (val & 0x1), 10, 100000, + false, wx, WX_SPI_STATUS); +} + +static int wx_flash_read_dword(struct wx *wx, u32 addr, u32 *data) +{ + int ret = 0; + + ret = wx_fmgr_cmd_op(wx, WX_SPI_CMD_READ_DWORD, addr); + if (ret < 0) + return ret; + + *data = rd32(wx, WX_SPI_DATA); + + return ret; +} + +int wx_check_flash_load(struct wx *hw, u32 check_bit) +{ + u32 reg = 0; + int err = 0; + + /* if there's flash existing */ + if (!(rd32(hw, WX_SPI_STATUS) & + WX_SPI_STATUS_FLASH_BYPASS)) { + /* wait hw load flash done */ + err = read_poll_timeout(rd32, reg, !(reg & check_bit), 20000, 2000000, + false, hw, WX_SPI_ILDR_STATUS); + if (err < 0) + wx_err(hw, "Check flash load timeout.\n"); + } + + return err; +} +EXPORT_SYMBOL(wx_check_flash_load); + +void wx_control_hw(struct wx *wx, bool drv) +{ + /* True : Let firmware know the driver has taken over + * False : Let firmware take over control of hw + */ + wr32m(wx, WX_CFG_PORT_CTL, WX_CFG_PORT_CTL_DRV_LOAD, + drv ? WX_CFG_PORT_CTL_DRV_LOAD : 0); +} +EXPORT_SYMBOL(wx_control_hw); + +/** + * wx_mng_present - returns 0 when management capability is present + * @wx: pointer to hardware structure + */ +int wx_mng_present(struct wx *wx) +{ + u32 fwsm; + + fwsm = rd32(wx, WX_MIS_ST); + if (fwsm & WX_MIS_ST_MNG_INIT_DN) + return 0; + else + return -EACCES; +} +EXPORT_SYMBOL(wx_mng_present); + +/* Software lock to be held while software semaphore is being accessed. */ +static DEFINE_MUTEX(wx_sw_sync_lock); + +/** + * wx_release_sw_sync - Release SW semaphore + * @wx: pointer to hardware structure + * @mask: Mask to specify which semaphore to release + * + * Releases the SW semaphore for the specified + * function (CSR, PHY0, PHY1, EEPROM, Flash) + **/ +static void wx_release_sw_sync(struct wx *wx, u32 mask) +{ + mutex_lock(&wx_sw_sync_lock); + wr32m(wx, WX_MNG_SWFW_SYNC, mask, 0); + mutex_unlock(&wx_sw_sync_lock); +} + +/** + * wx_acquire_sw_sync - Acquire SW semaphore + * @wx: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire + * + * Acquires the SW semaphore for the specified + * function (CSR, PHY0, PHY1, EEPROM, Flash) + **/ +static int wx_acquire_sw_sync(struct wx *wx, u32 mask) +{ + u32 sem = 0; + int ret = 0; + + mutex_lock(&wx_sw_sync_lock); + ret = read_poll_timeout(rd32, sem, !(sem & mask), + 5000, 2000000, false, wx, WX_MNG_SWFW_SYNC); + if (!ret) { + sem |= mask; + wr32(wx, WX_MNG_SWFW_SYNC, sem); + } else { + wx_err(wx, "SW Semaphore not granted: 0x%x.\n", sem); + } + mutex_unlock(&wx_sw_sync_lock); + + return ret; +} + +/** + * wx_host_interface_command - Issue command to manageability block + * @wx: pointer to the HW structure + * @buffer: contains the command to write and where the return status will + * be placed + * @length: length of buffer, must be multiple of 4 bytes + * @timeout: time in ms to wait for command completion + * @return_data: read and return data from the buffer (true) or not (false) + * Needed because FW structures are big endian and decoding of + * these fields can be 8 bit or 16 bit based on command. Decoding + * is not easily understood without making a table of commands. + * So we will leave this up to the caller to read back the data + * in these cases. 
+ **/ +int wx_host_interface_command(struct wx *wx, u32 *buffer, + u32 length, u32 timeout, bool return_data) +{ + u32 hdr_size = sizeof(struct wx_hic_hdr); + u32 hicr, i, bi, buf[64] = {}; + int status = 0; + u32 dword_len; + u16 buf_len; + + if (length == 0 || length > WX_HI_MAX_BLOCK_BYTE_LENGTH) { + wx_err(wx, "Buffer length failure buffersize=%d.\n", length); + return -EINVAL; + } + + status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_MB); + if (status != 0) + return status; + + /* Calculate length in DWORDs. We must be DWORD aligned */ + if ((length % (sizeof(u32))) != 0) { + wx_err(wx, "Buffer length failure, not aligned to dword"); + status = -EINVAL; + goto rel_out; + } + + dword_len = length >> 2; + + /* The device driver writes the relevant command block + * into the ram area. + */ + for (i = 0; i < dword_len; i++) { + wr32a(wx, WX_MNG_MBOX, i, (__force u32)cpu_to_le32(buffer[i])); + /* write flush */ + buf[i] = rd32a(wx, WX_MNG_MBOX, i); + } + /* Setting this bit tells the ARC that a new command is pending. */ + wr32m(wx, WX_MNG_MBOX_CTL, + WX_MNG_MBOX_CTL_SWRDY, WX_MNG_MBOX_CTL_SWRDY); + + status = read_poll_timeout(rd32, hicr, hicr & WX_MNG_MBOX_CTL_FWRDY, 1000, + timeout * 1000, false, wx, WX_MNG_MBOX_CTL); + + /* Check command completion */ + if (status) { + wx_dbg(wx, "Command has failed with no status valid.\n"); + + buf[0] = rd32(wx, WX_MNG_MBOX); + if ((buffer[0] & 0xff) != (~buf[0] >> 24)) { + status = -EINVAL; + goto rel_out; + } + if ((buf[0] & 0xff0000) >> 16 == 0x80) { + wx_dbg(wx, "It's unknown cmd.\n"); + status = -EINVAL; + goto rel_out; + } + + wx_dbg(wx, "write value:\n"); + for (i = 0; i < dword_len; i++) + wx_dbg(wx, "%x ", buffer[i]); + wx_dbg(wx, "read value:\n"); + for (i = 0; i < dword_len; i++) + wx_dbg(wx, "%x ", buf[i]); + } + + if (!return_data) + goto rel_out; + + /* Calculate length in DWORDs */ + dword_len = hdr_size >> 2; + + /* first pull in the header so we know the buffer length */ + for (bi = 0; bi < dword_len; bi++) { + buffer[bi] = rd32a(wx, WX_MNG_MBOX, bi); + le32_to_cpus(&buffer[bi]); + } + + /* If there is any thing in data position pull it in */ + buf_len = ((struct wx_hic_hdr *)buffer)->buf_len; + if (buf_len == 0) + goto rel_out; + + if (length < buf_len + hdr_size) { + wx_err(wx, "Buffer not large enough for reply message.\n"); + status = -EFAULT; + goto rel_out; + } + + /* Calculate length in DWORDs, add 3 for odd lengths */ + dword_len = (buf_len + 3) >> 2; + + /* Pull in the rest of the buffer (bi is where we left off) */ + for (; bi <= dword_len; bi++) { + buffer[bi] = rd32a(wx, WX_MNG_MBOX, bi); + le32_to_cpus(&buffer[bi]); + } + +rel_out: + wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_MB); + return status; +} +EXPORT_SYMBOL(wx_host_interface_command); + +/** + * wx_read_ee_hostif_data - Read EEPROM word using a host interface cmd + * assuming that the semaphore is already obtained. + * @wx: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the hostif. 
+ **/ +static int wx_read_ee_hostif_data(struct wx *wx, u16 offset, u16 *data) +{ + struct wx_hic_read_shadow_ram buffer; + int status; + + buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* convert offset from words to bytes */ + buffer.address = (__force u32)cpu_to_be32(offset * 2); + /* one word */ + buffer.length = (__force u16)cpu_to_be16(sizeof(u16)); + + status = wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer), + WX_HI_COMMAND_TIMEOUT, false); + + if (status != 0) + return status; + + *data = (u16)rd32a(wx, WX_MNG_MBOX, FW_NVM_DATA_OFFSET); + + return status; +} + +/** + * wx_read_ee_hostif - Read EEPROM word using a host interface cmd + * @wx: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the hostif. + **/ +int wx_read_ee_hostif(struct wx *wx, u16 offset, u16 *data) +{ + int status = 0; + + status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH); + if (status == 0) { + status = wx_read_ee_hostif_data(wx, offset, data); + wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH); + } + + return status; +} +EXPORT_SYMBOL(wx_read_ee_hostif); + +/** + * wx_read_ee_hostif_buffer- Read EEPROM word(s) using hostif + * @wx: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @words: number of words + * @data: word(s) read from the EEPROM + * + * Reads a 16 bit word(s) from the EEPROM using the hostif. + **/ +int wx_read_ee_hostif_buffer(struct wx *wx, + u16 offset, u16 words, u16 *data) +{ + struct wx_hic_read_shadow_ram buffer; + u32 current_word = 0; + u16 words_to_read; + u32 value = 0; + int status; + u32 i; + + /* Take semaphore for the entire operation. */ + status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH); + if (status != 0) + return status; + + while (words) { + if (words > FW_MAX_READ_BUFFER_SIZE / 2) + words_to_read = FW_MAX_READ_BUFFER_SIZE / 2; + else + words_to_read = words; + + buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* convert offset from words to bytes */ + buffer.address = (__force u32)cpu_to_be32((offset + current_word) * 2); + buffer.length = (__force u16)cpu_to_be16(words_to_read * 2); + + status = wx_host_interface_command(wx, (u32 *)&buffer, + sizeof(buffer), + WX_HI_COMMAND_TIMEOUT, + false); + + if (status != 0) { + wx_err(wx, "Host interface command failed\n"); + goto out; + } + + for (i = 0; i < words_to_read; i++) { + u32 reg = WX_MNG_MBOX + (FW_NVM_DATA_OFFSET << 2) + 2 * i; + + value = rd32(wx, reg); + data[current_word] = (u16)(value & 0xffff); + current_word++; + i++; + if (i < words_to_read) { + value >>= 16; + data[current_word] = (u16)(value & 0xffff); + current_word++; + } + } + words -= words_to_read; + } + +out: + wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH); + return status; +} +EXPORT_SYMBOL(wx_read_ee_hostif_buffer); + +/** + * wx_init_eeprom_params - Initialize EEPROM params + * @wx: pointer to hardware structure + * + * Initializes the EEPROM parameters wx_eeprom_info within the + * wx_hw struct in order to set up EEPROM access. 
+ **/ +void wx_init_eeprom_params(struct wx *wx) +{ + struct wx_eeprom_info *eeprom = &wx->eeprom; + u16 eeprom_size; + u16 data = 0x80; + + if (eeprom->type == wx_eeprom_uninitialized) { + eeprom->semaphore_delay = 10; + eeprom->type = wx_eeprom_none; + + if (!(rd32(wx, WX_SPI_STATUS) & + WX_SPI_STATUS_FLASH_BYPASS)) { + eeprom->type = wx_flash; + + eeprom_size = 4096; + eeprom->word_size = eeprom_size >> 1; + + wx_dbg(wx, "Eeprom params: type = %d, size = %d\n", + eeprom->type, eeprom->word_size); + } + } + + if (wx->mac.type == wx_mac_sp) { + if (wx_read_ee_hostif(wx, WX_SW_REGION_PTR, &data)) { + wx_err(wx, "NVM Read Error\n"); + return; + } + data = data >> 1; + } + + eeprom->sw_region_offset = data; +} +EXPORT_SYMBOL(wx_init_eeprom_params); + +/** + * wx_get_mac_addr - Generic get MAC address + * @wx: pointer to hardware structure + * @mac_addr: Adapter MAC address + * + * Reads the adapter's MAC address from first Receive Address Register (RAR0) + * A reset of the adapter must be performed prior to calling this function + * in order for the MAC address to have been loaded from the EEPROM into RAR0 + **/ +void wx_get_mac_addr(struct wx *wx, u8 *mac_addr) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + wr32(wx, WX_PSR_MAC_SWC_IDX, 0); + rar_high = rd32(wx, WX_PSR_MAC_SWC_AD_H); + rar_low = rd32(wx, WX_PSR_MAC_SWC_AD_L); + + for (i = 0; i < 2; i++) + mac_addr[i] = (u8)(rar_high >> (1 - i) * 8); + + for (i = 0; i < 4; i++) + mac_addr[i + 2] = (u8)(rar_low >> (3 - i) * 8); +} +EXPORT_SYMBOL(wx_get_mac_addr); + +/** + * wx_set_rar - Set Rx address register + * @wx: pointer to hardware structure + * @index: Receive address register to write + * @addr: Address to put into receive address register + * @pools: VMDq "set" or "pool" index + * @enable_addr: set flag that address is active + * + * Puts an ethernet address into a receive address register. + **/ +static int wx_set_rar(struct wx *wx, u32 index, u8 *addr, u64 pools, + u32 enable_addr) +{ + u32 rar_entries = wx->mac.num_rar_entries; + u32 rar_low, rar_high; + + /* Make sure we are using a valid rar index range */ + if (index >= rar_entries) { + wx_err(wx, "RAR index %d is out of range.\n", index); + return -EINVAL; + } + + /* select the MAC address */ + wr32(wx, WX_PSR_MAC_SWC_IDX, index); + + /* setup VMDq pool mapping */ + wr32(wx, WX_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF); + if (wx->mac.type == wx_mac_sp) + wr32(wx, WX_PSR_MAC_SWC_VM_H, pools >> 32); + + /* HW expects these in little endian so we reverse the byte + * order from network order (big endian) to little endian + * + * Some parts put the VMDq setting in the extra RAH bits, + * so save everything except the lower 16 bits that hold part + * of the address and the address valid bit. + */ + rar_low = ((u32)addr[5] | + ((u32)addr[4] << 8) | + ((u32)addr[3] << 16) | + ((u32)addr[2] << 24)); + rar_high = ((u32)addr[1] | + ((u32)addr[0] << 8)); + if (enable_addr != 0) + rar_high |= WX_PSR_MAC_SWC_AD_H_AV; + + wr32(wx, WX_PSR_MAC_SWC_AD_L, rar_low); + wr32m(wx, WX_PSR_MAC_SWC_AD_H, + (WX_PSR_MAC_SWC_AD_H_AD(U16_MAX) | + WX_PSR_MAC_SWC_AD_H_ADTYPE(1) | + WX_PSR_MAC_SWC_AD_H_AV), + rar_high); + + return 0; +} + +/** + * wx_clear_rar - Remove Rx address register + * @wx: pointer to hardware structure + * @index: Receive address register to write + * + * Clears an ethernet address from a receive address register. 
+ **/ +static int wx_clear_rar(struct wx *wx, u32 index) +{ + u32 rar_entries = wx->mac.num_rar_entries; + + /* Make sure we are using a valid rar index range */ + if (index >= rar_entries) { + wx_err(wx, "RAR index %d is out of range.\n", index); + return -EINVAL; + } + + /* Some parts put the VMDq setting in the extra RAH bits, + * so save everything except the lower 16 bits that hold part + * of the address and the address valid bit. + */ + wr32(wx, WX_PSR_MAC_SWC_IDX, index); + + wr32(wx, WX_PSR_MAC_SWC_VM_L, 0); + wr32(wx, WX_PSR_MAC_SWC_VM_H, 0); + + wr32(wx, WX_PSR_MAC_SWC_AD_L, 0); + wr32m(wx, WX_PSR_MAC_SWC_AD_H, + (WX_PSR_MAC_SWC_AD_H_AD(U16_MAX) | + WX_PSR_MAC_SWC_AD_H_ADTYPE(1) | + WX_PSR_MAC_SWC_AD_H_AV), + 0); + + return 0; +} + +/** + * wx_clear_vmdq - Disassociate a VMDq pool index from a rx address + * @wx: pointer to hardware struct + * @rar: receive address register index to disassociate + * @vmdq: VMDq pool index to remove from the rar + **/ +static int wx_clear_vmdq(struct wx *wx, u32 rar, u32 __maybe_unused vmdq) +{ + u32 rar_entries = wx->mac.num_rar_entries; + u32 mpsar_lo, mpsar_hi; + + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { + wx_err(wx, "RAR index %d is out of range.\n", rar); + return -EINVAL; + } + + wr32(wx, WX_PSR_MAC_SWC_IDX, rar); + mpsar_lo = rd32(wx, WX_PSR_MAC_SWC_VM_L); + mpsar_hi = rd32(wx, WX_PSR_MAC_SWC_VM_H); + + if (!mpsar_lo && !mpsar_hi) + return 0; + + /* was that the last pool using this rar? */ + if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0) + wx_clear_rar(wx, rar); + + return 0; +} + +/** + * wx_init_uta_tables - Initialize the Unicast Table Array + * @wx: pointer to hardware structure + **/ +static void wx_init_uta_tables(struct wx *wx) +{ + int i; + + wx_dbg(wx, " Clearing UTA\n"); + + for (i = 0; i < 128; i++) + wr32(wx, WX_PSR_UC_TBL(i), 0); +} + +/** + * wx_init_rx_addrs - Initializes receive address filters. + * @wx: pointer to hardware structure + * + * Places the MAC address in receive address register 0 and clears the rest + * of the receive address registers. Clears the multicast table. Assumes + * the receiver is in reset when the routine is called. + **/ +void wx_init_rx_addrs(struct wx *wx) +{ + u32 rar_entries = wx->mac.num_rar_entries; + u32 psrctl; + int i; + + /* If the current mac address is valid, assume it is a software override + * to the permanent address. + * Otherwise, use the permanent address from the eeprom. + */ + if (!is_valid_ether_addr(wx->mac.addr)) { + /* Get the MAC address from the RAR0 for later reference */ + wx_get_mac_addr(wx, wx->mac.addr); + wx_dbg(wx, "Keeping Current RAR0 Addr = %pM\n", wx->mac.addr); + } else { + /* Setup the receive address. */ + wx_dbg(wx, "Overriding MAC Address in RAR[0]\n"); + wx_dbg(wx, "New MAC Addr = %pM\n", wx->mac.addr); + + wx_set_rar(wx, 0, wx->mac.addr, 0, WX_PSR_MAC_SWC_AD_H_AV); + + if (wx->mac.type == wx_mac_sp) { + /* clear VMDq pool/queue selection for RAR 0 */ + wx_clear_vmdq(wx, 0, WX_CLEAR_VMDQ_ALL); + } + } + + /* Zero out the other receive addresses. 
*/ + wx_dbg(wx, "Clearing RAR[1-%d]\n", rar_entries - 1); + for (i = 1; i < rar_entries; i++) { + wr32(wx, WX_PSR_MAC_SWC_IDX, i); + wr32(wx, WX_PSR_MAC_SWC_AD_L, 0); + wr32(wx, WX_PSR_MAC_SWC_AD_H, 0); + } + + /* Clear the MTA */ + wx->addr_ctrl.mta_in_use = 0; + psrctl = rd32(wx, WX_PSR_CTL); + psrctl &= ~(WX_PSR_CTL_MO | WX_PSR_CTL_MFE); + psrctl |= wx->mac.mc_filter_type << WX_PSR_CTL_MO_SHIFT; + wr32(wx, WX_PSR_CTL, psrctl); + wx_dbg(wx, " Clearing MTA\n"); + for (i = 0; i < wx->mac.mcft_size; i++) + wr32(wx, WX_PSR_MC_TBL(i), 0); + + wx_init_uta_tables(wx); +} +EXPORT_SYMBOL(wx_init_rx_addrs); + +static void wx_sync_mac_table(struct wx *wx) +{ + int i; + + for (i = 0; i < wx->mac.num_rar_entries; i++) { + if (wx->mac_table[i].state & WX_MAC_STATE_MODIFIED) { + if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE) { + wx_set_rar(wx, i, + wx->mac_table[i].addr, + wx->mac_table[i].pools, + WX_PSR_MAC_SWC_AD_H_AV); + } else { + wx_clear_rar(wx, i); + } + wx->mac_table[i].state &= ~(WX_MAC_STATE_MODIFIED); + } + } +} + +/* this function destroys the first RAR entry */ +void wx_mac_set_default_filter(struct wx *wx, u8 *addr) +{ + memcpy(&wx->mac_table[0].addr, addr, ETH_ALEN); + wx->mac_table[0].pools = 1ULL; + wx->mac_table[0].state = (WX_MAC_STATE_DEFAULT | WX_MAC_STATE_IN_USE); + wx_set_rar(wx, 0, wx->mac_table[0].addr, + wx->mac_table[0].pools, + WX_PSR_MAC_SWC_AD_H_AV); +} +EXPORT_SYMBOL(wx_mac_set_default_filter); + +void wx_flush_sw_mac_table(struct wx *wx) +{ + u32 i; + + for (i = 0; i < wx->mac.num_rar_entries; i++) { + if (!(wx->mac_table[i].state & WX_MAC_STATE_IN_USE)) + continue; + + wx->mac_table[i].state |= WX_MAC_STATE_MODIFIED; + wx->mac_table[i].state &= ~WX_MAC_STATE_IN_USE; + memset(wx->mac_table[i].addr, 0, ETH_ALEN); + wx->mac_table[i].pools = 0; + } + wx_sync_mac_table(wx); +} +EXPORT_SYMBOL(wx_flush_sw_mac_table); + +static int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool) +{ + u32 i; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + for (i = 0; i < wx->mac.num_rar_entries; i++) { + if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE) { + if (ether_addr_equal(addr, wx->mac_table[i].addr)) { + if (wx->mac_table[i].pools != (1ULL << pool)) { + memcpy(wx->mac_table[i].addr, addr, ETH_ALEN); + wx->mac_table[i].pools |= (1ULL << pool); + wx_sync_mac_table(wx); + return i; + } + } + } + + if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE) + continue; + wx->mac_table[i].state |= (WX_MAC_STATE_MODIFIED | + WX_MAC_STATE_IN_USE); + memcpy(wx->mac_table[i].addr, addr, ETH_ALEN); + wx->mac_table[i].pools |= (1ULL << pool); + wx_sync_mac_table(wx); + return i; + } + return -ENOMEM; +} + +static int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool) +{ + u32 i; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + /* search table for addr, if found, set to 0 and sync */ + for (i = 0; i < wx->mac.num_rar_entries; i++) { + if (!ether_addr_equal(addr, wx->mac_table[i].addr)) + continue; + + wx->mac_table[i].state |= WX_MAC_STATE_MODIFIED; + wx->mac_table[i].pools &= ~(1ULL << pool); + if (!wx->mac_table[i].pools) { + wx->mac_table[i].state &= ~WX_MAC_STATE_IN_USE; + memset(wx->mac_table[i].addr, 0, ETH_ALEN); + } + wx_sync_mac_table(wx); + return 0; + } + return -ENOMEM; +} + +static int wx_available_rars(struct wx *wx) +{ + u32 i, count = 0; + + for (i = 0; i < wx->mac.num_rar_entries; i++) { + if (wx->mac_table[i].state == 0) + count++; + } + + return count; +} + +/** + * wx_write_uc_addr_list - write unicast addresses to RAR table + * @netdev: network interface device 
structure + * @pool: index for mac table + * + * Writes unicast address list to the RAR table. + * Returns: -ENOMEM on failure/insufficient address space + * 0 on no addresses written + * X on writing X addresses to the RAR table + **/ +static int wx_write_uc_addr_list(struct net_device *netdev, int pool) +{ + struct wx *wx = netdev_priv(netdev); + int count = 0; + + /* return ENOMEM indicating insufficient memory for addresses */ + if (netdev_uc_count(netdev) > wx_available_rars(wx)) + return -ENOMEM; + + if (!netdev_uc_empty(netdev)) { + struct netdev_hw_addr *ha; + + netdev_for_each_uc_addr(ha, netdev) { + wx_del_mac_filter(wx, ha->addr, pool); + wx_add_mac_filter(wx, ha->addr, pool); + count++; + } + } + return count; +} + +/** + * wx_mta_vector - Determines bit-vector in multicast table to set + * @wx: pointer to private structure + * @mc_addr: the multicast address + * + * Extracts the 12 bits, from a multicast address, to determine which + * bit-vector to set in the multicast table. The hardware uses 12 bits, from + * incoming rx multicast addresses, to determine the bit-vector to check in + * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set + * by the MO field of the MCSTCTRL. The MO field is set during initialization + * to mc_filter_type. + **/ +static u32 wx_mta_vector(struct wx *wx, u8 *mc_addr) +{ + u32 vector = 0; + + switch (wx->mac.mc_filter_type) { + case 0: /* use bits [47:36] of the address */ + vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); + break; + case 1: /* use bits [46:35] of the address */ + vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); + break; + case 2: /* use bits [45:34] of the address */ + vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); + break; + case 3: /* use bits [43:32] of the address */ + vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); + break; + default: /* Invalid mc_filter_type */ + wx_err(wx, "MC filter type param set incorrectly\n"); + break; + } + + /* vector can only be 12-bits or boundary will be exceeded */ + vector &= 0xFFF; + return vector; +} + +/** + * wx_set_mta - Set bit-vector in multicast table + * @wx: pointer to private structure + * @mc_addr: Multicast address + * + * Sets the bit-vector in the multicast table. + **/ +static void wx_set_mta(struct wx *wx, u8 *mc_addr) +{ + u32 vector, vector_bit, vector_reg; + + wx->addr_ctrl.mta_in_use++; + + vector = wx_mta_vector(wx, mc_addr); + wx_dbg(wx, " bit-vector = 0x%03X\n", vector); + + /* The MTA is a register array of 128 32-bit registers. It is treated + * like an array of 4096 bits. We want to set bit + * BitArray[vector_value]. So we figure out what register the bit is + * in, read it, OR in the new bit, then write back the new value. The + * register is determined by the upper 7 bits of the vector value and + * the bit within that register are determined by the lower 5 bits of + * the value. + */ + vector_reg = (vector >> 5) & 0x7F; + vector_bit = vector & 0x1F; + wx->mac.mta_shadow[vector_reg] |= (1 << vector_bit); +} + +/** + * wx_update_mc_addr_list - Updates MAC list of multicast addresses + * @wx: pointer to private structure + * @netdev: pointer to net device structure + * + * The given list replaces any existing list. Clears the MC addrs from receive + * address registers and the multicast table. Uses unused receive address + * registers for the first multicast addresses, and hashes the rest into the + * multicast table. 
+ **/ +static void wx_update_mc_addr_list(struct wx *wx, struct net_device *netdev) +{ + struct netdev_hw_addr *ha; + u32 i, psrctl; + + /* Set the new number of MC addresses that we are being requested to + * use. + */ + wx->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev); + wx->addr_ctrl.mta_in_use = 0; + + /* Clear mta_shadow */ + wx_dbg(wx, " Clearing MTA\n"); + memset(&wx->mac.mta_shadow, 0, sizeof(wx->mac.mta_shadow)); + + /* Update mta_shadow */ + netdev_for_each_mc_addr(ha, netdev) { + wx_dbg(wx, " Adding the multicast addresses:\n"); + wx_set_mta(wx, ha->addr); + } + + /* Enable mta */ + for (i = 0; i < wx->mac.mcft_size; i++) + wr32a(wx, WX_PSR_MC_TBL(0), i, + wx->mac.mta_shadow[i]); + + if (wx->addr_ctrl.mta_in_use > 0) { + psrctl = rd32(wx, WX_PSR_CTL); + psrctl &= ~(WX_PSR_CTL_MO | WX_PSR_CTL_MFE); + psrctl |= WX_PSR_CTL_MFE | + (wx->mac.mc_filter_type << WX_PSR_CTL_MO_SHIFT); + wr32(wx, WX_PSR_CTL, psrctl); + } + + wx_dbg(wx, "Update mc addr list Complete\n"); +} + +/** + * wx_write_mc_addr_list - write multicast addresses to MTA + * @netdev: network interface device structure + * + * Writes multicast address list to the MTA hash table. + * Returns: 0 on no addresses written + * X on writing X addresses to MTA + **/ +static int wx_write_mc_addr_list(struct net_device *netdev) +{ + struct wx *wx = netdev_priv(netdev); + + if (!netif_running(netdev)) + return 0; + + wx_update_mc_addr_list(wx, netdev); + + return netdev_mc_count(netdev); +} + +/** + * wx_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +int wx_set_mac(struct net_device *netdev, void *p) +{ + struct wx *wx = netdev_priv(netdev); + struct sockaddr *addr = p; + int retval; + + retval = eth_prepare_mac_addr_change(netdev, addr); + if (retval) + return retval; + + wx_del_mac_filter(wx, wx->mac.addr, 0); + eth_hw_addr_set(netdev, addr->sa_data); + memcpy(wx->mac.addr, addr->sa_data, netdev->addr_len); + + wx_mac_set_default_filter(wx, wx->mac.addr); + + return 0; +} +EXPORT_SYMBOL(wx_set_mac); + +void wx_disable_rx(struct wx *wx) +{ + u32 pfdtxgswc; + u32 rxctrl; + + rxctrl = rd32(wx, WX_RDB_PB_CTL); + if (rxctrl & WX_RDB_PB_CTL_RXEN) { + pfdtxgswc = rd32(wx, WX_PSR_CTL); + if (pfdtxgswc & WX_PSR_CTL_SW_EN) { + pfdtxgswc &= ~WX_PSR_CTL_SW_EN; + wr32(wx, WX_PSR_CTL, pfdtxgswc); + wx->mac.set_lben = true; + } else { + wx->mac.set_lben = false; + } + rxctrl &= ~WX_RDB_PB_CTL_RXEN; + wr32(wx, WX_RDB_PB_CTL, rxctrl); + + if (!(((wx->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) || + ((wx->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) { + /* disable mac receiver */ + wr32m(wx, WX_MAC_RX_CFG, + WX_MAC_RX_CFG_RE, 0); + } + } +} +EXPORT_SYMBOL(wx_disable_rx); + +static void wx_enable_rx(struct wx *wx) +{ + u32 psrctl; + + /* enable mac receiver */ + wr32m(wx, WX_MAC_RX_CFG, + WX_MAC_RX_CFG_RE, WX_MAC_RX_CFG_RE); + + wr32m(wx, WX_RDB_PB_CTL, + WX_RDB_PB_CTL_RXEN, WX_RDB_PB_CTL_RXEN); + + if (wx->mac.set_lben) { + psrctl = rd32(wx, WX_PSR_CTL); + psrctl |= WX_PSR_CTL_SW_EN; + wr32(wx, WX_PSR_CTL, psrctl); + wx->mac.set_lben = false; + } +} + +/** + * wx_set_rxpba - Initialize Rx packet buffer + * @wx: pointer to private structure + **/ +static void wx_set_rxpba(struct wx *wx) +{ + u32 rxpktsize, txpktsize, txpbthresh; + + rxpktsize = wx->mac.rx_pb_size << WX_RDB_PB_SZ_SHIFT; + wr32(wx, WX_RDB_PB_SZ(0), rxpktsize); + + /* Only support an equally distributed Tx packet buffer 
strategy. */ + txpktsize = wx->mac.tx_pb_size; + txpbthresh = (txpktsize / 1024) - WX_TXPKT_SIZE_MAX; + wr32(wx, WX_TDB_PB_SZ(0), txpktsize); + wr32(wx, WX_TDM_PB_THRE(0), txpbthresh); +} + +static void wx_configure_port(struct wx *wx) +{ + u32 value, i; + + value = WX_CFG_PORT_CTL_D_VLAN | WX_CFG_PORT_CTL_QINQ; + wr32m(wx, WX_CFG_PORT_CTL, + WX_CFG_PORT_CTL_D_VLAN | + WX_CFG_PORT_CTL_QINQ, + value); + + wr32(wx, WX_CFG_TAG_TPID(0), + ETH_P_8021Q | ETH_P_8021AD << 16); + wx->tpid[0] = ETH_P_8021Q; + wx->tpid[1] = ETH_P_8021AD; + for (i = 1; i < 4; i++) + wr32(wx, WX_CFG_TAG_TPID(i), + ETH_P_8021Q | ETH_P_8021Q << 16); + for (i = 2; i < 8; i++) + wx->tpid[i] = ETH_P_8021Q; +} + +/** + * wx_disable_sec_rx_path - Stops the receive data path + * @wx: pointer to private structure + * + * Stops the receive data path and waits for the HW to internally empty + * the Rx security block + **/ +static int wx_disable_sec_rx_path(struct wx *wx) +{ + u32 secrx; + + wr32m(wx, WX_RSC_CTL, + WX_RSC_CTL_RX_DIS, WX_RSC_CTL_RX_DIS); + + return read_poll_timeout(rd32, secrx, secrx & WX_RSC_ST_RSEC_RDY, + 1000, 40000, false, wx, WX_RSC_ST); +} + +/** + * wx_enable_sec_rx_path - Enables the receive data path + * @wx: pointer to private structure + * + * Enables the receive data path. + **/ +static void wx_enable_sec_rx_path(struct wx *wx) +{ + wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_RX_DIS, 0); + WX_WRITE_FLUSH(wx); +} + +static void wx_vlan_strip_control(struct wx *wx, bool enable) +{ + int i, j; + + for (i = 0; i < wx->num_rx_queues; i++) { + struct wx_ring *ring = wx->rx_ring[i]; + + j = ring->reg_idx; + wr32m(wx, WX_PX_RR_CFG(j), WX_PX_RR_CFG_VLAN, + enable ? WX_PX_RR_CFG_VLAN : 0); + } +} + +void wx_set_rx_mode(struct net_device *netdev) +{ + struct wx *wx = netdev_priv(netdev); + netdev_features_t features; + u32 fctrl, vmolr, vlnctrl; + int count; + + features = netdev->features; + + /* Check for Promiscuous and All Multicast modes */ + fctrl = rd32(wx, WX_PSR_CTL); + fctrl &= ~(WX_PSR_CTL_UPE | WX_PSR_CTL_MPE); + vmolr = rd32(wx, WX_PSR_VM_L2CTL(0)); + vmolr &= ~(WX_PSR_VM_L2CTL_UPE | + WX_PSR_VM_L2CTL_MPE | + WX_PSR_VM_L2CTL_ROPE | + WX_PSR_VM_L2CTL_ROMPE); + vlnctrl = rd32(wx, WX_PSR_VLAN_CTL); + vlnctrl &= ~(WX_PSR_VLAN_CTL_VFE | WX_PSR_VLAN_CTL_CFIEN); + + /* set all bits that we expect to always be set */ + fctrl |= WX_PSR_CTL_BAM | WX_PSR_CTL_MFE; + vmolr |= WX_PSR_VM_L2CTL_BAM | + WX_PSR_VM_L2CTL_AUPE | + WX_PSR_VM_L2CTL_VACC; + vlnctrl |= WX_PSR_VLAN_CTL_VFE; + + wx->addr_ctrl.user_set_promisc = false; + if (netdev->flags & IFF_PROMISC) { + wx->addr_ctrl.user_set_promisc = true; + fctrl |= WX_PSR_CTL_UPE | WX_PSR_CTL_MPE; + /* pf don't want packets routing to vf, so clear UPE */ + vmolr |= WX_PSR_VM_L2CTL_MPE; + vlnctrl &= ~WX_PSR_VLAN_CTL_VFE; + } + + if (netdev->flags & IFF_ALLMULTI) { + fctrl |= WX_PSR_CTL_MPE; + vmolr |= WX_PSR_VM_L2CTL_MPE; + } + + if (netdev->features & NETIF_F_RXALL) { + vmolr |= (WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_MPE); + vlnctrl &= ~WX_PSR_VLAN_CTL_VFE; + /* receive bad packets */ + wr32m(wx, WX_RSC_CTL, + WX_RSC_CTL_SAVE_MAC_ERR, + WX_RSC_CTL_SAVE_MAC_ERR); + } else { + vmolr |= WX_PSR_VM_L2CTL_ROPE | WX_PSR_VM_L2CTL_ROMPE; + } + + /* Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ + count = wx_write_uc_addr_list(netdev, 0); + if (count < 0) { + vmolr &= ~WX_PSR_VM_L2CTL_ROPE; + vmolr |= WX_PSR_VM_L2CTL_UPE; + } + + /* Write addresses to the MTA, if the attempt fails + * 
then we should just turn on promiscuous mode so + * that we can at least receive multicast traffic + */ + count = wx_write_mc_addr_list(netdev); + if (count < 0) { + vmolr &= ~WX_PSR_VM_L2CTL_ROMPE; + vmolr |= WX_PSR_VM_L2CTL_MPE; + } + + wr32(wx, WX_PSR_VLAN_CTL, vlnctrl); + wr32(wx, WX_PSR_CTL, fctrl); + wr32(wx, WX_PSR_VM_L2CTL(0), vmolr); + + if ((features & NETIF_F_HW_VLAN_CTAG_RX) && + (features & NETIF_F_HW_VLAN_STAG_RX)) + wx_vlan_strip_control(wx, true); + else + wx_vlan_strip_control(wx, false); + +} +EXPORT_SYMBOL(wx_set_rx_mode); + +static void wx_set_rx_buffer_len(struct wx *wx) +{ + struct net_device *netdev = wx->netdev; + u32 mhadd, max_frame; + + max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + /* adjust max frame to be at least the size of a standard frame */ + if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) + max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN); + + mhadd = rd32(wx, WX_PSR_MAX_SZ); + if (max_frame != mhadd) + wr32(wx, WX_PSR_MAX_SZ, max_frame); +} + +/** + * wx_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +int wx_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct wx *wx = netdev_priv(netdev); + + netdev->mtu = new_mtu; + wx_set_rx_buffer_len(wx); + + return 0; +} +EXPORT_SYMBOL(wx_change_mtu); + +/* Disable the specified rx queue */ +void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring) +{ + u8 reg_idx = ring->reg_idx; + u32 rxdctl; + int ret; + + /* write value back with RRCFG.EN bit cleared */ + wr32m(wx, WX_PX_RR_CFG(reg_idx), + WX_PX_RR_CFG_RR_EN, 0); + + /* the hardware may take up to 100us to really disable the rx queue */ + ret = read_poll_timeout(rd32, rxdctl, !(rxdctl & WX_PX_RR_CFG_RR_EN), + 10, 100, true, wx, WX_PX_RR_CFG(reg_idx)); + + if (ret == -ETIMEDOUT) { + /* Just for information */ + wx_err(wx, + "RRCFG.EN on Rx queue %d not cleared within the polling period\n", + reg_idx); + } +} +EXPORT_SYMBOL(wx_disable_rx_queue); + +static void wx_enable_rx_queue(struct wx *wx, struct wx_ring *ring) +{ + u8 reg_idx = ring->reg_idx; + u32 rxdctl; + int ret; + + ret = read_poll_timeout(rd32, rxdctl, rxdctl & WX_PX_RR_CFG_RR_EN, + 1000, 10000, true, wx, WX_PX_RR_CFG(reg_idx)); + + if (ret == -ETIMEDOUT) { + /* Just for information */ + wx_err(wx, + "RRCFG.EN on Rx queue %d not set within the polling period\n", + reg_idx); + } +} + +static void wx_configure_srrctl(struct wx *wx, + struct wx_ring *rx_ring) +{ + u16 reg_idx = rx_ring->reg_idx; + u32 srrctl; + + srrctl = rd32(wx, WX_PX_RR_CFG(reg_idx)); + srrctl &= ~(WX_PX_RR_CFG_RR_HDR_SZ | + WX_PX_RR_CFG_RR_BUF_SZ | + WX_PX_RR_CFG_SPLIT_MODE); + /* configure header buffer length, needed for RSC */ + srrctl |= WX_RXBUFFER_256 << WX_PX_RR_CFG_BHDRSIZE_SHIFT; + + /* configure the packet buffer length */ + srrctl |= WX_RX_BUFSZ >> WX_PX_RR_CFG_BSIZEPKT_SHIFT; + + wr32(wx, WX_PX_RR_CFG(reg_idx), srrctl); +} + +static void wx_configure_tx_ring(struct wx *wx, + struct wx_ring *ring) +{ + u32 txdctl = WX_PX_TR_CFG_ENABLE; + u8 reg_idx = ring->reg_idx; + u64 tdba = ring->dma; + int ret; + + /* disable queue to avoid issues while updating state */ + wr32(wx, WX_PX_TR_CFG(reg_idx), WX_PX_TR_CFG_SWFLSH); + WX_WRITE_FLUSH(wx); + + wr32(wx, WX_PX_TR_BAL(reg_idx), tdba & DMA_BIT_MASK(32)); + wr32(wx, WX_PX_TR_BAH(reg_idx), upper_32_bits(tdba)); + + /* reset head and tail pointers */ + wr32(wx, WX_PX_TR_RP(reg_idx), 0); + wr32(wx, 
WX_PX_TR_WP(reg_idx), 0); + ring->tail = wx->hw_addr + WX_PX_TR_WP(reg_idx); + + if (ring->count < WX_MAX_TXD) + txdctl |= ring->count / 128 << WX_PX_TR_CFG_TR_SIZE_SHIFT; + txdctl |= 0x20 << WX_PX_TR_CFG_WTHRESH_SHIFT; + + /* reinitialize tx_buffer_info */ + memset(ring->tx_buffer_info, 0, + sizeof(struct wx_tx_buffer) * ring->count); + + /* enable queue */ + wr32(wx, WX_PX_TR_CFG(reg_idx), txdctl); + + /* poll to verify queue is enabled */ + ret = read_poll_timeout(rd32, txdctl, txdctl & WX_PX_TR_CFG_ENABLE, + 1000, 10000, true, wx, WX_PX_TR_CFG(reg_idx)); + if (ret == -ETIMEDOUT) + wx_err(wx, "Could not enable Tx Queue %d\n", reg_idx); +} + +static void wx_configure_rx_ring(struct wx *wx, + struct wx_ring *ring) +{ + u16 reg_idx = ring->reg_idx; + union wx_rx_desc *rx_desc; + u64 rdba = ring->dma; + u32 rxdctl; + + /* disable queue to avoid issues while updating state */ + rxdctl = rd32(wx, WX_PX_RR_CFG(reg_idx)); + wx_disable_rx_queue(wx, ring); + + wr32(wx, WX_PX_RR_BAL(reg_idx), rdba & DMA_BIT_MASK(32)); + wr32(wx, WX_PX_RR_BAH(reg_idx), upper_32_bits(rdba)); + + if (ring->count == WX_MAX_RXD) + rxdctl |= 0 << WX_PX_RR_CFG_RR_SIZE_SHIFT; + else + rxdctl |= (ring->count / 128) << WX_PX_RR_CFG_RR_SIZE_SHIFT; + + rxdctl |= 0x1 << WX_PX_RR_CFG_RR_THER_SHIFT; + wr32(wx, WX_PX_RR_CFG(reg_idx), rxdctl); + + /* reset head and tail pointers */ + wr32(wx, WX_PX_RR_RP(reg_idx), 0); + wr32(wx, WX_PX_RR_WP(reg_idx), 0); + ring->tail = wx->hw_addr + WX_PX_RR_WP(reg_idx); + + wx_configure_srrctl(wx, ring); + + /* initialize rx_buffer_info */ + memset(ring->rx_buffer_info, 0, + sizeof(struct wx_rx_buffer) * ring->count); + + /* initialize Rx descriptor 0 */ + rx_desc = WX_RX_DESC(ring, 0); + rx_desc->wb.upper.length = 0; + + /* enable receive descriptor ring */ + wr32m(wx, WX_PX_RR_CFG(reg_idx), + WX_PX_RR_CFG_RR_EN, WX_PX_RR_CFG_RR_EN); + + wx_enable_rx_queue(wx, ring); + wx_alloc_rx_buffers(ring, wx_desc_unused(ring)); +} + +/** + * wx_configure_tx - Configure Transmit Unit after Reset + * @wx: pointer to private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ +static void wx_configure_tx(struct wx *wx) +{ + u32 i; + + /* TDM_CTL.TE must be before Tx queues are enabled */ + wr32m(wx, WX_TDM_CTL, + WX_TDM_CTL_TE, WX_TDM_CTL_TE); + + /* Setup the HW Tx Head and Tail descriptor pointers */ + for (i = 0; i < wx->num_tx_queues; i++) + wx_configure_tx_ring(wx, wx->tx_ring[i]); + + wr32m(wx, WX_TSC_BUF_AE, WX_TSC_BUF_AE_THR, 0x10); + + if (wx->mac.type == wx_mac_em) + wr32m(wx, WX_TSC_CTL, WX_TSC_CTL_TX_DIS | WX_TSC_CTL_TSEC_DIS, 0x1); + + /* enable mac transmitter */ + wr32m(wx, WX_MAC_TX_CFG, + WX_MAC_TX_CFG_TE, WX_MAC_TX_CFG_TE); +} + +static void wx_restore_vlan(struct wx *wx) +{ + u16 vid = 1; + + wx_vlan_rx_add_vid(wx->netdev, htons(ETH_P_8021Q), 0); + + for_each_set_bit_from(vid, wx->active_vlans, VLAN_N_VID) + wx_vlan_rx_add_vid(wx->netdev, htons(ETH_P_8021Q), vid); +} + +/** + * wx_configure_rx - Configure Receive Unit after Reset + * @wx: pointer to private structure + * + * Configure the Rx unit of the MAC after a reset. 
+ **/ +void wx_configure_rx(struct wx *wx) +{ + u32 psrtype, i; + int ret; + + wx_disable_rx(wx); + + psrtype = WX_RDB_PL_CFG_L4HDR | + WX_RDB_PL_CFG_L3HDR | + WX_RDB_PL_CFG_L2HDR | + WX_RDB_PL_CFG_TUN_TUNHDR; + wr32(wx, WX_RDB_PL_CFG(0), psrtype); + + /* enable hw crc stripping */ + wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_CRC_STRIP, WX_RSC_CTL_CRC_STRIP); + + if (wx->mac.type == wx_mac_sp) { + u32 psrctl; + + /* RSC Setup */ + psrctl = rd32(wx, WX_PSR_CTL); + psrctl |= WX_PSR_CTL_RSC_ACK; /* Disable RSC for ACK packets */ + psrctl |= WX_PSR_CTL_RSC_DIS; + wr32(wx, WX_PSR_CTL, psrctl); + } + + /* set_rx_buffer_len must be called before ring initialization */ + wx_set_rx_buffer_len(wx); + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < wx->num_rx_queues; i++) + wx_configure_rx_ring(wx, wx->rx_ring[i]); + + /* Enable all receives, disable security engine prior to block traffic */ + ret = wx_disable_sec_rx_path(wx); + if (ret < 0) + wx_err(wx, "The register status is abnormal, please check device."); + + wx_enable_rx(wx); + wx_enable_sec_rx_path(wx); +} +EXPORT_SYMBOL(wx_configure_rx); + +static void wx_configure_isb(struct wx *wx) +{ + /* set ISB Address */ + wr32(wx, WX_PX_ISB_ADDR_L, wx->isb_dma & DMA_BIT_MASK(32)); + if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) + wr32(wx, WX_PX_ISB_ADDR_H, upper_32_bits(wx->isb_dma)); +} + +void wx_configure(struct wx *wx) +{ + wx_set_rxpba(wx); + wx_configure_port(wx); + + wx_set_rx_mode(wx->netdev); + wx_restore_vlan(wx); + wx_enable_sec_rx_path(wx); + + wx_configure_tx(wx); + wx_configure_rx(wx); + wx_configure_isb(wx); +} +EXPORT_SYMBOL(wx_configure); + +/** + * wx_disable_pcie_master - Disable PCI-express master access + * @wx: pointer to hardware structure + * + * Disables PCI-Express master access and verifies there are no pending + * requests. + **/ +int wx_disable_pcie_master(struct wx *wx) +{ + int status = 0; + u32 val; + + /* Always set this bit to ensure any future transactions are blocked */ + pci_clear_master(wx->pdev); + + /* Exit if master requests are blocked */ + if (!(rd32(wx, WX_PX_TRANSACTION_PENDING))) + return 0; + + /* Poll for master request bit to clear */ + status = read_poll_timeout(rd32, val, !val, 100, WX_PCI_MASTER_DISABLE_TIMEOUT, + false, wx, WX_PX_TRANSACTION_PENDING); + if (status < 0) + wx_err(wx, "PCIe transaction pending bit did not clear.\n"); + + return status; +} +EXPORT_SYMBOL(wx_disable_pcie_master); + +/** + * wx_stop_adapter - Generic stop Tx/Rx units + * @wx: pointer to hardware structure + * + * Sets the adapter_stopped flag within wx_hw struct. Clears interrupts, + * disables transmit and receive units. The adapter_stopped flag is used by + * the shared code and drivers to determine if the adapter is in a stopped + * state and should not touch the hardware. + **/ +int wx_stop_adapter(struct wx *wx) +{ + u16 i; + + /* Set the adapter_stopped flag so other driver functions stop touching + * the hardware + */ + wx->adapter_stopped = true; + + /* Disable the receive unit */ + wx_disable_rx(wx); + + /* Set interrupt mask to stop interrupts from being generated */ + wx_intr_disable(wx, WX_INTR_ALL); + + /* Clear any pending interrupts, flush previous writes */ + wr32(wx, WX_PX_MISC_IC, 0xffffffff); + wr32(wx, WX_BME_CTL, 0x3); + + /* Disable the transmit unit. Each queue must be disabled. 
*/ + for (i = 0; i < wx->mac.max_tx_queues; i++) { + wr32m(wx, WX_PX_TR_CFG(i), + WX_PX_TR_CFG_SWFLSH | WX_PX_TR_CFG_ENABLE, + WX_PX_TR_CFG_SWFLSH); + } + + /* Disable the receive unit by stopping each queue */ + for (i = 0; i < wx->mac.max_rx_queues; i++) { + wr32m(wx, WX_PX_RR_CFG(i), + WX_PX_RR_CFG_RR_EN, 0); + } + + /* flush all queues disables */ + WX_WRITE_FLUSH(wx); + + /* Prevent the PCI-E bus from hanging by disabling PCI-E master + * access and verify no pending requests + */ + return wx_disable_pcie_master(wx); +} +EXPORT_SYMBOL(wx_stop_adapter); + +void wx_reset_misc(struct wx *wx) +{ + int i; + + /* receive packets that size > 2048 */ + wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_JE, WX_MAC_RX_CFG_JE); + + /* clear counters on read */ + wr32m(wx, WX_MMC_CONTROL, + WX_MMC_CONTROL_RSTONRD, WX_MMC_CONTROL_RSTONRD); + + wr32m(wx, WX_MAC_RX_FLOW_CTRL, + WX_MAC_RX_FLOW_CTRL_RFE, WX_MAC_RX_FLOW_CTRL_RFE); + + wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR); + + wr32m(wx, WX_MIS_RST_ST, + WX_MIS_RST_ST_RST_INIT, 0x1E00); + + /* errata 4: initialize mng flex tbl and wakeup flex tbl*/ + wr32(wx, WX_PSR_MNG_FLEX_SEL, 0); + for (i = 0; i < 16; i++) { + wr32(wx, WX_PSR_MNG_FLEX_DW_L(i), 0); + wr32(wx, WX_PSR_MNG_FLEX_DW_H(i), 0); + wr32(wx, WX_PSR_MNG_FLEX_MSK(i), 0); + } + wr32(wx, WX_PSR_LAN_FLEX_SEL, 0); + for (i = 0; i < 16; i++) { + wr32(wx, WX_PSR_LAN_FLEX_DW_L(i), 0); + wr32(wx, WX_PSR_LAN_FLEX_DW_H(i), 0); + wr32(wx, WX_PSR_LAN_FLEX_MSK(i), 0); + } + + /* set pause frame dst mac addr */ + wr32(wx, WX_RDB_PFCMACDAL, 0xC2000001); + wr32(wx, WX_RDB_PFCMACDAH, 0x0180); +} +EXPORT_SYMBOL(wx_reset_misc); + +/** + * wx_get_pcie_msix_counts - Gets MSI-X vector count + * @wx: pointer to hardware structure + * @msix_count: number of MSI interrupts that can be obtained + * @max_msix_count: number of MSI interrupts that mac need + * + * Read PCIe configuration space, and get the MSI-X vector count from + * the capabilities table. 
+ **/ +int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count) +{ + struct pci_dev *pdev = wx->pdev; + struct device *dev = &pdev->dev; + int pos; + + *msix_count = 1; + pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); + if (!pos) { + dev_err(dev, "Unable to find MSI-X Capabilities\n"); + return -EINVAL; + } + pci_read_config_word(pdev, + pos + PCI_MSIX_FLAGS, + msix_count); + *msix_count &= WX_PCIE_MSIX_TBL_SZ_MASK; + /* MSI-X count is zero-based in HW */ + *msix_count += 1; + + if (*msix_count > max_msix_count) + *msix_count = max_msix_count; + + return 0; +} +EXPORT_SYMBOL(wx_get_pcie_msix_counts); + +int wx_sw_init(struct wx *wx) +{ + struct pci_dev *pdev = wx->pdev; + u32 ssid = 0; + int err = 0; + + wx->vendor_id = pdev->vendor; + wx->device_id = pdev->device; + wx->revision_id = pdev->revision; + wx->oem_svid = pdev->subsystem_vendor; + wx->oem_ssid = pdev->subsystem_device; + wx->bus.device = PCI_SLOT(pdev->devfn); + wx->bus.func = PCI_FUNC(pdev->devfn); + + if (wx->oem_svid == PCI_VENDOR_ID_WANGXUN) { + wx->subsystem_vendor_id = pdev->subsystem_vendor; + wx->subsystem_device_id = pdev->subsystem_device; + } else { + err = wx_flash_read_dword(wx, 0xfffdc, &ssid); + if (err < 0) { + wx_err(wx, "read of internal subsystem device id failed\n"); + return err; + } + + wx->subsystem_device_id = swab16((u16)ssid); + } + + wx->mac_table = kcalloc(wx->mac.num_rar_entries, + sizeof(struct wx_mac_addr), + GFP_KERNEL); + if (!wx->mac_table) { + wx_err(wx, "mac_table allocation failed\n"); + return -ENOMEM; + } + + return 0; +} +EXPORT_SYMBOL(wx_sw_init); + +/** + * wx_find_vlvf_slot - find the vlanid or the first empty slot + * @wx: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * + * return the VLVF index where this VLAN id should be placed + * + **/ +static int wx_find_vlvf_slot(struct wx *wx, u32 vlan) +{ + u32 bits = 0, first_empty_slot = 0; + int regindex; + + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* Search for the vlan id in the VLVF entries. Save off the first empty + * slot found along the way + */ + for (regindex = 1; regindex < WX_PSR_VLAN_SWC_ENTRIES; regindex++) { + wr32(wx, WX_PSR_VLAN_SWC_IDX, regindex); + bits = rd32(wx, WX_PSR_VLAN_SWC); + if (!bits && !(first_empty_slot)) + first_empty_slot = regindex; + else if ((bits & 0x0FFF) == vlan) + break; + } + + if (regindex >= WX_PSR_VLAN_SWC_ENTRIES) { + if (first_empty_slot) + regindex = first_empty_slot; + else + regindex = -ENOMEM; + } + + return regindex; +} + +/** + * wx_set_vlvf - Set VLAN Pool Filter + * @wx: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VFVFB + * @vlan_on: boolean flag to turn on/off VLAN in VFVF + * @vfta_changed: pointer to boolean flag which indicates whether VFTA + * should be changed + * + * Turn on/off specified bit in VLVF table. 
+ **/ +static int wx_set_vlvf(struct wx *wx, u32 vlan, u32 vind, bool vlan_on, + bool *vfta_changed) +{ + int vlvf_index; + u32 vt, bits; + + /* If VT Mode is set + * Either vlan_on + * make sure the vlan is in VLVF + * set the vind bit in the matching VLVFB + * Or !vlan_on + * clear the pool bit and possibly the vind + */ + vt = rd32(wx, WX_CFG_PORT_CTL); + if (!(vt & WX_CFG_PORT_CTL_NUM_VT_MASK)) + return 0; + + vlvf_index = wx_find_vlvf_slot(wx, vlan); + if (vlvf_index < 0) + return vlvf_index; + + wr32(wx, WX_PSR_VLAN_SWC_IDX, vlvf_index); + if (vlan_on) { + /* set the pool bit */ + if (vind < 32) { + bits = rd32(wx, WX_PSR_VLAN_SWC_VM_L); + bits |= (1 << vind); + wr32(wx, WX_PSR_VLAN_SWC_VM_L, bits); + } else { + bits = rd32(wx, WX_PSR_VLAN_SWC_VM_H); + bits |= (1 << (vind - 32)); + wr32(wx, WX_PSR_VLAN_SWC_VM_H, bits); + } + } else { + /* clear the pool bit */ + if (vind < 32) { + bits = rd32(wx, WX_PSR_VLAN_SWC_VM_L); + bits &= ~(1 << vind); + wr32(wx, WX_PSR_VLAN_SWC_VM_L, bits); + bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_H); + } else { + bits = rd32(wx, WX_PSR_VLAN_SWC_VM_H); + bits &= ~(1 << (vind - 32)); + wr32(wx, WX_PSR_VLAN_SWC_VM_H, bits); + bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_L); + } + } + + if (bits) { + wr32(wx, WX_PSR_VLAN_SWC, (WX_PSR_VLAN_SWC_VIEN | vlan)); + if (!vlan_on && vfta_changed) + *vfta_changed = false; + } else { + wr32(wx, WX_PSR_VLAN_SWC, 0); + } + + return 0; +} + +/** + * wx_set_vfta - Set VLAN filter table + * @wx: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VFVFB + * @vlan_on: boolean flag to turn on/off VLAN in VFVF + * + * Turn on/off specified VLAN in the VLAN filter table. + **/ +static int wx_set_vfta(struct wx *wx, u32 vlan, u32 vind, bool vlan_on) +{ + u32 bitindex, vfta, targetbit; + bool vfta_changed = false; + int regindex, ret; + + /* this is a 2 part operation - first the VFTA, then the + * VLVF and VLVFB if VT Mode is set + * We don't write the VFTA until we know the VLVF part succeeded. 
+ */ + + /* Part 1 + * The VFTA is a bitstring made up of 128 32-bit registers + * that enable the particular VLAN id, much like the MTA: + * bits[11-5]: which register + * bits[4-0]: which bit in the register + */ + regindex = (vlan >> 5) & 0x7F; + bitindex = vlan & 0x1F; + targetbit = (1 << bitindex); + /* errata 5 */ + vfta = wx->mac.vft_shadow[regindex]; + if (vlan_on) { + if (!(vfta & targetbit)) { + vfta |= targetbit; + vfta_changed = true; + } + } else { + if ((vfta & targetbit)) { + vfta &= ~targetbit; + vfta_changed = true; + } + } + /* Part 2 + * Call wx_set_vlvf to set VLVFB and VLVF + */ + ret = wx_set_vlvf(wx, vlan, vind, vlan_on, &vfta_changed); + if (ret != 0) + return ret; + + if (vfta_changed) + wr32(wx, WX_PSR_VLAN_TBL(regindex), vfta); + wx->mac.vft_shadow[regindex] = vfta; + + return 0; +} + +/** + * wx_clear_vfta - Clear VLAN filter table + * @wx: pointer to hardware structure + * + * Clears the VLAN filer table, and the VMDq index associated with the filter + **/ +static void wx_clear_vfta(struct wx *wx) +{ + u32 offset; + + for (offset = 0; offset < wx->mac.vft_size; offset++) { + wr32(wx, WX_PSR_VLAN_TBL(offset), 0); + wx->mac.vft_shadow[offset] = 0; + } + + for (offset = 0; offset < WX_PSR_VLAN_SWC_ENTRIES; offset++) { + wr32(wx, WX_PSR_VLAN_SWC_IDX, offset); + wr32(wx, WX_PSR_VLAN_SWC, 0); + wr32(wx, WX_PSR_VLAN_SWC_VM_L, 0); + wr32(wx, WX_PSR_VLAN_SWC_VM_H, 0); + } +} + +int wx_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct wx *wx = netdev_priv(netdev); + + /* add VID to filter table */ + wx_set_vfta(wx, vid, VMDQ_P(0), true); + set_bit(vid, wx->active_vlans); + + return 0; +} +EXPORT_SYMBOL(wx_vlan_rx_add_vid); + +int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) +{ + struct wx *wx = netdev_priv(netdev); + + /* remove VID from filter table */ + if (vid) + wx_set_vfta(wx, vid, VMDQ_P(0), false); + clear_bit(vid, wx->active_vlans); + + return 0; +} +EXPORT_SYMBOL(wx_vlan_rx_kill_vid); + +/** + * wx_start_hw - Prepare hardware for Tx/Rx + * @wx: pointer to hardware structure + * + * Starts the hardware using the generic start_hw function + * and the generation start_hw function. + * Then performs revision-specific operations, if any. + **/ +void wx_start_hw(struct wx *wx) +{ + int i; + + /* Clear the VLAN filter table */ + wx_clear_vfta(wx); + WX_WRITE_FLUSH(wx); + /* Clear the rate limiters */ + for (i = 0; i < wx->mac.max_tx_queues; i++) { + wr32(wx, WX_TDM_RP_IDX, i); + wr32(wx, WX_TDM_RP_RATE, 0); + } +} +EXPORT_SYMBOL(wx_start_hw); + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h new file mode 100644 index 0000000000..0b3447bc6f --- /dev/null +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. 
*/ + +#ifndef _WX_HW_H_ +#define _WX_HW_H_ + +void wx_intr_enable(struct wx *wx, u64 qmask); +void wx_irq_disable(struct wx *wx); +int wx_check_flash_load(struct wx *wx, u32 check_bit); +void wx_control_hw(struct wx *wx, bool drv); +int wx_mng_present(struct wx *wx); +int wx_host_interface_command(struct wx *wx, u32 *buffer, + u32 length, u32 timeout, bool return_data); +int wx_read_ee_hostif(struct wx *wx, u16 offset, u16 *data); +int wx_read_ee_hostif_buffer(struct wx *wx, + u16 offset, u16 words, u16 *data); +void wx_init_eeprom_params(struct wx *wx); +void wx_get_mac_addr(struct wx *wx, u8 *mac_addr); +void wx_init_rx_addrs(struct wx *wx); +void wx_mac_set_default_filter(struct wx *wx, u8 *addr); +void wx_flush_sw_mac_table(struct wx *wx); +int wx_set_mac(struct net_device *netdev, void *p); +void wx_disable_rx(struct wx *wx); +void wx_set_rx_mode(struct net_device *netdev); +int wx_change_mtu(struct net_device *netdev, int new_mtu); +void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring); +void wx_configure_rx(struct wx *wx); +void wx_configure(struct wx *wx); +void wx_start_hw(struct wx *wx); +int wx_disable_pcie_master(struct wx *wx); +int wx_stop_adapter(struct wx *wx); +void wx_reset_misc(struct wx *wx); +int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count); +int wx_sw_init(struct wx *wx); +int wx_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid); +int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid); + +#endif /* _WX_HW_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c new file mode 100644 index 0000000000..e078f4071d --- /dev/null +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c @@ -0,0 +1,2658 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */ + +#include <linux/etherdevice.h> +#include <net/ip6_checksum.h> +#include <net/page_pool/helpers.h> +#include <net/inet_ecn.h> +#include <linux/iopoll.h> +#include <linux/sctp.h> +#include <linux/pci.h> +#include <net/tcp.h> +#include <net/ip.h> + +#include "wx_type.h" +#include "wx_lib.h" +#include "wx_hw.h" + +/* Lookup table mapping the HW PTYPE to the bit field for decoding */ +static struct wx_dec_ptype wx_ptype_lookup[256] = { + /* L2: mac */ + [0x11] = WX_PTT(L2, NONE, NONE, NONE, NONE, PAY2), + [0x12] = WX_PTT(L2, NONE, NONE, NONE, TS, PAY2), + [0x13] = WX_PTT(L2, NONE, NONE, NONE, NONE, PAY2), + [0x14] = WX_PTT(L2, NONE, NONE, NONE, NONE, PAY2), + [0x15] = WX_PTT(L2, NONE, NONE, NONE, NONE, NONE), + [0x16] = WX_PTT(L2, NONE, NONE, NONE, NONE, PAY2), + [0x17] = WX_PTT(L2, NONE, NONE, NONE, NONE, NONE), + + /* L2: ethertype filter */ + [0x18 ... 0x1F] = WX_PTT(L2, NONE, NONE, NONE, NONE, NONE), + + /* L3: ip non-tunnel */ + [0x21] = WX_PTT(IP, FGV4, NONE, NONE, NONE, PAY3), + [0x22] = WX_PTT(IP, IPV4, NONE, NONE, NONE, PAY3), + [0x23] = WX_PTT(IP, IPV4, NONE, NONE, UDP, PAY4), + [0x24] = WX_PTT(IP, IPV4, NONE, NONE, TCP, PAY4), + [0x25] = WX_PTT(IP, IPV4, NONE, NONE, SCTP, PAY4), + [0x29] = WX_PTT(IP, FGV6, NONE, NONE, NONE, PAY3), + [0x2A] = WX_PTT(IP, IPV6, NONE, NONE, NONE, PAY3), + [0x2B] = WX_PTT(IP, IPV6, NONE, NONE, UDP, PAY3), + [0x2C] = WX_PTT(IP, IPV6, NONE, NONE, TCP, PAY4), + [0x2D] = WX_PTT(IP, IPV6, NONE, NONE, SCTP, PAY4), + + /* L2: fcoe */ + [0x30 ... 0x34] = WX_PTT(FCOE, NONE, NONE, NONE, NONE, PAY3), + [0x38 ... 
0x3C] = WX_PTT(FCOE, NONE, NONE, NONE, NONE, PAY3), + + /* IPv4 --> IPv4/IPv6 */ + [0x81] = WX_PTT(IP, IPV4, IPIP, FGV4, NONE, PAY3), + [0x82] = WX_PTT(IP, IPV4, IPIP, IPV4, NONE, PAY3), + [0x83] = WX_PTT(IP, IPV4, IPIP, IPV4, UDP, PAY4), + [0x84] = WX_PTT(IP, IPV4, IPIP, IPV4, TCP, PAY4), + [0x85] = WX_PTT(IP, IPV4, IPIP, IPV4, SCTP, PAY4), + [0x89] = WX_PTT(IP, IPV4, IPIP, FGV6, NONE, PAY3), + [0x8A] = WX_PTT(IP, IPV4, IPIP, IPV6, NONE, PAY3), + [0x8B] = WX_PTT(IP, IPV4, IPIP, IPV6, UDP, PAY4), + [0x8C] = WX_PTT(IP, IPV4, IPIP, IPV6, TCP, PAY4), + [0x8D] = WX_PTT(IP, IPV4, IPIP, IPV6, SCTP, PAY4), + + /* IPv4 --> GRE/NAT --> NONE/IPv4/IPv6 */ + [0x90] = WX_PTT(IP, IPV4, IG, NONE, NONE, PAY3), + [0x91] = WX_PTT(IP, IPV4, IG, FGV4, NONE, PAY3), + [0x92] = WX_PTT(IP, IPV4, IG, IPV4, NONE, PAY3), + [0x93] = WX_PTT(IP, IPV4, IG, IPV4, UDP, PAY4), + [0x94] = WX_PTT(IP, IPV4, IG, IPV4, TCP, PAY4), + [0x95] = WX_PTT(IP, IPV4, IG, IPV4, SCTP, PAY4), + [0x99] = WX_PTT(IP, IPV4, IG, FGV6, NONE, PAY3), + [0x9A] = WX_PTT(IP, IPV4, IG, IPV6, NONE, PAY3), + [0x9B] = WX_PTT(IP, IPV4, IG, IPV6, UDP, PAY4), + [0x9C] = WX_PTT(IP, IPV4, IG, IPV6, TCP, PAY4), + [0x9D] = WX_PTT(IP, IPV4, IG, IPV6, SCTP, PAY4), + + /* IPv4 --> GRE/NAT --> MAC --> NONE/IPv4/IPv6 */ + [0xA0] = WX_PTT(IP, IPV4, IGM, NONE, NONE, PAY3), + [0xA1] = WX_PTT(IP, IPV4, IGM, FGV4, NONE, PAY3), + [0xA2] = WX_PTT(IP, IPV4, IGM, IPV4, NONE, PAY3), + [0xA3] = WX_PTT(IP, IPV4, IGM, IPV4, UDP, PAY4), + [0xA4] = WX_PTT(IP, IPV4, IGM, IPV4, TCP, PAY4), + [0xA5] = WX_PTT(IP, IPV4, IGM, IPV4, SCTP, PAY4), + [0xA9] = WX_PTT(IP, IPV4, IGM, FGV6, NONE, PAY3), + [0xAA] = WX_PTT(IP, IPV4, IGM, IPV6, NONE, PAY3), + [0xAB] = WX_PTT(IP, IPV4, IGM, IPV6, UDP, PAY4), + [0xAC] = WX_PTT(IP, IPV4, IGM, IPV6, TCP, PAY4), + [0xAD] = WX_PTT(IP, IPV4, IGM, IPV6, SCTP, PAY4), + + /* IPv4 --> GRE/NAT --> MAC+VLAN --> NONE/IPv4/IPv6 */ + [0xB0] = WX_PTT(IP, IPV4, IGMV, NONE, NONE, PAY3), + [0xB1] = WX_PTT(IP, IPV4, IGMV, FGV4, NONE, PAY3), + [0xB2] = WX_PTT(IP, IPV4, IGMV, IPV4, NONE, PAY3), + [0xB3] = WX_PTT(IP, IPV4, IGMV, IPV4, UDP, PAY4), + [0xB4] = WX_PTT(IP, IPV4, IGMV, IPV4, TCP, PAY4), + [0xB5] = WX_PTT(IP, IPV4, IGMV, IPV4, SCTP, PAY4), + [0xB9] = WX_PTT(IP, IPV4, IGMV, FGV6, NONE, PAY3), + [0xBA] = WX_PTT(IP, IPV4, IGMV, IPV6, NONE, PAY3), + [0xBB] = WX_PTT(IP, IPV4, IGMV, IPV6, UDP, PAY4), + [0xBC] = WX_PTT(IP, IPV4, IGMV, IPV6, TCP, PAY4), + [0xBD] = WX_PTT(IP, IPV4, IGMV, IPV6, SCTP, PAY4), + + /* IPv6 --> IPv4/IPv6 */ + [0xC1] = WX_PTT(IP, IPV6, IPIP, FGV4, NONE, PAY3), + [0xC2] = WX_PTT(IP, IPV6, IPIP, IPV4, NONE, PAY3), + [0xC3] = WX_PTT(IP, IPV6, IPIP, IPV4, UDP, PAY4), + [0xC4] = WX_PTT(IP, IPV6, IPIP, IPV4, TCP, PAY4), + [0xC5] = WX_PTT(IP, IPV6, IPIP, IPV4, SCTP, PAY4), + [0xC9] = WX_PTT(IP, IPV6, IPIP, FGV6, NONE, PAY3), + [0xCA] = WX_PTT(IP, IPV6, IPIP, IPV6, NONE, PAY3), + [0xCB] = WX_PTT(IP, IPV6, IPIP, IPV6, UDP, PAY4), + [0xCC] = WX_PTT(IP, IPV6, IPIP, IPV6, TCP, PAY4), + [0xCD] = WX_PTT(IP, IPV6, IPIP, IPV6, SCTP, PAY4), + + /* IPv6 --> GRE/NAT -> NONE/IPv4/IPv6 */ + [0xD0] = WX_PTT(IP, IPV6, IG, NONE, NONE, PAY3), + [0xD1] = WX_PTT(IP, IPV6, IG, FGV4, NONE, PAY3), + [0xD2] = WX_PTT(IP, IPV6, IG, IPV4, NONE, PAY3), + [0xD3] = WX_PTT(IP, IPV6, IG, IPV4, UDP, PAY4), + [0xD4] = WX_PTT(IP, IPV6, IG, IPV4, TCP, PAY4), + [0xD5] = WX_PTT(IP, IPV6, IG, IPV4, SCTP, PAY4), + [0xD9] = WX_PTT(IP, IPV6, IG, FGV6, NONE, PAY3), + [0xDA] = WX_PTT(IP, IPV6, IG, IPV6, NONE, PAY3), + [0xDB] = WX_PTT(IP, IPV6, IG, IPV6, UDP, PAY4), + [0xDC] = WX_PTT(IP, IPV6, 
IG, IPV6, TCP, PAY4), + [0xDD] = WX_PTT(IP, IPV6, IG, IPV6, SCTP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC -> NONE/IPv4/IPv6 */ + [0xE0] = WX_PTT(IP, IPV6, IGM, NONE, NONE, PAY3), + [0xE1] = WX_PTT(IP, IPV6, IGM, FGV4, NONE, PAY3), + [0xE2] = WX_PTT(IP, IPV6, IGM, IPV4, NONE, PAY3), + [0xE3] = WX_PTT(IP, IPV6, IGM, IPV4, UDP, PAY4), + [0xE4] = WX_PTT(IP, IPV6, IGM, IPV4, TCP, PAY4), + [0xE5] = WX_PTT(IP, IPV6, IGM, IPV4, SCTP, PAY4), + [0xE9] = WX_PTT(IP, IPV6, IGM, FGV6, NONE, PAY3), + [0xEA] = WX_PTT(IP, IPV6, IGM, IPV6, NONE, PAY3), + [0xEB] = WX_PTT(IP, IPV6, IGM, IPV6, UDP, PAY4), + [0xEC] = WX_PTT(IP, IPV6, IGM, IPV6, TCP, PAY4), + [0xED] = WX_PTT(IP, IPV6, IGM, IPV6, SCTP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC--> NONE/IPv */ + [0xF0] = WX_PTT(IP, IPV6, IGMV, NONE, NONE, PAY3), + [0xF1] = WX_PTT(IP, IPV6, IGMV, FGV4, NONE, PAY3), + [0xF2] = WX_PTT(IP, IPV6, IGMV, IPV4, NONE, PAY3), + [0xF3] = WX_PTT(IP, IPV6, IGMV, IPV4, UDP, PAY4), + [0xF4] = WX_PTT(IP, IPV6, IGMV, IPV4, TCP, PAY4), + [0xF5] = WX_PTT(IP, IPV6, IGMV, IPV4, SCTP, PAY4), + [0xF9] = WX_PTT(IP, IPV6, IGMV, FGV6, NONE, PAY3), + [0xFA] = WX_PTT(IP, IPV6, IGMV, IPV6, NONE, PAY3), + [0xFB] = WX_PTT(IP, IPV6, IGMV, IPV6, UDP, PAY4), + [0xFC] = WX_PTT(IP, IPV6, IGMV, IPV6, TCP, PAY4), + [0xFD] = WX_PTT(IP, IPV6, IGMV, IPV6, SCTP, PAY4), +}; + +static struct wx_dec_ptype wx_decode_ptype(const u8 ptype) +{ + return wx_ptype_lookup[ptype]; +} + +/* wx_test_staterr - tests bits in Rx descriptor status and error fields */ +static __le32 wx_test_staterr(union wx_rx_desc *rx_desc, + const u32 stat_err_bits) +{ + return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); +} + +static void wx_dma_sync_frag(struct wx_ring *rx_ring, + struct wx_rx_buffer *rx_buffer) +{ + struct sk_buff *skb = rx_buffer->skb; + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + + dma_sync_single_range_for_cpu(rx_ring->dev, + WX_CB(skb)->dma, + skb_frag_off(frag), + skb_frag_size(frag), + DMA_FROM_DEVICE); + + /* If the page was released, just unmap it. */ + if (unlikely(WX_CB(skb)->page_released)) + page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false); +} + +static struct wx_rx_buffer *wx_get_rx_buffer(struct wx_ring *rx_ring, + union wx_rx_desc *rx_desc, + struct sk_buff **skb, + int *rx_buffer_pgcnt) +{ + struct wx_rx_buffer *rx_buffer; + unsigned int size; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + size = le16_to_cpu(rx_desc->wb.upper.length); + +#if (PAGE_SIZE < 8192) + *rx_buffer_pgcnt = page_count(rx_buffer->page); +#else + *rx_buffer_pgcnt = 0; +#endif + + prefetchw(rx_buffer->page); + *skb = rx_buffer->skb; + + /* Delay unmapping of the first packet. It carries the header + * information, HW may still access the header after the writeback. 
+ * Only unmap it when EOP is reached + */ + if (!wx_test_staterr(rx_desc, WX_RXD_STAT_EOP)) { + if (!*skb) + goto skip_sync; + } else { + if (*skb) + wx_dma_sync_frag(rx_ring, rx_buffer); + } + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + size, + DMA_FROM_DEVICE); +skip_sync: + return rx_buffer; +} + +static void wx_put_rx_buffer(struct wx_ring *rx_ring, + struct wx_rx_buffer *rx_buffer, + struct sk_buff *skb, + int rx_buffer_pgcnt) +{ + if (!IS_ERR(skb) && WX_CB(skb)->dma == rx_buffer->dma) + /* the page has been released from the ring */ + WX_CB(skb)->page_released = true; + + /* clear contents of rx_buffer */ + rx_buffer->page = NULL; + rx_buffer->skb = NULL; +} + +static struct sk_buff *wx_build_skb(struct wx_ring *rx_ring, + struct wx_rx_buffer *rx_buffer, + union wx_rx_desc *rx_desc) +{ + unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); +#if (PAGE_SIZE < 8192) + unsigned int truesize = WX_RX_BUFSZ; +#else + unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); +#endif + struct sk_buff *skb = rx_buffer->skb; + + if (!skb) { + void *page_addr = page_address(rx_buffer->page) + + rx_buffer->page_offset; + + /* prefetch first cache line of first page */ + prefetch(page_addr); +#if L1_CACHE_BYTES < 128 + prefetch(page_addr + L1_CACHE_BYTES); +#endif + + /* allocate a skb to store the frags */ + skb = napi_alloc_skb(&rx_ring->q_vector->napi, WX_RXBUFFER_256); + if (unlikely(!skb)) + return NULL; + + /* we will be copying header into skb->data in + * pskb_may_pull so it is in our interest to prefetch + * it now to avoid a possible cache miss + */ + prefetchw(skb->data); + + if (size <= WX_RXBUFFER_256) { + memcpy(__skb_put(skb, size), page_addr, + ALIGN(size, sizeof(long))); + page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, true); + return skb; + } + + skb_mark_for_recycle(skb); + + if (!wx_test_staterr(rx_desc, WX_RXD_STAT_EOP)) + WX_CB(skb)->dma = rx_buffer->dma; + + skb_add_rx_frag(skb, 0, rx_buffer->page, + rx_buffer->page_offset, + size, truesize); + goto out; + + } else { + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, + rx_buffer->page_offset, size, truesize); + } + +out: +#if (PAGE_SIZE < 8192) + /* flip page offset to other buffer */ + rx_buffer->page_offset ^= truesize; +#else + /* move offset up to the next cache line */ + rx_buffer->page_offset += truesize; +#endif + + return skb; +} + +static bool wx_alloc_mapped_page(struct wx_ring *rx_ring, + struct wx_rx_buffer *bi) +{ + struct page *page = bi->page; + dma_addr_t dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) + return true; + + page = page_pool_dev_alloc_pages(rx_ring->page_pool); + WARN_ON(!page); + dma = page_pool_get_dma_addr(page); + + bi->page_dma = dma; + bi->page = page; + bi->page_offset = 0; + + return true; +} + +/** + * wx_alloc_rx_buffers - Replace used receive buffers + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + **/ +void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count) +{ + u16 i = rx_ring->next_to_use; + union wx_rx_desc *rx_desc; + struct wx_rx_buffer *bi; + + /* nothing to do */ + if (!cleaned_count) + return; + + rx_desc = WX_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; + + do { + if (!wx_alloc_mapped_page(rx_ring, bi)) + break; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, 
bi->dma, + bi->page_offset, + WX_RX_BUFSZ, + DMA_FROM_DEVICE); + + rx_desc->read.pkt_addr = + cpu_to_le64(bi->page_dma + bi->page_offset); + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = WX_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + + /* clear the status bits for the next_to_use descriptor */ + rx_desc->wb.upper.status_error = 0; + + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) { + rx_ring->next_to_use = i; + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(i, rx_ring->tail); + } +} + +u16 wx_desc_unused(struct wx_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1; +} + +/** + * wx_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * @skb: Current socket buffer containing buffer in progress + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + **/ +static bool wx_is_non_eop(struct wx_ring *rx_ring, + union wx_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(WX_RX_DESC(rx_ring, ntc)); + + /* if we are the last buffer then there is nothing else to do */ + if (likely(wx_test_staterr(rx_desc, WX_RXD_STAT_EOP))) + return false; + + rx_ring->rx_buffer_info[ntc].skb = skb; + + return true; +} + +static void wx_pull_tail(struct sk_buff *skb) +{ + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + unsigned int pull_len; + unsigned char *va; + + /* it is valid to use page_address instead of kmap since we are + * working with pages allocated out of the lomem pool per + * alloc_page(GFP_ATOMIC) + */ + va = skb_frag_address(frag); + + /* we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. + */ + pull_len = eth_get_headlen(skb->dev, va, WX_RXBUFFER_256); + + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + skb_frag_size_sub(frag, pull_len); + skb_frag_off_add(frag, pull_len); + skb->data_len -= pull_len; + skb->tail += pull_len; +} + +/** + * wx_cleanup_headers - Correct corrupted or empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being fixed + * + * Check for corrupted packet headers caused by senders on the local L2 + * embedded NIC switch not setting up their Tx Descriptors right. These + * should be very rare. + * + * Also address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. 
+ * + * Returns true if an error was encountered and skb was freed. + **/ +static bool wx_cleanup_headers(struct wx_ring *rx_ring, + union wx_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct net_device *netdev = rx_ring->netdev; + + /* verify that the packet does not have any known errors */ + if (!netdev || + unlikely(wx_test_staterr(rx_desc, WX_RXD_ERR_RXE) && + !(netdev->features & NETIF_F_RXALL))) { + dev_kfree_skb_any(skb); + return true; + } + + /* place header in linear portion of buffer */ + if (!skb_headlen(skb)) + wx_pull_tail(skb); + + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +static void wx_rx_hash(struct wx_ring *ring, + union wx_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u16 rss_type; + + if (!(ring->netdev->features & NETIF_F_RXHASH)) + return; + + rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & + WX_RXD_RSSTYPE_MASK; + + if (!rss_type) + return; + + skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), + (WX_RSS_L4_TYPES_MASK & (1ul << rss_type)) ? + PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); +} + +/** + * wx_rx_checksum - indicate in skb if hw indicated a good cksum + * @ring: structure containing ring specific data + * @rx_desc: current Rx descriptor being processed + * @skb: skb currently being received and modified + **/ +static void wx_rx_checksum(struct wx_ring *ring, + union wx_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct wx_dec_ptype dptype = wx_decode_ptype(WX_RXD_PKTTYPE(rx_desc)); + + skb_checksum_none_assert(skb); + /* Rx csum disabled */ + if (!(ring->netdev->features & NETIF_F_RXCSUM)) + return; + + /* if IPv4 header checksum error */ + if ((wx_test_staterr(rx_desc, WX_RXD_STAT_IPCS) && + wx_test_staterr(rx_desc, WX_RXD_ERR_IPE)) || + (wx_test_staterr(rx_desc, WX_RXD_STAT_OUTERIPCS) && + wx_test_staterr(rx_desc, WX_RXD_ERR_OUTERIPER))) { + ring->rx_stats.csum_err++; + return; + } + + /* L4 checksum offload flag must set for the below code to work */ + if (!wx_test_staterr(rx_desc, WX_RXD_STAT_L4CS)) + return; + + /* Hardware can't guarantee csum if IPv6 Dest Header found */ + if (dptype.prot != WX_DEC_PTYPE_PROT_SCTP && WX_RXD_IPV6EX(rx_desc)) + return; + + /* if L4 checksum error */ + if (wx_test_staterr(rx_desc, WX_RXD_ERR_TCPE)) { + ring->rx_stats.csum_err++; + return; + } + + /* It must be a TCP or UDP or SCTP packet with a valid checksum */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + + /* If there is an outer header present that might contain a checksum + * we need to bump the checksum level by 1 to reflect the fact that + * we are indicating we validated the inner checksum. 
+ */ + if (dptype.etype >= WX_DEC_PTYPE_ETYPE_IG) + __skb_incr_checksum_unnecessary(skb); + ring->rx_stats.csum_good_cnt++; +} + +static void wx_rx_vlan(struct wx_ring *ring, union wx_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u16 ethertype; + u8 idx = 0; + + if ((ring->netdev->features & + (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) && + wx_test_staterr(rx_desc, WX_RXD_STAT_VP)) { + idx = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & + 0x1c0) >> 6; + ethertype = ring->q_vector->wx->tpid[idx]; + __vlan_hwaccel_put_tag(skb, htons(ethertype), + le16_to_cpu(rx_desc->wb.upper.vlan)); + } +} + +/** + * wx_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * + * This function checks the ring, descriptor, and packet information in + * order to populate the hash, checksum, protocol, and + * other fields within the skb. + **/ +static void wx_process_skb_fields(struct wx_ring *rx_ring, + union wx_rx_desc *rx_desc, + struct sk_buff *skb) +{ + wx_rx_hash(rx_ring, rx_desc, skb); + wx_rx_checksum(rx_ring, rx_desc, skb); + wx_rx_vlan(rx_ring, rx_desc, skb); + skb_record_rx_queue(skb, rx_ring->queue_index); + skb->protocol = eth_type_trans(skb, rx_ring->netdev); +} + +/** + * wx_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf + * @q_vector: structure containing interrupt and ring information + * @rx_ring: rx descriptor ring to transact packets on + * @budget: Total limit on number of packets to process + * + * This function provides a "bounce buffer" approach to Rx interrupt + * processing. The advantage to this is that on systems that have + * expensive overhead for IOMMU access this provides a means of avoiding + * it by maintaining the mapping of the page to the system. + * + * Returns amount of work completed. 
+ **/ +static int wx_clean_rx_irq(struct wx_q_vector *q_vector, + struct wx_ring *rx_ring, + int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + u16 cleaned_count = wx_desc_unused(rx_ring); + + do { + struct wx_rx_buffer *rx_buffer; + union wx_rx_desc *rx_desc; + struct sk_buff *skb; + int rx_buffer_pgcnt; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= WX_RX_BUFFER_WRITE) { + wx_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + rx_desc = WX_RX_DESC(rx_ring, rx_ring->next_to_clean); + if (!wx_test_staterr(rx_desc, WX_RXD_STAT_DD)) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + rx_buffer = wx_get_rx_buffer(rx_ring, rx_desc, &skb, &rx_buffer_pgcnt); + + /* retrieve a buffer from the ring */ + skb = wx_build_skb(rx_ring, rx_buffer, rx_desc); + + /* exit if we failed to retrieve a buffer */ + if (!skb) { + break; + } + + wx_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt); + cleaned_count++; + + /* place incomplete frames back on ring for completion */ + if (wx_is_non_eop(rx_ring, rx_desc, skb)) + continue; + + /* verify the packet layout is correct */ + if (wx_cleanup_headers(rx_ring, rx_desc, skb)) + continue; + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + + /* populate checksum, timestamp, VLAN, and protocol */ + wx_process_skb_fields(rx_ring, rx_desc, skb); + napi_gro_receive(&q_vector->napi, skb); + + /* update budget accounting */ + total_rx_packets++; + } while (likely(total_rx_packets < budget)); + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + q_vector->rx.total_packets += total_rx_packets; + q_vector->rx.total_bytes += total_rx_bytes; + + return total_rx_packets; +} + +static struct netdev_queue *wx_txring_txq(const struct wx_ring *ring) +{ + return netdev_get_tx_queue(ring->netdev, ring->queue_index); +} + +/** + * wx_clean_tx_irq - Reclaim resources after transmit completes + * @q_vector: structure containing interrupt and ring information + * @tx_ring: tx ring to clean + * @napi_budget: Used to determine if we are in netpoll + **/ +static bool wx_clean_tx_irq(struct wx_q_vector *q_vector, + struct wx_ring *tx_ring, int napi_budget) +{ + unsigned int budget = q_vector->wx->tx_work_limit; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int i = tx_ring->next_to_clean; + struct wx_tx_buffer *tx_buffer; + union wx_tx_desc *tx_desc; + + if (!netif_carrier_ok(tx_ring->netdev)) + return true; + + tx_buffer = &tx_ring->tx_buffer_info[i]; + tx_desc = WX_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + do { + union wx_tx_desc *eop_desc = tx_buffer->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + smp_rmb(); + + /* if DD is not set pending work has not been completed */ + if (!(eop_desc->wb.status & cpu_to_le32(WX_TXD_STAT_DD))) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buffer->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buffer->bytecount; + total_packets += tx_buffer->gso_segs; + + /* free the skb */ + napi_consume_skb(tx_buffer->skb, napi_budget); + + /* unmap skb header data */ + 
dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + + /* clear tx_buffer data */ + dma_unmap_len_set(tx_buffer, len, 0); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = WX_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + } + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = WX_TX_DESC(tx_ring, 0); + } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + i += tx_ring->count; + tx_ring->next_to_clean = i; + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.packets += total_packets; + u64_stats_update_end(&tx_ring->syncp); + q_vector->tx.total_bytes += total_bytes; + q_vector->tx.total_packets += total_packets; + + netdev_tx_completed_queue(wx_txring_txq(tx_ring), + total_packets, total_bytes); + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && + (wx_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. + */ + smp_mb(); + + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) && + netif_running(tx_ring->netdev)) + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); + } + + return !!budget; +} + +/** + * wx_poll - NAPI polling RX/TX cleanup routine + * @napi: napi struct with our devices info in it + * @budget: amount of work driver is allowed to do this pass, in packets + * + * This function will clean all queues associated with a q_vector. 
+ **/ +static int wx_poll(struct napi_struct *napi, int budget) +{ + struct wx_q_vector *q_vector = container_of(napi, struct wx_q_vector, napi); + int per_ring_budget, work_done = 0; + struct wx *wx = q_vector->wx; + bool clean_complete = true; + struct wx_ring *ring; + + wx_for_each_ring(ring, q_vector->tx) { + if (!wx_clean_tx_irq(q_vector, ring, budget)) + clean_complete = false; + } + + /* Exit if we are called by netpoll */ + if (budget <= 0) + return budget; + + /* attempt to distribute budget to each queue fairly, but don't allow + * the budget to go below 1 because we'll exit polling + */ + if (q_vector->rx.count > 1) + per_ring_budget = max(budget / q_vector->rx.count, 1); + else + per_ring_budget = budget; + + wx_for_each_ring(ring, q_vector->rx) { + int cleaned = wx_clean_rx_irq(q_vector, ring, per_ring_budget); + + work_done += cleaned; + if (cleaned >= per_ring_budget) + clean_complete = false; + } + + /* If all work not completed, return budget and keep polling */ + if (!clean_complete) + return budget; + + /* all work done, exit the polling mode */ + if (likely(napi_complete_done(napi, work_done))) { + if (netif_running(wx->netdev)) + wx_intr_enable(wx, WX_INTR_Q(q_vector->v_idx)); + } + + return min(work_done, budget - 1); +} + +static int wx_maybe_stop_tx(struct wx_ring *tx_ring, u16 size) +{ + if (likely(wx_desc_unused(tx_ring) >= size)) + return 0; + + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + + /* For the next check */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (likely(wx_desc_unused(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! - use start_queue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + + return 0; +} + +static u32 wx_tx_cmd_type(u32 tx_flags) +{ + /* set type for advanced descriptor with frame checksum insertion */ + u32 cmd_type = WX_TXD_DTYP_DATA | WX_TXD_IFCS; + + /* set HW vlan bit if vlan is present */ + cmd_type |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_HW_VLAN, WX_TXD_VLE); + /* set segmentation enable bits for TSO/FSO */ + cmd_type |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_TSO, WX_TXD_TSE); + /* set timestamp bit if present */ + cmd_type |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_TSTAMP, WX_TXD_MAC_TSTAMP); + cmd_type |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_LINKSEC, WX_TXD_LINKSEC); + + return cmd_type; +} + +static void wx_tx_olinfo_status(union wx_tx_desc *tx_desc, + u32 tx_flags, unsigned int paylen) +{ + u32 olinfo_status = paylen << WX_TXD_PAYLEN_SHIFT; + + /* enable L4 checksum for TSO and TX checksum offload */ + olinfo_status |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_CSUM, WX_TXD_L4CS); + /* enable IPv4 checksum for TSO */ + olinfo_status |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_IPV4, WX_TXD_IIPCS); + /* enable outer IPv4 checksum for TSO */ + olinfo_status |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_OUTER_IPV4, + WX_TXD_EIPCS); + /* Check Context must be set if Tx switch is enabled, which it + * always is for case where virtual functions are running + */ + olinfo_status |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_CC, WX_TXD_CC); + olinfo_status |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_IPSEC, + WX_TXD_IPSEC); + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); +} + +static void wx_tx_map(struct wx_ring *tx_ring, + struct wx_tx_buffer *first, + const u8 hdr_len) +{ + struct sk_buff *skb = first->skb; + struct wx_tx_buffer *tx_buffer; + u32 tx_flags = first->tx_flags; + u16 i = tx_ring->next_to_use; + unsigned int data_len, size; + union 
wx_tx_desc *tx_desc; + skb_frag_t *frag; + dma_addr_t dma; + u32 cmd_type; + + cmd_type = wx_tx_cmd_type(tx_flags); + tx_desc = WX_TX_DESC(tx_ring, i); + wx_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len); + + size = skb_headlen(skb); + data_len = skb->data_len; + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + tx_buffer = first; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + while (unlikely(size > WX_MAX_DATA_PER_TXD)) { + tx_desc->read.cmd_type_len = + cpu_to_le32(cmd_type ^ WX_MAX_DATA_PER_TXD); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = WX_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + dma += WX_MAX_DATA_PER_TXD; + size -= WX_MAX_DATA_PER_TXD; + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + } + + if (likely(!data_len)) + break; + + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = WX_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + size = skb_frag_size(frag); + + data_len -= size; + + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, + DMA_TO_DEVICE); + + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + /* write last descriptor with RS and EOP bits */ + cmd_type |= size | WX_TXD_EOP | WX_TXD_RS; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + + netdev_tx_sent_queue(wx_txring_txq(tx_ring), first->bytecount); + + skb_tx_timestamp(skb); + + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. (Only applicable for weak-ordered + * memory model archs, such as IA-64). + * + * We also need this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. + */ + wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + i++; + if (i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + wx_maybe_stop_tx(tx_ring, DESC_NEEDED); + + if (netif_xmit_stopped(wx_txring_txq(tx_ring)) || !netdev_xmit_more()) + writel(i, tx_ring->tail); + + return; +dma_error: + dev_err(tx_ring->dev, "TX DMA map failed\n"); + + /* clear dma mappings for failed tx_buffer_info map */ + for (;;) { + tx_buffer = &tx_ring->tx_buffer_info[i]; + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + if (tx_buffer == first) + break; + if (i == 0) + i += tx_ring->count; + i--; + } + + dev_kfree_skb_any(first->skb); + first->skb = NULL; + + tx_ring->next_to_use = i; +} + +static void wx_tx_ctxtdesc(struct wx_ring *tx_ring, u32 vlan_macip_lens, + u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx) +{ + struct wx_tx_context_desc *context_desc; + u16 i = tx_ring->next_to_use; + + context_desc = WX_TX_CTXTDESC(tx_ring, i); + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? 
i : 0; + + /* set bits to identify this as an advanced context descriptor */ + type_tucmd |= WX_TXD_DTYP_CTXT; + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); + context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof); + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); +} + +static void wx_get_ipv6_proto(struct sk_buff *skb, int offset, u8 *nexthdr) +{ + struct ipv6hdr *hdr = (struct ipv6hdr *)(skb->data + offset); + + *nexthdr = hdr->nexthdr; + offset += sizeof(struct ipv6hdr); + while (ipv6_ext_hdr(*nexthdr)) { + struct ipv6_opt_hdr _hdr, *hp; + + if (*nexthdr == NEXTHDR_NONE) + return; + hp = skb_header_pointer(skb, offset, sizeof(_hdr), &_hdr); + if (!hp) + return; + if (*nexthdr == NEXTHDR_FRAGMENT) + break; + *nexthdr = hp->nexthdr; + } +} + +union network_header { + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + void *raw; +}; + +static u8 wx_encode_tx_desc_ptype(const struct wx_tx_buffer *first) +{ + u8 tun_prot = 0, l4_prot = 0, ptype = 0; + struct sk_buff *skb = first->skb; + + if (skb->encapsulation) { + union network_header hdr; + + switch (first->protocol) { + case htons(ETH_P_IP): + tun_prot = ip_hdr(skb)->protocol; + ptype = WX_PTYPE_TUN_IPV4; + break; + case htons(ETH_P_IPV6): + wx_get_ipv6_proto(skb, skb_network_offset(skb), &tun_prot); + ptype = WX_PTYPE_TUN_IPV6; + break; + default: + return ptype; + } + + if (tun_prot == IPPROTO_IPIP) { + hdr.raw = (void *)inner_ip_hdr(skb); + ptype |= WX_PTYPE_PKT_IPIP; + } else if (tun_prot == IPPROTO_UDP) { + hdr.raw = (void *)inner_ip_hdr(skb); + if (skb->inner_protocol_type != ENCAP_TYPE_ETHER || + skb->inner_protocol != htons(ETH_P_TEB)) { + ptype |= WX_PTYPE_PKT_IG; + } else { + if (((struct ethhdr *)skb_inner_mac_header(skb))->h_proto + == htons(ETH_P_8021Q)) + ptype |= WX_PTYPE_PKT_IGMV; + else + ptype |= WX_PTYPE_PKT_IGM; + } + + } else if (tun_prot == IPPROTO_GRE) { + hdr.raw = (void *)inner_ip_hdr(skb); + if (skb->inner_protocol == htons(ETH_P_IP) || + skb->inner_protocol == htons(ETH_P_IPV6)) { + ptype |= WX_PTYPE_PKT_IG; + } else { + if (((struct ethhdr *)skb_inner_mac_header(skb))->h_proto + == htons(ETH_P_8021Q)) + ptype |= WX_PTYPE_PKT_IGMV; + else + ptype |= WX_PTYPE_PKT_IGM; + } + } else { + return ptype; + } + + switch (hdr.ipv4->version) { + case IPVERSION: + l4_prot = hdr.ipv4->protocol; + break; + case 6: + wx_get_ipv6_proto(skb, skb_inner_network_offset(skb), &l4_prot); + ptype |= WX_PTYPE_PKT_IPV6; + break; + default: + return ptype; + } + } else { + switch (first->protocol) { + case htons(ETH_P_IP): + l4_prot = ip_hdr(skb)->protocol; + ptype = WX_PTYPE_PKT_IP; + break; + case htons(ETH_P_IPV6): + wx_get_ipv6_proto(skb, skb_network_offset(skb), &l4_prot); + ptype = WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6; + break; + default: + return WX_PTYPE_PKT_MAC | WX_PTYPE_TYP_MAC; + } + } + switch (l4_prot) { + case IPPROTO_TCP: + ptype |= WX_PTYPE_TYP_TCP; + break; + case IPPROTO_UDP: + ptype |= WX_PTYPE_TYP_UDP; + break; + case IPPROTO_SCTP: + ptype |= WX_PTYPE_TYP_SCTP; + break; + default: + ptype |= WX_PTYPE_TYP_IP; + break; + } + + return ptype; +} + +static int wx_tso(struct wx_ring *tx_ring, struct wx_tx_buffer *first, + u8 *hdr_len, u8 ptype) +{ + u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; + struct net_device *netdev = tx_ring->netdev; + u32 l4len, tunhdr_eiplen_tunlen = 0; + struct sk_buff *skb = first->skb; + bool enc = skb->encapsulation; + struct ipv6hdr *ipv6h; + struct tcphdr *tcph; + struct iphdr *iph; + u8 tun_prot = 0; + int 
err; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + /* indicates the inner headers in the skbuff are valid. */ + iph = enc ? inner_ip_hdr(skb) : ip_hdr(skb); + if (iph->version == 4) { + tcph = enc ? inner_tcp_hdr(skb) : tcp_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcph->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, 0); + first->tx_flags |= WX_TX_FLAGS_TSO | + WX_TX_FLAGS_CSUM | + WX_TX_FLAGS_IPV4 | + WX_TX_FLAGS_CC; + } else if (iph->version == 6 && skb_is_gso_v6(skb)) { + ipv6h = enc ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); + tcph = enc ? inner_tcp_hdr(skb) : tcp_hdr(skb); + ipv6h->payload_len = 0; + tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, + &ipv6h->daddr, 0, + IPPROTO_TCP, 0); + first->tx_flags |= WX_TX_FLAGS_TSO | + WX_TX_FLAGS_CSUM | + WX_TX_FLAGS_CC; + } + + /* compute header lengths */ + l4len = enc ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb); + *hdr_len = enc ? (skb_inner_transport_header(skb) - skb->data) : + skb_transport_offset(skb); + *hdr_len += l4len; + + /* update gso size and bytecount with header size */ + first->gso_segs = skb_shinfo(skb)->gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; + + /* mss_l4len_id: use 0 as index for TSO */ + mss_l4len_idx = l4len << WX_TXD_L4LEN_SHIFT; + mss_l4len_idx |= skb_shinfo(skb)->gso_size << WX_TXD_MSS_SHIFT; + + /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ + if (enc) { + switch (first->protocol) { + case htons(ETH_P_IP): + tun_prot = ip_hdr(skb)->protocol; + first->tx_flags |= WX_TX_FLAGS_OUTER_IPV4; + break; + case htons(ETH_P_IPV6): + tun_prot = ipv6_hdr(skb)->nexthdr; + break; + default: + break; + } + switch (tun_prot) { + case IPPROTO_UDP: + tunhdr_eiplen_tunlen = WX_TXD_TUNNEL_UDP; + tunhdr_eiplen_tunlen |= ((skb_network_header_len(skb) >> 2) << + WX_TXD_OUTER_IPLEN_SHIFT) | + (((skb_inner_mac_header(skb) - + skb_transport_header(skb)) >> 1) << + WX_TXD_TUNNEL_LEN_SHIFT); + break; + case IPPROTO_GRE: + tunhdr_eiplen_tunlen = WX_TXD_TUNNEL_GRE; + tunhdr_eiplen_tunlen |= ((skb_network_header_len(skb) >> 2) << + WX_TXD_OUTER_IPLEN_SHIFT) | + (((skb_inner_mac_header(skb) - + skb_transport_header(skb)) >> 1) << + WX_TXD_TUNNEL_LEN_SHIFT); + break; + case IPPROTO_IPIP: + tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb) - + (char *)ip_hdr(skb)) >> 2) << + WX_TXD_OUTER_IPLEN_SHIFT; + break; + default: + break; + } + vlan_macip_lens = skb_inner_network_header_len(skb) >> 1; + } else { + vlan_macip_lens = skb_network_header_len(skb) >> 1; + } + + vlan_macip_lens |= skb_network_offset(skb) << WX_TXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & WX_TX_FLAGS_VLAN_MASK; + + type_tucmd = ptype << 24; + if (skb->vlan_proto == htons(ETH_P_8021AD) && + netdev->features & NETIF_F_HW_VLAN_STAG_TX) + type_tucmd |= WX_SET_FLAG(first->tx_flags, + WX_TX_FLAGS_HW_VLAN, + 0x1 << WX_TXD_TAG_TPID_SEL_SHIFT); + wx_tx_ctxtdesc(tx_ring, vlan_macip_lens, tunhdr_eiplen_tunlen, + type_tucmd, mss_l4len_idx); + + return 1; +} + +static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first, + u8 ptype) +{ + u32 tunhdr_eiplen_tunlen = 0, vlan_macip_lens = 0; + struct net_device *netdev = tx_ring->netdev; + u32 mss_l4len_idx = 0, type_tucmd; + struct sk_buff *skb = first->skb; + u8 tun_prot = 0; + + if (skb->ip_summed != CHECKSUM_PARTIAL) { + if (!(first->tx_flags & WX_TX_FLAGS_HW_VLAN) && + !(first->tx_flags & WX_TX_FLAGS_CC)) + return; + vlan_macip_lens = 
skb_network_offset(skb) << + WX_TXD_MACLEN_SHIFT; + } else { + u8 l4_prot = 0; + union { + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + u8 *raw; + } network_hdr; + union { + struct tcphdr *tcphdr; + u8 *raw; + } transport_hdr; + + if (skb->encapsulation) { + network_hdr.raw = skb_inner_network_header(skb); + transport_hdr.raw = skb_inner_transport_header(skb); + vlan_macip_lens = skb_network_offset(skb) << + WX_TXD_MACLEN_SHIFT; + switch (first->protocol) { + case htons(ETH_P_IP): + tun_prot = ip_hdr(skb)->protocol; + break; + case htons(ETH_P_IPV6): + tun_prot = ipv6_hdr(skb)->nexthdr; + break; + default: + return; + } + switch (tun_prot) { + case IPPROTO_UDP: + tunhdr_eiplen_tunlen = WX_TXD_TUNNEL_UDP; + tunhdr_eiplen_tunlen |= + ((skb_network_header_len(skb) >> 2) << + WX_TXD_OUTER_IPLEN_SHIFT) | + (((skb_inner_mac_header(skb) - + skb_transport_header(skb)) >> 1) << + WX_TXD_TUNNEL_LEN_SHIFT); + break; + case IPPROTO_GRE: + tunhdr_eiplen_tunlen = WX_TXD_TUNNEL_GRE; + tunhdr_eiplen_tunlen |= ((skb_network_header_len(skb) >> 2) << + WX_TXD_OUTER_IPLEN_SHIFT) | + (((skb_inner_mac_header(skb) - + skb_transport_header(skb)) >> 1) << + WX_TXD_TUNNEL_LEN_SHIFT); + break; + case IPPROTO_IPIP: + tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb) - + (char *)ip_hdr(skb)) >> 2) << + WX_TXD_OUTER_IPLEN_SHIFT; + break; + default: + break; + } + + } else { + network_hdr.raw = skb_network_header(skb); + transport_hdr.raw = skb_transport_header(skb); + vlan_macip_lens = skb_network_offset(skb) << + WX_TXD_MACLEN_SHIFT; + } + + switch (network_hdr.ipv4->version) { + case IPVERSION: + vlan_macip_lens |= (transport_hdr.raw - network_hdr.raw) >> 1; + l4_prot = network_hdr.ipv4->protocol; + break; + case 6: + vlan_macip_lens |= (transport_hdr.raw - network_hdr.raw) >> 1; + l4_prot = network_hdr.ipv6->nexthdr; + break; + default: + break; + } + + switch (l4_prot) { + case IPPROTO_TCP: + mss_l4len_idx = (transport_hdr.tcphdr->doff * 4) << + WX_TXD_L4LEN_SHIFT; + break; + case IPPROTO_SCTP: + mss_l4len_idx = sizeof(struct sctphdr) << + WX_TXD_L4LEN_SHIFT; + break; + case IPPROTO_UDP: + mss_l4len_idx = sizeof(struct udphdr) << + WX_TXD_L4LEN_SHIFT; + break; + default: + break; + } + + /* update TX checksum flag */ + first->tx_flags |= WX_TX_FLAGS_CSUM; + } + first->tx_flags |= WX_TX_FLAGS_CC; + /* vlan_macip_lens: MACLEN, VLAN tag */ + vlan_macip_lens |= first->tx_flags & WX_TX_FLAGS_VLAN_MASK; + + type_tucmd = ptype << 24; + if (skb->vlan_proto == htons(ETH_P_8021AD) && + netdev->features & NETIF_F_HW_VLAN_STAG_TX) + type_tucmd |= WX_SET_FLAG(first->tx_flags, + WX_TX_FLAGS_HW_VLAN, + 0x1 << WX_TXD_TAG_TPID_SEL_SHIFT); + wx_tx_ctxtdesc(tx_ring, vlan_macip_lens, tunhdr_eiplen_tunlen, + type_tucmd, mss_l4len_idx); +} + +static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb, + struct wx_ring *tx_ring) +{ + u16 count = TXD_USE_COUNT(skb_headlen(skb)); + struct wx_tx_buffer *first; + u8 hdr_len = 0, ptype; + unsigned short f; + u32 tx_flags = 0; + int tso; + + /* need: 1 descriptor per page * PAGE_SIZE/WX_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/WX_MAX_DATA_PER_TXD, + * + 2 desc gap to keep tail from touching head, + * + 1 desc for context descriptor, + * otherwise try next time + */ + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)-> + frags[f])); + + if (wx_maybe_stop_tx(tx_ring, count + 3)) + return NETDEV_TX_BUSY; + + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + 
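	/* Worked example of the descriptor budget computed above (purely
	 * illustrative; the exact value of WX_MAX_DATA_PER_TXD is assumed
	 * here to be 16K): a 9000-byte linear head plus two 4K page frags
	 * gives count = TXD_USE_COUNT(9000) + 2 * TXD_USE_COUNT(4096)
	 * = 1 + 2 = 3, so wx_maybe_stop_tx() above is asked for 3 + 3 = 6
	 * free descriptors - three for the data, one for a possible context
	 * descriptor, and a two-descriptor gap to keep tail from touching
	 * head.
	 */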
first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + + /* if we have a HW VLAN tag being added default to the HW one */ + if (skb_vlan_tag_present(skb)) { + tx_flags |= skb_vlan_tag_get(skb) << WX_TX_FLAGS_VLAN_SHIFT; + tx_flags |= WX_TX_FLAGS_HW_VLAN; + } + + /* record initial flags and protocol */ + first->tx_flags = tx_flags; + first->protocol = vlan_get_protocol(skb); + + ptype = wx_encode_tx_desc_ptype(first); + + tso = wx_tso(tx_ring, first, &hdr_len, ptype); + if (tso < 0) + goto out_drop; + else if (!tso) + wx_tx_csum(tx_ring, first, ptype); + wx_tx_map(tx_ring, first, hdr_len); + + return NETDEV_TX_OK; +out_drop: + dev_kfree_skb_any(first->skb); + first->skb = NULL; + + return NETDEV_TX_OK; +} + +netdev_tx_t wx_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + unsigned int r_idx = skb->queue_mapping; + struct wx *wx = netdev_priv(netdev); + struct wx_ring *tx_ring; + + if (!netif_carrier_ok(netdev)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + /* The minimum packet size for olinfo paylen is 17 so pad the skb + * in order to meet this minimum size requirement. + */ + if (skb_put_padto(skb, 17)) + return NETDEV_TX_OK; + + if (r_idx >= wx->num_tx_queues) + r_idx = r_idx % wx->num_tx_queues; + tx_ring = wx->tx_ring[r_idx]; + + return wx_xmit_frame_ring(skb, tx_ring); +} +EXPORT_SYMBOL(wx_xmit_frame); + +void wx_napi_enable_all(struct wx *wx) +{ + struct wx_q_vector *q_vector; + int q_idx; + + for (q_idx = 0; q_idx < wx->num_q_vectors; q_idx++) { + q_vector = wx->q_vector[q_idx]; + napi_enable(&q_vector->napi); + } +} +EXPORT_SYMBOL(wx_napi_enable_all); + +void wx_napi_disable_all(struct wx *wx) +{ + struct wx_q_vector *q_vector; + int q_idx; + + for (q_idx = 0; q_idx < wx->num_q_vectors; q_idx++) { + q_vector = wx->q_vector[q_idx]; + napi_disable(&q_vector->napi); + } +} +EXPORT_SYMBOL(wx_napi_disable_all); + +/** + * wx_set_rss_queues: Allocate queues for RSS + * @wx: board private structure to initialize + * + * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try + * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. + * + **/ +static void wx_set_rss_queues(struct wx *wx) +{ + wx->num_rx_queues = wx->mac.max_rx_queues; + wx->num_tx_queues = wx->mac.max_tx_queues; +} + +static void wx_set_num_queues(struct wx *wx) +{ + /* Start with base case */ + wx->num_rx_queues = 1; + wx->num_tx_queues = 1; + wx->queues_per_pool = 1; + + wx_set_rss_queues(wx); +} + +/** + * wx_acquire_msix_vectors - acquire MSI-X vectors + * @wx: board private structure + * + * Attempts to acquire a suitable range of MSI-X vector interrupts. Will + * return a negative error code if unable to acquire MSI-X vectors for any + * reason. + */ +static int wx_acquire_msix_vectors(struct wx *wx) +{ + struct irq_affinity affd = {0, }; + int nvecs, i; + + nvecs = min_t(int, num_online_cpus(), wx->mac.max_msix_vectors); + + wx->msix_entries = kcalloc(nvecs, + sizeof(struct msix_entry), + GFP_KERNEL); + if (!wx->msix_entries) + return -ENOMEM; + + nvecs = pci_alloc_irq_vectors_affinity(wx->pdev, nvecs, + nvecs, + PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, + &affd); + if (nvecs < 0) { + wx_err(wx, "Failed to allocate MSI-X interrupts. 
Err: %d\n", nvecs); + kfree(wx->msix_entries); + wx->msix_entries = NULL; + return nvecs; + } + + for (i = 0; i < nvecs; i++) { + wx->msix_entries[i].entry = i; + wx->msix_entries[i].vector = pci_irq_vector(wx->pdev, i); + } + + /* one for msix_other */ + nvecs -= 1; + wx->num_q_vectors = nvecs; + wx->num_rx_queues = nvecs; + wx->num_tx_queues = nvecs; + + return 0; +} + +/** + * wx_set_interrupt_capability - set MSI-X or MSI if supported + * @wx: board private structure to initialize + * + * Attempt to configure the interrupts using the best available + * capabilities of the hardware and the kernel. + **/ +static int wx_set_interrupt_capability(struct wx *wx) +{ + struct pci_dev *pdev = wx->pdev; + int nvecs, ret; + + /* We will try to get MSI-X interrupts first */ + ret = wx_acquire_msix_vectors(wx); + if (ret == 0 || (ret == -ENOMEM)) + return ret; + + wx->num_rx_queues = 1; + wx->num_tx_queues = 1; + wx->num_q_vectors = 1; + + /* minmum one for queue, one for misc*/ + nvecs = 1; + nvecs = pci_alloc_irq_vectors(pdev, nvecs, + nvecs, PCI_IRQ_MSI | PCI_IRQ_LEGACY); + if (nvecs == 1) { + if (pdev->msi_enabled) + wx_err(wx, "Fallback to MSI.\n"); + else + wx_err(wx, "Fallback to LEGACY.\n"); + } else { + wx_err(wx, "Failed to allocate MSI/LEGACY interrupts. Error: %d\n", nvecs); + return nvecs; + } + + pdev->irq = pci_irq_vector(pdev, 0); + + return 0; +} + +/** + * wx_cache_ring_rss - Descriptor ring to register mapping for RSS + * @wx: board private structure to initialize + * + * Cache the descriptor ring offsets for RSS, ATR, FCoE, and SR-IOV. + * + **/ +static void wx_cache_ring_rss(struct wx *wx) +{ + u16 i; + + for (i = 0; i < wx->num_rx_queues; i++) + wx->rx_ring[i]->reg_idx = i; + + for (i = 0; i < wx->num_tx_queues; i++) + wx->tx_ring[i]->reg_idx = i; +} + +static void wx_add_ring(struct wx_ring *ring, struct wx_ring_container *head) +{ + ring->next = head->ring; + head->ring = ring; + head->count++; +} + +/** + * wx_alloc_q_vector - Allocate memory for a single interrupt vector + * @wx: board private structure to initialize + * @v_count: q_vectors allocated on wx, used for ring interleaving + * @v_idx: index of vector in wx struct + * @txr_count: total number of Tx rings to allocate + * @txr_idx: index of first Tx ring to allocate + * @rxr_count: total number of Rx rings to allocate + * @rxr_idx: index of first Rx ring to allocate + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. + **/ +static int wx_alloc_q_vector(struct wx *wx, + unsigned int v_count, unsigned int v_idx, + unsigned int txr_count, unsigned int txr_idx, + unsigned int rxr_count, unsigned int rxr_idx) +{ + struct wx_q_vector *q_vector; + int ring_count, default_itr; + struct wx_ring *ring; + + /* note this will allocate space for the ring structure as well! */ + ring_count = txr_count + rxr_count; + + q_vector = kzalloc(struct_size(q_vector, ring, ring_count), + GFP_KERNEL); + if (!q_vector) + return -ENOMEM; + + /* initialize NAPI */ + netif_napi_add(wx->netdev, &q_vector->napi, + wx_poll); + + /* tie q_vector and wx together */ + wx->q_vector[v_idx] = q_vector; + q_vector->wx = wx; + q_vector->v_idx = v_idx; + if (cpu_online(v_idx)) + q_vector->numa_node = cpu_to_node(v_idx); + + /* initialize pointer to rings */ + ring = q_vector->ring; + + if (wx->mac.type == wx_mac_sp) + default_itr = WX_12K_ITR; + else + default_itr = WX_7K_ITR; + /* initialize ITR */ + if (txr_count && !rxr_count) + /* tx only vector */ + q_vector->itr = wx->tx_itr_setting ? 
+ default_itr : wx->tx_itr_setting; + else + /* rx or rx/tx vector */ + q_vector->itr = wx->rx_itr_setting ? + default_itr : wx->rx_itr_setting; + + while (txr_count) { + /* assign generic ring traits */ + ring->dev = &wx->pdev->dev; + ring->netdev = wx->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + wx_add_ring(ring, &q_vector->tx); + + /* apply Tx specific ring traits */ + ring->count = wx->tx_ring_count; + + ring->queue_index = txr_idx; + + /* assign ring to wx */ + wx->tx_ring[txr_idx] = ring; + + /* update count and index */ + txr_count--; + txr_idx += v_count; + + /* push pointer to next ring */ + ring++; + } + + while (rxr_count) { + /* assign generic ring traits */ + ring->dev = &wx->pdev->dev; + ring->netdev = wx->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Rx values */ + wx_add_ring(ring, &q_vector->rx); + + /* apply Rx specific ring traits */ + ring->count = wx->rx_ring_count; + ring->queue_index = rxr_idx; + + /* assign ring to wx */ + wx->rx_ring[rxr_idx] = ring; + + /* update count and index */ + rxr_count--; + rxr_idx += v_count; + + /* push pointer to next ring */ + ring++; + } + + return 0; +} + +/** + * wx_free_q_vector - Free memory allocated for specific interrupt vector + * @wx: board private structure to initialize + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void wx_free_q_vector(struct wx *wx, int v_idx) +{ + struct wx_q_vector *q_vector = wx->q_vector[v_idx]; + struct wx_ring *ring; + + wx_for_each_ring(ring, q_vector->tx) + wx->tx_ring[ring->queue_index] = NULL; + + wx_for_each_ring(ring, q_vector->rx) + wx->rx_ring[ring->queue_index] = NULL; + + wx->q_vector[v_idx] = NULL; + netif_napi_del(&q_vector->napi); + kfree_rcu(q_vector, rcu); +} + +/** + * wx_alloc_q_vectors - Allocate memory for interrupt vectors + * @wx: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. + **/ +static int wx_alloc_q_vectors(struct wx *wx) +{ + unsigned int rxr_idx = 0, txr_idx = 0, v_idx = 0; + unsigned int rxr_remaining = wx->num_rx_queues; + unsigned int txr_remaining = wx->num_tx_queues; + unsigned int q_vectors = wx->num_q_vectors; + int rqpv, tqpv; + int err; + + for (; v_idx < q_vectors; v_idx++) { + rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); + tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); + err = wx_alloc_q_vector(wx, q_vectors, v_idx, + tqpv, txr_idx, + rqpv, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining -= rqpv; + txr_remaining -= tqpv; + rxr_idx++; + txr_idx++; + } + + return 0; + +err_out: + wx->num_tx_queues = 0; + wx->num_rx_queues = 0; + wx->num_q_vectors = 0; + + while (v_idx--) + wx_free_q_vector(wx, v_idx); + + return -ENOMEM; +} + +/** + * wx_free_q_vectors - Free memory allocated for interrupt vectors + * @wx: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. 
+ **/ +static void wx_free_q_vectors(struct wx *wx) +{ + int v_idx = wx->num_q_vectors; + + wx->num_tx_queues = 0; + wx->num_rx_queues = 0; + wx->num_q_vectors = 0; + + while (v_idx--) + wx_free_q_vector(wx, v_idx); +} + +void wx_reset_interrupt_capability(struct wx *wx) +{ + struct pci_dev *pdev = wx->pdev; + + if (!pdev->msi_enabled && !pdev->msix_enabled) + return; + + if (pdev->msix_enabled) { + kfree(wx->msix_entries); + wx->msix_entries = NULL; + } + pci_free_irq_vectors(wx->pdev); +} +EXPORT_SYMBOL(wx_reset_interrupt_capability); + +/** + * wx_clear_interrupt_scheme - Clear the current interrupt scheme settings + * @wx: board private structure to clear interrupt scheme on + * + * We go through and clear interrupt specific resources and reset the structure + * to pre-load conditions + **/ +void wx_clear_interrupt_scheme(struct wx *wx) +{ + wx_free_q_vectors(wx); + wx_reset_interrupt_capability(wx); +} +EXPORT_SYMBOL(wx_clear_interrupt_scheme); + +int wx_init_interrupt_scheme(struct wx *wx) +{ + int ret; + + /* Number of supported queues */ + wx_set_num_queues(wx); + + /* Set interrupt mode */ + ret = wx_set_interrupt_capability(wx); + if (ret) { + wx_err(wx, "Allocate irq vectors for failed.\n"); + return ret; + } + + /* Allocate memory for queues */ + ret = wx_alloc_q_vectors(wx); + if (ret) { + wx_err(wx, "Unable to allocate memory for queue vectors.\n"); + wx_reset_interrupt_capability(wx); + return ret; + } + + wx_cache_ring_rss(wx); + + return 0; +} +EXPORT_SYMBOL(wx_init_interrupt_scheme); + +irqreturn_t wx_msix_clean_rings(int __always_unused irq, void *data) +{ + struct wx_q_vector *q_vector = data; + + /* EIAM disabled interrupts (on this vector) for us */ + if (q_vector->rx.ring || q_vector->tx.ring) + napi_schedule_irqoff(&q_vector->napi); + + return IRQ_HANDLED; +} +EXPORT_SYMBOL(wx_msix_clean_rings); + +void wx_free_irq(struct wx *wx) +{ + struct pci_dev *pdev = wx->pdev; + int vector; + + if (!(pdev->msix_enabled)) { + free_irq(pdev->irq, wx); + return; + } + + for (vector = 0; vector < wx->num_q_vectors; vector++) { + struct wx_q_vector *q_vector = wx->q_vector[vector]; + struct msix_entry *entry = &wx->msix_entries[vector]; + + /* free only the irqs that were actually requested */ + if (!q_vector->rx.ring && !q_vector->tx.ring) + continue; + + free_irq(entry->vector, q_vector); + } + + if (wx->mac.type == wx_mac_em) + free_irq(wx->msix_entries[vector].vector, wx); +} +EXPORT_SYMBOL(wx_free_irq); + +/** + * wx_setup_isb_resources - allocate interrupt status resources + * @wx: board private structure + * + * Return 0 on success, negative on failure + **/ +int wx_setup_isb_resources(struct wx *wx) +{ + struct pci_dev *pdev = wx->pdev; + + wx->isb_mem = dma_alloc_coherent(&pdev->dev, + sizeof(u32) * 4, + &wx->isb_dma, + GFP_KERNEL); + if (!wx->isb_mem) { + wx_err(wx, "Alloc isb_mem failed\n"); + return -ENOMEM; + } + + return 0; +} +EXPORT_SYMBOL(wx_setup_isb_resources); + +/** + * wx_free_isb_resources - allocate all queues Rx resources + * @wx: board private structure + * + * Return 0 on success, negative on failure + **/ +void wx_free_isb_resources(struct wx *wx) +{ + struct pci_dev *pdev = wx->pdev; + + dma_free_coherent(&pdev->dev, sizeof(u32) * 4, + wx->isb_mem, wx->isb_dma); + wx->isb_mem = NULL; +} +EXPORT_SYMBOL(wx_free_isb_resources); + +u32 wx_misc_isb(struct wx *wx, enum wx_isb_idx idx) +{ + u32 cur_tag = 0; + + cur_tag = wx->isb_mem[WX_ISB_HEADER]; + wx->isb_tag[idx] = cur_tag; + + return (__force u32)cpu_to_le32(wx->isb_mem[idx]); +} 
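/* A minimal sketch of a caller, assuming an interrupt-status index such as
 * WX_ISB_MISC exists in enum wx_isb_idx (only WX_ISB_HEADER is visible in
 * this file, so that name is an assumption):
 *
 *	u32 misc_status = wx_misc_isb(wx, WX_ISB_MISC);
 *
 * Each call snapshots the DMA-written header word into wx->isb_tag[idx]
 * before returning the requested word from the coherent status block that
 * wx_setup_isb_resources() allocated.
 */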
+EXPORT_SYMBOL(wx_misc_isb); + +/** + * wx_set_ivar - set the IVAR registers, mapping interrupt causes to vectors + * @wx: pointer to wx struct + * @direction: 0 for Rx, 1 for Tx, -1 for other causes + * @queue: queue to map the corresponding interrupt to + * @msix_vector: the vector to map to the corresponding queue + * + **/ +static void wx_set_ivar(struct wx *wx, s8 direction, + u16 queue, u16 msix_vector) +{ + u32 ivar, index; + + if (direction == -1) { + /* other causes */ + msix_vector |= WX_PX_IVAR_ALLOC_VAL; + index = 0; + ivar = rd32(wx, WX_PX_MISC_IVAR); + ivar &= ~(0xFF << index); + ivar |= (msix_vector << index); + wr32(wx, WX_PX_MISC_IVAR, ivar); + } else { + /* tx or rx causes */ + msix_vector |= WX_PX_IVAR_ALLOC_VAL; + index = ((16 * (queue & 1)) + (8 * direction)); + ivar = rd32(wx, WX_PX_IVAR(queue >> 1)); + ivar &= ~(0xFF << index); + ivar |= (msix_vector << index); + wr32(wx, WX_PX_IVAR(queue >> 1), ivar); + } +} + +/** + * wx_write_eitr - write EITR register in hardware specific way + * @q_vector: structure containing interrupt and ring information + * + * This function is made to be called by ethtool and by the driver + * when it needs to update EITR registers at runtime. Hardware + * specific quirks/differences are taken care of here. + */ +static void wx_write_eitr(struct wx_q_vector *q_vector) +{ + struct wx *wx = q_vector->wx; + int v_idx = q_vector->v_idx; + u32 itr_reg; + + if (wx->mac.type == wx_mac_sp) + itr_reg = q_vector->itr & WX_SP_MAX_EITR; + else + itr_reg = q_vector->itr & WX_EM_MAX_EITR; + + itr_reg |= WX_PX_ITR_CNT_WDIS; + + wr32(wx, WX_PX_ITR(v_idx), itr_reg); +} + +/** + * wx_configure_vectors - Configure vectors for hardware + * @wx: board private structure + * + * wx_configure_vectors sets up the hardware to properly generate MSI-X/MSI/LEGACY + * interrupts. + **/ +void wx_configure_vectors(struct wx *wx) +{ + struct pci_dev *pdev = wx->pdev; + u32 eitrsel = 0; + u16 v_idx; + + if (pdev->msix_enabled) { + /* Populate MSIX to EITR Select */ + wr32(wx, WX_PX_ITRSEL, eitrsel); + /* use EIAM to auto-mask when MSI-X interrupt is asserted + * this saves a register write for every interrupt + */ + wr32(wx, WX_PX_GPIE, WX_PX_GPIE_MODEL); + } else { + /* legacy interrupts, use EIAM to auto-mask when reading EICR, + * specifically only auto mask tx and rx interrupts. + */ + wr32(wx, WX_PX_GPIE, 0); + } + + /* Populate the IVAR table and set the ITR values to the + * corresponding register. 
+ */ + for (v_idx = 0; v_idx < wx->num_q_vectors; v_idx++) { + struct wx_q_vector *q_vector = wx->q_vector[v_idx]; + struct wx_ring *ring; + + wx_for_each_ring(ring, q_vector->rx) + wx_set_ivar(wx, 0, ring->reg_idx, v_idx); + + wx_for_each_ring(ring, q_vector->tx) + wx_set_ivar(wx, 1, ring->reg_idx, v_idx); + + wx_write_eitr(q_vector); + } + + wx_set_ivar(wx, -1, 0, v_idx); + if (pdev->msix_enabled) + wr32(wx, WX_PX_ITR(v_idx), 1950); +} +EXPORT_SYMBOL(wx_configure_vectors); + +/** + * wx_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: ring to free buffers from + **/ +static void wx_clean_rx_ring(struct wx_ring *rx_ring) +{ + struct wx_rx_buffer *rx_buffer; + u16 i = rx_ring->next_to_clean; + + rx_buffer = &rx_ring->rx_buffer_info[i]; + + /* Free all the Rx ring sk_buffs */ + while (i != rx_ring->next_to_alloc) { + if (rx_buffer->skb) { + struct sk_buff *skb = rx_buffer->skb; + + if (WX_CB(skb)->page_released) + page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false); + + dev_kfree_skb(skb); + } + + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. + */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + WX_RX_BUFSZ, + DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false); + + i++; + rx_buffer++; + if (i == rx_ring->count) { + i = 0; + rx_buffer = rx_ring->rx_buffer_info; + } + } + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +} + +/** + * wx_clean_all_rx_rings - Free Rx Buffers for all queues + * @wx: board private structure + **/ +void wx_clean_all_rx_rings(struct wx *wx) +{ + int i; + + for (i = 0; i < wx->num_rx_queues; i++) + wx_clean_rx_ring(wx->rx_ring[i]); +} +EXPORT_SYMBOL(wx_clean_all_rx_rings); + +/** + * wx_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +static void wx_free_rx_resources(struct wx_ring *rx_ring) +{ + wx_clean_rx_ring(rx_ring); + kvfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!rx_ring->desc) + return; + + dma_free_coherent(rx_ring->dev, rx_ring->size, + rx_ring->desc, rx_ring->dma); + + rx_ring->desc = NULL; + + if (rx_ring->page_pool) { + page_pool_destroy(rx_ring->page_pool); + rx_ring->page_pool = NULL; + } +} + +/** + * wx_free_all_rx_resources - Free Rx Resources for All Queues + * @wx: pointer to hardware structure + * + * Free all receive software resources + **/ +static void wx_free_all_rx_resources(struct wx *wx) +{ + int i; + + for (i = 0; i < wx->num_rx_queues; i++) + wx_free_rx_resources(wx->rx_ring[i]); +} + +/** + * wx_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + **/ +static void wx_clean_tx_ring(struct wx_ring *tx_ring) +{ + struct wx_tx_buffer *tx_buffer; + u16 i = tx_ring->next_to_clean; + + tx_buffer = &tx_ring->tx_buffer_info[i]; + + while (i != tx_ring->next_to_use) { + union wx_tx_desc *eop_desc, *tx_desc; + + /* Free all the Tx ring sk_buffs */ + dev_kfree_skb_any(tx_buffer->skb); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + + /* check for eop_desc to determine the end of the packet */ + eop_desc = tx_buffer->next_to_watch; + tx_desc = WX_TX_DESC(tx_ring, i); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + 
tx_buffer++; + tx_desc++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = WX_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + } + } + + netdev_tx_reset_queue(wx_txring_txq(tx_ring)); + + /* reset next_to_use and next_to_clean */ + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; +} + +/** + * wx_clean_all_tx_rings - Free Tx Buffers for all queues + * @wx: board private structure + **/ +void wx_clean_all_tx_rings(struct wx *wx) +{ + int i; + + for (i = 0; i < wx->num_tx_queues; i++) + wx_clean_tx_ring(wx->tx_ring[i]); +} +EXPORT_SYMBOL(wx_clean_all_tx_rings); + +/** + * wx_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +static void wx_free_tx_resources(struct wx_ring *tx_ring) +{ + wx_clean_tx_ring(tx_ring); + kvfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!tx_ring->desc) + return; + + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); + tx_ring->desc = NULL; +} + +/** + * wx_free_all_tx_resources - Free Tx Resources for All Queues + * @wx: pointer to hardware structure + * + * Free all transmit software resources + **/ +static void wx_free_all_tx_resources(struct wx *wx) +{ + int i; + + for (i = 0; i < wx->num_tx_queues; i++) + wx_free_tx_resources(wx->tx_ring[i]); +} + +void wx_free_resources(struct wx *wx) +{ + wx_free_isb_resources(wx); + wx_free_all_rx_resources(wx); + wx_free_all_tx_resources(wx); +} +EXPORT_SYMBOL(wx_free_resources); + +static int wx_alloc_page_pool(struct wx_ring *rx_ring) +{ + int ret = 0; + + struct page_pool_params pp_params = { + .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, + .order = 0, + .pool_size = rx_ring->size, + .nid = dev_to_node(rx_ring->dev), + .dev = rx_ring->dev, + .dma_dir = DMA_FROM_DEVICE, + .offset = 0, + .max_len = PAGE_SIZE, + }; + + rx_ring->page_pool = page_pool_create(&pp_params); + if (IS_ERR(rx_ring->page_pool)) { + ret = PTR_ERR(rx_ring->page_pool); + rx_ring->page_pool = NULL; + } + + return ret; +} + +/** + * wx_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +static int wx_setup_rx_resources(struct wx_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + int orig_node = dev_to_node(dev); + int numa_node = NUMA_NO_NODE; + int size, ret; + + size = sizeof(struct wx_rx_buffer) * rx_ring->count; + + if (rx_ring->q_vector) + numa_node = rx_ring->q_vector->numa_node; + + rx_ring->rx_buffer_info = kvmalloc_node(size, GFP_KERNEL, numa_node); + if (!rx_ring->rx_buffer_info) + rx_ring->rx_buffer_info = kvmalloc(size, GFP_KERNEL); + if (!rx_ring->rx_buffer_info) + goto err; + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * sizeof(union wx_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); + + set_dev_node(dev, numa_node); + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + if (!rx_ring->desc) { + set_dev_node(dev, orig_node); + rx_ring->desc = 
dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + } + + if (!rx_ring->desc) + goto err; + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + ret = wx_alloc_page_pool(rx_ring); + if (ret < 0) { + dev_err(rx_ring->dev, "Page pool creation failed: %d\n", ret); + goto err_desc; + } + + return 0; + +err_desc: + dma_free_coherent(dev, rx_ring->size, rx_ring->desc, rx_ring->dma); +err: + kvfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); + return -ENOMEM; +} + +/** + * wx_setup_all_rx_resources - allocate all queues Rx resources + * @wx: pointer to hardware structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ +static int wx_setup_all_rx_resources(struct wx *wx) +{ + int i, err = 0; + + for (i = 0; i < wx->num_rx_queues; i++) { + err = wx_setup_rx_resources(wx->rx_ring[i]); + if (!err) + continue; + + wx_err(wx, "Allocation for Rx Queue %u failed\n", i); + goto err_setup_rx; + } + + return 0; +err_setup_rx: + /* rewind the index freeing the rings as we go */ + while (i--) + wx_free_rx_resources(wx->rx_ring[i]); + return err; +} + +/** + * wx_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +static int wx_setup_tx_resources(struct wx_ring *tx_ring) +{ + struct device *dev = tx_ring->dev; + int orig_node = dev_to_node(dev); + int numa_node = NUMA_NO_NODE; + int size; + + size = sizeof(struct wx_tx_buffer) * tx_ring->count; + + if (tx_ring->q_vector) + numa_node = tx_ring->q_vector->numa_node; + + tx_ring->tx_buffer_info = kvmalloc_node(size, GFP_KERNEL, numa_node); + if (!tx_ring->tx_buffer_info) + tx_ring->tx_buffer_info = kvmalloc(size, GFP_KERNEL); + if (!tx_ring->tx_buffer_info) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(union wx_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + set_dev_node(dev, numa_node); + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) { + set_dev_node(dev, orig_node); + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + } + + if (!tx_ring->desc) + goto err; + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + return 0; + +err: + kvfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); + return -ENOMEM; +} + +/** + * wx_setup_all_tx_resources - allocate all queues Tx resources + * @wx: pointer to private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. 
+ * + * Return 0 on success, negative on failure + **/ +static int wx_setup_all_tx_resources(struct wx *wx) +{ + int i, err = 0; + + for (i = 0; i < wx->num_tx_queues; i++) { + err = wx_setup_tx_resources(wx->tx_ring[i]); + if (!err) + continue; + + wx_err(wx, "Allocation for Tx Queue %u failed\n", i); + goto err_setup_tx; + } + + return 0; +err_setup_tx: + /* rewind the index freeing the rings as we go */ + while (i--) + wx_free_tx_resources(wx->tx_ring[i]); + return err; +} + +int wx_setup_resources(struct wx *wx) +{ + int err; + + /* allocate transmit descriptors */ + err = wx_setup_all_tx_resources(wx); + if (err) + return err; + + /* allocate receive descriptors */ + err = wx_setup_all_rx_resources(wx); + if (err) + goto err_free_tx; + + err = wx_setup_isb_resources(wx); + if (err) + goto err_free_rx; + + return 0; + +err_free_rx: + wx_free_all_rx_resources(wx); +err_free_tx: + wx_free_all_tx_resources(wx); + + return err; +} +EXPORT_SYMBOL(wx_setup_resources); + +/** + * wx_get_stats64 - Get System Network Statistics + * @netdev: network interface device structure + * @stats: storage space for 64bit statistics + */ +void wx_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct wx *wx = netdev_priv(netdev); + int i; + + rcu_read_lock(); + for (i = 0; i < wx->num_rx_queues; i++) { + struct wx_ring *ring = READ_ONCE(wx->rx_ring[i]); + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry(&ring->syncp, start)); + stats->rx_packets += packets; + stats->rx_bytes += bytes; + } + } + + for (i = 0; i < wx->num_tx_queues; i++) { + struct wx_ring *ring = READ_ONCE(wx->tx_ring[i]); + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry(&ring->syncp, + start)); + stats->tx_packets += packets; + stats->tx_bytes += bytes; + } + } + + rcu_read_unlock(); +} +EXPORT_SYMBOL(wx_get_stats64); + +int wx_set_features(struct net_device *netdev, netdev_features_t features) +{ + netdev_features_t changed = netdev->features ^ features; + struct wx *wx = netdev_priv(netdev); + + if (changed & NETIF_F_RXHASH) + wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN, + WX_RDB_RA_CTL_RSS_EN); + else + wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN, 0); + + if (changed & + (NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_STAG_RX)) + wx_set_rx_mode(netdev); + + return 1; +} +EXPORT_SYMBOL(wx_set_features); + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.h b/drivers/net/ethernet/wangxun/libwx/wx_lib.h new file mode 100644 index 0000000000..df1f4a5951 --- /dev/null +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. 
+ */ + +#ifndef _WX_LIB_H_ +#define _WX_LIB_H_ + +void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count); +u16 wx_desc_unused(struct wx_ring *ring); +netdev_tx_t wx_xmit_frame(struct sk_buff *skb, + struct net_device *netdev); +void wx_napi_enable_all(struct wx *wx); +void wx_napi_disable_all(struct wx *wx); +void wx_reset_interrupt_capability(struct wx *wx); +void wx_clear_interrupt_scheme(struct wx *wx); +int wx_init_interrupt_scheme(struct wx *wx); +irqreturn_t wx_msix_clean_rings(int __always_unused irq, void *data); +void wx_free_irq(struct wx *wx); +int wx_setup_isb_resources(struct wx *wx); +void wx_free_isb_resources(struct wx *wx); +u32 wx_misc_isb(struct wx *wx, enum wx_isb_idx idx); +void wx_configure_vectors(struct wx *wx); +void wx_clean_all_rx_rings(struct wx *wx); +void wx_clean_all_tx_rings(struct wx *wx); +void wx_free_resources(struct wx *wx); +int wx_setup_resources(struct wx *wx); +void wx_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats); +int wx_set_features(struct net_device *netdev, netdev_features_t features); + +#endif /* _NGBE_LIB_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h new file mode 100644 index 0000000000..c555af9ed5 --- /dev/null +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h @@ -0,0 +1,965 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */ + +#ifndef _WX_TYPE_H_ +#define _WX_TYPE_H_ + +#include <linux/bitfield.h> +#include <linux/netdevice.h> +#include <linux/if_vlan.h> +#include <net/ip.h> + +#define WX_NCSI_SUP 0x8000 +#define WX_NCSI_MASK 0x8000 +#define WX_WOL_SUP 0x4000 +#define WX_WOL_MASK 0x4000 + +/* MSI-X capability fields masks */ +#define WX_PCIE_MSIX_TBL_SZ_MASK 0x7FF +#define WX_PCI_LINK_STATUS 0xB2 + +/**************** Global Registers ****************************/ +/* chip control Registers */ +#define WX_MIS_PWR 0x10000 +#define WX_MIS_RST 0x1000C +#define WX_MIS_RST_LAN_RST(_i) BIT((_i) + 1) +#define WX_MIS_RST_SW_RST BIT(0) +#define WX_MIS_ST 0x10028 +#define WX_MIS_ST_MNG_INIT_DN BIT(0) +#define WX_MIS_SWSM 0x1002C +#define WX_MIS_SWSM_SMBI BIT(0) +#define WX_MIS_RST_ST 0x10030 +#define WX_MIS_RST_ST_RST_INI_SHIFT 8 +#define WX_MIS_RST_ST_RST_INIT (0xFF << WX_MIS_RST_ST_RST_INI_SHIFT) + +/* FMGR Registers */ +#define WX_SPI_CMD 0x10104 +#define WX_SPI_CMD_READ_DWORD 0x1 +#define WX_SPI_CLK_DIV 0x3 +#define WX_SPI_CMD_CMD(_v) FIELD_PREP(GENMASK(30, 28), _v) +#define WX_SPI_CMD_CLK(_v) FIELD_PREP(GENMASK(27, 25), _v) +#define WX_SPI_CMD_ADDR(_v) FIELD_PREP(GENMASK(23, 0), _v) +#define WX_SPI_DATA 0x10108 +#define WX_SPI_DATA_BYPASS BIT(31) +#define WX_SPI_DATA_OP_DONE BIT(0) +#define WX_SPI_STATUS 0x1010C +#define WX_SPI_STATUS_OPDONE BIT(0) +#define WX_SPI_STATUS_FLASH_BYPASS BIT(31) +#define WX_SPI_ILDR_STATUS 0x10120 + +/* Sensors for PVT(Process Voltage Temperature) */ +#define WX_TS_EN 0x10304 +#define WX_TS_EN_ENA BIT(0) +#define WX_TS_ALARM_THRE 0x1030C +#define WX_TS_DALARM_THRE 0x10310 +#define WX_TS_INT_EN 0x10314 +#define WX_TS_INT_EN_DALARM_INT_EN BIT(1) +#define WX_TS_INT_EN_ALARM_INT_EN BIT(0) +#define WX_TS_ALARM_ST 0x10318 +#define WX_TS_ALARM_ST_DALARM BIT(1) +#define WX_TS_ALARM_ST_ALARM BIT(0) + +/************************* Port Registers ************************************/ +/* port cfg Registers */ +#define WX_CFG_PORT_CTL 0x14400 +#define WX_CFG_PORT_CTL_DRV_LOAD BIT(3) +#define WX_CFG_PORT_CTL_QINQ BIT(2) +#define WX_CFG_PORT_CTL_D_VLAN BIT(0) /* 
double vlan*/ +#define WX_CFG_TAG_TPID(_i) (0x14430 + ((_i) * 4)) +#define WX_CFG_PORT_CTL_NUM_VT_MASK GENMASK(13, 12) /* number of TVs */ + + +/* GPIO Registers */ +#define WX_GPIO_DR 0x14800 +#define WX_GPIO_DR_0 BIT(0) /* SDP0 Data Value */ +#define WX_GPIO_DR_1 BIT(1) /* SDP1 Data Value */ +#define WX_GPIO_DDR 0x14804 +#define WX_GPIO_DDR_0 BIT(0) /* SDP0 IO direction */ +#define WX_GPIO_DDR_1 BIT(1) /* SDP1 IO direction */ +#define WX_GPIO_CTL 0x14808 +#define WX_GPIO_INTEN 0x14830 +#define WX_GPIO_INTEN_0 BIT(0) +#define WX_GPIO_INTEN_1 BIT(1) +#define WX_GPIO_INTMASK 0x14834 +#define WX_GPIO_INTTYPE_LEVEL 0x14838 +#define WX_GPIO_POLARITY 0x1483C +#define WX_GPIO_INTSTATUS 0x14844 +#define WX_GPIO_EOI 0x1484C +#define WX_GPIO_EXT 0x14850 + +/*********************** Transmit DMA registers **************************/ +/* transmit global control */ +#define WX_TDM_CTL 0x18000 +/* TDM CTL BIT */ +#define WX_TDM_CTL_TE BIT(0) /* Transmit Enable */ +#define WX_TDM_PB_THRE(_i) (0x18020 + ((_i) * 4)) +#define WX_TDM_RP_IDX 0x1820C +#define WX_TDM_RP_RATE 0x18404 + +/***************************** RDB registers *********************************/ +/* receive packet buffer */ +#define WX_RDB_PB_CTL 0x19000 +#define WX_RDB_PB_CTL_RXEN BIT(31) /* Enable Receiver */ +#define WX_RDB_PB_CTL_DISABLED BIT(0) +#define WX_RDB_PB_SZ(_i) (0x19020 + ((_i) * 4)) +#define WX_RDB_PB_SZ_SHIFT 10 +/* statistic */ +#define WX_RDB_PFCMACDAL 0x19210 +#define WX_RDB_PFCMACDAH 0x19214 +/* ring assignment */ +#define WX_RDB_PL_CFG(_i) (0x19300 + ((_i) * 4)) +#define WX_RDB_PL_CFG_L4HDR BIT(1) +#define WX_RDB_PL_CFG_L3HDR BIT(2) +#define WX_RDB_PL_CFG_L2HDR BIT(3) +#define WX_RDB_PL_CFG_TUN_TUNHDR BIT(4) +#define WX_RDB_PL_CFG_TUN_OUTL2HDR BIT(5) +#define WX_RDB_RA_CTL 0x194F4 +#define WX_RDB_RA_CTL_RSS_EN BIT(2) /* RSS Enable */ + +/******************************* PSR Registers *******************************/ +/* psr control */ +#define WX_PSR_CTL 0x15000 +/* Header split receive */ +#define WX_PSR_CTL_SW_EN BIT(18) +#define WX_PSR_CTL_RSC_ACK BIT(17) +#define WX_PSR_CTL_RSC_DIS BIT(16) +#define WX_PSR_CTL_PCSD BIT(13) +#define WX_PSR_CTL_IPPCSE BIT(12) +#define WX_PSR_CTL_BAM BIT(10) +#define WX_PSR_CTL_UPE BIT(9) +#define WX_PSR_CTL_MPE BIT(8) +#define WX_PSR_CTL_MFE BIT(7) +#define WX_PSR_CTL_MO_SHIFT 5 +#define WX_PSR_CTL_MO (0x3 << WX_PSR_CTL_MO_SHIFT) +#define WX_PSR_CTL_TPE BIT(4) +#define WX_PSR_MAX_SZ 0x15020 +#define WX_PSR_VLAN_CTL 0x15088 +#define WX_PSR_VLAN_CTL_CFIEN BIT(29) /* bit 29 */ +#define WX_PSR_VLAN_CTL_VFE BIT(30) /* bit 30 */ +/* mcasst/ucast overflow tbl */ +#define WX_PSR_MC_TBL(_i) (0x15200 + ((_i) * 4)) +#define WX_PSR_UC_TBL(_i) (0x15400 + ((_i) * 4)) + +/* VM L2 contorl */ +#define WX_PSR_VM_L2CTL(_i) (0x15600 + ((_i) * 4)) +#define WX_PSR_VM_L2CTL_UPE BIT(4) /* unicast promiscuous */ +#define WX_PSR_VM_L2CTL_VACC BIT(6) /* accept nomatched vlan */ +#define WX_PSR_VM_L2CTL_AUPE BIT(8) /* accept untagged packets */ +#define WX_PSR_VM_L2CTL_ROMPE BIT(9) /* accept packets in MTA tbl */ +#define WX_PSR_VM_L2CTL_ROPE BIT(10) /* accept packets in UC tbl */ +#define WX_PSR_VM_L2CTL_BAM BIT(11) /* accept broadcast packets */ +#define WX_PSR_VM_L2CTL_MPE BIT(12) /* multicast promiscuous */ + +/* Management */ +#define WX_PSR_MNG_FLEX_SEL 0x1582C +#define WX_PSR_MNG_FLEX_DW_L(_i) (0x15A00 + ((_i) * 16)) +#define WX_PSR_MNG_FLEX_DW_H(_i) (0x15A04 + ((_i) * 16)) +#define WX_PSR_MNG_FLEX_MSK(_i) (0x15A08 + ((_i) * 16)) +#define WX_PSR_LAN_FLEX_SEL 0x15B8C +#define WX_PSR_LAN_FLEX_DW_L(_i) (0x15C00 
+ ((_i) * 16)) +#define WX_PSR_LAN_FLEX_DW_H(_i) (0x15C04 + ((_i) * 16)) +#define WX_PSR_LAN_FLEX_MSK(_i) (0x15C08 + ((_i) * 16)) + +#define WX_PSR_WKUP_CTL 0x15B80 +/* Wake Up Filter Control Bit */ +#define WX_PSR_WKUP_CTL_MAG BIT(1) /* Magic Packet Wakeup Enable */ + +/* vlan tbl */ +#define WX_PSR_VLAN_TBL(_i) (0x16000 + ((_i) * 4)) + +/* mac switcher */ +#define WX_PSR_MAC_SWC_AD_L 0x16200 +#define WX_PSR_MAC_SWC_AD_H 0x16204 +#define WX_PSR_MAC_SWC_AD_H_AD(v) FIELD_PREP(U16_MAX, v) +#define WX_PSR_MAC_SWC_AD_H_ADTYPE(v) FIELD_PREP(BIT(30), v) +#define WX_PSR_MAC_SWC_AD_H_AV BIT(31) +#define WX_PSR_MAC_SWC_VM_L 0x16208 +#define WX_PSR_MAC_SWC_VM_H 0x1620C +#define WX_PSR_MAC_SWC_IDX 0x16210 +#define WX_CLEAR_VMDQ_ALL 0xFFFFFFFFU + +/* vlan switch */ +#define WX_PSR_VLAN_SWC 0x16220 +#define WX_PSR_VLAN_SWC_VM_L 0x16224 +#define WX_PSR_VLAN_SWC_VM_H 0x16228 +#define WX_PSR_VLAN_SWC_IDX 0x16230 /* 64 vlan entries */ +/* VLAN pool filtering masks */ +#define WX_PSR_VLAN_SWC_VIEN BIT(31) /* filter is valid */ +#define WX_PSR_VLAN_SWC_ENTRIES 64 + +/********************************* RSEC **************************************/ +/* general rsec */ +#define WX_RSC_CTL 0x17000 +#define WX_RSC_CTL_SAVE_MAC_ERR BIT(6) +#define WX_RSC_CTL_CRC_STRIP BIT(2) +#define WX_RSC_CTL_RX_DIS BIT(1) +#define WX_RSC_ST 0x17004 +#define WX_RSC_ST_RSEC_RDY BIT(0) + +/****************************** TDB ******************************************/ +#define WX_TDB_PB_SZ(_i) (0x1CC00 + ((_i) * 4)) +#define WX_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */ + +/****************************** TSEC *****************************************/ +/* Security Control Registers */ +#define WX_TSC_CTL 0x1D000 +#define WX_TSC_CTL_TX_DIS BIT(1) +#define WX_TSC_CTL_TSEC_DIS BIT(0) +#define WX_TSC_ST 0x1D004 +#define WX_TSC_ST_SECTX_RDY BIT(0) +#define WX_TSC_BUF_AE 0x1D00C +#define WX_TSC_BUF_AE_THR GENMASK(9, 0) + +/************************************** MNG ********************************/ +#define WX_MNG_SWFW_SYNC 0x1E008 +#define WX_MNG_SWFW_SYNC_SW_MB BIT(2) +#define WX_MNG_SWFW_SYNC_SW_FLASH BIT(3) +#define WX_MNG_MBOX 0x1E100 +#define WX_MNG_MBOX_CTL 0x1E044 +#define WX_MNG_MBOX_CTL_SWRDY BIT(0) +#define WX_MNG_MBOX_CTL_FWRDY BIT(2) + +/************************************* ETH MAC *****************************/ +#define WX_MAC_TX_CFG 0x11000 +#define WX_MAC_TX_CFG_TE BIT(0) +#define WX_MAC_TX_CFG_SPEED_MASK GENMASK(30, 29) +#define WX_MAC_TX_CFG_SPEED_10G FIELD_PREP(WX_MAC_TX_CFG_SPEED_MASK, 0) +#define WX_MAC_TX_CFG_SPEED_1G FIELD_PREP(WX_MAC_TX_CFG_SPEED_MASK, 3) +#define WX_MAC_RX_CFG 0x11004 +#define WX_MAC_RX_CFG_RE BIT(0) +#define WX_MAC_RX_CFG_JE BIT(8) +#define WX_MAC_PKT_FLT 0x11008 +#define WX_MAC_PKT_FLT_PR BIT(0) /* promiscuous mode */ +#define WX_MAC_WDG_TIMEOUT 0x1100C +#define WX_MAC_RX_FLOW_CTRL 0x11090 +#define WX_MAC_RX_FLOW_CTRL_RFE BIT(0) /* receive fc enable */ +/* MDIO Registers */ +#define WX_MSCA 0x11200 +#define WX_MSCA_RA(v) FIELD_PREP(U16_MAX, v) +#define WX_MSCA_PA(v) FIELD_PREP(GENMASK(20, 16), v) +#define WX_MSCA_DA(v) FIELD_PREP(GENMASK(25, 21), v) +#define WX_MSCC 0x11204 +#define WX_MSCC_CMD(v) FIELD_PREP(GENMASK(17, 16), v) + +enum WX_MSCA_CMD_value { + WX_MSCA_CMD_RSV = 0, + WX_MSCA_CMD_WRITE, + WX_MSCA_CMD_POST_READ, + WX_MSCA_CMD_READ, +}; + +#define WX_MSCC_SADDR BIT(18) +#define WX_MSCC_BUSY BIT(22) +#define WX_MDIO_CLK(v) FIELD_PREP(GENMASK(21, 19), v) +#define WX_MMC_CONTROL 0x11800 +#define WX_MMC_CONTROL_RSTONRD BIT(2) /* reset on read */ + +/********************************* BAR 
registers ***************************/ +/* Interrupt Registers */ +#define WX_BME_CTL 0x12020 +#define WX_PX_MISC_IC 0x100 +#define WX_PX_MISC_ICS 0x104 +#define WX_PX_MISC_IEN 0x108 +#define WX_PX_INTA 0x110 +#define WX_PX_GPIE 0x118 +#define WX_PX_GPIE_MODEL BIT(0) +#define WX_PX_IC(_i) (0x120 + (_i) * 4) +#define WX_PX_IMS(_i) (0x140 + (_i) * 4) +#define WX_PX_IMC(_i) (0x150 + (_i) * 4) +#define WX_PX_ISB_ADDR_L 0x160 +#define WX_PX_ISB_ADDR_H 0x164 +#define WX_PX_TRANSACTION_PENDING 0x168 +#define WX_PX_ITRSEL 0x180 +#define WX_PX_ITR(_i) (0x200 + (_i) * 4) +#define WX_PX_ITR_CNT_WDIS BIT(31) +#define WX_PX_MISC_IVAR 0x4FC +#define WX_PX_IVAR(_i) (0x500 + (_i) * 4) + +#define WX_PX_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ +#define WX_7K_ITR 595 +#define WX_12K_ITR 336 +#define WX_SP_MAX_EITR 0x00000FF8U +#define WX_EM_MAX_EITR 0x00007FFCU + +/* transmit DMA Registers */ +#define WX_PX_TR_BAL(_i) (0x03000 + ((_i) * 0x40)) +#define WX_PX_TR_BAH(_i) (0x03004 + ((_i) * 0x40)) +#define WX_PX_TR_WP(_i) (0x03008 + ((_i) * 0x40)) +#define WX_PX_TR_RP(_i) (0x0300C + ((_i) * 0x40)) +#define WX_PX_TR_CFG(_i) (0x03010 + ((_i) * 0x40)) +/* Transmit Config masks */ +#define WX_PX_TR_CFG_ENABLE BIT(0) /* Ena specific Tx Queue */ +#define WX_PX_TR_CFG_TR_SIZE_SHIFT 1 /* tx desc number per ring */ +#define WX_PX_TR_CFG_SWFLSH BIT(26) /* Tx Desc. wr-bk flushing */ +#define WX_PX_TR_CFG_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */ +#define WX_PX_TR_CFG_THRE_SHIFT 8 + +/* Receive DMA Registers */ +#define WX_PX_RR_BAL(_i) (0x01000 + ((_i) * 0x40)) +#define WX_PX_RR_BAH(_i) (0x01004 + ((_i) * 0x40)) +#define WX_PX_RR_WP(_i) (0x01008 + ((_i) * 0x40)) +#define WX_PX_RR_RP(_i) (0x0100C + ((_i) * 0x40)) +#define WX_PX_RR_CFG(_i) (0x01010 + ((_i) * 0x40)) +/* PX_RR_CFG bit definitions */ +#define WX_PX_RR_CFG_VLAN BIT(31) +#define WX_PX_RR_CFG_SPLIT_MODE BIT(26) +#define WX_PX_RR_CFG_RR_THER_SHIFT 16 +#define WX_PX_RR_CFG_RR_HDR_SZ GENMASK(15, 12) +#define WX_PX_RR_CFG_RR_BUF_SZ GENMASK(11, 8) +#define WX_PX_RR_CFG_BHDRSIZE_SHIFT 6 /* 64byte resolution (>> 6) + * + at bit 8 offset (<< 12) + * = (<< 6) + */ +#define WX_PX_RR_CFG_BSIZEPKT_SHIFT 2 /* so many KBs */ +#define WX_PX_RR_CFG_RR_SIZE_SHIFT 1 +#define WX_PX_RR_CFG_RR_EN BIT(0) + +/* Number of 80 microseconds we wait for PCI Express master disable */ +#define WX_PCI_MASTER_DISABLE_TIMEOUT 80000 + +/****************** Manageablility Host Interface defines ********************/ +#define WX_HI_MAX_BLOCK_BYTE_LENGTH 256 /* Num of bytes in range */ +#define WX_HI_COMMAND_TIMEOUT 1000 /* Process HI command limit */ + +#define FW_READ_SHADOW_RAM_CMD 0x31 +#define FW_READ_SHADOW_RAM_LEN 0x6 +#define FW_DEFAULT_CHECKSUM 0xFF /* checksum always 0xFF */ +#define FW_NVM_DATA_OFFSET 3 +#define FW_MAX_READ_BUFFER_SIZE 244 +#define FW_RESET_CMD 0xDF +#define FW_RESET_LEN 0x2 +#define FW_CEM_HDR_LEN 0x4 +#define FW_CEM_CMD_RESERVED 0X0 +#define FW_CEM_MAX_RETRIES 3 +#define FW_CEM_RESP_STATUS_SUCCESS 0x1 + +#define WX_SW_REGION_PTR 0x1C + +#define WX_MAC_STATE_DEFAULT 0x1 +#define WX_MAC_STATE_MODIFIED 0x2 +#define WX_MAC_STATE_IN_USE 0x4 + +#define WX_MAX_RXD 8192 +#define WX_MAX_TXD 8192 + +#define WX_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */ +#define VMDQ_P(p) p + +/* Supported Rx Buffer Sizes */ +#define WX_RXBUFFER_256 256 /* Used for skb receive header */ +#define WX_RXBUFFER_2K 2048 +#define WX_MAX_RXBUFFER 16384 /* largest size for single descriptor */ + +#if MAX_SKB_FRAGS < 8 +#define WX_RX_BUFSZ ALIGN(WX_MAX_RXBUFFER / MAX_SKB_FRAGS, 1024) 
+#else +#define WX_RX_BUFSZ WX_RXBUFFER_2K +#endif + +#define WX_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define WX_MAX_DATA_PER_TXD BIT(14) +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), WX_MAX_DATA_PER_TXD) +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) + +#define WX_CFG_PORT_ST 0x14404 + +/******************* Receive Descriptor bit definitions **********************/ +#define WX_RXD_STAT_DD BIT(0) /* Done */ +#define WX_RXD_STAT_EOP BIT(1) /* End of Packet */ +#define WX_RXD_STAT_VP BIT(5) /* IEEE VLAN Pkt */ +#define WX_RXD_STAT_L4CS BIT(7) /* L4 xsum calculated */ +#define WX_RXD_STAT_IPCS BIT(8) /* IP xsum calculated */ +#define WX_RXD_STAT_OUTERIPCS BIT(10) /* Cloud IP xsum calculated*/ + +#define WX_RXD_ERR_OUTERIPER BIT(26) /* CRC IP Header error */ +#define WX_RXD_ERR_RXE BIT(29) /* Any MAC Error */ +#define WX_RXD_ERR_TCPE BIT(30) /* TCP/UDP Checksum Error */ +#define WX_RXD_ERR_IPE BIT(31) /* IP Checksum Error */ + +/* RSS Hash results */ +#define WX_RXD_RSSTYPE_MASK GENMASK(3, 0) +#define WX_RXD_RSSTYPE_IPV4_TCP 0x00000001U +#define WX_RXD_RSSTYPE_IPV6_TCP 0x00000003U +#define WX_RXD_RSSTYPE_IPV4_SCTP 0x00000004U +#define WX_RXD_RSSTYPE_IPV6_SCTP 0x00000006U +#define WX_RXD_RSSTYPE_IPV4_UDP 0x00000007U +#define WX_RXD_RSSTYPE_IPV6_UDP 0x00000008U + +#define WX_RSS_L4_TYPES_MASK \ + ((1ul << WX_RXD_RSSTYPE_IPV4_TCP) | \ + (1ul << WX_RXD_RSSTYPE_IPV4_UDP) | \ + (1ul << WX_RXD_RSSTYPE_IPV4_SCTP) | \ + (1ul << WX_RXD_RSSTYPE_IPV6_TCP) | \ + (1ul << WX_RXD_RSSTYPE_IPV6_UDP) | \ + (1ul << WX_RXD_RSSTYPE_IPV6_SCTP)) +/* TUN */ +#define WX_PTYPE_TUN_IPV4 0x80 +#define WX_PTYPE_TUN_IPV6 0xC0 + +/* PKT for TUN */ +#define WX_PTYPE_PKT_IPIP 0x00 /* IP+IP */ +#define WX_PTYPE_PKT_IG 0x10 /* IP+GRE */ +#define WX_PTYPE_PKT_IGM 0x20 /* IP+GRE+MAC */ +#define WX_PTYPE_PKT_IGMV 0x30 /* IP+GRE+MAC+VLAN */ +/* PKT for !TUN */ +#define WX_PTYPE_PKT_MAC 0x10 +#define WX_PTYPE_PKT_IP 0x20 + +/* TYP for PKT=mac */ +#define WX_PTYPE_TYP_MAC 0x01 +/* TYP for PKT=ip */ +#define WX_PTYPE_PKT_IPV6 0x08 +#define WX_PTYPE_TYP_IPFRAG 0x01 +#define WX_PTYPE_TYP_IP 0x02 +#define WX_PTYPE_TYP_UDP 0x03 +#define WX_PTYPE_TYP_TCP 0x04 +#define WX_PTYPE_TYP_SCTP 0x05 + +#define WX_RXD_PKTTYPE(_rxd) \ + ((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 9) & 0xFF) +#define WX_RXD_IPV6EX(_rxd) \ + ((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 6) & 0x1) +/*********************** Transmit Descriptor Config Masks ****************/ +#define WX_TXD_STAT_DD BIT(0) /* Descriptor Done */ +#define WX_TXD_DTYP_DATA 0 /* Adv Data Descriptor */ +#define WX_TXD_PAYLEN_SHIFT 13 /* Desc PAYLEN shift */ +#define WX_TXD_EOP BIT(24) /* End of Packet */ +#define WX_TXD_IFCS BIT(25) /* Insert FCS */ +#define WX_TXD_RS BIT(27) /* Report Status */ + +/*********************** Adv Transmit Descriptor Config Masks ****************/ +#define WX_TXD_MAC_TSTAMP BIT(19) /* IEEE1588 time stamp */ +#define WX_TXD_DTYP_CTXT BIT(20) /* Adv Context Desc */ +#define WX_TXD_LINKSEC BIT(26) /* enable linksec */ +#define WX_TXD_VLE BIT(30) /* VLAN pkt enable */ +#define WX_TXD_TSE BIT(31) /* TCP Seg enable */ +#define WX_TXD_CC BIT(7) /* Check Context */ +#define WX_TXD_IPSEC BIT(8) /* enable ipsec esp */ +#define WX_TXD_L4CS BIT(9) +#define WX_TXD_IIPCS BIT(10) +#define WX_TXD_EIPCS BIT(11) +#define WX_TXD_PAYLEN_SHIFT 13 /* Adv desc PAYLEN shift */ +#define WX_TXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define WX_TXD_TAG_TPID_SEL_SHIFT 11 + +#define WX_TXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ 
+#define WX_TXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + +#define WX_TXD_OUTER_IPLEN_SHIFT 12 /* Adv ctxt OUTERIPLEN shift */ +#define WX_TXD_TUNNEL_LEN_SHIFT 21 /* Adv ctxt TUNNELLEN shift */ +#define WX_TXD_TUNNEL_TYPE_SHIFT 11 /* Adv Tx Desc Tunnel Type shift */ +#define WX_TXD_TUNNEL_UDP FIELD_PREP(BIT(WX_TXD_TUNNEL_TYPE_SHIFT), 0) +#define WX_TXD_TUNNEL_GRE FIELD_PREP(BIT(WX_TXD_TUNNEL_TYPE_SHIFT), 1) + +enum wx_tx_flags { + /* cmd_type flags */ + WX_TX_FLAGS_HW_VLAN = 0x01, + WX_TX_FLAGS_TSO = 0x02, + WX_TX_FLAGS_TSTAMP = 0x04, + + /* olinfo flags */ + WX_TX_FLAGS_CC = 0x08, + WX_TX_FLAGS_IPV4 = 0x10, + WX_TX_FLAGS_CSUM = 0x20, + WX_TX_FLAGS_OUTER_IPV4 = 0x100, + WX_TX_FLAGS_LINKSEC = 0x200, + WX_TX_FLAGS_IPSEC = 0x400, +}; + +/* VLAN info */ +#define WX_TX_FLAGS_VLAN_MASK GENMASK(31, 16) +#define WX_TX_FLAGS_VLAN_SHIFT 16 + +/* wx_dec_ptype.mac: outer mac */ +enum wx_dec_ptype_mac { + WX_DEC_PTYPE_MAC_IP = 0, + WX_DEC_PTYPE_MAC_L2 = 2, + WX_DEC_PTYPE_MAC_FCOE = 3, +}; + +/* wx_dec_ptype.[e]ip: outer&encaped ip */ +#define WX_DEC_PTYPE_IP_FRAG 0x4 +enum wx_dec_ptype_ip { + WX_DEC_PTYPE_IP_NONE = 0, + WX_DEC_PTYPE_IP_IPV4 = 1, + WX_DEC_PTYPE_IP_IPV6 = 2, + WX_DEC_PTYPE_IP_FGV4 = WX_DEC_PTYPE_IP_FRAG | WX_DEC_PTYPE_IP_IPV4, + WX_DEC_PTYPE_IP_FGV6 = WX_DEC_PTYPE_IP_FRAG | WX_DEC_PTYPE_IP_IPV6, +}; + +/* wx_dec_ptype.etype: encaped type */ +enum wx_dec_ptype_etype { + WX_DEC_PTYPE_ETYPE_NONE = 0, + WX_DEC_PTYPE_ETYPE_IPIP = 1, /* IP+IP */ + WX_DEC_PTYPE_ETYPE_IG = 2, /* IP+GRE */ + WX_DEC_PTYPE_ETYPE_IGM = 3, /* IP+GRE+MAC */ + WX_DEC_PTYPE_ETYPE_IGMV = 4, /* IP+GRE+MAC+VLAN */ +}; + +/* wx_dec_ptype.proto: payload proto */ +enum wx_dec_ptype_prot { + WX_DEC_PTYPE_PROT_NONE = 0, + WX_DEC_PTYPE_PROT_UDP = 1, + WX_DEC_PTYPE_PROT_TCP = 2, + WX_DEC_PTYPE_PROT_SCTP = 3, + WX_DEC_PTYPE_PROT_ICMP = 4, + WX_DEC_PTYPE_PROT_TS = 5, /* time sync */ +}; + +/* wx_dec_ptype.layer: payload layer */ +enum wx_dec_ptype_layer { + WX_DEC_PTYPE_LAYER_NONE = 0, + WX_DEC_PTYPE_LAYER_PAY2 = 1, + WX_DEC_PTYPE_LAYER_PAY3 = 2, + WX_DEC_PTYPE_LAYER_PAY4 = 3, +}; + +struct wx_dec_ptype { + u32 known:1; + u32 mac:2; /* outer mac */ + u32 ip:3; /* outer ip*/ + u32 etype:3; /* encaped type */ + u32 eip:3; /* encaped ip */ + u32 prot:4; /* payload proto */ + u32 layer:3; /* payload layer */ +}; + +/* macro to make the table lines short */ +#define WX_PTT(mac, ip, etype, eip, proto, layer)\ + {1, \ + WX_DEC_PTYPE_MAC_##mac, /* mac */\ + WX_DEC_PTYPE_IP_##ip, /* ip */ \ + WX_DEC_PTYPE_ETYPE_##etype, /* etype */\ + WX_DEC_PTYPE_IP_##eip, /* eip */\ + WX_DEC_PTYPE_PROT_##proto, /* proto */\ + WX_DEC_PTYPE_LAYER_##layer /* layer */} + +/* Host Interface Command Structures */ +struct wx_hic_hdr { + u8 cmd; + u8 buf_len; + union { + u8 cmd_resv; + u8 ret_status; + } cmd_or_resp; + u8 checksum; +}; + +struct wx_hic_hdr2_req { + u8 cmd; + u8 buf_lenh; + u8 buf_lenl; + u8 checksum; +}; + +struct wx_hic_hdr2_rsp { + u8 cmd; + u8 buf_lenl; + u8 buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */ + u8 checksum; +}; + +union wx_hic_hdr2 { + struct wx_hic_hdr2_req req; + struct wx_hic_hdr2_rsp rsp; +}; + +/* These need to be dword aligned */ +struct wx_hic_read_shadow_ram { + union wx_hic_hdr2 hdr; + u32 address; + u16 length; + u16 pad2; + u16 data; + u16 pad3; +}; + +struct wx_hic_reset { + struct wx_hic_hdr hdr; + u16 lan_id; + u16 reset_type; +}; + +/* Bus parameters */ +struct wx_bus_info { + u8 func; + u16 device; +}; + +struct wx_thermal_sensor_data { + s16 temp; + s16 alarm_thresh; + s16 dalarm_thresh; +}; + +enum 
wx_mac_type { + wx_mac_unknown = 0, + wx_mac_sp, + wx_mac_em +}; + +enum sp_media_type { + sp_media_unknown = 0, + sp_media_fiber, + sp_media_copper, + sp_media_backplane +}; + +enum em_mac_type { + em_mac_type_unknown = 0, + em_mac_type_mdi, + em_mac_type_rgmii +}; + +struct wx_mac_info { + enum wx_mac_type type; + bool set_lben; + u8 addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + u32 mta_shadow[128]; + s32 mc_filter_type; + u32 mcft_size; + u32 vft_shadow[128]; + u32 vft_size; + u32 num_rar_entries; + u32 rx_pb_size; + u32 tx_pb_size; + u32 max_tx_queues; + u32 max_rx_queues; + + u16 max_msix_vectors; + struct wx_thermal_sensor_data sensor; +}; + +enum wx_eeprom_type { + wx_eeprom_uninitialized = 0, + wx_eeprom_spi, + wx_flash, + wx_eeprom_none /* No NVM support */ +}; + +struct wx_eeprom_info { + enum wx_eeprom_type type; + u32 semaphore_delay; + u16 word_size; + u16 sw_region_offset; +}; + +struct wx_addr_filter_info { + u32 num_mc_addrs; + u32 mta_in_use; + bool user_set_promisc; +}; + +struct wx_mac_addr { + u8 addr[ETH_ALEN]; + u16 state; /* bitmask */ + u64 pools; +}; + +enum wx_reset_type { + WX_LAN_RESET = 0, + WX_SW_RESET, + WX_GLOBAL_RESET +}; + +struct wx_cb { + dma_addr_t dma; + u16 append_cnt; /* number of skb's appended */ + bool page_released; + bool dma_released; +}; + +#define WX_CB(skb) ((struct wx_cb *)(skb)->cb) + +/* Transmit Descriptor */ +union wx_tx_desc { + struct { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +/* Receive Descriptor */ +union wx_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + union { + __le32 data; + struct { + __le16 pkt_info; /* RSS, Pkt type */ + __le16 hdr_info; /* Splithdr, hdrlen */ + } hs_rss; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +struct wx_tx_context_desc { + __le32 vlan_macip_lens; + __le32 seqnum_seed; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +/* if _flag is in _input, return _result */ +#define WX_SET_FLAG(_input, _flag, _result) \ + (((_flag) <= (_result)) ? 
\ + ((u32)((_input) & (_flag)) * ((_result) / (_flag))) : \ + ((u32)((_input) & (_flag)) / ((_flag) / (_result)))) + +#define WX_RX_DESC(R, i) \ + (&(((union wx_rx_desc *)((R)->desc))[i])) +#define WX_TX_DESC(R, i) \ + (&(((union wx_tx_desc *)((R)->desc))[i])) +#define WX_TX_CTXTDESC(R, i) \ + (&(((struct wx_tx_context_desc *)((R)->desc))[i])) + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct wx_tx_buffer { + union wx_tx_desc *next_to_watch; + struct sk_buff *skb; + unsigned int bytecount; + unsigned short gso_segs; + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + __be16 protocol; + u32 tx_flags; +}; + +struct wx_rx_buffer { + struct sk_buff *skb; + dma_addr_t dma; + dma_addr_t page_dma; + struct page *page; + unsigned int page_offset; +}; + +struct wx_queue_stats { + u64 packets; + u64 bytes; +}; + +struct wx_rx_queue_stats { + u64 csum_good_cnt; + u64 csum_err; +}; + +/* iterator for handling rings in ring container */ +#define wx_for_each_ring(posm, headm) \ + for (posm = (headm).ring; posm; posm = posm->next) + +struct wx_ring_container { + struct wx_ring *ring; /* pointer to linked list of rings */ + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u8 count; /* total number of rings in vector */ + u8 itr; /* current ITR setting for ring */ +}; +struct wx_ring { + struct wx_ring *next; /* pointer to next ring in q_vector */ + struct wx_q_vector *q_vector; /* backpointer to host q_vector */ + struct net_device *netdev; /* netdev ring belongs to */ + struct device *dev; /* device for DMA mapping */ + struct page_pool *page_pool; + void *desc; /* descriptor ring memory */ + union { + struct wx_tx_buffer *tx_buffer_info; + struct wx_rx_buffer *rx_buffer_info; + }; + u8 __iomem *tail; + dma_addr_t dma; /* phys. 
address of descriptor ring */ + unsigned int size; /* length in bytes */ + + u16 count; /* amount of descriptors */ + + u8 queue_index; /* needed for multiqueue queue management */ + u8 reg_idx; /* holds the special value that gets + * the hardware register offset + * associated with this ring, which is + * different for DCB and RSS modes + */ + u16 next_to_use; + u16 next_to_clean; + u16 next_to_alloc; + + struct wx_queue_stats stats; + struct u64_stats_sync syncp; + union { + struct wx_rx_queue_stats rx_stats; + }; +} ____cacheline_internodealigned_in_smp; + +struct wx_q_vector { + struct wx *wx; + int cpu; /* CPU for DCA */ + int numa_node; + u16 v_idx; /* index of q_vector within array, also used for + * finding the bit in EICR and friends that + * represents the vector for this ring + */ + u16 itr; /* Interrupt throttle rate written to EITR */ + struct wx_ring_container rx, tx; + struct napi_struct napi; + struct rcu_head rcu; /* to avoid race with update stats on free */ + + char name[IFNAMSIZ + 17]; + + /* for dynamic allocation of rings associated with this q_vector */ + struct wx_ring ring[] ____cacheline_internodealigned_in_smp; +}; + +enum wx_isb_idx { + WX_ISB_HEADER, + WX_ISB_MISC, + WX_ISB_VEC0, + WX_ISB_VEC1, + WX_ISB_MAX +}; + +struct wx { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + + void *priv; + u8 __iomem *hw_addr; + struct pci_dev *pdev; + struct net_device *netdev; + struct wx_bus_info bus; + struct wx_mac_info mac; + enum em_mac_type mac_type; + enum sp_media_type media_type; + struct wx_eeprom_info eeprom; + struct wx_addr_filter_info addr_ctrl; + struct wx_mac_addr *mac_table; + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + u8 revision_id; + u16 oem_ssid; + u16 oem_svid; + u16 msg_enable; + bool adapter_stopped; + u16 tpid[8]; + char eeprom_id[32]; + char *driver_name; + enum wx_reset_type reset_type; + + /* PHY stuff */ + unsigned int link; + int speed; + int duplex; + struct phy_device *phydev; + + bool wol_hw_supported; + bool ncsi_enabled; + bool gpio_ctrl; + raw_spinlock_t gpio_lock; + + /* Tx fast path data */ + int num_tx_queues; + u16 tx_itr_setting; + u16 tx_work_limit; + + /* Rx fast path data */ + int num_rx_queues; + u16 rx_itr_setting; + u16 rx_work_limit; + + int num_q_vectors; /* current number of q_vectors for device */ + int max_q_vectors; /* upper limit of q_vectors for device */ + + u32 tx_ring_count; + u32 rx_ring_count; + + struct wx_ring *tx_ring[64] ____cacheline_aligned_in_smp; + struct wx_ring *rx_ring[64]; + struct wx_q_vector *q_vector[64]; + + unsigned int queues_per_pool; + struct msix_entry *msix_entries; + + /* misc interrupt status block */ + dma_addr_t isb_dma; + u32 *isb_mem; + u32 isb_tag[WX_ISB_MAX]; + +#define WX_MAX_RETA_ENTRIES 128 + u8 rss_indir_tbl[WX_MAX_RETA_ENTRIES]; + +#define WX_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */ + u32 *rss_key; + u32 wol; + + u16 bd_number; +}; + +#define WX_INTR_ALL (~0ULL) +#define WX_INTR_Q(i) BIT(i) + +/* register operations */ +#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg))) +#define rd32(a, reg) readl((a)->hw_addr + (reg)) +#define rd32a(a, reg, offset) ( \ + rd32((a), (reg) + ((offset) << 2))) +#define wr32a(a, reg, off, val) \ + wr32((a), (reg) + ((off) << 2), (val)) + +static inline u32 +rd32m(struct wx *wx, u32 reg, u32 mask) +{ + u32 val; + + val = rd32(wx, reg); + return val & mask; +} + +static inline void +wr32m(struct wx *wx, u32 reg, u32 mask, u32 field) +{ + u32 val; + + val = rd32(wx, reg); + 
val = ((val & ~mask) | (field & mask)); + + wr32(wx, reg, val); +} + +/* On some domestic CPU platforms, sometimes IO is not synchronized with + * flushing memory, here use readl() to flush PCI read and write. + */ +#define WX_WRITE_FLUSH(H) rd32(H, WX_MIS_PWR) + +#define wx_err(wx, fmt, arg...) \ + dev_err(&(wx)->pdev->dev, fmt, ##arg) + +#define wx_dbg(wx, fmt, arg...) \ + dev_dbg(&(wx)->pdev->dev, fmt, ##arg) + +#endif /* _WX_TYPE_H_ */ diff --git a/drivers/net/ethernet/wangxun/ngbe/Makefile b/drivers/net/ethernet/wangxun/ngbe/Makefile new file mode 100644 index 0000000000..61a13d98ab --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. +# +# Makefile for the Wangxun(R) GbE PCI Express ethernet driver +# + +obj-$(CONFIG_NGBE) += ngbe.o + +ngbe-objs := ngbe_main.o ngbe_hw.o ngbe_mdio.o ngbe_ethtool.o diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c new file mode 100644 index 0000000000..ec0e869e9a --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */ + +#include <linux/pci.h> +#include <linux/phy.h> +#include <linux/netdevice.h> + +#include "../libwx/wx_ethtool.h" +#include "../libwx/wx_type.h" +#include "ngbe_ethtool.h" + +static void ngbe_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct wx *wx = netdev_priv(netdev); + + if (!wx->wol_hw_supported) + return; + wol->supported = WAKE_MAGIC; + wol->wolopts = 0; + if (wx->wol & WX_PSR_WKUP_CTL_MAG) + wol->wolopts |= WAKE_MAGIC; +} + +static int ngbe_set_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct wx *wx = netdev_priv(netdev); + struct pci_dev *pdev = wx->pdev; + + if (!wx->wol_hw_supported) + return -EOPNOTSUPP; + + wx->wol = 0; + if (wol->wolopts & WAKE_MAGIC) + wx->wol = WX_PSR_WKUP_CTL_MAG; + netdev->wol_enabled = !!(wx->wol); + wr32(wx, WX_PSR_WKUP_CTL, wx->wol); + device_set_wakeup_enable(&pdev->dev, netdev->wol_enabled); + + return 0; +} + +static const struct ethtool_ops ngbe_ethtool_ops = { + .get_drvinfo = wx_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, + .nway_reset = phy_ethtool_nway_reset, + .get_wol = ngbe_get_wol, + .set_wol = ngbe_set_wol, +}; + +void ngbe_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &ngbe_ethtool_ops; +} diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.h new file mode 100644 index 0000000000..487074e0ee --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */ + +#ifndef _NGBE_ETHTOOL_H_ +#define _NGBE_ETHTOOL_H_ + +void ngbe_set_ethtool_ops(struct net_device *netdev); + +#endif /* _NGBE_ETHTOOL_H_ */ diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c new file mode 100644 index 0000000000..6562a2de95 --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. 
*/ + +#include <linux/etherdevice.h> +#include <linux/iopoll.h> +#include <linux/pci.h> + +#include "../libwx/wx_type.h" +#include "../libwx/wx_hw.h" +#include "ngbe_type.h" +#include "ngbe_hw.h" + +int ngbe_eeprom_chksum_hostif(struct wx *wx) +{ + struct wx_hic_read_shadow_ram buffer; + int status; + int tmp; + + buffer.hdr.req.cmd = NGBE_FW_EEPROM_CHECKSUM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = 0; + buffer.hdr.req.checksum = NGBE_FW_CMD_DEFAULT_CHECKSUM; + /* convert offset from words to bytes */ + buffer.address = 0; + /* one word */ + buffer.length = 0; + + status = wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer), + WX_HI_COMMAND_TIMEOUT, false); + + if (status < 0) + return status; + tmp = rd32a(wx, WX_MNG_MBOX, 1); + if (tmp == NGBE_FW_CMD_ST_PASS) + return 0; + return -EIO; +} + +static int ngbe_reset_misc(struct wx *wx) +{ + wx_reset_misc(wx); + if (wx->gpio_ctrl) { + /* gpio0 is used to power on/off control*/ + wr32(wx, NGBE_GPIO_DDR, 0x1); + ngbe_sfp_modules_txrx_powerctl(wx, false); + } + return 0; +} + +void ngbe_sfp_modules_txrx_powerctl(struct wx *wx, bool swi) +{ + /* gpio0 is used to power on control . 0 is on */ + wr32(wx, NGBE_GPIO_DR, swi ? 0 : NGBE_GPIO_DR_0); +} + +/** + * ngbe_reset_hw - Perform hardware reset + * @wx: pointer to hardware structure + * + * Resets the hardware by resetting the transmit and receive units, masks + * and clears all interrupts, perform a PHY reset, and perform a link (MAC) + * reset. + **/ +int ngbe_reset_hw(struct wx *wx) +{ + u32 val = 0; + int ret = 0; + + /* Call wx stop to disable tx/rx and clear interrupts */ + ret = wx_stop_adapter(wx); + if (ret != 0) + return ret; + + if (wx->mac_type != em_mac_type_mdi) { + val = WX_MIS_RST_LAN_RST(wx->bus.func); + wr32(wx, WX_MIS_RST, val | rd32(wx, WX_MIS_RST)); + + ret = read_poll_timeout(rd32, val, + !(val & (BIT(9) << wx->bus.func)), 1000, + 100000, false, wx, 0x10028); + if (ret) { + wx_err(wx, "Lan reset exceed s maximum times.\n"); + return ret; + } + } + ngbe_reset_misc(wx); + + /* Store the permanent mac address */ + wx_get_mac_addr(wx, wx->mac.perm_addr); + + /* reset num_rar_entries to 128 */ + wx->mac.num_rar_entries = NGBE_RAR_ENTRIES; + wx_init_rx_addrs(wx); + pci_set_master(wx->pdev); + + return 0; +} diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.h new file mode 100644 index 0000000000..a4693e0068 --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. + */ + +#ifndef _NGBE_HW_H_ +#define _NGBE_HW_H_ + +int ngbe_eeprom_chksum_hostif(struct wx *wx); +void ngbe_sfp_modules_txrx_powerctl(struct wx *wx, bool swi); +int ngbe_reset_hw(struct wx *wx); +#endif /* _NGBE_HW_H_ */ diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c new file mode 100644 index 0000000000..a4d63d2f3c --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c @@ -0,0 +1,778 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. 
*/ + +#include <linux/types.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/string.h> +#include <linux/etherdevice.h> +#include <net/ip.h> +#include <linux/phy.h> +#include <linux/if_vlan.h> + +#include "../libwx/wx_type.h" +#include "../libwx/wx_hw.h" +#include "../libwx/wx_lib.h" +#include "ngbe_type.h" +#include "ngbe_mdio.h" +#include "ngbe_hw.h" +#include "ngbe_ethtool.h" + +char ngbe_driver_name[] = "ngbe"; + +/* ngbe_pci_tbl - PCI Device ID Table + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static const struct pci_device_id ngbe_pci_tbl[] = { + { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W), 0}, + { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A2), 0}, + { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A2S), 0}, + { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A4), 0}, + { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A4S), 0}, + { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860AL2), 0}, + { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S), 0}, + { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860AL4), 0}, + { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S), 0}, + { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860LC), 0}, + { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A1), 0}, + { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A1L), 0}, + /* required last entry */ + { .device = 0 } +}; + +/** + * ngbe_init_type_code - Initialize the shared code + * @wx: pointer to hardware structure + **/ +static void ngbe_init_type_code(struct wx *wx) +{ + int wol_mask = 0, ncsi_mask = 0; + u16 type_mask = 0, val; + + wx->mac.type = wx_mac_em; + type_mask = (u16)(wx->subsystem_device_id & NGBE_OEM_MASK); + ncsi_mask = wx->subsystem_device_id & NGBE_NCSI_MASK; + wol_mask = wx->subsystem_device_id & NGBE_WOL_MASK; + + val = rd32(wx, WX_CFG_PORT_ST); + wx->mac_type = (val & BIT(7)) >> 7 ? + em_mac_type_rgmii : + em_mac_type_mdi; + + wx->wol_hw_supported = (wol_mask == NGBE_WOL_SUP) ? 1 : 0; + wx->ncsi_enabled = (ncsi_mask == NGBE_NCSI_MASK || + type_mask == NGBE_SUBID_OCP_CARD) ? 1 : 0; + + switch (type_mask) { + case NGBE_SUBID_LY_YT8521S_SFP: + case NGBE_SUBID_LY_M88E1512_SFP: + case NGBE_SUBID_YT8521S_SFP_GPIO: + case NGBE_SUBID_INTERNAL_YT8521S_SFP_GPIO: + wx->gpio_ctrl = 1; + break; + default: + wx->gpio_ctrl = 0; + break; + } +} + +/** + * ngbe_init_rss_key - Initialize wx RSS key + * @wx: device handle + * + * Allocates and initializes the RSS key if it is not allocated. 
+ **/ +static inline int ngbe_init_rss_key(struct wx *wx) +{ + u32 *rss_key; + + if (!wx->rss_key) { + rss_key = kzalloc(WX_RSS_KEY_SIZE, GFP_KERNEL); + if (unlikely(!rss_key)) + return -ENOMEM; + + netdev_rss_key_fill(rss_key, WX_RSS_KEY_SIZE); + wx->rss_key = rss_key; + } + + return 0; +} + +/** + * ngbe_sw_init - Initialize general software structures + * @wx: board private structure to initialize + **/ +static int ngbe_sw_init(struct wx *wx) +{ + struct pci_dev *pdev = wx->pdev; + u16 msix_count = 0; + int err = 0; + + wx->mac.num_rar_entries = NGBE_RAR_ENTRIES; + wx->mac.max_rx_queues = NGBE_MAX_RX_QUEUES; + wx->mac.max_tx_queues = NGBE_MAX_TX_QUEUES; + wx->mac.mcft_size = NGBE_MC_TBL_SIZE; + wx->mac.vft_size = NGBE_SP_VFT_TBL_SIZE; + wx->mac.rx_pb_size = NGBE_RX_PB_SIZE; + wx->mac.tx_pb_size = NGBE_TDB_PB_SZ; + + /* PCI config space info */ + err = wx_sw_init(wx); + if (err < 0) + return err; + + /* mac type, phy type , oem type */ + ngbe_init_type_code(wx); + + /* Set common capability flags and settings */ + wx->max_q_vectors = NGBE_MAX_MSIX_VECTORS; + err = wx_get_pcie_msix_counts(wx, &msix_count, NGBE_MAX_MSIX_VECTORS); + if (err) + dev_err(&pdev->dev, "Do not support MSI-X\n"); + wx->mac.max_msix_vectors = msix_count; + + if (ngbe_init_rss_key(wx)) + return -ENOMEM; + + /* enable itr by default in dynamic mode */ + wx->rx_itr_setting = 1; + wx->tx_itr_setting = 1; + + /* set default ring sizes */ + wx->tx_ring_count = NGBE_DEFAULT_TXD; + wx->rx_ring_count = NGBE_DEFAULT_RXD; + + /* set default work limits */ + wx->tx_work_limit = NGBE_DEFAULT_TX_WORK; + wx->rx_work_limit = NGBE_DEFAULT_RX_WORK; + + return 0; +} + +/** + * ngbe_irq_enable - Enable default interrupt generation settings + * @wx: board private structure + * @queues: enable all queues interrupts + **/ +static void ngbe_irq_enable(struct wx *wx, bool queues) +{ + u32 mask; + + /* enable misc interrupt */ + mask = NGBE_PX_MISC_IEN_MASK; + + wr32(wx, WX_GPIO_DDR, WX_GPIO_DDR_0); + wr32(wx, WX_GPIO_INTEN, WX_GPIO_INTEN_0 | WX_GPIO_INTEN_1); + wr32(wx, WX_GPIO_INTTYPE_LEVEL, 0x0); + wr32(wx, WX_GPIO_POLARITY, wx->gpio_ctrl ? 0 : 0x3); + + wr32(wx, WX_PX_MISC_IEN, mask); + + /* mask interrupt */ + if (queues) + wx_intr_enable(wx, NGBE_INTR_ALL); + else + wx_intr_enable(wx, NGBE_INTR_MISC(wx)); +} + +/** + * ngbe_intr - msi/legacy mode Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t ngbe_intr(int __always_unused irq, void *data) +{ + struct wx_q_vector *q_vector; + struct wx *wx = data; + struct pci_dev *pdev; + u32 eicr; + + q_vector = wx->q_vector[0]; + pdev = wx->pdev; + + eicr = wx_misc_isb(wx, WX_ISB_VEC0); + if (!eicr) { + /* shared interrupt alert! + * the interrupt that we masked before the EICR read. 
+ */ + if (netif_running(wx->netdev)) + ngbe_irq_enable(wx, true); + return IRQ_NONE; /* Not our interrupt */ + } + wx->isb_mem[WX_ISB_VEC0] = 0; + if (!(pdev->msi_enabled)) + wr32(wx, WX_PX_INTA, 1); + + wx->isb_mem[WX_ISB_MISC] = 0; + /* would disable interrupts here but it is auto disabled */ + napi_schedule_irqoff(&q_vector->napi); + + if (netif_running(wx->netdev)) + ngbe_irq_enable(wx, false); + + return IRQ_HANDLED; +} + +static irqreturn_t ngbe_msix_other(int __always_unused irq, void *data) +{ + struct wx *wx = data; + + /* re-enable the original interrupt state, no lsc, no queues */ + if (netif_running(wx->netdev)) + ngbe_irq_enable(wx, false); + + return IRQ_HANDLED; +} + +/** + * ngbe_request_msix_irqs - Initialize MSI-X interrupts + * @wx: board private structure + * + * ngbe_request_msix_irqs allocates MSI-X vectors and requests + * interrupts from the kernel. + **/ +static int ngbe_request_msix_irqs(struct wx *wx) +{ + struct net_device *netdev = wx->netdev; + int vector, err; + + for (vector = 0; vector < wx->num_q_vectors; vector++) { + struct wx_q_vector *q_vector = wx->q_vector[vector]; + struct msix_entry *entry = &wx->msix_entries[vector]; + + if (q_vector->tx.ring && q_vector->rx.ring) + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-TxRx-%d", netdev->name, entry->entry); + else + /* skip this unused q_vector */ + continue; + + err = request_irq(entry->vector, wx_msix_clean_rings, 0, + q_vector->name, q_vector); + if (err) { + wx_err(wx, "request_irq failed for MSIX interrupt %s Error: %d\n", + q_vector->name, err); + goto free_queue_irqs; + } + } + + err = request_irq(wx->msix_entries[vector].vector, + ngbe_msix_other, 0, netdev->name, wx); + + if (err) { + wx_err(wx, "request_irq for msix_other failed: %d\n", err); + goto free_queue_irqs; + } + + return 0; + +free_queue_irqs: + while (vector) { + vector--; + free_irq(wx->msix_entries[vector].vector, + wx->q_vector[vector]); + } + wx_reset_interrupt_capability(wx); + return err; +} + +/** + * ngbe_request_irq - initialize interrupts + * @wx: board private structure + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. 
+ **/ +static int ngbe_request_irq(struct wx *wx) +{ + struct net_device *netdev = wx->netdev; + struct pci_dev *pdev = wx->pdev; + int err; + + if (pdev->msix_enabled) + err = ngbe_request_msix_irqs(wx); + else if (pdev->msi_enabled) + err = request_irq(pdev->irq, ngbe_intr, 0, + netdev->name, wx); + else + err = request_irq(pdev->irq, ngbe_intr, IRQF_SHARED, + netdev->name, wx); + + if (err) + wx_err(wx, "request_irq failed, Error %d\n", err); + + return err; +} + +static void ngbe_disable_device(struct wx *wx) +{ + struct net_device *netdev = wx->netdev; + u32 i; + + /* disable all enabled rx queues */ + for (i = 0; i < wx->num_rx_queues; i++) + /* this call also flushes the previous write */ + wx_disable_rx_queue(wx, wx->rx_ring[i]); + /* disable receives */ + wx_disable_rx(wx); + wx_napi_disable_all(wx); + netif_tx_stop_all_queues(netdev); + netif_tx_disable(netdev); + if (wx->gpio_ctrl) + ngbe_sfp_modules_txrx_powerctl(wx, false); + wx_irq_disable(wx); + /* disable transmits in the hardware now that interrupts are off */ + for (i = 0; i < wx->num_tx_queues; i++) { + u8 reg_idx = wx->tx_ring[i]->reg_idx; + + wr32(wx, WX_PX_TR_CFG(reg_idx), WX_PX_TR_CFG_SWFLSH); + } +} + +static void ngbe_down(struct wx *wx) +{ + phy_stop(wx->phydev); + ngbe_disable_device(wx); + wx_clean_all_tx_rings(wx); + wx_clean_all_rx_rings(wx); +} + +static void ngbe_up(struct wx *wx) +{ + wx_configure_vectors(wx); + + /* make sure to complete pre-operations */ + smp_mb__before_atomic(); + wx_napi_enable_all(wx); + /* enable transmits */ + netif_tx_start_all_queues(wx->netdev); + + /* clear any pending interrupts, may auto mask */ + rd32(wx, WX_PX_IC(0)); + rd32(wx, WX_PX_MISC_IC); + ngbe_irq_enable(wx, true); + if (wx->gpio_ctrl) + ngbe_sfp_modules_txrx_powerctl(wx, true); + + phy_start(wx->phydev); +} + +/** + * ngbe_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). + **/ +static int ngbe_open(struct net_device *netdev) +{ + struct wx *wx = netdev_priv(netdev); + int err; + + wx_control_hw(wx, true); + + err = wx_setup_resources(wx); + if (err) + return err; + + wx_configure(wx); + + err = ngbe_request_irq(wx); + if (err) + goto err_free_resources; + + err = ngbe_phy_connect(wx); + if (err) + goto err_free_irq; + + err = netif_set_real_num_tx_queues(netdev, wx->num_tx_queues); + if (err) + goto err_dis_phy; + + err = netif_set_real_num_rx_queues(netdev, wx->num_rx_queues); + if (err) + goto err_dis_phy; + + ngbe_up(wx); + + return 0; +err_dis_phy: + phy_disconnect(wx->phydev); +err_free_irq: + wx_free_irq(wx); +err_free_resources: + wx_free_resources(wx); + return err; +} + +/** + * ngbe_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
+ **/ +static int ngbe_close(struct net_device *netdev) +{ + struct wx *wx = netdev_priv(netdev); + + ngbe_down(wx); + wx_free_irq(wx); + wx_free_resources(wx); + phy_disconnect(wx->phydev); + wx_control_hw(wx, false); + + return 0; +} + +static void ngbe_dev_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct wx *wx = pci_get_drvdata(pdev); + struct net_device *netdev; + u32 wufc = wx->wol; + + netdev = wx->netdev; + rtnl_lock(); + netif_device_detach(netdev); + + if (netif_running(netdev)) + ngbe_close(netdev); + wx_clear_interrupt_scheme(wx); + rtnl_unlock(); + + if (wufc) { + wx_set_rx_mode(netdev); + wx_configure_rx(wx); + wr32(wx, NGBE_PSR_WKUP_CTL, wufc); + } else { + wr32(wx, NGBE_PSR_WKUP_CTL, 0); + } + pci_wake_from_d3(pdev, !!wufc); + *enable_wake = !!wufc; + wx_control_hw(wx, false); + + pci_disable_device(pdev); +} + +static void ngbe_shutdown(struct pci_dev *pdev) +{ + struct wx *wx = pci_get_drvdata(pdev); + bool wake; + + wake = !!wx->wol; + + ngbe_dev_shutdown(pdev, &wake); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +static const struct net_device_ops ngbe_netdev_ops = { + .ndo_open = ngbe_open, + .ndo_stop = ngbe_close, + .ndo_change_mtu = wx_change_mtu, + .ndo_start_xmit = wx_xmit_frame, + .ndo_set_rx_mode = wx_set_rx_mode, + .ndo_set_features = wx_set_features, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = wx_set_mac, + .ndo_get_stats64 = wx_get_stats64, + .ndo_vlan_rx_add_vid = wx_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = wx_vlan_rx_kill_vid, +}; + +/** + * ngbe_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in ngbe_pci_tbl + * + * Returns 0 on success, negative on failure + * + * ngbe_probe initializes an wx identified by a pci_dev structure. + * The OS initialization, configuring of the wx private structure, + * and a hardware reset occur. 
+ **/ +static int ngbe_probe(struct pci_dev *pdev, + const struct pci_device_id __always_unused *ent) +{ + struct net_device *netdev; + u32 e2rom_cksum_cap = 0; + struct wx *wx = NULL; + static int func_nums; + u16 e2rom_ver = 0; + u32 etrack_id = 0; + u32 saved_ver = 0; + int err; + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_pci_disable_dev; + } + + err = pci_request_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM), + ngbe_driver_name); + if (err) { + dev_err(&pdev->dev, + "pci_request_selected_regions failed %d\n", err); + goto err_pci_disable_dev; + } + + pci_set_master(pdev); + + netdev = devm_alloc_etherdev_mqs(&pdev->dev, + sizeof(struct wx), + NGBE_MAX_TX_QUEUES, + NGBE_MAX_RX_QUEUES); + if (!netdev) { + err = -ENOMEM; + goto err_pci_release_regions; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + wx = netdev_priv(netdev); + wx->netdev = netdev; + wx->pdev = pdev; + wx->msg_enable = BIT(3) - 1; + + wx->hw_addr = devm_ioremap(&pdev->dev, + pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!wx->hw_addr) { + err = -EIO; + goto err_pci_release_regions; + } + + wx->driver_name = ngbe_driver_name; + ngbe_set_ethtool_ops(netdev); + netdev->netdev_ops = &ngbe_netdev_ops; + + netdev->features = NETIF_F_SG | NETIF_F_IP_CSUM | + NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_RXHASH | NETIF_F_RXCSUM; + netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_TSO_MANGLEID; + netdev->vlan_features |= netdev->features; + netdev->features |= NETIF_F_IPV6_CSUM | NETIF_F_VLAN_FEATURES; + /* copy netdev features into list of user selectable features */ + netdev->hw_features |= netdev->features | NETIF_F_RXALL; + netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC; + netdev->features |= NETIF_F_HIGHDMA; + netdev->hw_features |= NETIF_F_GRO; + netdev->features |= NETIF_F_GRO; + + netdev->priv_flags |= IFF_UNICAST_FLT; + netdev->priv_flags |= IFF_SUPP_NOFCS; + + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = WX_MAX_JUMBO_FRAME_SIZE - + (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); + + wx->bd_number = func_nums; + /* setup the private structure */ + err = ngbe_sw_init(wx); + if (err) + goto err_free_mac_table; + + /* check if flash load is done after hw power up */ + err = wx_check_flash_load(wx, NGBE_SPI_ILDR_STATUS_PERST); + if (err) + goto err_free_mac_table; + err = wx_check_flash_load(wx, NGBE_SPI_ILDR_STATUS_PWRRST); + if (err) + goto err_free_mac_table; + + err = wx_mng_present(wx); + if (err) { + dev_err(&pdev->dev, "Management capability is not present\n"); + goto err_free_mac_table; + } + + err = ngbe_reset_hw(wx); + if (err) { + dev_err(&pdev->dev, "HW Init failed: %d\n", err); + goto err_free_mac_table; + } + + if (wx->bus.func == 0) { + wr32(wx, NGBE_CALSUM_CAP_STATUS, 0x0); + wr32(wx, NGBE_EEPROM_VERSION_STORE_REG, 0x0); + } else { + e2rom_cksum_cap = rd32(wx, NGBE_CALSUM_CAP_STATUS); + saved_ver = rd32(wx, NGBE_EEPROM_VERSION_STORE_REG); + } + + wx_init_eeprom_params(wx); + if (wx->bus.func == 0 || e2rom_cksum_cap == 0) { + /* make sure the EEPROM is ready */ + err = ngbe_eeprom_chksum_hostif(wx); + if (err) { + dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n"); + err = -EIO; + goto err_free_mac_table; + } + } + + wx->wol = 0; + if (wx->wol_hw_supported) + wx->wol = NGBE_PSR_WKUP_CTL_MAG; + + netdev->wol_enabled = !!(wx->wol); + wr32(wx, NGBE_PSR_WKUP_CTL, wx->wol); + 
device_set_wakeup_enable(&pdev->dev, wx->wol); + + /* Save off EEPROM version number and Option Rom version which + * together make a unique identify for the eeprom + */ + if (saved_ver) { + etrack_id = saved_ver; + } else { + wx_read_ee_hostif(wx, + wx->eeprom.sw_region_offset + NGBE_EEPROM_VERSION_H, + &e2rom_ver); + etrack_id = e2rom_ver << 16; + wx_read_ee_hostif(wx, + wx->eeprom.sw_region_offset + NGBE_EEPROM_VERSION_L, + &e2rom_ver); + etrack_id |= e2rom_ver; + wr32(wx, NGBE_EEPROM_VERSION_STORE_REG, etrack_id); + } + snprintf(wx->eeprom_id, sizeof(wx->eeprom_id), + "0x%08x", etrack_id); + + eth_hw_addr_set(netdev, wx->mac.perm_addr); + wx_mac_set_default_filter(wx, wx->mac.perm_addr); + + err = wx_init_interrupt_scheme(wx); + if (err) + goto err_free_mac_table; + + /* phy Interface Configuration */ + err = ngbe_mdio_init(wx); + if (err) + goto err_clear_interrupt_scheme; + + err = register_netdev(netdev); + if (err) + goto err_register; + + pci_set_drvdata(pdev, wx); + + netif_info(wx, probe, netdev, + "PHY: %s, PBA No: Wang Xun GbE Family Controller\n", + wx->mac_type == em_mac_type_mdi ? "Internal" : "External"); + netif_info(wx, probe, netdev, "%pM\n", netdev->dev_addr); + + return 0; + +err_register: + wx_control_hw(wx, false); +err_clear_interrupt_scheme: + wx_clear_interrupt_scheme(wx); +err_free_mac_table: + kfree(wx->mac_table); +err_pci_release_regions: + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); +err_pci_disable_dev: + pci_disable_device(pdev); + return err; +} + +/** + * ngbe_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * ngbe_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. 
+ **/ +static void ngbe_remove(struct pci_dev *pdev) +{ + struct wx *wx = pci_get_drvdata(pdev); + struct net_device *netdev; + + netdev = wx->netdev; + unregister_netdev(netdev); + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); + + kfree(wx->mac_table); + wx_clear_interrupt_scheme(wx); + + pci_disable_device(pdev); +} + +static int ngbe_suspend(struct pci_dev *pdev, pm_message_t state) +{ + bool wake; + + ngbe_dev_shutdown(pdev, &wake); + device_set_wakeup_enable(&pdev->dev, wake); + + return 0; +} + +static int ngbe_resume(struct pci_dev *pdev) +{ + struct net_device *netdev; + struct wx *wx; + u32 err; + + wx = pci_get_drvdata(pdev); + netdev = wx->netdev; + + err = pci_enable_device_mem(pdev); + if (err) { + wx_err(wx, "Cannot enable PCI device from suspend\n"); + return err; + } + pci_set_master(pdev); + device_wakeup_disable(&pdev->dev); + + ngbe_reset_hw(wx); + rtnl_lock(); + err = wx_init_interrupt_scheme(wx); + if (!err && netif_running(netdev)) + err = ngbe_open(netdev); + if (!err) + netif_device_attach(netdev); + rtnl_unlock(); + + return 0; +} + +static struct pci_driver ngbe_driver = { + .name = ngbe_driver_name, + .id_table = ngbe_pci_tbl, + .probe = ngbe_probe, + .remove = ngbe_remove, + .suspend = ngbe_suspend, + .resume = ngbe_resume, + .shutdown = ngbe_shutdown, +}; + +module_pci_driver(ngbe_driver); + +MODULE_DEVICE_TABLE(pci, ngbe_pci_tbl); +MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, <software@net-swift.com>"); +MODULE_DESCRIPTION("WangXun(R) Gigabit PCI Express Network Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c new file mode 100644 index 0000000000..591f5b7b6d --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c @@ -0,0 +1,286 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. 
*/ + +#include <linux/ethtool.h> +#include <linux/iopoll.h> +#include <linux/pci.h> +#include <linux/phy.h> + +#include "../libwx/wx_type.h" +#include "../libwx/wx_hw.h" +#include "ngbe_type.h" +#include "ngbe_mdio.h" + +static int ngbe_phy_read_reg_internal(struct mii_bus *bus, int phy_addr, int regnum) +{ + struct wx *wx = bus->priv; + + if (phy_addr != 0) + return 0xffff; + return (u16)rd32(wx, NGBE_PHY_CONFIG(regnum)); +} + +static int ngbe_phy_write_reg_internal(struct mii_bus *bus, int phy_addr, int regnum, u16 value) +{ + struct wx *wx = bus->priv; + + if (phy_addr == 0) + wr32(wx, NGBE_PHY_CONFIG(regnum), value); + return 0; +} + +static int ngbe_phy_read_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum) +{ + u32 command, val, device_type = 0; + struct wx *wx = bus->priv; + int ret; + + wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0xF); + /* setup and write the address cycle command */ + command = WX_MSCA_RA(regnum) | + WX_MSCA_PA(phy_addr) | + WX_MSCA_DA(device_type); + wr32(wx, WX_MSCA, command); + command = WX_MSCC_CMD(WX_MSCA_CMD_READ) | + WX_MSCC_BUSY | + WX_MDIO_CLK(6); + wr32(wx, WX_MSCC, command); + + /* wait to complete */ + ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, + 100000, false, wx, WX_MSCC); + if (ret) { + wx_err(wx, "Mdio read c22 command did not complete.\n"); + return ret; + } + + return (u16)rd32(wx, WX_MSCC); +} + +static int ngbe_phy_write_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum, u16 value) +{ + u32 command, val, device_type = 0; + struct wx *wx = bus->priv; + int ret; + + wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0xF); + /* setup and write the address cycle command */ + command = WX_MSCA_RA(regnum) | + WX_MSCA_PA(phy_addr) | + WX_MSCA_DA(device_type); + wr32(wx, WX_MSCA, command); + command = value | + WX_MSCC_CMD(WX_MSCA_CMD_WRITE) | + WX_MSCC_BUSY | + WX_MDIO_CLK(6); + wr32(wx, WX_MSCC, command); + + /* wait to complete */ + ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, + 100000, false, wx, WX_MSCC); + if (ret) + wx_err(wx, "Mdio write c22 command did not complete.\n"); + + return ret; +} + +static int ngbe_phy_read_reg_mdi_c45(struct mii_bus *bus, int phy_addr, int devnum, int regnum) +{ + struct wx *wx = bus->priv; + u32 val, command; + int ret; + + wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0x0); + /* setup and write the address cycle command */ + command = WX_MSCA_RA(regnum) | + WX_MSCA_PA(phy_addr) | + WX_MSCA_DA(devnum); + wr32(wx, WX_MSCA, command); + command = WX_MSCC_CMD(WX_MSCA_CMD_READ) | + WX_MSCC_BUSY | + WX_MDIO_CLK(6); + wr32(wx, WX_MSCC, command); + + /* wait to complete */ + ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, + 100000, false, wx, WX_MSCC); + if (ret) { + wx_err(wx, "Mdio read c45 command did not complete.\n"); + return ret; + } + + return (u16)rd32(wx, WX_MSCC); +} + +static int ngbe_phy_write_reg_mdi_c45(struct mii_bus *bus, int phy_addr, + int devnum, int regnum, u16 value) +{ + struct wx *wx = bus->priv; + int ret, command; + u16 val; + + wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0x0); + /* setup and write the address cycle command */ + command = WX_MSCA_RA(regnum) | + WX_MSCA_PA(phy_addr) | + WX_MSCA_DA(devnum); + wr32(wx, WX_MSCA, command); + command = value | + WX_MSCC_CMD(WX_MSCA_CMD_WRITE) | + WX_MSCC_BUSY | + WX_MDIO_CLK(6); + wr32(wx, WX_MSCC, command); + + /* wait to complete */ + ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, + 100000, false, wx, WX_MSCC); + if (ret) + wx_err(wx, "Mdio write c45 command did not complete.\n"); + + return ret; +} + +static 
int ngbe_phy_read_reg_c22(struct mii_bus *bus, int phy_addr, int regnum) +{ + struct wx *wx = bus->priv; + u16 phy_data; + + if (wx->mac_type == em_mac_type_mdi) + phy_data = ngbe_phy_read_reg_internal(bus, phy_addr, regnum); + else + phy_data = ngbe_phy_read_reg_mdi_c22(bus, phy_addr, regnum); + + return phy_data; +} + +static int ngbe_phy_write_reg_c22(struct mii_bus *bus, int phy_addr, + int regnum, u16 value) +{ + struct wx *wx = bus->priv; + int ret; + + if (wx->mac_type == em_mac_type_mdi) + ret = ngbe_phy_write_reg_internal(bus, phy_addr, regnum, value); + else + ret = ngbe_phy_write_reg_mdi_c22(bus, phy_addr, regnum, value); + + return ret; +} + +static void ngbe_handle_link_change(struct net_device *dev) +{ + struct wx *wx = netdev_priv(dev); + struct phy_device *phydev; + u32 lan_speed, reg; + + phydev = wx->phydev; + if (!(wx->link != phydev->link || + wx->speed != phydev->speed || + wx->duplex != phydev->duplex)) + return; + + wx->link = phydev->link; + wx->speed = phydev->speed; + wx->duplex = phydev->duplex; + switch (phydev->speed) { + case SPEED_10: + lan_speed = 0; + break; + case SPEED_100: + lan_speed = 1; + break; + case SPEED_1000: + default: + lan_speed = 2; + break; + } + wr32m(wx, NGBE_CFG_LAN_SPEED, 0x3, lan_speed); + + if (phydev->link) { + reg = rd32(wx, WX_MAC_TX_CFG); + reg &= ~WX_MAC_TX_CFG_SPEED_MASK; + reg |= WX_MAC_TX_CFG_SPEED_1G | WX_MAC_TX_CFG_TE; + wr32(wx, WX_MAC_TX_CFG, reg); + /* Re configure MAC RX */ + reg = rd32(wx, WX_MAC_RX_CFG); + wr32(wx, WX_MAC_RX_CFG, reg); + wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR); + reg = rd32(wx, WX_MAC_WDG_TIMEOUT); + wr32(wx, WX_MAC_WDG_TIMEOUT, reg); + } + phy_print_status(phydev); +} + +int ngbe_phy_connect(struct wx *wx) +{ + int ret; + + ret = phy_connect_direct(wx->netdev, + wx->phydev, + ngbe_handle_link_change, + PHY_INTERFACE_MODE_RGMII_ID); + if (ret) { + wx_err(wx, "PHY connect failed.\n"); + return ret; + } + + return 0; +} + +static void ngbe_phy_fixup(struct wx *wx) +{ + struct phy_device *phydev = wx->phydev; + struct ethtool_eee eee; + + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); + + phydev->mac_managed_pm = true; + if (wx->mac_type != em_mac_type_mdi) + return; + /* disable EEE, internal phy does not support eee */ + memset(&eee, 0, sizeof(eee)); + phy_ethtool_set_eee(phydev, &eee); +} + +int ngbe_mdio_init(struct wx *wx) +{ + struct pci_dev *pdev = wx->pdev; + struct mii_bus *mii_bus; + int ret; + + mii_bus = devm_mdiobus_alloc(&pdev->dev); + if (!mii_bus) + return -ENOMEM; + + mii_bus->name = "ngbe_mii_bus"; + mii_bus->read = ngbe_phy_read_reg_c22; + mii_bus->write = ngbe_phy_write_reg_c22; + mii_bus->phy_mask = GENMASK(31, 4); + mii_bus->parent = &pdev->dev; + mii_bus->priv = wx; + + if (wx->mac_type == em_mac_type_rgmii) { + mii_bus->read_c45 = ngbe_phy_read_reg_mdi_c45; + mii_bus->write_c45 = ngbe_phy_write_reg_mdi_c45; + } + + snprintf(mii_bus->id, MII_BUS_ID_SIZE, "ngbe-%x", pci_dev_id(pdev)); + ret = devm_mdiobus_register(&pdev->dev, mii_bus); + if (ret) + return ret; + + wx->phydev = phy_find_first(mii_bus); + if (!wx->phydev) + return -ENODEV; + + phy_attached_info(wx->phydev); + ngbe_phy_fixup(wx); + + wx->link = 0; + wx->speed = 0; + wx->duplex = 0; + + return 0; +} diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h new file mode 100644 index 0000000000..0a6400dd89 --- 
/dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. + */ + +#ifndef _NGBE_MDIO_H_ +#define _NGBE_MDIO_H_ + +int ngbe_phy_connect(struct wx *wx); +int ngbe_mdio_init(struct wx *wx); +#endif /* _NGBE_MDIO_H_ */ diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h new file mode 100644 index 0000000000..72c8cd2d55 --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h @@ -0,0 +1,136 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */ + +#ifndef _NGBE_TYPE_H_ +#define _NGBE_TYPE_H_ + +#include <linux/types.h> +#include <linux/netdevice.h> + +/************ NGBE_register.h ************/ +/* Device IDs */ +#define NGBE_DEV_ID_EM_WX1860AL_W 0x0100 +#define NGBE_DEV_ID_EM_WX1860A2 0x0101 +#define NGBE_DEV_ID_EM_WX1860A2S 0x0102 +#define NGBE_DEV_ID_EM_WX1860A4 0x0103 +#define NGBE_DEV_ID_EM_WX1860A4S 0x0104 +#define NGBE_DEV_ID_EM_WX1860AL2 0x0105 +#define NGBE_DEV_ID_EM_WX1860AL2S 0x0106 +#define NGBE_DEV_ID_EM_WX1860AL4 0x0107 +#define NGBE_DEV_ID_EM_WX1860AL4S 0x0108 +#define NGBE_DEV_ID_EM_WX1860LC 0x0109 +#define NGBE_DEV_ID_EM_WX1860A1 0x010a +#define NGBE_DEV_ID_EM_WX1860A1L 0x010b + +/* Subsystem ID */ +#define NGBE_SUBID_M88E1512_SFP 0x0003 +#define NGBE_SUBID_OCP_CARD 0x0040 +#define NGBE_SUBID_LY_M88E1512_SFP 0x0050 +#define NGBE_SUBID_M88E1512_RJ45 0x0051 +#define NGBE_SUBID_M88E1512_MIX 0x0052 +#define NGBE_SUBID_YT8521S_SFP 0x0060 +#define NGBE_SUBID_INTERNAL_YT8521S_SFP 0x0061 +#define NGBE_SUBID_YT8521S_SFP_GPIO 0x0062 +#define NGBE_SUBID_INTERNAL_YT8521S_SFP_GPIO 0x0064 +#define NGBE_SUBID_LY_YT8521S_SFP 0x0070 +#define NGBE_SUBID_RGMII_FPGA 0x0080 + +#define NGBE_OEM_MASK 0x00FF + +#define NGBE_NCSI_SUP 0x8000 +#define NGBE_NCSI_MASK 0x8000 +#define NGBE_WOL_SUP 0x4000 +#define NGBE_WOL_MASK 0x4000 + +/**************** EM Registers ****************************/ +/* chip control Registers */ +#define NGBE_MIS_PRB_CTL 0x10010 +/* FMGR Registers */ +#define NGBE_SPI_ILDR_STATUS 0x10120 +#define NGBE_SPI_ILDR_STATUS_PERST BIT(0) /* PCIE_PERST is done */ +#define NGBE_SPI_ILDR_STATUS_PWRRST BIT(1) /* Power on reset is done */ + +/* Checksum and EEPROM pointers */ +#define NGBE_CALSUM_COMMAND 0xE9 +#define NGBE_CALSUM_CAP_STATUS 0x10224 +#define NGBE_EEPROM_VERSION_STORE_REG 0x1022C +#define NGBE_SAN_MAC_ADDR_PTR 0x18 +#define NGBE_DEVICE_CAPS 0x1C +#define NGBE_EEPROM_VERSION_L 0x1D +#define NGBE_EEPROM_VERSION_H 0x1E + +/* Media-dependent registers. 
*/ +#define NGBE_MDIO_CLAUSE_SELECT 0x11220 + +/* GPIO Registers */ +#define NGBE_GPIO_DR 0x14800 +#define NGBE_GPIO_DDR 0x14804 +/*GPIO bit */ +#define NGBE_GPIO_DR_0 BIT(0) /* SDP0 Data Value */ +#define NGBE_GPIO_DR_1 BIT(1) /* SDP1 Data Value */ +#define NGBE_GPIO_DDR_0 BIT(0) /* SDP0 IO direction */ +#define NGBE_GPIO_DDR_1 BIT(1) /* SDP1 IO direction */ + +/* Extended Interrupt Enable Set */ +#define NGBE_PX_MISC_IEN_DEV_RST BIT(10) +#define NGBE_PX_MISC_IEN_ETH_LK BIT(18) +#define NGBE_PX_MISC_IEN_INT_ERR BIT(20) +#define NGBE_PX_MISC_IEN_GPIO BIT(26) +#define NGBE_PX_MISC_IEN_MASK ( \ + NGBE_PX_MISC_IEN_DEV_RST | \ + NGBE_PX_MISC_IEN_ETH_LK | \ + NGBE_PX_MISC_IEN_INT_ERR | \ + NGBE_PX_MISC_IEN_GPIO) + +#define NGBE_INTR_ALL 0x1FF +#define NGBE_INTR_MISC(A) BIT((A)->num_q_vectors) + +#define NGBE_PHY_CONFIG(reg_offset) (0x14000 + ((reg_offset) * 4)) +#define NGBE_CFG_LAN_SPEED 0x14440 +#define NGBE_CFG_PORT_ST 0x14404 + +/* Wake up registers */ +#define NGBE_PSR_WKUP_CTL 0x15B80 +/* Wake Up Filter Control Bit */ +#define NGBE_PSR_WKUP_CTL_LNKC BIT(0) /* Link Status Change Wakeup Enable*/ +#define NGBE_PSR_WKUP_CTL_MAG BIT(1) /* Magic Packet Wakeup Enable */ +#define NGBE_PSR_WKUP_CTL_EX BIT(2) /* Directed Exact Wakeup Enable */ +#define NGBE_PSR_WKUP_CTL_MC BIT(3) /* Directed Multicast Wakeup Enable*/ +#define NGBE_PSR_WKUP_CTL_BC BIT(4) /* Broadcast Wakeup Enable */ +#define NGBE_PSR_WKUP_CTL_ARP BIT(5) /* ARP Request Packet Wakeup Enable*/ +#define NGBE_PSR_WKUP_CTL_IPV4 BIT(6) /* Directed IPv4 Pkt Wakeup Enable */ +#define NGBE_PSR_WKUP_CTL_IPV6 BIT(7) /* Directed IPv6 Pkt Wakeup Enable */ + +#define NGBE_FW_EEPROM_CHECKSUM_CMD 0xE9 +#define NGBE_FW_NVM_DATA_OFFSET 3 +#define NGBE_FW_CMD_DEFAULT_CHECKSUM 0xFF /* checksum always 0xFF */ +#define NGBE_FW_CMD_ST_PASS 0x80658383 +#define NGBE_FW_CMD_ST_FAIL 0x70657376 + +#define NGBE_MAX_FDIR_INDICES 7 + +#define NGBE_MAX_RX_QUEUES (NGBE_MAX_FDIR_INDICES + 1) +#define NGBE_MAX_TX_QUEUES (NGBE_MAX_FDIR_INDICES + 1) + +#define NGBE_ETH_LENGTH_OF_ADDRESS 6 +#define NGBE_MAX_MSIX_VECTORS 0x09 +#define NGBE_RAR_ENTRIES 32 +#define NGBE_RX_PB_SIZE 42 +#define NGBE_MC_TBL_SIZE 128 +#define NGBE_SP_VFT_TBL_SIZE 128 +#define NGBE_TDB_PB_SZ (20 * 1024) /* 160KB Packet Buffer */ + +/* TX/RX descriptor defines */ +#define NGBE_DEFAULT_TXD 512 /* default ring size */ +#define NGBE_DEFAULT_TX_WORK 256 +#define NGBE_MAX_TXD 8192 +#define NGBE_MIN_TXD 128 + +#define NGBE_DEFAULT_RXD 512 /* default ring size */ +#define NGBE_DEFAULT_RX_WORK 256 +#define NGBE_MAX_RXD 8192 +#define NGBE_MIN_RXD 128 + +extern char ngbe_driver_name[]; + +#endif /* _NGBE_TYPE_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/Makefile b/drivers/net/ethernet/wangxun/txgbe/Makefile new file mode 100644 index 0000000000..7507f762ed --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/Makefile @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. +# +# Makefile for the Wangxun(R) 10GbE PCI Express ethernet driver +# + +obj-$(CONFIG_TXGBE) += txgbe.o + +txgbe-objs := txgbe_main.o \ + txgbe_hw.o \ + txgbe_phy.o \ + txgbe_ethtool.o diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c new file mode 100644 index 0000000000..859da11258 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. 
*/ + +#include <linux/pci.h> +#include <linux/phylink.h> +#include <linux/netdevice.h> + +#include "../libwx/wx_ethtool.h" +#include "../libwx/wx_type.h" +#include "txgbe_type.h" +#include "txgbe_ethtool.h" + +static int txgbe_nway_reset(struct net_device *netdev) +{ + struct txgbe *txgbe = netdev_to_txgbe(netdev); + + return phylink_ethtool_nway_reset(txgbe->phylink); +} + +static int txgbe_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct txgbe *txgbe = netdev_to_txgbe(netdev); + + return phylink_ethtool_ksettings_get(txgbe->phylink, cmd); +} + +static int txgbe_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct txgbe *txgbe = netdev_to_txgbe(netdev); + + return phylink_ethtool_ksettings_set(txgbe->phylink, cmd); +} + +static const struct ethtool_ops txgbe_ethtool_ops = { + .get_drvinfo = wx_get_drvinfo, + .nway_reset = txgbe_nway_reset, + .get_link = ethtool_op_get_link, + .get_link_ksettings = txgbe_get_link_ksettings, + .set_link_ksettings = txgbe_set_link_ksettings, +}; + +void txgbe_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &txgbe_ethtool_ops; +} diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h new file mode 100644 index 0000000000..ace1b35710 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */ + +#ifndef _TXGBE_ETHTOOL_H_ +#define _TXGBE_ETHTOOL_H_ + +void txgbe_set_ethtool_ops(struct net_device *netdev); + +#endif /* _TXGBE_ETHTOOL_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c new file mode 100644 index 0000000000..3727452502 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c @@ -0,0 +1,322 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */ + +#include <linux/etherdevice.h> +#include <linux/if_ether.h> +#include <linux/string.h> +#include <linux/iopoll.h> +#include <linux/types.h> +#include <linux/pci.h> + +#include "../libwx/wx_type.h" +#include "../libwx/wx_hw.h" +#include "txgbe_type.h" +#include "txgbe_hw.h" + +/** + * txgbe_disable_sec_tx_path - Stops the transmit data path + * @wx: pointer to hardware structure + * + * Stops the transmit data path and waits for the HW to internally empty + * the tx security block + **/ +int txgbe_disable_sec_tx_path(struct wx *wx) +{ + int val; + + wr32m(wx, WX_TSC_CTL, WX_TSC_CTL_TX_DIS, WX_TSC_CTL_TX_DIS); + return read_poll_timeout(rd32, val, val & WX_TSC_ST_SECTX_RDY, + 1000, 20000, false, wx, WX_TSC_ST); +} + +/** + * txgbe_enable_sec_tx_path - Enables the transmit data path + * @wx: pointer to hardware structure + * + * Enables the transmit data path. 
+ **/ +void txgbe_enable_sec_tx_path(struct wx *wx) +{ + wr32m(wx, WX_TSC_CTL, WX_TSC_CTL_TX_DIS, 0); + WX_WRITE_FLUSH(wx); +} + +/** + * txgbe_init_thermal_sensor_thresh - Inits thermal sensor thresholds + * @wx: pointer to hardware structure + * + * Inits the thermal sensor thresholds according to the NVM map + * and save off the threshold and location values into mac.thermal_sensor_data + **/ +static void txgbe_init_thermal_sensor_thresh(struct wx *wx) +{ + struct wx_thermal_sensor_data *data = &wx->mac.sensor; + + memset(data, 0, sizeof(struct wx_thermal_sensor_data)); + + /* Only support thermal sensors attached to SP physical port 0 */ + if (wx->bus.func) + return; + + wr32(wx, TXGBE_TS_CTL, TXGBE_TS_CTL_EVAL_MD); + + wr32(wx, WX_TS_INT_EN, + WX_TS_INT_EN_ALARM_INT_EN | WX_TS_INT_EN_DALARM_INT_EN); + wr32(wx, WX_TS_EN, WX_TS_EN_ENA); + + data->alarm_thresh = 100; + wr32(wx, WX_TS_ALARM_THRE, 677); + data->dalarm_thresh = 90; + wr32(wx, WX_TS_DALARM_THRE, 614); +} + +/** + * txgbe_read_pba_string - Reads part number string from EEPROM + * @wx: pointer to hardware structure + * @pba_num: stores the part number string from the EEPROM + * @pba_num_size: part number string buffer length + * + * Reads the part number string from the EEPROM. + **/ +int txgbe_read_pba_string(struct wx *wx, u8 *pba_num, u32 pba_num_size) +{ + u16 pba_ptr, offset, length, data; + int ret_val; + + if (!pba_num) { + wx_err(wx, "PBA string buffer was null\n"); + return -EINVAL; + } + + ret_val = wx_read_ee_hostif(wx, + wx->eeprom.sw_region_offset + TXGBE_PBANUM0_PTR, + &data); + if (ret_val != 0) { + wx_err(wx, "NVM Read Error\n"); + return ret_val; + } + + ret_val = wx_read_ee_hostif(wx, + wx->eeprom.sw_region_offset + TXGBE_PBANUM1_PTR, + &pba_ptr); + if (ret_val != 0) { + wx_err(wx, "NVM Read Error\n"); + return ret_val; + } + + /* if data is not ptr guard the PBA must be in legacy format which + * means pba_ptr is actually our second data word for the PBA number + * and we can decode it into an ascii string + */ + if (data != TXGBE_PBANUM_PTR_GUARD) { + wx_err(wx, "NVM PBA number is not stored as string\n"); + + /* we will need 11 characters to store the PBA */ + if (pba_num_size < 11) { + wx_err(wx, "PBA string buffer too small\n"); + return -ENOMEM; + } + + /* extract hex string from data and pba_ptr */ + pba_num[0] = (data >> 12) & 0xF; + pba_num[1] = (data >> 8) & 0xF; + pba_num[2] = (data >> 4) & 0xF; + pba_num[3] = data & 0xF; + pba_num[4] = (pba_ptr >> 12) & 0xF; + pba_num[5] = (pba_ptr >> 8) & 0xF; + pba_num[6] = '-'; + pba_num[7] = 0; + pba_num[8] = (pba_ptr >> 4) & 0xF; + pba_num[9] = pba_ptr & 0xF; + + /* put a null character on the end of our string */ + pba_num[10] = '\0'; + + /* switch all the data but the '-' to hex char */ + for (offset = 0; offset < 10; offset++) { + if (pba_num[offset] < 0xA) + pba_num[offset] += '0'; + else if (pba_num[offset] < 0x10) + pba_num[offset] += 'A' - 0xA; + } + + return 0; + } + + ret_val = wx_read_ee_hostif(wx, pba_ptr, &length); + if (ret_val != 0) { + wx_err(wx, "NVM Read Error\n"); + return ret_val; + } + + if (length == 0xFFFF || length == 0) { + wx_err(wx, "NVM PBA number section invalid length\n"); + return -EINVAL; + } + + /* check if pba_num buffer is big enough */ + if (pba_num_size < (((u32)length * 2) - 1)) { + wx_err(wx, "PBA string buffer too small\n"); + return -ENOMEM; + } + + /* trim pba length from start of string */ + pba_ptr++; + length--; + + for (offset = 0; offset < length; offset++) { + ret_val = wx_read_ee_hostif(wx, pba_ptr + offset, 
&data); + if (ret_val != 0) { + wx_err(wx, "NVM Read Error\n"); + return ret_val; + } + pba_num[offset * 2] = (u8)(data >> 8); + pba_num[(offset * 2) + 1] = (u8)(data & 0xFF); + } + pba_num[offset * 2] = '\0'; + + return 0; +} + +/** + * txgbe_calc_eeprom_checksum - Calculates and returns the checksum + * @wx: pointer to hardware structure + * @checksum: pointer to cheksum + * + * Returns a negative error code on error + **/ +static int txgbe_calc_eeprom_checksum(struct wx *wx, u16 *checksum) +{ + u16 *eeprom_ptrs = NULL; + u16 *local_buffer; + int status; + u16 i; + + wx_init_eeprom_params(wx); + + eeprom_ptrs = kvmalloc_array(TXGBE_EEPROM_LAST_WORD, sizeof(u16), + GFP_KERNEL); + if (!eeprom_ptrs) + return -ENOMEM; + /* Read pointer area */ + status = wx_read_ee_hostif_buffer(wx, 0, TXGBE_EEPROM_LAST_WORD, eeprom_ptrs); + if (status != 0) { + wx_err(wx, "Failed to read EEPROM image\n"); + kvfree(eeprom_ptrs); + return status; + } + local_buffer = eeprom_ptrs; + + for (i = 0; i < TXGBE_EEPROM_LAST_WORD; i++) + if (i != wx->eeprom.sw_region_offset + TXGBE_EEPROM_CHECKSUM) + *checksum += local_buffer[i]; + + if (eeprom_ptrs) + kvfree(eeprom_ptrs); + + *checksum = TXGBE_EEPROM_SUM - *checksum; + + return 0; +} + +/** + * txgbe_validate_eeprom_checksum - Validate EEPROM checksum + * @wx: pointer to hardware structure + * @checksum_val: calculated checksum + * + * Performs checksum calculation and validates the EEPROM checksum. If the + * caller does not need checksum_val, the value can be NULL. + **/ +int txgbe_validate_eeprom_checksum(struct wx *wx, u16 *checksum_val) +{ + u16 read_checksum = 0; + u16 checksum; + int status; + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = wx_read_ee_hostif(wx, 0, &checksum); + if (status) { + wx_err(wx, "EEPROM read failed\n"); + return status; + } + + checksum = 0; + status = txgbe_calc_eeprom_checksum(wx, &checksum); + if (status != 0) + return status; + + status = wx_read_ee_hostif(wx, wx->eeprom.sw_region_offset + + TXGBE_EEPROM_CHECKSUM, &read_checksum); + if (status != 0) + return status; + + /* Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (read_checksum != checksum) { + status = -EIO; + wx_err(wx, "Invalid EEPROM checksum\n"); + } + + /* If the user cares, return the calculated checksum */ + if (checksum_val) + *checksum_val = checksum; + + return status; +} + +static void txgbe_reset_misc(struct wx *wx) +{ + wx_reset_misc(wx); + txgbe_init_thermal_sensor_thresh(wx); +} + +/** + * txgbe_reset_hw - Perform hardware reset + * @wx: pointer to wx structure + * + * Resets the hardware by resetting the transmit and receive units, masks + * and clears all interrupts, perform a PHY reset, and perform a link (MAC) + * reset. 
+ **/ +int txgbe_reset_hw(struct wx *wx) +{ + int status; + + /* Call adapter stop to disable tx/rx and clear interrupts */ + status = wx_stop_adapter(wx); + if (status != 0) + return status; + + if (wx->media_type != sp_media_copper) { + u32 val; + + val = WX_MIS_RST_LAN_RST(wx->bus.func); + wr32(wx, WX_MIS_RST, val | rd32(wx, WX_MIS_RST)); + WX_WRITE_FLUSH(wx); + usleep_range(10, 100); + } + + status = wx_check_flash_load(wx, TXGBE_SPI_ILDR_STATUS_LAN_SW_RST(wx->bus.func)); + if (status != 0) + return status; + + txgbe_reset_misc(wx); + + /* Store the permanent mac address */ + wx_get_mac_addr(wx, wx->mac.perm_addr); + + /* Store MAC address from RAR0, clear receive address registers, and + * clear the multicast table. Also reset num_rar_entries to 128, + * since we modify this value when programming the SAN MAC address. + */ + wx->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES; + wx_init_rx_addrs(wx); + + pci_set_master(wx->pdev); + + return 0; +} diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h new file mode 100644 index 0000000000..abc729eb18 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */ + +#ifndef _TXGBE_HW_H_ +#define _TXGBE_HW_H_ + +int txgbe_disable_sec_tx_path(struct wx *wx); +void txgbe_enable_sec_tx_path(struct wx *wx); +int txgbe_read_pba_string(struct wx *wx, u8 *pba_num, u32 pba_num_size); +int txgbe_validate_eeprom_checksum(struct wx *wx, u16 *checksum_val); +int txgbe_reset_hw(struct wx *wx); + +#endif /* _TXGBE_HW_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c new file mode 100644 index 0000000000..d60c26ba0b --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -0,0 +1,803 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */ + +#include <linux/types.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/string.h> +#include <linux/etherdevice.h> +#include <linux/phylink.h> +#include <net/ip.h> +#include <linux/if_vlan.h> + +#include "../libwx/wx_type.h" +#include "../libwx/wx_lib.h" +#include "../libwx/wx_hw.h" +#include "txgbe_type.h" +#include "txgbe_hw.h" +#include "txgbe_phy.h" +#include "txgbe_ethtool.h" + +char txgbe_driver_name[] = "txgbe"; + +/* txgbe_pci_tbl - PCI Device ID Table + * + * Wildcard entries (PCI_ANY_ID) should come last + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static const struct pci_device_id txgbe_pci_tbl[] = { + { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_SP1000), 0}, + { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_WX1820), 0}, + /* required last entry */ + { .device = 0 } +}; + +#define DEFAULT_DEBUG_LEVEL_SHIFT 3 + +static void txgbe_check_minimum_link(struct wx *wx) +{ + struct pci_dev *pdev; + + pdev = wx->pdev; + pcie_print_link_status(pdev); +} + +/** + * txgbe_enumerate_functions - Get the number of ports this device has + * @wx: wx structure + * + * This function enumerates the phsyical functions co-located on a single slot, + * in order to determine how many ports a device has. This is most useful in + * determining the required GT/s of PCIe bandwidth necessary for optimal + * performance. 
+ **/ +static int txgbe_enumerate_functions(struct wx *wx) +{ + struct pci_dev *entry, *pdev = wx->pdev; + int physfns = 0; + + list_for_each_entry(entry, &pdev->bus->devices, bus_list) { + /* When the devices on the bus don't all match our device ID, + * we can't reliably determine the correct number of + * functions. This can occur if a function has been direct + * attached to a virtual machine using VT-d. + */ + if (entry->vendor != pdev->vendor || + entry->device != pdev->device) + return -EINVAL; + + physfns++; + } + + return physfns; +} + +/** + * txgbe_irq_enable - Enable default interrupt generation settings + * @wx: pointer to private structure + * @queues: enable irqs for queues + **/ +static void txgbe_irq_enable(struct wx *wx, bool queues) +{ + wr32(wx, WX_PX_MISC_IEN, TXGBE_PX_MISC_IEN_MASK); + + /* unmask interrupt */ + wx_intr_enable(wx, TXGBE_INTR_MISC(wx)); + if (queues) + wx_intr_enable(wx, TXGBE_INTR_QALL(wx)); +} + +/** + * txgbe_intr - msi/legacy mode Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t txgbe_intr(int __always_unused irq, void *data) +{ + struct wx_q_vector *q_vector; + struct wx *wx = data; + struct pci_dev *pdev; + u32 eicr; + + q_vector = wx->q_vector[0]; + pdev = wx->pdev; + + eicr = wx_misc_isb(wx, WX_ISB_VEC0); + if (!eicr) { + /* shared interrupt alert! + * the interrupt that we masked before the ICR read. + */ + if (netif_running(wx->netdev)) + txgbe_irq_enable(wx, true); + return IRQ_NONE; /* Not our interrupt */ + } + wx->isb_mem[WX_ISB_VEC0] = 0; + if (!(pdev->msi_enabled)) + wr32(wx, WX_PX_INTA, 1); + + wx->isb_mem[WX_ISB_MISC] = 0; + /* would disable interrupts here but it is auto disabled */ + napi_schedule_irqoff(&q_vector->napi); + + /* re-enable link(maybe) and non-queue interrupts, no flush. + * txgbe_poll will re-enable the queue interrupts + */ + if (netif_running(wx->netdev)) + txgbe_irq_enable(wx, false); + + return IRQ_HANDLED; +} + +/** + * txgbe_request_msix_irqs - Initialize MSI-X interrupts + * @wx: board private structure + * + * Allocate MSI-X vectors and request interrupts from the kernel. + **/ +static int txgbe_request_msix_irqs(struct wx *wx) +{ + struct net_device *netdev = wx->netdev; + int vector, err; + + for (vector = 0; vector < wx->num_q_vectors; vector++) { + struct wx_q_vector *q_vector = wx->q_vector[vector]; + struct msix_entry *entry = &wx->msix_entries[vector]; + + if (q_vector->tx.ring && q_vector->rx.ring) + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-TxRx-%d", netdev->name, entry->entry); + else + /* skip this unused q_vector */ + continue; + + err = request_irq(entry->vector, wx_msix_clean_rings, 0, + q_vector->name, q_vector); + if (err) { + wx_err(wx, "request_irq failed for MSIX interrupt %s Error: %d\n", + q_vector->name, err); + goto free_queue_irqs; + } + } + + return 0; + +free_queue_irqs: + while (vector) { + vector--; + free_irq(wx->msix_entries[vector].vector, + wx->q_vector[vector]); + } + wx_reset_interrupt_capability(wx); + return err; +} + +/** + * txgbe_request_irq - initialize interrupts + * @wx: board private structure + * + * Attempt to configure interrupts using the best available + * capabilities of the hardware and kernel. 
+ **/ +static int txgbe_request_irq(struct wx *wx) +{ + struct net_device *netdev = wx->netdev; + struct pci_dev *pdev = wx->pdev; + int err; + + if (pdev->msix_enabled) + err = txgbe_request_msix_irqs(wx); + else if (pdev->msi_enabled) + err = request_irq(wx->pdev->irq, &txgbe_intr, 0, + netdev->name, wx); + else + err = request_irq(wx->pdev->irq, &txgbe_intr, IRQF_SHARED, + netdev->name, wx); + + if (err) + wx_err(wx, "request_irq failed, Error %d\n", err); + + return err; +} + +static void txgbe_up_complete(struct wx *wx) +{ + struct net_device *netdev = wx->netdev; + struct txgbe *txgbe; + + wx_control_hw(wx, true); + wx_configure_vectors(wx); + + /* make sure to complete pre-operations */ + smp_mb__before_atomic(); + wx_napi_enable_all(wx); + + txgbe = netdev_to_txgbe(netdev); + phylink_start(txgbe->phylink); + + /* clear any pending interrupts, may auto mask */ + rd32(wx, WX_PX_IC(0)); + rd32(wx, WX_PX_IC(1)); + rd32(wx, WX_PX_MISC_IC); + txgbe_irq_enable(wx, true); + + /* enable transmits */ + netif_tx_start_all_queues(netdev); +} + +static void txgbe_reset(struct wx *wx) +{ + struct net_device *netdev = wx->netdev; + u8 old_addr[ETH_ALEN]; + int err; + + err = txgbe_reset_hw(wx); + if (err != 0) + wx_err(wx, "Hardware Error: %d\n", err); + + wx_start_hw(wx); + /* do not flush user set addresses */ + memcpy(old_addr, &wx->mac_table[0].addr, netdev->addr_len); + wx_flush_sw_mac_table(wx); + wx_mac_set_default_filter(wx, old_addr); +} + +static void txgbe_disable_device(struct wx *wx) +{ + struct net_device *netdev = wx->netdev; + u32 i; + + wx_disable_pcie_master(wx); + /* disable receives */ + wx_disable_rx(wx); + + /* disable all enabled rx queues */ + for (i = 0; i < wx->num_rx_queues; i++) + /* this call also flushes the previous write */ + wx_disable_rx_queue(wx, wx->rx_ring[i]); + + netif_tx_stop_all_queues(netdev); + netif_tx_disable(netdev); + + wx_irq_disable(wx); + wx_napi_disable_all(wx); + + if (wx->bus.func < 2) + wr32m(wx, TXGBE_MIS_PRB_CTL, TXGBE_MIS_PRB_CTL_LAN_UP(wx->bus.func), 0); + else + wx_err(wx, "%s: invalid bus lan id %d\n", + __func__, wx->bus.func); + + if (!(((wx->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) || + ((wx->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) { + /* disable mac transmiter */ + wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0); + } + + /* disable transmits in the hardware now that interrupts are off */ + for (i = 0; i < wx->num_tx_queues; i++) { + u8 reg_idx = wx->tx_ring[i]->reg_idx; + + wr32(wx, WX_PX_TR_CFG(reg_idx), WX_PX_TR_CFG_SWFLSH); + } + + /* Disable the Tx DMA engine */ + wr32m(wx, WX_TDM_CTL, WX_TDM_CTL_TE, 0); +} + +static void txgbe_down(struct wx *wx) +{ + struct txgbe *txgbe = netdev_to_txgbe(wx->netdev); + + txgbe_disable_device(wx); + txgbe_reset(wx); + phylink_stop(txgbe->phylink); + + wx_clean_all_tx_rings(wx); + wx_clean_all_rx_rings(wx); +} + +/** + * txgbe_init_type_code - Initialize the shared code + * @wx: pointer to hardware structure + **/ +static void txgbe_init_type_code(struct wx *wx) +{ + u8 device_type = wx->subsystem_device_id & 0xF0; + + switch (wx->device_id) { + case TXGBE_DEV_ID_SP1000: + case TXGBE_DEV_ID_WX1820: + wx->mac.type = wx_mac_sp; + break; + default: + wx->mac.type = wx_mac_unknown; + break; + } + + switch (device_type) { + case TXGBE_ID_SFP: + wx->media_type = sp_media_fiber; + break; + case TXGBE_ID_XAUI: + case TXGBE_ID_SGMII: + wx->media_type = sp_media_copper; + break; + case TXGBE_ID_KR_KX_KX4: + case TXGBE_ID_MAC_XAUI: + case TXGBE_ID_MAC_SGMII: + wx->media_type = 
sp_media_backplane; + break; + case TXGBE_ID_SFI_XAUI: + if (wx->bus.func == 0) + wx->media_type = sp_media_fiber; + else + wx->media_type = sp_media_copper; + break; + default: + wx->media_type = sp_media_unknown; + break; + } +} + +/** + * txgbe_sw_init - Initialize general software structures (struct wx) + * @wx: board private structure to initialize + **/ +static int txgbe_sw_init(struct wx *wx) +{ + u16 msix_count = 0; + int err; + + wx->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES; + wx->mac.max_tx_queues = TXGBE_SP_MAX_TX_QUEUES; + wx->mac.max_rx_queues = TXGBE_SP_MAX_RX_QUEUES; + wx->mac.mcft_size = TXGBE_SP_MC_TBL_SIZE; + wx->mac.vft_size = TXGBE_SP_VFT_TBL_SIZE; + wx->mac.rx_pb_size = TXGBE_SP_RX_PB_SIZE; + wx->mac.tx_pb_size = TXGBE_SP_TDB_PB_SZ; + + /* PCI config space info */ + err = wx_sw_init(wx); + if (err < 0) + return err; + + txgbe_init_type_code(wx); + + /* Set common capability flags and settings */ + wx->max_q_vectors = TXGBE_MAX_MSIX_VECTORS; + err = wx_get_pcie_msix_counts(wx, &msix_count, TXGBE_MAX_MSIX_VECTORS); + if (err) + wx_err(wx, "Do not support MSI-X\n"); + wx->mac.max_msix_vectors = msix_count; + + /* enable itr by default in dynamic mode */ + wx->rx_itr_setting = 1; + wx->tx_itr_setting = 1; + + /* set default ring sizes */ + wx->tx_ring_count = TXGBE_DEFAULT_TXD; + wx->rx_ring_count = TXGBE_DEFAULT_RXD; + + /* set default work limits */ + wx->tx_work_limit = TXGBE_DEFAULT_TX_WORK; + wx->rx_work_limit = TXGBE_DEFAULT_RX_WORK; + + return 0; +} + +/** + * txgbe_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). + **/ +static int txgbe_open(struct net_device *netdev) +{ + struct wx *wx = netdev_priv(netdev); + int err; + + err = wx_setup_resources(wx); + if (err) + goto err_reset; + + wx_configure(wx); + + err = txgbe_request_irq(wx); + if (err) + goto err_free_isb; + + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_tx_queues(netdev, wx->num_tx_queues); + if (err) + goto err_free_irq; + + err = netif_set_real_num_rx_queues(netdev, wx->num_rx_queues); + if (err) + goto err_free_irq; + + txgbe_up_complete(wx); + + return 0; + +err_free_irq: + wx_free_irq(wx); +err_free_isb: + wx_free_isb_resources(wx); +err_reset: + txgbe_reset(wx); + + return err; +} + +/** + * txgbe_close_suspend - actions necessary to both suspend and close flows + * @wx: the private wx struct + * + * This function should contain the necessary work common to both suspending + * and closing of the device. + */ +static void txgbe_close_suspend(struct wx *wx) +{ + txgbe_disable_device(wx); + wx_free_resources(wx); +} + +/** + * txgbe_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
+ **/ +static int txgbe_close(struct net_device *netdev) +{ + struct wx *wx = netdev_priv(netdev); + + txgbe_down(wx); + wx_free_irq(wx); + wx_free_resources(wx); + wx_control_hw(wx, false); + + return 0; +} + +static void txgbe_dev_shutdown(struct pci_dev *pdev) +{ + struct wx *wx = pci_get_drvdata(pdev); + struct net_device *netdev; + + netdev = wx->netdev; + netif_device_detach(netdev); + + rtnl_lock(); + if (netif_running(netdev)) + txgbe_close_suspend(wx); + rtnl_unlock(); + + wx_control_hw(wx, false); + + pci_disable_device(pdev); +} + +static void txgbe_shutdown(struct pci_dev *pdev) +{ + txgbe_dev_shutdown(pdev); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +static const struct net_device_ops txgbe_netdev_ops = { + .ndo_open = txgbe_open, + .ndo_stop = txgbe_close, + .ndo_change_mtu = wx_change_mtu, + .ndo_start_xmit = wx_xmit_frame, + .ndo_set_rx_mode = wx_set_rx_mode, + .ndo_set_features = wx_set_features, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = wx_set_mac, + .ndo_get_stats64 = wx_get_stats64, + .ndo_vlan_rx_add_vid = wx_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = wx_vlan_rx_kill_vid, +}; + +/** + * txgbe_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in txgbe_pci_tbl + * + * Returns 0 on success, negative on failure + * + * txgbe_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the wx private structure, + * and a hardware reset occur. + **/ +static int txgbe_probe(struct pci_dev *pdev, + const struct pci_device_id __always_unused *ent) +{ + struct net_device *netdev; + int err, expected_gts; + struct wx *wx = NULL; + struct txgbe *txgbe; + + u16 eeprom_verh = 0, eeprom_verl = 0, offset = 0; + u16 eeprom_cfg_blkh = 0, eeprom_cfg_blkl = 0; + u16 build = 0, major = 0, patch = 0; + u8 part_str[TXGBE_PBANUM_LENGTH]; + u32 etrack_id = 0; + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_pci_disable_dev; + } + + err = pci_request_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM), + txgbe_driver_name); + if (err) { + dev_err(&pdev->dev, + "pci_request_selected_regions failed 0x%x\n", err); + goto err_pci_disable_dev; + } + + pci_set_master(pdev); + + netdev = devm_alloc_etherdev_mqs(&pdev->dev, + sizeof(struct wx), + TXGBE_MAX_TX_QUEUES, + TXGBE_MAX_RX_QUEUES); + if (!netdev) { + err = -ENOMEM; + goto err_pci_release_regions; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + wx = netdev_priv(netdev); + wx->netdev = netdev; + wx->pdev = pdev; + + wx->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; + + wx->hw_addr = devm_ioremap(&pdev->dev, + pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!wx->hw_addr) { + err = -EIO; + goto err_pci_release_regions; + } + + wx->driver_name = txgbe_driver_name; + txgbe_set_ethtool_ops(netdev); + netdev->netdev_ops = &txgbe_netdev_ops; + + /* setup the private structure */ + err = txgbe_sw_init(wx); + if (err) + goto err_free_mac_table; + + /* check if flash load is done after hw power up */ + err = wx_check_flash_load(wx, TXGBE_SPI_ILDR_STATUS_PERST); + if (err) + goto err_free_mac_table; + err = wx_check_flash_load(wx, TXGBE_SPI_ILDR_STATUS_PWRRST); + if (err) + goto err_free_mac_table; + + err = wx_mng_present(wx); + if (err) { + 
dev_err(&pdev->dev, "Management capability is not present\n"); + goto err_free_mac_table; + } + + err = txgbe_reset_hw(wx); + if (err) { + dev_err(&pdev->dev, "HW Init failed: %d\n", err); + goto err_free_mac_table; + } + + netdev->features = NETIF_F_SG | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_RXHASH | + NETIF_F_RXCSUM | + NETIF_F_HW_CSUM; + + netdev->gso_partial_features = NETIF_F_GSO_ENCAP_ALL; + netdev->features |= netdev->gso_partial_features; + netdev->features |= NETIF_F_SCTP_CRC; + netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; + netdev->hw_enc_features |= netdev->vlan_features; + netdev->features |= NETIF_F_VLAN_FEATURES; + /* copy netdev features into list of user selectable features */ + netdev->hw_features |= netdev->features | NETIF_F_RXALL; + netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC; + netdev->features |= NETIF_F_HIGHDMA; + netdev->hw_features |= NETIF_F_GRO; + netdev->features |= NETIF_F_GRO; + + netdev->priv_flags |= IFF_UNICAST_FLT; + netdev->priv_flags |= IFF_SUPP_NOFCS; + + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = WX_MAX_JUMBO_FRAME_SIZE - + (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); + + /* make sure the EEPROM is good */ + err = txgbe_validate_eeprom_checksum(wx, NULL); + if (err != 0) { + dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n"); + wr32(wx, WX_MIS_RST, WX_MIS_RST_SW_RST); + err = -EIO; + goto err_free_mac_table; + } + + eth_hw_addr_set(netdev, wx->mac.perm_addr); + wx_mac_set_default_filter(wx, wx->mac.perm_addr); + + err = wx_init_interrupt_scheme(wx); + if (err) + goto err_free_mac_table; + + /* Save off EEPROM version number and Option Rom version which + * together make a unique identify for the eeprom + */ + wx_read_ee_hostif(wx, + wx->eeprom.sw_region_offset + TXGBE_EEPROM_VERSION_H, + &eeprom_verh); + wx_read_ee_hostif(wx, + wx->eeprom.sw_region_offset + TXGBE_EEPROM_VERSION_L, + &eeprom_verl); + etrack_id = (eeprom_verh << 16) | eeprom_verl; + + wx_read_ee_hostif(wx, + wx->eeprom.sw_region_offset + TXGBE_ISCSI_BOOT_CONFIG, + &offset); + + /* Make sure offset to SCSI block is valid */ + if (!(offset == 0x0) && !(offset == 0xffff)) { + wx_read_ee_hostif(wx, offset + 0x84, &eeprom_cfg_blkh); + wx_read_ee_hostif(wx, offset + 0x83, &eeprom_cfg_blkl); + + /* Only display Option Rom if exist */ + if (eeprom_cfg_blkl && eeprom_cfg_blkh) { + major = eeprom_cfg_blkl >> 8; + build = (eeprom_cfg_blkl << 8) | (eeprom_cfg_blkh >> 8); + patch = eeprom_cfg_blkh & 0x00ff; + + snprintf(wx->eeprom_id, sizeof(wx->eeprom_id), + "0x%08x, %d.%d.%d", etrack_id, major, build, + patch); + } else { + snprintf(wx->eeprom_id, sizeof(wx->eeprom_id), + "0x%08x", etrack_id); + } + } else { + snprintf(wx->eeprom_id, sizeof(wx->eeprom_id), + "0x%08x", etrack_id); + } + + if (etrack_id < 0x20010) + dev_warn(&pdev->dev, "Please upgrade the firmware to 0x20010 or above.\n"); + + txgbe = devm_kzalloc(&pdev->dev, sizeof(*txgbe), GFP_KERNEL); + if (!txgbe) { + err = -ENOMEM; + goto err_release_hw; + } + + txgbe->wx = wx; + wx->priv = txgbe; + + err = txgbe_init_phy(txgbe); + if (err) + goto err_release_hw; + + err = register_netdev(netdev); + if (err) + goto err_remove_phy; + + pci_set_drvdata(pdev, wx); + + netif_tx_stop_all_queues(netdev); + + /* calculate the expected PCIe bandwidth required for optimal + * performance. Note that some older parts will never have enough + * bandwidth due to being older generation PCIe parts. We clamp these + * parts to ensure that no warning is displayed, as this could confuse + * users otherwise. 
+ */
+ expected_gts = txgbe_enumerate_functions(wx) * 10;
+
+ /* don't check link if we failed to enumerate functions */
+ if (expected_gts > 0)
+ txgbe_check_minimum_link(wx);
+ else
+ dev_warn(&pdev->dev, "Failed to enumerate PF devices.\n");
+
+ /* First try to read PBA as a string */
+ err = txgbe_read_pba_string(wx, part_str, TXGBE_PBANUM_LENGTH);
+ if (err)
+ strncpy(part_str, "Unknown", TXGBE_PBANUM_LENGTH);
+
+ netif_info(wx, probe, netdev, "%pM\n", netdev->dev_addr);
+
+ return 0;
+
+err_remove_phy:
+ txgbe_remove_phy(txgbe);
+err_release_hw:
+ wx_clear_interrupt_scheme(wx);
+ wx_control_hw(wx, false);
+err_free_mac_table:
+ kfree(wx->mac_table);
+err_pci_release_regions:
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+err_pci_disable_dev:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * txgbe_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * txgbe_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void txgbe_remove(struct pci_dev *pdev)
+{
+ struct wx *wx = pci_get_drvdata(pdev);
+ struct txgbe *txgbe = wx->priv;
+ struct net_device *netdev;
+
+ netdev = wx->netdev;
+ unregister_netdev(netdev);
+
+ txgbe_remove_phy(txgbe);
+
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+
+ kfree(wx->mac_table);
+ wx_clear_interrupt_scheme(wx);
+
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver txgbe_driver = {
+ .name = txgbe_driver_name,
+ .id_table = txgbe_pci_tbl,
+ .probe = txgbe_probe,
+ .remove = txgbe_remove,
+ .shutdown = txgbe_shutdown,
+};
+
+module_pci_driver(txgbe_driver);
+
+MODULE_DEVICE_TABLE(pci, txgbe_pci_tbl);
+MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, <software@trustnetic.com>");
+MODULE_DESCRIPTION("WangXun(R) 10 Gigabit PCI Express Network Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
new file mode 100644
index 0000000000..4159c84035
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -0,0 +1,835 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd.
*/ + +#include <linux/gpio/machine.h> +#include <linux/gpio/driver.h> +#include <linux/gpio/property.h> +#include <linux/clk-provider.h> +#include <linux/clkdev.h> +#include <linux/i2c.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/pcs/pcs-xpcs.h> +#include <linux/phylink.h> + +#include "../libwx/wx_type.h" +#include "../libwx/wx_lib.h" +#include "../libwx/wx_hw.h" +#include "txgbe_type.h" +#include "txgbe_phy.h" +#include "txgbe_hw.h" + +static int txgbe_swnodes_register(struct txgbe *txgbe) +{ + struct txgbe_nodes *nodes = &txgbe->nodes; + struct pci_dev *pdev = txgbe->wx->pdev; + struct software_node *swnodes; + u32 id; + + id = pci_dev_id(pdev); + + snprintf(nodes->gpio_name, sizeof(nodes->gpio_name), "txgbe_gpio-%x", id); + snprintf(nodes->i2c_name, sizeof(nodes->i2c_name), "txgbe_i2c-%x", id); + snprintf(nodes->sfp_name, sizeof(nodes->sfp_name), "txgbe_sfp-%x", id); + snprintf(nodes->phylink_name, sizeof(nodes->phylink_name), "txgbe_phylink-%x", id); + + swnodes = nodes->swnodes; + + /* GPIO 0: tx fault + * GPIO 1: tx disable + * GPIO 2: sfp module absent + * GPIO 3: rx signal lost + * GPIO 4: rate select, 1G(0) 10G(1) + * GPIO 5: rate select, 1G(0) 10G(1) + */ + nodes->gpio_props[0] = PROPERTY_ENTRY_STRING("pinctrl-names", "default"); + swnodes[SWNODE_GPIO] = NODE_PROP(nodes->gpio_name, nodes->gpio_props); + nodes->gpio0_ref[0] = SOFTWARE_NODE_REFERENCE(&swnodes[SWNODE_GPIO], 0, GPIO_ACTIVE_HIGH); + nodes->gpio1_ref[0] = SOFTWARE_NODE_REFERENCE(&swnodes[SWNODE_GPIO], 1, GPIO_ACTIVE_HIGH); + nodes->gpio2_ref[0] = SOFTWARE_NODE_REFERENCE(&swnodes[SWNODE_GPIO], 2, GPIO_ACTIVE_LOW); + nodes->gpio3_ref[0] = SOFTWARE_NODE_REFERENCE(&swnodes[SWNODE_GPIO], 3, GPIO_ACTIVE_HIGH); + nodes->gpio4_ref[0] = SOFTWARE_NODE_REFERENCE(&swnodes[SWNODE_GPIO], 4, GPIO_ACTIVE_HIGH); + nodes->gpio5_ref[0] = SOFTWARE_NODE_REFERENCE(&swnodes[SWNODE_GPIO], 5, GPIO_ACTIVE_HIGH); + + nodes->i2c_props[0] = PROPERTY_ENTRY_STRING("compatible", "snps,designware-i2c"); + nodes->i2c_props[1] = PROPERTY_ENTRY_BOOL("wx,i2c-snps-model"); + nodes->i2c_props[2] = PROPERTY_ENTRY_U32("clock-frequency", I2C_MAX_STANDARD_MODE_FREQ); + swnodes[SWNODE_I2C] = NODE_PROP(nodes->i2c_name, nodes->i2c_props); + nodes->i2c_ref[0] = SOFTWARE_NODE_REFERENCE(&swnodes[SWNODE_I2C]); + + nodes->sfp_props[0] = PROPERTY_ENTRY_STRING("compatible", "sff,sfp"); + nodes->sfp_props[1] = PROPERTY_ENTRY_REF_ARRAY("i2c-bus", nodes->i2c_ref); + nodes->sfp_props[2] = PROPERTY_ENTRY_REF_ARRAY("tx-fault-gpios", nodes->gpio0_ref); + nodes->sfp_props[3] = PROPERTY_ENTRY_REF_ARRAY("tx-disable-gpios", nodes->gpio1_ref); + nodes->sfp_props[4] = PROPERTY_ENTRY_REF_ARRAY("mod-def0-gpios", nodes->gpio2_ref); + nodes->sfp_props[5] = PROPERTY_ENTRY_REF_ARRAY("los-gpios", nodes->gpio3_ref); + nodes->sfp_props[6] = PROPERTY_ENTRY_REF_ARRAY("rate-select1-gpios", nodes->gpio4_ref); + nodes->sfp_props[7] = PROPERTY_ENTRY_REF_ARRAY("rate-select0-gpios", nodes->gpio5_ref); + swnodes[SWNODE_SFP] = NODE_PROP(nodes->sfp_name, nodes->sfp_props); + nodes->sfp_ref[0] = SOFTWARE_NODE_REFERENCE(&swnodes[SWNODE_SFP]); + + nodes->phylink_props[0] = PROPERTY_ENTRY_STRING("managed", "in-band-status"); + nodes->phylink_props[1] = PROPERTY_ENTRY_REF_ARRAY("sfp", nodes->sfp_ref); + swnodes[SWNODE_PHYLINK] = NODE_PROP(nodes->phylink_name, nodes->phylink_props); + + nodes->group[SWNODE_GPIO] = &swnodes[SWNODE_GPIO]; + nodes->group[SWNODE_I2C] = &swnodes[SWNODE_I2C]; + nodes->group[SWNODE_SFP] = &swnodes[SWNODE_SFP]; + 
nodes->group[SWNODE_PHYLINK] = &swnodes[SWNODE_PHYLINK]; + + return software_node_register_node_group(nodes->group); +} + +static int txgbe_pcs_read(struct mii_bus *bus, int addr, int devnum, int regnum) +{ + struct wx *wx = bus->priv; + u32 offset, val; + + if (addr) + return -EOPNOTSUPP; + + offset = devnum << 16 | regnum; + + /* Set the LAN port indicator to IDA_ADDR */ + wr32(wx, TXGBE_XPCS_IDA_ADDR, offset); + + /* Read the data from IDA_DATA register */ + val = rd32(wx, TXGBE_XPCS_IDA_DATA); + + return (u16)val; +} + +static int txgbe_pcs_write(struct mii_bus *bus, int addr, int devnum, int regnum, u16 val) +{ + struct wx *wx = bus->priv; + u32 offset; + + if (addr) + return -EOPNOTSUPP; + + offset = devnum << 16 | regnum; + + /* Set the LAN port indicator to IDA_ADDR */ + wr32(wx, TXGBE_XPCS_IDA_ADDR, offset); + + /* Write the data to IDA_DATA register */ + wr32(wx, TXGBE_XPCS_IDA_DATA, val); + + return 0; +} + +static int txgbe_mdio_pcs_init(struct txgbe *txgbe) +{ + struct mii_bus *mii_bus; + struct dw_xpcs *xpcs; + struct pci_dev *pdev; + struct wx *wx; + int ret = 0; + + wx = txgbe->wx; + pdev = wx->pdev; + + mii_bus = devm_mdiobus_alloc(&pdev->dev); + if (!mii_bus) + return -ENOMEM; + + mii_bus->name = "txgbe_pcs_mdio_bus"; + mii_bus->read_c45 = &txgbe_pcs_read; + mii_bus->write_c45 = &txgbe_pcs_write; + mii_bus->parent = &pdev->dev; + mii_bus->phy_mask = ~0; + mii_bus->priv = wx; + snprintf(mii_bus->id, MII_BUS_ID_SIZE, "txgbe_pcs-%x", + pci_dev_id(pdev)); + + ret = devm_mdiobus_register(&pdev->dev, mii_bus); + if (ret) + return ret; + + xpcs = xpcs_create_mdiodev(mii_bus, 0, PHY_INTERFACE_MODE_10GBASER); + if (IS_ERR(xpcs)) + return PTR_ERR(xpcs); + + txgbe->xpcs = xpcs; + + return 0; +} + +static struct phylink_pcs *txgbe_phylink_mac_select(struct phylink_config *config, + phy_interface_t interface) +{ + struct txgbe *txgbe = netdev_to_txgbe(to_net_dev(config->dev)); + + if (interface == PHY_INTERFACE_MODE_10GBASER) + return &txgbe->xpcs->pcs; + + return NULL; +} + +static void txgbe_mac_config(struct phylink_config *config, unsigned int mode, + const struct phylink_link_state *state) +{ +} + +static void txgbe_mac_link_down(struct phylink_config *config, + unsigned int mode, phy_interface_t interface) +{ + struct wx *wx = netdev_priv(to_net_dev(config->dev)); + + wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0); +} + +static void txgbe_mac_link_up(struct phylink_config *config, + struct phy_device *phy, + unsigned int mode, phy_interface_t interface, + int speed, int duplex, + bool tx_pause, bool rx_pause) +{ + struct wx *wx = netdev_priv(to_net_dev(config->dev)); + u32 txcfg, wdg; + + txcfg = rd32(wx, WX_MAC_TX_CFG); + txcfg &= ~WX_MAC_TX_CFG_SPEED_MASK; + + switch (speed) { + case SPEED_10000: + txcfg |= WX_MAC_TX_CFG_SPEED_10G; + break; + case SPEED_1000: + case SPEED_100: + case SPEED_10: + txcfg |= WX_MAC_TX_CFG_SPEED_1G; + break; + default: + break; + } + + wr32(wx, WX_MAC_TX_CFG, txcfg | WX_MAC_TX_CFG_TE); + + /* Re configure MAC Rx */ + wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, WX_MAC_RX_CFG_RE); + wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR); + wdg = rd32(wx, WX_MAC_WDG_TIMEOUT); + wr32(wx, WX_MAC_WDG_TIMEOUT, wdg); +} + +static int txgbe_mac_prepare(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) +{ + struct wx *wx = netdev_priv(to_net_dev(config->dev)); + + wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0); + wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, 0); + + return txgbe_disable_sec_tx_path(wx); +} + +static int txgbe_mac_finish(struct 
phylink_config *config, unsigned int mode, + phy_interface_t interface) +{ + struct wx *wx = netdev_priv(to_net_dev(config->dev)); + + txgbe_enable_sec_tx_path(wx); + wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, WX_MAC_RX_CFG_RE); + + return 0; +} + +static const struct phylink_mac_ops txgbe_mac_ops = { + .mac_select_pcs = txgbe_phylink_mac_select, + .mac_prepare = txgbe_mac_prepare, + .mac_finish = txgbe_mac_finish, + .mac_config = txgbe_mac_config, + .mac_link_down = txgbe_mac_link_down, + .mac_link_up = txgbe_mac_link_up, +}; + +static int txgbe_phylink_init(struct txgbe *txgbe) +{ + struct fwnode_handle *fwnode = NULL; + struct phylink_config *config; + struct wx *wx = txgbe->wx; + phy_interface_t phy_mode; + struct phylink *phylink; + + config = devm_kzalloc(&wx->pdev->dev, sizeof(*config), GFP_KERNEL); + if (!config) + return -ENOMEM; + + config->dev = &wx->netdev->dev; + config->type = PHYLINK_NETDEV; + config->mac_capabilities = MAC_10000FD | MAC_1000FD | MAC_100FD | + MAC_SYM_PAUSE | MAC_ASYM_PAUSE; + + if (wx->media_type == sp_media_copper) { + phy_mode = PHY_INTERFACE_MODE_XAUI; + __set_bit(PHY_INTERFACE_MODE_XAUI, config->supported_interfaces); + } else { + phy_mode = PHY_INTERFACE_MODE_10GBASER; + fwnode = software_node_fwnode(txgbe->nodes.group[SWNODE_PHYLINK]); + __set_bit(PHY_INTERFACE_MODE_10GBASER, config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_1000BASEX, config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_SGMII, config->supported_interfaces); + } + + phylink = phylink_create(config, fwnode, phy_mode, &txgbe_mac_ops); + if (IS_ERR(phylink)) + return PTR_ERR(phylink); + + if (wx->phydev) { + int ret; + + ret = phylink_connect_phy(phylink, wx->phydev); + if (ret) { + phylink_destroy(phylink); + return ret; + } + } + + txgbe->phylink = phylink; + + return 0; +} + +static int txgbe_gpio_get(struct gpio_chip *chip, unsigned int offset) +{ + struct wx *wx = gpiochip_get_data(chip); + int val; + + val = rd32m(wx, WX_GPIO_EXT, BIT(offset)); + + return !!(val & BIT(offset)); +} + +static int txgbe_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) +{ + struct wx *wx = gpiochip_get_data(chip); + u32 val; + + val = rd32(wx, WX_GPIO_DDR); + if (BIT(offset) & val) + return GPIO_LINE_DIRECTION_OUT; + + return GPIO_LINE_DIRECTION_IN; +} + +static int txgbe_gpio_direction_in(struct gpio_chip *chip, unsigned int offset) +{ + struct wx *wx = gpiochip_get_data(chip); + unsigned long flags; + + raw_spin_lock_irqsave(&wx->gpio_lock, flags); + wr32m(wx, WX_GPIO_DDR, BIT(offset), 0); + raw_spin_unlock_irqrestore(&wx->gpio_lock, flags); + + return 0; +} + +static int txgbe_gpio_direction_out(struct gpio_chip *chip, unsigned int offset, + int val) +{ + struct wx *wx = gpiochip_get_data(chip); + unsigned long flags; + u32 set; + + set = val ? 
BIT(offset) : 0; + + raw_spin_lock_irqsave(&wx->gpio_lock, flags); + wr32m(wx, WX_GPIO_DR, BIT(offset), set); + wr32m(wx, WX_GPIO_DDR, BIT(offset), BIT(offset)); + raw_spin_unlock_irqrestore(&wx->gpio_lock, flags); + + return 0; +} + +static void txgbe_gpio_irq_ack(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + irq_hw_number_t hwirq = irqd_to_hwirq(d); + struct wx *wx = gpiochip_get_data(gc); + unsigned long flags; + + raw_spin_lock_irqsave(&wx->gpio_lock, flags); + wr32(wx, WX_GPIO_EOI, BIT(hwirq)); + raw_spin_unlock_irqrestore(&wx->gpio_lock, flags); +} + +static void txgbe_gpio_irq_mask(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + irq_hw_number_t hwirq = irqd_to_hwirq(d); + struct wx *wx = gpiochip_get_data(gc); + unsigned long flags; + + gpiochip_disable_irq(gc, hwirq); + + raw_spin_lock_irqsave(&wx->gpio_lock, flags); + wr32m(wx, WX_GPIO_INTMASK, BIT(hwirq), BIT(hwirq)); + raw_spin_unlock_irqrestore(&wx->gpio_lock, flags); +} + +static void txgbe_gpio_irq_unmask(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + irq_hw_number_t hwirq = irqd_to_hwirq(d); + struct wx *wx = gpiochip_get_data(gc); + unsigned long flags; + + gpiochip_enable_irq(gc, hwirq); + + raw_spin_lock_irqsave(&wx->gpio_lock, flags); + wr32m(wx, WX_GPIO_INTMASK, BIT(hwirq), 0); + raw_spin_unlock_irqrestore(&wx->gpio_lock, flags); +} + +static void txgbe_toggle_trigger(struct gpio_chip *gc, unsigned int offset) +{ + struct wx *wx = gpiochip_get_data(gc); + u32 pol, val; + + pol = rd32(wx, WX_GPIO_POLARITY); + val = rd32(wx, WX_GPIO_EXT); + + if (val & BIT(offset)) + pol &= ~BIT(offset); + else + pol |= BIT(offset); + + wr32(wx, WX_GPIO_POLARITY, pol); +} + +static int txgbe_gpio_set_type(struct irq_data *d, unsigned int type) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + irq_hw_number_t hwirq = irqd_to_hwirq(d); + struct wx *wx = gpiochip_get_data(gc); + u32 level, polarity, mask; + unsigned long flags; + + mask = BIT(hwirq); + + if (type & IRQ_TYPE_LEVEL_MASK) { + level = 0; + irq_set_handler_locked(d, handle_level_irq); + } else { + level = mask; + irq_set_handler_locked(d, handle_edge_irq); + } + + if (type == IRQ_TYPE_EDGE_RISING || type == IRQ_TYPE_LEVEL_HIGH) + polarity = mask; + else + polarity = 0; + + raw_spin_lock_irqsave(&wx->gpio_lock, flags); + + wr32m(wx, WX_GPIO_INTEN, mask, mask); + wr32m(wx, WX_GPIO_INTTYPE_LEVEL, mask, level); + if (type == IRQ_TYPE_EDGE_BOTH) + txgbe_toggle_trigger(gc, hwirq); + else + wr32m(wx, WX_GPIO_POLARITY, mask, polarity); + + raw_spin_unlock_irqrestore(&wx->gpio_lock, flags); + + return 0; +} + +static const struct irq_chip txgbe_gpio_irq_chip = { + .name = "txgbe_gpio_irq", + .irq_ack = txgbe_gpio_irq_ack, + .irq_mask = txgbe_gpio_irq_mask, + .irq_unmask = txgbe_gpio_irq_unmask, + .irq_set_type = txgbe_gpio_set_type, + .flags = IRQCHIP_IMMUTABLE, + GPIOCHIP_IRQ_RESOURCE_HELPERS, +}; + +static void txgbe_irq_handler(struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + struct wx *wx = irq_desc_get_handler_data(desc); + struct txgbe *txgbe = wx->priv; + irq_hw_number_t hwirq; + unsigned long gpioirq; + struct gpio_chip *gc; + unsigned long flags; + u32 eicr; + + eicr = wx_misc_isb(wx, WX_ISB_MISC); + + chained_irq_enter(chip, desc); + + gpioirq = rd32(wx, WX_GPIO_INTSTATUS); + + gc = txgbe->gpio; + for_each_set_bit(hwirq, &gpioirq, gc->ngpio) { + int gpio = irq_find_mapping(gc->irq.domain, hwirq); + u32 irq_type = 
irq_get_trigger_type(gpio); + + generic_handle_domain_irq(gc->irq.domain, hwirq); + + if ((irq_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) { + raw_spin_lock_irqsave(&wx->gpio_lock, flags); + txgbe_toggle_trigger(gc, hwirq); + raw_spin_unlock_irqrestore(&wx->gpio_lock, flags); + } + } + + chained_irq_exit(chip, desc); + + if (eicr & (TXGBE_PX_MISC_ETH_LK | TXGBE_PX_MISC_ETH_LKDN | + TXGBE_PX_MISC_ETH_AN)) { + u32 reg = rd32(wx, TXGBE_CFG_PORT_ST); + + phylink_mac_change(txgbe->phylink, !!(reg & TXGBE_CFG_PORT_ST_LINK_UP)); + } + + /* unmask interrupt */ + wx_intr_enable(wx, TXGBE_INTR_MISC(wx)); +} + +static int txgbe_gpio_init(struct txgbe *txgbe) +{ + struct gpio_irq_chip *girq; + struct gpio_chip *gc; + struct device *dev; + struct wx *wx; + int ret; + + wx = txgbe->wx; + dev = &wx->pdev->dev; + + raw_spin_lock_init(&wx->gpio_lock); + + gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL); + if (!gc) + return -ENOMEM; + + gc->label = devm_kasprintf(dev, GFP_KERNEL, "txgbe_gpio-%x", + pci_dev_id(wx->pdev)); + if (!gc->label) + return -ENOMEM; + + gc->base = -1; + gc->ngpio = 6; + gc->owner = THIS_MODULE; + gc->parent = dev; + gc->fwnode = software_node_fwnode(txgbe->nodes.group[SWNODE_GPIO]); + gc->get = txgbe_gpio_get; + gc->get_direction = txgbe_gpio_get_direction; + gc->direction_input = txgbe_gpio_direction_in; + gc->direction_output = txgbe_gpio_direction_out; + + girq = &gc->irq; + gpio_irq_chip_set_chip(girq, &txgbe_gpio_irq_chip); + girq->parent_handler = txgbe_irq_handler; + girq->parent_handler_data = wx; + girq->num_parents = 1; + girq->parents = devm_kcalloc(dev, girq->num_parents, + sizeof(*girq->parents), GFP_KERNEL); + if (!girq->parents) + return -ENOMEM; + girq->parents[0] = wx->msix_entries[wx->num_q_vectors].vector; + girq->default_type = IRQ_TYPE_NONE; + girq->handler = handle_bad_irq; + + ret = devm_gpiochip_add_data(dev, gc, wx); + if (ret) + return ret; + + txgbe->gpio = gc; + + return 0; +} + +static int txgbe_clock_register(struct txgbe *txgbe) +{ + struct pci_dev *pdev = txgbe->wx->pdev; + struct clk_lookup *clock; + char clk_name[32]; + struct clk *clk; + + snprintf(clk_name, sizeof(clk_name), "i2c_designware.%d", + pci_dev_id(pdev)); + + clk = clk_register_fixed_rate(NULL, clk_name, NULL, 0, 156250000); + if (IS_ERR(clk)) + return PTR_ERR(clk); + + clock = clkdev_create(clk, NULL, clk_name); + if (!clock) { + clk_unregister(clk); + return -ENOMEM; + } + + txgbe->clk = clk; + txgbe->clock = clock; + + return 0; +} + +static int txgbe_i2c_read(void *context, unsigned int reg, unsigned int *val) +{ + struct wx *wx = context; + + *val = rd32(wx, reg + TXGBE_I2C_BASE); + + return 0; +} + +static int txgbe_i2c_write(void *context, unsigned int reg, unsigned int val) +{ + struct wx *wx = context; + + wr32(wx, reg + TXGBE_I2C_BASE, val); + + return 0; +} + +static const struct regmap_config i2c_regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_read = txgbe_i2c_read, + .reg_write = txgbe_i2c_write, + .fast_io = true, +}; + +static int txgbe_i2c_register(struct txgbe *txgbe) +{ + struct platform_device_info info = {}; + struct platform_device *i2c_dev; + struct regmap *i2c_regmap; + struct pci_dev *pdev; + struct wx *wx; + + wx = txgbe->wx; + pdev = wx->pdev; + i2c_regmap = devm_regmap_init(&pdev->dev, NULL, wx, &i2c_regmap_config); + if (IS_ERR(i2c_regmap)) { + wx_err(wx, "failed to init I2C regmap\n"); + return PTR_ERR(i2c_regmap); + } + + info.parent = &pdev->dev; + info.fwnode = software_node_fwnode(txgbe->nodes.group[SWNODE_I2C]); + info.name = 
"i2c_designware"; + info.id = pci_dev_id(pdev); + + info.res = &DEFINE_RES_IRQ(pdev->irq); + info.num_res = 1; + i2c_dev = platform_device_register_full(&info); + if (IS_ERR(i2c_dev)) + return PTR_ERR(i2c_dev); + + txgbe->i2c_dev = i2c_dev; + + return 0; +} + +static int txgbe_sfp_register(struct txgbe *txgbe) +{ + struct pci_dev *pdev = txgbe->wx->pdev; + struct platform_device_info info = {}; + struct platform_device *sfp_dev; + + info.parent = &pdev->dev; + info.fwnode = software_node_fwnode(txgbe->nodes.group[SWNODE_SFP]); + info.name = "sfp"; + info.id = pci_dev_id(pdev); + sfp_dev = platform_device_register_full(&info); + if (IS_ERR(sfp_dev)) + return PTR_ERR(sfp_dev); + + txgbe->sfp_dev = sfp_dev; + + return 0; +} + +static int txgbe_phy_read(struct mii_bus *bus, int phy_addr, + int devnum, int regnum) +{ + struct wx *wx = bus->priv; + u32 val, command; + int ret; + + /* setup and write the address cycle command */ + command = WX_MSCA_RA(regnum) | + WX_MSCA_PA(phy_addr) | + WX_MSCA_DA(devnum); + wr32(wx, WX_MSCA, command); + + command = WX_MSCC_CMD(WX_MSCA_CMD_READ) | WX_MSCC_BUSY; + wr32(wx, WX_MSCC, command); + + /* wait to complete */ + ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, + 100000, false, wx, WX_MSCC); + if (ret) { + wx_err(wx, "Mdio read c45 command did not complete.\n"); + return ret; + } + + return (u16)rd32(wx, WX_MSCC); +} + +static int txgbe_phy_write(struct mii_bus *bus, int phy_addr, + int devnum, int regnum, u16 value) +{ + struct wx *wx = bus->priv; + int ret, command; + u16 val; + + /* setup and write the address cycle command */ + command = WX_MSCA_RA(regnum) | + WX_MSCA_PA(phy_addr) | + WX_MSCA_DA(devnum); + wr32(wx, WX_MSCA, command); + + command = value | WX_MSCC_CMD(WX_MSCA_CMD_WRITE) | WX_MSCC_BUSY; + wr32(wx, WX_MSCC, command); + + /* wait to complete */ + ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, + 100000, false, wx, WX_MSCC); + if (ret) + wx_err(wx, "Mdio write c45 command did not complete.\n"); + + return ret; +} + +static int txgbe_ext_phy_init(struct txgbe *txgbe) +{ + struct phy_device *phydev; + struct mii_bus *mii_bus; + struct pci_dev *pdev; + struct wx *wx; + int ret = 0; + + wx = txgbe->wx; + pdev = wx->pdev; + + mii_bus = devm_mdiobus_alloc(&pdev->dev); + if (!mii_bus) + return -ENOMEM; + + mii_bus->name = "txgbe_mii_bus"; + mii_bus->read_c45 = &txgbe_phy_read; + mii_bus->write_c45 = &txgbe_phy_write; + mii_bus->parent = &pdev->dev; + mii_bus->phy_mask = GENMASK(31, 1); + mii_bus->priv = wx; + snprintf(mii_bus->id, MII_BUS_ID_SIZE, "txgbe-%x", + (pdev->bus->number << 8) | pdev->devfn); + + ret = devm_mdiobus_register(&pdev->dev, mii_bus); + if (ret) { + wx_err(wx, "failed to register MDIO bus: %d\n", ret); + return ret; + } + + phydev = phy_find_first(mii_bus); + if (!phydev) { + wx_err(wx, "no PHY found\n"); + return -ENODEV; + } + + phy_attached_info(phydev); + + wx->link = 0; + wx->speed = 0; + wx->duplex = 0; + wx->phydev = phydev; + + ret = txgbe_phylink_init(txgbe); + if (ret) { + wx_err(wx, "failed to init phylink: %d\n", ret); + return ret; + } + + return 0; +} + +int txgbe_init_phy(struct txgbe *txgbe) +{ + int ret; + + if (txgbe->wx->media_type == sp_media_copper) + return txgbe_ext_phy_init(txgbe); + + ret = txgbe_swnodes_register(txgbe); + if (ret) { + wx_err(txgbe->wx, "failed to register software nodes\n"); + return ret; + } + + ret = txgbe_mdio_pcs_init(txgbe); + if (ret) { + wx_err(txgbe->wx, "failed to init mdio pcs: %d\n", ret); + goto err_unregister_swnode; + } + + ret = 
txgbe_phylink_init(txgbe);
+ if (ret) {
+ wx_err(txgbe->wx, "failed to init phylink\n");
+ goto err_destroy_xpcs;
+ }
+
+ ret = txgbe_gpio_init(txgbe);
+ if (ret) {
+ wx_err(txgbe->wx, "failed to init gpio\n");
+ goto err_destroy_phylink;
+ }
+
+ ret = txgbe_clock_register(txgbe);
+ if (ret) {
+ wx_err(txgbe->wx, "failed to register clock: %d\n", ret);
+ goto err_destroy_phylink;
+ }
+
+ ret = txgbe_i2c_register(txgbe);
+ if (ret) {
+ wx_err(txgbe->wx, "failed to init i2c interface: %d\n", ret);
+ goto err_unregister_clk;
+ }
+
+ ret = txgbe_sfp_register(txgbe);
+ if (ret) {
+ wx_err(txgbe->wx, "failed to register sfp\n");
+ goto err_unregister_i2c;
+ }
+
+ return 0;
+
+err_unregister_i2c:
+ platform_device_unregister(txgbe->i2c_dev);
+err_unregister_clk:
+ clkdev_drop(txgbe->clock);
+ clk_unregister(txgbe->clk);
+err_destroy_phylink:
+ phylink_destroy(txgbe->phylink);
+err_destroy_xpcs:
+ xpcs_destroy(txgbe->xpcs);
+err_unregister_swnode:
+ software_node_unregister_node_group(txgbe->nodes.group);
+
+ return ret;
+}
+
+void txgbe_remove_phy(struct txgbe *txgbe)
+{
+ if (txgbe->wx->media_type == sp_media_copper) {
+ phylink_disconnect_phy(txgbe->phylink);
+ phylink_destroy(txgbe->phylink);
+ return;
+ }
+
+ platform_device_unregister(txgbe->sfp_dev);
+ platform_device_unregister(txgbe->i2c_dev);
+ clkdev_drop(txgbe->clock);
+ clk_unregister(txgbe->clk);
+ phylink_destroy(txgbe->phylink);
+ xpcs_destroy(txgbe->xpcs);
+ software_node_unregister_node_group(txgbe->nodes.group);
+}
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
new file mode 100644
index 0000000000..1ab5921249
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _TXGBE_PHY_H_
+#define _TXGBE_PHY_H_
+
+int txgbe_init_phy(struct txgbe *txgbe);
+void txgbe_remove_phy(struct txgbe *txgbe);
+
+#endif /* _TXGBE_PHY_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
new file mode 100644
index 0000000000..51199c355f
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -0,0 +1,192 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd.
*/ + +#ifndef _TXGBE_TYPE_H_ +#define _TXGBE_TYPE_H_ + +#include <linux/property.h> + +/* Device IDs */ +#define TXGBE_DEV_ID_SP1000 0x1001 +#define TXGBE_DEV_ID_WX1820 0x2001 + +/* Subsystem IDs */ +/* SFP */ +#define TXGBE_ID_SP1000_SFP 0x0000 +#define TXGBE_ID_WX1820_SFP 0x2000 +#define TXGBE_ID_SFP 0x00 + +/* copper */ +#define TXGBE_ID_SP1000_XAUI 0x1010 +#define TXGBE_ID_WX1820_XAUI 0x2010 +#define TXGBE_ID_XAUI 0x10 +#define TXGBE_ID_SP1000_SGMII 0x1020 +#define TXGBE_ID_WX1820_SGMII 0x2020 +#define TXGBE_ID_SGMII 0x20 +/* backplane */ +#define TXGBE_ID_SP1000_KR_KX_KX4 0x1030 +#define TXGBE_ID_WX1820_KR_KX_KX4 0x2030 +#define TXGBE_ID_KR_KX_KX4 0x30 +/* MAC Interface */ +#define TXGBE_ID_SP1000_MAC_XAUI 0x1040 +#define TXGBE_ID_WX1820_MAC_XAUI 0x2040 +#define TXGBE_ID_MAC_XAUI 0x40 +#define TXGBE_ID_SP1000_MAC_SGMII 0x1060 +#define TXGBE_ID_WX1820_MAC_SGMII 0x2060 +#define TXGBE_ID_MAC_SGMII 0x60 + +/* Combined interface*/ +#define TXGBE_ID_SFI_XAUI 0x50 + +/* Revision ID */ +#define TXGBE_SP_MPW 1 + +/**************** SP Registers ****************************/ +/* chip control Registers */ +#define TXGBE_MIS_PRB_CTL 0x10010 +#define TXGBE_MIS_PRB_CTL_LAN_UP(_i) BIT(1 - (_i)) +/* FMGR Registers */ +#define TXGBE_SPI_ILDR_STATUS 0x10120 +#define TXGBE_SPI_ILDR_STATUS_PERST BIT(0) /* PCIE_PERST is done */ +#define TXGBE_SPI_ILDR_STATUS_PWRRST BIT(1) /* Power on reset is done */ +#define TXGBE_SPI_ILDR_STATUS_LAN_SW_RST(_i) BIT((_i) + 9) /* lan soft reset done */ + +/* Sensors for PVT(Process Voltage Temperature) */ +#define TXGBE_TS_CTL 0x10300 +#define TXGBE_TS_CTL_EVAL_MD BIT(31) + +/* GPIO register bit */ +#define TXGBE_GPIOBIT_0 BIT(0) /* I:tx fault */ +#define TXGBE_GPIOBIT_1 BIT(1) /* O:tx disabled */ +#define TXGBE_GPIOBIT_2 BIT(2) /* I:sfp module absent */ +#define TXGBE_GPIOBIT_3 BIT(3) /* I:rx signal lost */ +#define TXGBE_GPIOBIT_4 BIT(4) /* O:rate select, 1G(0) 10G(1) */ +#define TXGBE_GPIOBIT_5 BIT(5) /* O:rate select, 1G(0) 10G(1) */ + +/* Extended Interrupt Enable Set */ +#define TXGBE_PX_MISC_ETH_LKDN BIT(8) +#define TXGBE_PX_MISC_DEV_RST BIT(10) +#define TXGBE_PX_MISC_ETH_EVENT BIT(17) +#define TXGBE_PX_MISC_ETH_LK BIT(18) +#define TXGBE_PX_MISC_ETH_AN BIT(19) +#define TXGBE_PX_MISC_INT_ERR BIT(20) +#define TXGBE_PX_MISC_GPIO BIT(26) +#define TXGBE_PX_MISC_IEN_MASK \ + (TXGBE_PX_MISC_ETH_LKDN | TXGBE_PX_MISC_DEV_RST | \ + TXGBE_PX_MISC_ETH_EVENT | TXGBE_PX_MISC_ETH_LK | \ + TXGBE_PX_MISC_ETH_AN | TXGBE_PX_MISC_INT_ERR | \ + TXGBE_PX_MISC_GPIO) + +/* Port cfg registers */ +#define TXGBE_CFG_PORT_ST 0x14404 +#define TXGBE_CFG_PORT_ST_LINK_UP BIT(0) + +/* I2C registers */ +#define TXGBE_I2C_BASE 0x14900 + +/************************************** ETH PHY ******************************/ +#define TXGBE_XPCS_IDA_ADDR 0x13000 +#define TXGBE_XPCS_IDA_DATA 0x13004 + +/* Part Number String Length */ +#define TXGBE_PBANUM_LENGTH 32 + +/* Checksum and EEPROM pointers */ +#define TXGBE_EEPROM_LAST_WORD 0x800 +#define TXGBE_EEPROM_CHECKSUM 0x2F +#define TXGBE_EEPROM_SUM 0xBABA +#define TXGBE_EEPROM_VERSION_L 0x1D +#define TXGBE_EEPROM_VERSION_H 0x1E +#define TXGBE_ISCSI_BOOT_CONFIG 0x07 +#define TXGBE_PBANUM0_PTR 0x05 +#define TXGBE_PBANUM1_PTR 0x06 +#define TXGBE_PBANUM_PTR_GUARD 0xFAFA + +#define TXGBE_MAX_MSIX_VECTORS 64 +#define TXGBE_MAX_FDIR_INDICES 63 + +#define TXGBE_MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1) +#define TXGBE_MAX_TX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1) + +#define TXGBE_SP_MAX_TX_QUEUES 128 +#define TXGBE_SP_MAX_RX_QUEUES 128 +#define TXGBE_SP_RAR_ENTRIES 128 
+#define TXGBE_SP_MC_TBL_SIZE 128 +#define TXGBE_SP_VFT_TBL_SIZE 128 +#define TXGBE_SP_RX_PB_SIZE 512 +#define TXGBE_SP_TDB_PB_SZ (160 * 1024) /* 160KB Packet Buffer */ + +/* TX/RX descriptor defines */ +#define TXGBE_DEFAULT_TXD 512 +#define TXGBE_DEFAULT_TX_WORK 256 + +#if (PAGE_SIZE < 8192) +#define TXGBE_DEFAULT_RXD 512 +#define TXGBE_DEFAULT_RX_WORK 256 +#else +#define TXGBE_DEFAULT_RXD 256 +#define TXGBE_DEFAULT_RX_WORK 128 +#endif + +#define TXGBE_INTR_MISC(A) BIT((A)->num_q_vectors) +#define TXGBE_INTR_QALL(A) (TXGBE_INTR_MISC(A) - 1) + +#define TXGBE_MAX_EITR GENMASK(11, 3) + +extern char txgbe_driver_name[]; + +static inline struct txgbe *netdev_to_txgbe(struct net_device *netdev) +{ + struct wx *wx = netdev_priv(netdev); + + return wx->priv; +} + +#define NODE_PROP(_NAME, _PROP) \ + (const struct software_node) { \ + .name = _NAME, \ + .properties = _PROP, \ + } + +enum txgbe_swnodes { + SWNODE_GPIO = 0, + SWNODE_I2C, + SWNODE_SFP, + SWNODE_PHYLINK, + SWNODE_MAX +}; + +struct txgbe_nodes { + char gpio_name[32]; + char i2c_name[32]; + char sfp_name[32]; + char phylink_name[32]; + struct property_entry gpio_props[1]; + struct property_entry i2c_props[3]; + struct property_entry sfp_props[8]; + struct property_entry phylink_props[2]; + struct software_node_ref_args i2c_ref[1]; + struct software_node_ref_args gpio0_ref[1]; + struct software_node_ref_args gpio1_ref[1]; + struct software_node_ref_args gpio2_ref[1]; + struct software_node_ref_args gpio3_ref[1]; + struct software_node_ref_args gpio4_ref[1]; + struct software_node_ref_args gpio5_ref[1]; + struct software_node_ref_args sfp_ref[1]; + struct software_node swnodes[SWNODE_MAX]; + const struct software_node *group[SWNODE_MAX + 1]; +}; + +struct txgbe { + struct wx *wx; + struct txgbe_nodes nodes; + struct dw_xpcs *xpcs; + struct phylink *phylink; + struct platform_device *sfp_dev; + struct platform_device *i2c_dev; + struct clk_lookup *clock; + struct clk *clk; + struct gpio_chip *gpio; +}; + +#endif /* _TXGBE_TYPE_H_ */ |