author    | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-27 10:05:51 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-27 10:05:51 +0000
commit    | 5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch)
tree      | a94efe259b9009378be6d90eb30d2b019d95c194 /drivers/net/ethernet/marvell
parent    | Initial commit. (diff)
download  | linux-5d1646d90e1f2cceb9f0828f4b28318cd0ec7744.tar.xz, linux-5d1646d90e1f2cceb9f0828f4b28318cd0ec7744.zip
Adding upstream version 5.10.209. (upstream/5.10.209)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/marvell')
75 files changed, 86181 insertions, 0 deletions
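
The diff below adds the Marvell vendor Kconfig, Makefile and driver sources. As an illustrative aside that is not part of the commit, the new Kconfig symbols it introduces could be enabled with a configuration fragment along the following lines, assuming the dependencies declared in the Kconfig (PCI for skge/sky2, ARCH_MVEBU for mvneta/mvpp2, MV64X60/PPC32/PLAT_ORION for mv643xx_eth, and so on) are satisfied for the target kernel:

# Hypothetical .config fragment, for illustration only; the symbol names are
# taken from drivers/net/ethernet/marvell/Kconfig as added by this commit.
CONFIG_NET_VENDOR_MARVELL=y
CONFIG_MVMDIO=m
CONFIG_MV643XX_ETH=m
CONFIG_MVNETA=m
CONFIG_MVNETA_BM_ENABLE=m
CONFIG_MVPP2=m
CONFIG_PXA168_ETH=m
CONFIG_SKGE=m
CONFIG_SKY2=m

Note that MVNETA_BM is not set directly: per its Kconfig entry it takes its default from MVNETA_BM_ENABLE so that the built-in/module combination stays consistent with MVNETA.
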
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
new file mode 100644
index 000000000..41815b609
--- /dev/null
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -0,0 +1,183 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Marvell device configuration
+#
+
+config NET_VENDOR_MARVELL
+	bool "Marvell devices"
+	default y
+	depends on PCI || CPU_PXA168 || MV64X60 || PPC32 || PLAT_ORION || INET || COMPILE_TEST
+	help
+	  If you have a network (Ethernet) card belonging to this class, say Y.
+
+	  Note that the answer to this question doesn't directly affect the
+	  kernel: saying N will just cause the configurator to skip all
+	  the questions about Marvell devices. If you say Y, you will be
+	  asked for your specific card in the following questions.
+
+if NET_VENDOR_MARVELL
+
+config MV643XX_ETH
+	tristate "Marvell Discovery (643XX) and Orion ethernet support"
+	depends on MV64X60 || PPC32 || PLAT_ORION || COMPILE_TEST
+	depends on INET
+	select PHYLIB
+	select MVMDIO
+	help
+	  This driver supports the gigabit ethernet MACs in the
+	  Marvell Discovery PPC/MIPS chipset family (MV643XX) and
+	  in the Marvell Orion ARM SoC family.
+
+	  Some boards that use the Discovery chipset are the Momenco
+	  Ocelot C and Jaguar ATX and Pegasos II.
+
+config MVMDIO
+	tristate "Marvell MDIO interface support"
+	depends on HAS_IOMEM
+	select PHYLIB
+	help
+	  This driver supports the MDIO interface found in the network
+	  interface units of the Marvell EBU SoCs (Kirkwood, Orion5x,
+	  Dove, Armada 370 and Armada XP).
+
+	  This driver is used by the MV643XX_ETH and MVNETA drivers.
+
+config MVNETA_BM_ENABLE
+	tristate "Marvell Armada 38x/XP network interface BM support"
+	depends on MVNETA
+	depends on !64BIT
+	help
+	  This driver supports auxiliary block of the network
+	  interface units in the Marvell ARMADA XP and ARMADA 38x SoC
+	  family, which is called buffer manager.
+
+	  This driver, when enabled, strictly cooperates with mvneta
+	  driver and is common for all network ports of the devices,
+	  even for Armada 370 SoC, which doesn't support hardware
+	  buffer management.
+
+config MVNETA
+	tristate "Marvell Armada 370/38x/XP/37xx network interface support"
+	depends on ARCH_MVEBU || COMPILE_TEST
+	select MVMDIO
+	select PHYLINK
+	select PAGE_POOL
+	help
+	  This driver supports the network interface units in the
+	  Marvell ARMADA XP, ARMADA 370, ARMADA 38x and
+	  ARMADA 37xx SoC family.
+
+	  Note that this driver is distinct from the mv643xx_eth
+	  driver, which should be used for the older Marvell SoCs
+	  (Dove, Orion, Discovery, Kirkwood).
+
+config MVNETA_BM
+	tristate
+	depends on !64BIT
+	default y if MVNETA=y && MVNETA_BM_ENABLE!=n
+	default MVNETA_BM_ENABLE
+	select HWBM
+	select GENERIC_ALLOCATOR
+	help
+	  MVNETA_BM must not be 'm' if MVNETA=y, so this symbol ensures
+	  that all dependencies are met.
+
+config MVPP2
+	tristate "Marvell Armada 375/7K/8K network interface support"
+	depends on ARCH_MVEBU || COMPILE_TEST
+	select MVMDIO
+	select PHYLINK
+	select PAGE_POOL
+	help
+	  This driver supports the network interface units in the
+	  Marvell ARMADA 375, 7K and 8K SoCs.
+
+config MVPP2_PTP
+	bool "Marvell Armada 8K Enable PTP support"
+	depends on NETWORK_PHY_TIMESTAMPING
+	depends on (PTP_1588_CLOCK = y && MVPP2 = y) || \
+		   (PTP_1588_CLOCK && MVPP2 = m)
+
+config PXA168_ETH
+	tristate "Marvell pxa168 ethernet support"
+	depends on HAS_IOMEM
+	depends on CPU_PXA168 || ARCH_BERLIN || COMPILE_TEST
+	select PHYLIB
+	help
+	  This driver supports the pxa168 Ethernet ports.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called pxa168_eth.
+
+config SKGE
+	tristate "Marvell Yukon Gigabit Ethernet support"
+	depends on PCI
+	select CRC32
+	help
+	  This driver support the Marvell Yukon or SysKonnect SK-98xx/SK-95xx
+	  and related Gigabit Ethernet adapters. It is a new smaller driver
+	  with better performance and more complete ethtool support.
+
+	  It does not support the link failover and network management
+	  features that "portable" vendor supplied sk98lin driver does.
+
+	  This driver supports adapters based on the original Yukon chipset:
+	  Marvell 88E8001, Belkin F5D5005, CNet GigaCard, DLink DGE-530T,
+	  Linksys EG1032/EG1064, 3Com 3C940/3C940B, SysKonnect SK-9871/9872.
+
+	  It does not support the newer Yukon2 chipset: a separate driver,
+	  sky2, is provided for these adapters.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called skge. This is recommended.
+
+config SKGE_DEBUG
+	bool "Debugging interface"
+	depends on SKGE && DEBUG_FS
+	help
+	  This option adds the ability to dump driver state for debugging.
+	  The file /sys/kernel/debug/skge/ethX displays the state of the internal
+	  transmit and receive rings.
+
+	  If unsure, say N.
+
+config SKGE_GENESIS
+	bool "Support for older SysKonnect Genesis boards"
+	depends on SKGE
+	help
+	  This enables support for the older and uncommon SysKonnect Genesis
+	  chips, which support MII via an external transceiver, instead of
+	  an internal one. Disabling this option will save some memory
+	  by making code smaller. If unsure say Y.
+
+config SKY2
+	tristate "Marvell Yukon 2 support"
+	depends on PCI
+	select CRC32
+	help
+	  This driver supports Gigabit Ethernet adapters based on the
+	  Marvell Yukon 2 chipset:
+	  Marvell 88E8021/88E8022/88E8035/88E8036/88E8038/88E8050/88E8052/
+	  88E8053/88E8055/88E8061/88E8062, SysKonnect SK-9E21D/SK-9S21
+
+	  There is companion driver for the older Marvell Yukon and
+	  SysKonnect Genesis based adapters: skge.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called sky2. This is recommended.
+
+config SKY2_DEBUG
+	bool "Debugging interface"
+	depends on SKY2 && DEBUG_FS
+	help
+	  This option adds the ability to dump driver state for debugging.
+	  The file /sys/kernel/debug/sky2/ethX displays the state of the internal
+	  transmit and receive rings.
+
+	  If unsure, say N.
+
+
+source "drivers/net/ethernet/marvell/octeontx2/Kconfig"
+source "drivers/net/ethernet/marvell/prestera/Kconfig"
+
+endif # NET_VENDOR_MARVELL
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile
new file mode 100644
index 000000000..9f88fe822
--- /dev/null
+++ b/drivers/net/ethernet/marvell/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Marvell device drivers.
+# + +obj-$(CONFIG_MVMDIO) += mvmdio.o +obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o +obj-$(CONFIG_MVNETA_BM) += mvneta_bm.o +obj-$(CONFIG_MVNETA) += mvneta.o +obj-$(CONFIG_MVPP2) += mvpp2/ +obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o +obj-$(CONFIG_SKGE) += skge.o +obj-$(CONFIG_SKY2) += sky2.o +obj-y += octeontx2/ +obj-y += prestera/ diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c new file mode 100644 index 000000000..735b76eff --- /dev/null +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -0,0 +1,3291 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports + * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com> + * + * Based on the 64360 driver from: + * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il> + * Rabeeh Khoury <rabeeh@marvell.com> + * + * Copyright (C) 2003 PMC-Sierra, Inc., + * written by Manish Lachwani + * + * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org> + * + * Copyright (C) 2004-2006 MontaVista Software, Inc. + * Dale Farnsworth <dale@farnsworth.org> + * + * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com> + * <sjhill@realitydiluted.com> + * + * Copyright (C) 2007-2008 Marvell Semiconductor + * Lennert Buytenhek <buytenh@marvell.com> + * + * Copyright (C) 2013 Michael Stapelberg <michael@stapelberg.de> + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/init.h> +#include <linux/dma-mapping.h> +#include <linux/in.h> +#include <linux/ip.h> +#include <net/tso.h> +#include <linux/tcp.h> +#include <linux/udp.h> +#include <linux/etherdevice.h> +#include <linux/delay.h> +#include <linux/ethtool.h> +#include <linux/platform_device.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/spinlock.h> +#include <linux/workqueue.h> +#include <linux/phy.h> +#include <linux/mv643xx_eth.h> +#include <linux/io.h> +#include <linux/interrupt.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/clk.h> +#include <linux/of.h> +#include <linux/of_irq.h> +#include <linux/of_net.h> +#include <linux/of_mdio.h> + +static char mv643xx_eth_driver_name[] = "mv643xx_eth"; +static char mv643xx_eth_driver_version[] = "1.4"; + + +/* + * Registers shared between all ports. + */ +#define PHY_ADDR 0x0000 +#define WINDOW_BASE(w) (0x0200 + ((w) << 3)) +#define WINDOW_SIZE(w) (0x0204 + ((w) << 3)) +#define WINDOW_REMAP_HIGH(w) (0x0280 + ((w) << 2)) +#define WINDOW_BAR_ENABLE 0x0290 +#define WINDOW_PROTECT(w) (0x0294 + ((w) << 4)) + +/* + * Main per-port registers. These live at offset 0x0400 for + * port #0, 0x0800 for port #1, and 0x0c00 for port #2. 
+ */ +#define PORT_CONFIG 0x0000 +#define UNICAST_PROMISCUOUS_MODE 0x00000001 +#define PORT_CONFIG_EXT 0x0004 +#define MAC_ADDR_LOW 0x0014 +#define MAC_ADDR_HIGH 0x0018 +#define SDMA_CONFIG 0x001c +#define TX_BURST_SIZE_16_64BIT 0x01000000 +#define TX_BURST_SIZE_4_64BIT 0x00800000 +#define BLM_TX_NO_SWAP 0x00000020 +#define BLM_RX_NO_SWAP 0x00000010 +#define RX_BURST_SIZE_16_64BIT 0x00000008 +#define RX_BURST_SIZE_4_64BIT 0x00000004 +#define PORT_SERIAL_CONTROL 0x003c +#define SET_MII_SPEED_TO_100 0x01000000 +#define SET_GMII_SPEED_TO_1000 0x00800000 +#define SET_FULL_DUPLEX_MODE 0x00200000 +#define MAX_RX_PACKET_9700BYTE 0x000a0000 +#define DISABLE_AUTO_NEG_SPEED_GMII 0x00002000 +#define DO_NOT_FORCE_LINK_FAIL 0x00000400 +#define SERIAL_PORT_CONTROL_RESERVED 0x00000200 +#define DISABLE_AUTO_NEG_FOR_FLOW_CTRL 0x00000008 +#define DISABLE_AUTO_NEG_FOR_DUPLEX 0x00000004 +#define FORCE_LINK_PASS 0x00000002 +#define SERIAL_PORT_ENABLE 0x00000001 +#define PORT_STATUS 0x0044 +#define TX_FIFO_EMPTY 0x00000400 +#define TX_IN_PROGRESS 0x00000080 +#define PORT_SPEED_MASK 0x00000030 +#define PORT_SPEED_1000 0x00000010 +#define PORT_SPEED_100 0x00000020 +#define PORT_SPEED_10 0x00000000 +#define FLOW_CONTROL_ENABLED 0x00000008 +#define FULL_DUPLEX 0x00000004 +#define LINK_UP 0x00000002 +#define TXQ_COMMAND 0x0048 +#define TXQ_FIX_PRIO_CONF 0x004c +#define PORT_SERIAL_CONTROL1 0x004c +#define CLK125_BYPASS_EN 0x00000010 +#define TX_BW_RATE 0x0050 +#define TX_BW_MTU 0x0058 +#define TX_BW_BURST 0x005c +#define INT_CAUSE 0x0060 +#define INT_TX_END 0x07f80000 +#define INT_TX_END_0 0x00080000 +#define INT_RX 0x000003fc +#define INT_RX_0 0x00000004 +#define INT_EXT 0x00000002 +#define INT_CAUSE_EXT 0x0064 +#define INT_EXT_LINK_PHY 0x00110000 +#define INT_EXT_TX 0x000000ff +#define INT_MASK 0x0068 +#define INT_MASK_EXT 0x006c +#define TX_FIFO_URGENT_THRESHOLD 0x0074 +#define RX_DISCARD_FRAME_CNT 0x0084 +#define RX_OVERRUN_FRAME_CNT 0x0088 +#define TXQ_FIX_PRIO_CONF_MOVED 0x00dc +#define TX_BW_RATE_MOVED 0x00e0 +#define TX_BW_MTU_MOVED 0x00e8 +#define TX_BW_BURST_MOVED 0x00ec +#define RXQ_CURRENT_DESC_PTR(q) (0x020c + ((q) << 4)) +#define RXQ_COMMAND 0x0280 +#define TXQ_CURRENT_DESC_PTR(q) (0x02c0 + ((q) << 2)) +#define TXQ_BW_TOKENS(q) (0x0300 + ((q) << 4)) +#define TXQ_BW_CONF(q) (0x0304 + ((q) << 4)) +#define TXQ_BW_WRR_CONF(q) (0x0308 + ((q) << 4)) + +/* + * Misc per-port registers. + */ +#define MIB_COUNTERS(p) (0x1000 + ((p) << 7)) +#define SPECIAL_MCAST_TABLE(p) (0x1400 + ((p) << 10)) +#define OTHER_MCAST_TABLE(p) (0x1500 + ((p) << 10)) +#define UNICAST_TABLE(p) (0x1600 + ((p) << 10)) + + +/* + * SDMA configuration register default value. + */ +#if defined(__BIG_ENDIAN) +#define PORT_SDMA_CONFIG_DEFAULT_VALUE \ + (RX_BURST_SIZE_4_64BIT | \ + TX_BURST_SIZE_4_64BIT) +#elif defined(__LITTLE_ENDIAN) +#define PORT_SDMA_CONFIG_DEFAULT_VALUE \ + (RX_BURST_SIZE_4_64BIT | \ + BLM_RX_NO_SWAP | \ + BLM_TX_NO_SWAP | \ + TX_BURST_SIZE_4_64BIT) +#else +#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined +#endif + + +/* + * Misc definitions. 
+ */ +#define DEFAULT_RX_QUEUE_SIZE 128 +#define DEFAULT_TX_QUEUE_SIZE 512 +#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES) + +/* Max number of allowed TCP segments for software TSO */ +#define MV643XX_MAX_TSO_SEGS 100 +#define MV643XX_MAX_SKB_DESCS (MV643XX_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS) + +#define IS_TSO_HEADER(txq, addr) \ + ((addr >= txq->tso_hdrs_dma) && \ + (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE)) + +#define DESC_DMA_MAP_SINGLE 0 +#define DESC_DMA_MAP_PAGE 1 + +/* + * RX/TX descriptors. + */ +#if defined(__BIG_ENDIAN) +struct rx_desc { + u16 byte_cnt; /* Descriptor buffer byte count */ + u16 buf_size; /* Buffer size */ + u32 cmd_sts; /* Descriptor command status */ + u32 next_desc_ptr; /* Next descriptor pointer */ + u32 buf_ptr; /* Descriptor buffer pointer */ +}; + +struct tx_desc { + u16 byte_cnt; /* buffer byte count */ + u16 l4i_chk; /* CPU provided TCP checksum */ + u32 cmd_sts; /* Command/status field */ + u32 next_desc_ptr; /* Pointer to next descriptor */ + u32 buf_ptr; /* pointer to buffer for this descriptor*/ +}; +#elif defined(__LITTLE_ENDIAN) +struct rx_desc { + u32 cmd_sts; /* Descriptor command status */ + u16 buf_size; /* Buffer size */ + u16 byte_cnt; /* Descriptor buffer byte count */ + u32 buf_ptr; /* Descriptor buffer pointer */ + u32 next_desc_ptr; /* Next descriptor pointer */ +}; + +struct tx_desc { + u32 cmd_sts; /* Command/status field */ + u16 l4i_chk; /* CPU provided TCP checksum */ + u16 byte_cnt; /* buffer byte count */ + u32 buf_ptr; /* pointer to buffer for this descriptor*/ + u32 next_desc_ptr; /* Pointer to next descriptor */ +}; +#else +#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined +#endif + +/* RX & TX descriptor command */ +#define BUFFER_OWNED_BY_DMA 0x80000000 + +/* RX & TX descriptor status */ +#define ERROR_SUMMARY 0x00000001 + +/* RX descriptor status */ +#define LAYER_4_CHECKSUM_OK 0x40000000 +#define RX_ENABLE_INTERRUPT 0x20000000 +#define RX_FIRST_DESC 0x08000000 +#define RX_LAST_DESC 0x04000000 +#define RX_IP_HDR_OK 0x02000000 +#define RX_PKT_IS_IPV4 0x01000000 +#define RX_PKT_IS_ETHERNETV2 0x00800000 +#define RX_PKT_LAYER4_TYPE_MASK 0x00600000 +#define RX_PKT_LAYER4_TYPE_TCP_IPV4 0x00000000 +#define RX_PKT_IS_VLAN_TAGGED 0x00080000 + +/* TX descriptor command */ +#define TX_ENABLE_INTERRUPT 0x00800000 +#define GEN_CRC 0x00400000 +#define TX_FIRST_DESC 0x00200000 +#define TX_LAST_DESC 0x00100000 +#define ZERO_PADDING 0x00080000 +#define GEN_IP_V4_CHECKSUM 0x00040000 +#define GEN_TCP_UDP_CHECKSUM 0x00020000 +#define UDP_FRAME 0x00010000 +#define MAC_HDR_EXTRA_4_BYTES 0x00008000 +#define GEN_TCP_UDP_CHK_FULL 0x00000400 +#define MAC_HDR_EXTRA_8_BYTES 0x00000200 + +#define TX_IHL_SHIFT 11 + + +/* global *******************************************************************/ +struct mv643xx_eth_shared_private { + /* + * Ethernet controller base address. + */ + void __iomem *base; + + /* + * Per-port MBUS window access register value. + */ + u32 win_protect; + + /* + * Hardware-specific parameters. 
+ */ + int extended_rx_coal_limit; + int tx_bw_control; + int tx_csum_limit; + struct clk *clk; +}; + +#define TX_BW_CONTROL_ABSENT 0 +#define TX_BW_CONTROL_OLD_LAYOUT 1 +#define TX_BW_CONTROL_NEW_LAYOUT 2 + +static int mv643xx_eth_open(struct net_device *dev); +static int mv643xx_eth_stop(struct net_device *dev); + + +/* per-port *****************************************************************/ +struct mib_counters { + u64 good_octets_received; + u32 bad_octets_received; + u32 internal_mac_transmit_err; + u32 good_frames_received; + u32 bad_frames_received; + u32 broadcast_frames_received; + u32 multicast_frames_received; + u32 frames_64_octets; + u32 frames_65_to_127_octets; + u32 frames_128_to_255_octets; + u32 frames_256_to_511_octets; + u32 frames_512_to_1023_octets; + u32 frames_1024_to_max_octets; + u64 good_octets_sent; + u32 good_frames_sent; + u32 excessive_collision; + u32 multicast_frames_sent; + u32 broadcast_frames_sent; + u32 unrec_mac_control_received; + u32 fc_sent; + u32 good_fc_received; + u32 bad_fc_received; + u32 undersize_received; + u32 fragments_received; + u32 oversize_received; + u32 jabber_received; + u32 mac_receive_error; + u32 bad_crc_event; + u32 collision; + u32 late_collision; + /* Non MIB hardware counters */ + u32 rx_discard; + u32 rx_overrun; +}; + +struct rx_queue { + int index; + + int rx_ring_size; + + int rx_desc_count; + int rx_curr_desc; + int rx_used_desc; + + struct rx_desc *rx_desc_area; + dma_addr_t rx_desc_dma; + int rx_desc_area_size; + struct sk_buff **rx_skb; +}; + +struct tx_queue { + int index; + + int tx_ring_size; + + int tx_desc_count; + int tx_curr_desc; + int tx_used_desc; + + int tx_stop_threshold; + int tx_wake_threshold; + + char *tso_hdrs; + dma_addr_t tso_hdrs_dma; + + struct tx_desc *tx_desc_area; + char *tx_desc_mapping; /* array to track the type of the dma mapping */ + dma_addr_t tx_desc_dma; + int tx_desc_area_size; + + struct sk_buff_head tx_skb; + + unsigned long tx_packets; + unsigned long tx_bytes; + unsigned long tx_dropped; +}; + +struct mv643xx_eth_private { + struct mv643xx_eth_shared_private *shared; + void __iomem *base; + int port_num; + + struct net_device *dev; + + struct timer_list mib_counters_timer; + spinlock_t mib_counters_lock; + struct mib_counters mib_counters; + + struct work_struct tx_timeout_task; + + struct napi_struct napi; + u32 int_mask; + u8 oom; + u8 work_link; + u8 work_tx; + u8 work_tx_end; + u8 work_rx; + u8 work_rx_refill; + + int skb_size; + + /* + * RX state. + */ + int rx_ring_size; + unsigned long rx_desc_sram_addr; + int rx_desc_sram_size; + int rxq_count; + struct timer_list rx_oom; + struct rx_queue rxq[8]; + + /* + * TX state. + */ + int tx_ring_size; + unsigned long tx_desc_sram_addr; + int tx_desc_sram_size; + int txq_count; + struct tx_queue txq[8]; + + /* + * Hardware-specific parameters. 
+ */ + struct clk *clk; + unsigned int t_clk; +}; + + +/* port register accessors **************************************************/ +static inline u32 rdl(struct mv643xx_eth_private *mp, int offset) +{ + return readl(mp->shared->base + offset); +} + +static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset) +{ + return readl(mp->base + offset); +} + +static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data) +{ + writel(data, mp->shared->base + offset); +} + +static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data) +{ + writel(data, mp->base + offset); +} + + +/* rxq/txq helper functions *************************************************/ +static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq) +{ + return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]); +} + +static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq) +{ + return container_of(txq, struct mv643xx_eth_private, txq[txq->index]); +} + +static void rxq_enable(struct rx_queue *rxq) +{ + struct mv643xx_eth_private *mp = rxq_to_mp(rxq); + wrlp(mp, RXQ_COMMAND, 1 << rxq->index); +} + +static void rxq_disable(struct rx_queue *rxq) +{ + struct mv643xx_eth_private *mp = rxq_to_mp(rxq); + u8 mask = 1 << rxq->index; + + wrlp(mp, RXQ_COMMAND, mask << 8); + while (rdlp(mp, RXQ_COMMAND) & mask) + udelay(10); +} + +static void txq_reset_hw_ptr(struct tx_queue *txq) +{ + struct mv643xx_eth_private *mp = txq_to_mp(txq); + u32 addr; + + addr = (u32)txq->tx_desc_dma; + addr += txq->tx_curr_desc * sizeof(struct tx_desc); + wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr); +} + +static void txq_enable(struct tx_queue *txq) +{ + struct mv643xx_eth_private *mp = txq_to_mp(txq); + wrlp(mp, TXQ_COMMAND, 1 << txq->index); +} + +static void txq_disable(struct tx_queue *txq) +{ + struct mv643xx_eth_private *mp = txq_to_mp(txq); + u8 mask = 1 << txq->index; + + wrlp(mp, TXQ_COMMAND, mask << 8); + while (rdlp(mp, TXQ_COMMAND) & mask) + udelay(10); +} + +static void txq_maybe_wake(struct tx_queue *txq) +{ + struct mv643xx_eth_private *mp = txq_to_mp(txq); + struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); + + if (netif_tx_queue_stopped(nq)) { + __netif_tx_lock(nq, smp_processor_id()); + if (txq->tx_desc_count <= txq->tx_wake_threshold) + netif_tx_wake_queue(nq); + __netif_tx_unlock(nq); + } +} + +static int rxq_process(struct rx_queue *rxq, int budget) +{ + struct mv643xx_eth_private *mp = rxq_to_mp(rxq); + struct net_device_stats *stats = &mp->dev->stats; + int rx; + + rx = 0; + while (rx < budget && rxq->rx_desc_count) { + struct rx_desc *rx_desc; + unsigned int cmd_sts; + struct sk_buff *skb; + u16 byte_cnt; + + rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc]; + + cmd_sts = rx_desc->cmd_sts; + if (cmd_sts & BUFFER_OWNED_BY_DMA) + break; + rmb(); + + skb = rxq->rx_skb[rxq->rx_curr_desc]; + rxq->rx_skb[rxq->rx_curr_desc] = NULL; + + rxq->rx_curr_desc++; + if (rxq->rx_curr_desc == rxq->rx_ring_size) + rxq->rx_curr_desc = 0; + + dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr, + rx_desc->buf_size, DMA_FROM_DEVICE); + rxq->rx_desc_count--; + rx++; + + mp->work_rx_refill |= 1 << rxq->index; + + byte_cnt = rx_desc->byte_cnt; + + /* + * Update statistics. + * + * Note that the descriptor byte count includes 2 dummy + * bytes automatically inserted by the hardware at the + * start of the packet (which we don't count), and a 4 + * byte CRC at the end of the packet (which we do count). 
+ */ + stats->rx_packets++; + stats->rx_bytes += byte_cnt - 2; + + /* + * In case we received a packet without first / last bits + * on, or the error summary bit is set, the packet needs + * to be dropped. + */ + if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC | ERROR_SUMMARY)) + != (RX_FIRST_DESC | RX_LAST_DESC)) + goto err; + + /* + * The -4 is for the CRC in the trailer of the + * received packet + */ + skb_put(skb, byte_cnt - 2 - 4); + + if (cmd_sts & LAYER_4_CHECKSUM_OK) + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->protocol = eth_type_trans(skb, mp->dev); + + napi_gro_receive(&mp->napi, skb); + + continue; + +err: + stats->rx_dropped++; + + if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) != + (RX_FIRST_DESC | RX_LAST_DESC)) { + if (net_ratelimit()) + netdev_err(mp->dev, + "received packet spanning multiple descriptors\n"); + } + + if (cmd_sts & ERROR_SUMMARY) + stats->rx_errors++; + + dev_kfree_skb(skb); + } + + if (rx < budget) + mp->work_rx &= ~(1 << rxq->index); + + return rx; +} + +static int rxq_refill(struct rx_queue *rxq, int budget) +{ + struct mv643xx_eth_private *mp = rxq_to_mp(rxq); + int refilled; + + refilled = 0; + while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) { + struct sk_buff *skb; + int rx; + struct rx_desc *rx_desc; + int size; + + skb = netdev_alloc_skb(mp->dev, mp->skb_size); + + if (skb == NULL) { + mp->oom = 1; + goto oom; + } + + if (SKB_DMA_REALIGN) + skb_reserve(skb, SKB_DMA_REALIGN); + + refilled++; + rxq->rx_desc_count++; + + rx = rxq->rx_used_desc++; + if (rxq->rx_used_desc == rxq->rx_ring_size) + rxq->rx_used_desc = 0; + + rx_desc = rxq->rx_desc_area + rx; + + size = skb_end_pointer(skb) - skb->data; + rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent, + skb->data, size, + DMA_FROM_DEVICE); + rx_desc->buf_size = size; + rxq->rx_skb[rx] = skb; + wmb(); + rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT; + wmb(); + + /* + * The hardware automatically prepends 2 bytes of + * dummy data to each received packet, so that the + * IP header ends up 16-byte aligned. + */ + skb_reserve(skb, 2); + } + + if (refilled < budget) + mp->work_rx_refill &= ~(1 << rxq->index); + +oom: + return refilled; +} + + +/* tx ***********************************************************************/ +static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb) +{ + int frag; + + for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { + const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag]; + + if (skb_frag_size(fragp) <= 8 && skb_frag_off(fragp) & 7) + return 1; + } + + return 0; +} + +static int skb_tx_csum(struct mv643xx_eth_private *mp, struct sk_buff *skb, + u16 *l4i_chk, u32 *command, int length) +{ + int ret; + u32 cmd = 0; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + int hdr_len; + int tag_bytes; + + BUG_ON(skb->protocol != htons(ETH_P_IP) && + skb->protocol != htons(ETH_P_8021Q)); + + hdr_len = (void *)ip_hdr(skb) - (void *)skb->data; + tag_bytes = hdr_len - ETH_HLEN; + + if (length - hdr_len > mp->shared->tx_csum_limit || + unlikely(tag_bytes & ~12)) { + ret = skb_checksum_help(skb); + if (!ret) + goto no_csum; + return ret; + } + + if (tag_bytes & 4) + cmd |= MAC_HDR_EXTRA_4_BYTES; + if (tag_bytes & 8) + cmd |= MAC_HDR_EXTRA_8_BYTES; + + cmd |= GEN_TCP_UDP_CHECKSUM | GEN_TCP_UDP_CHK_FULL | + GEN_IP_V4_CHECKSUM | + ip_hdr(skb)->ihl << TX_IHL_SHIFT; + + /* TODO: Revisit this. With the usage of GEN_TCP_UDP_CHK_FULL + * it seems we don't need to pass the initial checksum. 
*/ + switch (ip_hdr(skb)->protocol) { + case IPPROTO_UDP: + cmd |= UDP_FRAME; + *l4i_chk = 0; + break; + case IPPROTO_TCP: + *l4i_chk = 0; + break; + default: + WARN(1, "protocol not supported"); + } + } else { +no_csum: + /* Errata BTS #50, IHL must be 5 if no HW checksum */ + cmd |= 5 << TX_IHL_SHIFT; + } + *command = cmd; + return 0; +} + +static inline int +txq_put_data_tso(struct net_device *dev, struct tx_queue *txq, + struct sk_buff *skb, char *data, int length, + bool last_tcp, bool is_last) +{ + int tx_index; + u32 cmd_sts; + struct tx_desc *desc; + + tx_index = txq->tx_curr_desc++; + if (txq->tx_curr_desc == txq->tx_ring_size) + txq->tx_curr_desc = 0; + desc = &txq->tx_desc_area[tx_index]; + txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE; + + desc->l4i_chk = 0; + desc->byte_cnt = length; + + if (length <= 8 && (uintptr_t)data & 0x7) { + /* Copy unaligned small data fragment to TSO header data area */ + memcpy(txq->tso_hdrs + tx_index * TSO_HEADER_SIZE, + data, length); + desc->buf_ptr = txq->tso_hdrs_dma + + tx_index * TSO_HEADER_SIZE; + } else { + /* Alignment is okay, map buffer and hand off to hardware */ + txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE; + desc->buf_ptr = dma_map_single(dev->dev.parent, data, + length, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev->dev.parent, + desc->buf_ptr))) { + WARN(1, "dma_map_single failed!\n"); + return -ENOMEM; + } + } + + cmd_sts = BUFFER_OWNED_BY_DMA; + if (last_tcp) { + /* last descriptor in the TCP packet */ + cmd_sts |= ZERO_PADDING | TX_LAST_DESC; + /* last descriptor in SKB */ + if (is_last) + cmd_sts |= TX_ENABLE_INTERRUPT; + } + desc->cmd_sts = cmd_sts; + return 0; +} + +static inline void +txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length, + u32 *first_cmd_sts, bool first_desc) +{ + struct mv643xx_eth_private *mp = txq_to_mp(txq); + int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + int tx_index; + struct tx_desc *desc; + int ret; + u32 cmd_csum = 0; + u16 l4i_chk = 0; + u32 cmd_sts; + + tx_index = txq->tx_curr_desc; + desc = &txq->tx_desc_area[tx_index]; + + ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_csum, length); + if (ret) + WARN(1, "failed to prepare checksum!"); + + /* Should we set this? Can't use the value from skb_tx_csum() + * as it's not the correct initial L4 checksum to use. */ + desc->l4i_chk = 0; + + desc->byte_cnt = hdr_len; + desc->buf_ptr = txq->tso_hdrs_dma + + txq->tx_curr_desc * TSO_HEADER_SIZE; + cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA | TX_FIRST_DESC | + GEN_CRC; + + /* Defer updating the first command descriptor until all + * following descriptors have been written. 
+ */ + if (first_desc) + *first_cmd_sts = cmd_sts; + else + desc->cmd_sts = cmd_sts; + + txq->tx_curr_desc++; + if (txq->tx_curr_desc == txq->tx_ring_size) + txq->tx_curr_desc = 0; +} + +static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb, + struct net_device *dev) +{ + struct mv643xx_eth_private *mp = txq_to_mp(txq); + int hdr_len, total_len, data_left, ret; + int desc_count = 0; + struct tso_t tso; + struct tx_desc *first_tx_desc; + u32 first_cmd_sts = 0; + + /* Count needed descriptors */ + if ((txq->tx_desc_count + tso_count_descs(skb)) >= txq->tx_ring_size) { + netdev_dbg(dev, "not enough descriptors for TSO!\n"); + return -EBUSY; + } + + first_tx_desc = &txq->tx_desc_area[txq->tx_curr_desc]; + + /* Initialize the TSO handler, and prepare the first payload */ + hdr_len = tso_start(skb, &tso); + + total_len = skb->len - hdr_len; + while (total_len > 0) { + bool first_desc = (desc_count == 0); + char *hdr; + + data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); + total_len -= data_left; + desc_count++; + + /* prepare packet headers: MAC + IP + TCP */ + hdr = txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE; + tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0); + txq_put_hdr_tso(skb, txq, data_left, &first_cmd_sts, + first_desc); + + while (data_left > 0) { + int size; + desc_count++; + + size = min_t(int, tso.size, data_left); + ret = txq_put_data_tso(dev, txq, skb, tso.data, size, + size == data_left, + total_len == 0); + if (ret) + goto err_release; + data_left -= size; + tso_build_data(skb, &tso, size); + } + } + + __skb_queue_tail(&txq->tx_skb, skb); + skb_tx_timestamp(skb); + + /* ensure all other descriptors are written before first cmd_sts */ + wmb(); + first_tx_desc->cmd_sts = first_cmd_sts; + + /* clear TX_END status */ + mp->work_tx_end &= ~(1 << txq->index); + + /* ensure all descriptors are written before poking hardware */ + wmb(); + txq_enable(txq); + txq->tx_desc_count += desc_count; + return 0; +err_release: + /* TODO: Release all used data descriptors; header descriptors must not + * be DMA-unmapped. + */ + return ret; +} + +static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb) +{ + struct mv643xx_eth_private *mp = txq_to_mp(txq); + int nr_frags = skb_shinfo(skb)->nr_frags; + int frag; + + for (frag = 0; frag < nr_frags; frag++) { + skb_frag_t *this_frag; + int tx_index; + struct tx_desc *desc; + + this_frag = &skb_shinfo(skb)->frags[frag]; + tx_index = txq->tx_curr_desc++; + if (txq->tx_curr_desc == txq->tx_ring_size) + txq->tx_curr_desc = 0; + desc = &txq->tx_desc_area[tx_index]; + txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_PAGE; + + /* + * The last fragment will generate an interrupt + * which will free the skb on TX completion. 
+ */ + if (frag == nr_frags - 1) { + desc->cmd_sts = BUFFER_OWNED_BY_DMA | + ZERO_PADDING | TX_LAST_DESC | + TX_ENABLE_INTERRUPT; + } else { + desc->cmd_sts = BUFFER_OWNED_BY_DMA; + } + + desc->l4i_chk = 0; + desc->byte_cnt = skb_frag_size(this_frag); + desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent, + this_frag, 0, desc->byte_cnt, + DMA_TO_DEVICE); + } +} + +static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb, + struct net_device *dev) +{ + struct mv643xx_eth_private *mp = txq_to_mp(txq); + int nr_frags = skb_shinfo(skb)->nr_frags; + int tx_index; + struct tx_desc *desc; + u32 cmd_sts; + u16 l4i_chk; + int length, ret; + + cmd_sts = 0; + l4i_chk = 0; + + if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) { + if (net_ratelimit()) + netdev_err(dev, "tx queue full?!\n"); + return -EBUSY; + } + + ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_sts, skb->len); + if (ret) + return ret; + cmd_sts |= TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA; + + tx_index = txq->tx_curr_desc++; + if (txq->tx_curr_desc == txq->tx_ring_size) + txq->tx_curr_desc = 0; + desc = &txq->tx_desc_area[tx_index]; + txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE; + + if (nr_frags) { + txq_submit_frag_skb(txq, skb); + length = skb_headlen(skb); + } else { + cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT; + length = skb->len; + } + + desc->l4i_chk = l4i_chk; + desc->byte_cnt = length; + desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data, + length, DMA_TO_DEVICE); + + __skb_queue_tail(&txq->tx_skb, skb); + + skb_tx_timestamp(skb); + + /* ensure all other descriptors are written before first cmd_sts */ + wmb(); + desc->cmd_sts = cmd_sts; + + /* clear TX_END status */ + mp->work_tx_end &= ~(1 << txq->index); + + /* ensure all descriptors are written before poking hardware */ + wmb(); + txq_enable(txq); + + txq->tx_desc_count += nr_frags + 1; + + return 0; +} + +static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + int length, queue, ret; + struct tx_queue *txq; + struct netdev_queue *nq; + + queue = skb_get_queue_mapping(skb); + txq = mp->txq + queue; + nq = netdev_get_tx_queue(dev, queue); + + if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) { + netdev_printk(KERN_DEBUG, dev, + "failed to linearize skb with tiny unaligned fragment\n"); + return NETDEV_TX_BUSY; + } + + length = skb->len; + + if (skb_is_gso(skb)) + ret = txq_submit_tso(txq, skb, dev); + else + ret = txq_submit_skb(txq, skb, dev); + if (!ret) { + txq->tx_bytes += length; + txq->tx_packets++; + + if (txq->tx_desc_count >= txq->tx_stop_threshold) + netif_tx_stop_queue(nq); + } else { + txq->tx_dropped++; + dev_kfree_skb_any(skb); + } + + return NETDEV_TX_OK; +} + + +/* tx napi ******************************************************************/ +static void txq_kick(struct tx_queue *txq) +{ + struct mv643xx_eth_private *mp = txq_to_mp(txq); + struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); + u32 hw_desc_ptr; + u32 expected_ptr; + + __netif_tx_lock(nq, smp_processor_id()); + + if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index)) + goto out; + + hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index)); + expected_ptr = (u32)txq->tx_desc_dma + + txq->tx_curr_desc * sizeof(struct tx_desc); + + if (hw_desc_ptr != expected_ptr) + txq_enable(txq); + +out: + __netif_tx_unlock(nq); + + mp->work_tx_end &= ~(1 << txq->index); +} + +static int txq_reclaim(struct tx_queue *txq, int budget, int force) +{ + 
struct mv643xx_eth_private *mp = txq_to_mp(txq); + struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); + int reclaimed; + + __netif_tx_lock_bh(nq); + + reclaimed = 0; + while (reclaimed < budget && txq->tx_desc_count > 0) { + int tx_index; + struct tx_desc *desc; + u32 cmd_sts; + char desc_dma_map; + + tx_index = txq->tx_used_desc; + desc = &txq->tx_desc_area[tx_index]; + desc_dma_map = txq->tx_desc_mapping[tx_index]; + + cmd_sts = desc->cmd_sts; + + if (cmd_sts & BUFFER_OWNED_BY_DMA) { + if (!force) + break; + desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA; + } + + txq->tx_used_desc = tx_index + 1; + if (txq->tx_used_desc == txq->tx_ring_size) + txq->tx_used_desc = 0; + + reclaimed++; + txq->tx_desc_count--; + + if (!IS_TSO_HEADER(txq, desc->buf_ptr)) { + + if (desc_dma_map == DESC_DMA_MAP_PAGE) + dma_unmap_page(mp->dev->dev.parent, + desc->buf_ptr, + desc->byte_cnt, + DMA_TO_DEVICE); + else + dma_unmap_single(mp->dev->dev.parent, + desc->buf_ptr, + desc->byte_cnt, + DMA_TO_DEVICE); + } + + if (cmd_sts & TX_ENABLE_INTERRUPT) { + struct sk_buff *skb = __skb_dequeue(&txq->tx_skb); + + if (!WARN_ON(!skb)) + dev_consume_skb_any(skb); + } + + if (cmd_sts & ERROR_SUMMARY) { + netdev_info(mp->dev, "tx error\n"); + mp->dev->stats.tx_errors++; + } + + } + + __netif_tx_unlock_bh(nq); + + if (reclaimed < budget) + mp->work_tx &= ~(1 << txq->index); + + return reclaimed; +} + + +/* tx rate control **********************************************************/ +/* + * Set total maximum TX rate (shared by all TX queues for this port) + * to 'rate' bits per second, with a maximum burst of 'burst' bytes. + */ +static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst) +{ + int token_rate; + int mtu; + int bucket_size; + + token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000); + if (token_rate > 1023) + token_rate = 1023; + + mtu = (mp->dev->mtu + 255) >> 8; + if (mtu > 63) + mtu = 63; + + bucket_size = (burst + 255) >> 8; + if (bucket_size > 65535) + bucket_size = 65535; + + switch (mp->shared->tx_bw_control) { + case TX_BW_CONTROL_OLD_LAYOUT: + wrlp(mp, TX_BW_RATE, token_rate); + wrlp(mp, TX_BW_MTU, mtu); + wrlp(mp, TX_BW_BURST, bucket_size); + break; + case TX_BW_CONTROL_NEW_LAYOUT: + wrlp(mp, TX_BW_RATE_MOVED, token_rate); + wrlp(mp, TX_BW_MTU_MOVED, mtu); + wrlp(mp, TX_BW_BURST_MOVED, bucket_size); + break; + } +} + +static void txq_set_rate(struct tx_queue *txq, int rate, int burst) +{ + struct mv643xx_eth_private *mp = txq_to_mp(txq); + int token_rate; + int bucket_size; + + token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000); + if (token_rate > 1023) + token_rate = 1023; + + bucket_size = (burst + 255) >> 8; + if (bucket_size > 65535) + bucket_size = 65535; + + wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14); + wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate); +} + +static void txq_set_fixed_prio_mode(struct tx_queue *txq) +{ + struct mv643xx_eth_private *mp = txq_to_mp(txq); + int off; + u32 val; + + /* + * Turn on fixed priority mode. 
+ */ + off = 0; + switch (mp->shared->tx_bw_control) { + case TX_BW_CONTROL_OLD_LAYOUT: + off = TXQ_FIX_PRIO_CONF; + break; + case TX_BW_CONTROL_NEW_LAYOUT: + off = TXQ_FIX_PRIO_CONF_MOVED; + break; + } + + if (off) { + val = rdlp(mp, off); + val |= 1 << txq->index; + wrlp(mp, off, val); + } +} + + +/* mii management interface *************************************************/ +static void mv643xx_eth_adjust_link(struct net_device *dev) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL); + u32 autoneg_disable = FORCE_LINK_PASS | + DISABLE_AUTO_NEG_SPEED_GMII | + DISABLE_AUTO_NEG_FOR_FLOW_CTRL | + DISABLE_AUTO_NEG_FOR_DUPLEX; + + if (dev->phydev->autoneg == AUTONEG_ENABLE) { + /* enable auto negotiation */ + pscr &= ~autoneg_disable; + goto out_write; + } + + pscr |= autoneg_disable; + + if (dev->phydev->speed == SPEED_1000) { + /* force gigabit, half duplex not supported */ + pscr |= SET_GMII_SPEED_TO_1000; + pscr |= SET_FULL_DUPLEX_MODE; + goto out_write; + } + + pscr &= ~SET_GMII_SPEED_TO_1000; + + if (dev->phydev->speed == SPEED_100) + pscr |= SET_MII_SPEED_TO_100; + else + pscr &= ~SET_MII_SPEED_TO_100; + + if (dev->phydev->duplex == DUPLEX_FULL) + pscr |= SET_FULL_DUPLEX_MODE; + else + pscr &= ~SET_FULL_DUPLEX_MODE; + +out_write: + wrlp(mp, PORT_SERIAL_CONTROL, pscr); +} + +/* statistics ***************************************************************/ +static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + unsigned long tx_packets = 0; + unsigned long tx_bytes = 0; + unsigned long tx_dropped = 0; + int i; + + for (i = 0; i < mp->txq_count; i++) { + struct tx_queue *txq = mp->txq + i; + + tx_packets += txq->tx_packets; + tx_bytes += txq->tx_bytes; + tx_dropped += txq->tx_dropped; + } + + stats->tx_packets = tx_packets; + stats->tx_bytes = tx_bytes; + stats->tx_dropped = tx_dropped; + + return stats; +} + +static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset) +{ + return rdl(mp, MIB_COUNTERS(mp->port_num) + offset); +} + +static void mib_counters_clear(struct mv643xx_eth_private *mp) +{ + int i; + + for (i = 0; i < 0x80; i += 4) + mib_read(mp, i); + + /* Clear non MIB hw counters also */ + rdlp(mp, RX_DISCARD_FRAME_CNT); + rdlp(mp, RX_OVERRUN_FRAME_CNT); +} + +static void mib_counters_update(struct mv643xx_eth_private *mp) +{ + struct mib_counters *p = &mp->mib_counters; + + spin_lock_bh(&mp->mib_counters_lock); + p->good_octets_received += mib_read(mp, 0x00); + p->bad_octets_received += mib_read(mp, 0x08); + p->internal_mac_transmit_err += mib_read(mp, 0x0c); + p->good_frames_received += mib_read(mp, 0x10); + p->bad_frames_received += mib_read(mp, 0x14); + p->broadcast_frames_received += mib_read(mp, 0x18); + p->multicast_frames_received += mib_read(mp, 0x1c); + p->frames_64_octets += mib_read(mp, 0x20); + p->frames_65_to_127_octets += mib_read(mp, 0x24); + p->frames_128_to_255_octets += mib_read(mp, 0x28); + p->frames_256_to_511_octets += mib_read(mp, 0x2c); + p->frames_512_to_1023_octets += mib_read(mp, 0x30); + p->frames_1024_to_max_octets += mib_read(mp, 0x34); + p->good_octets_sent += mib_read(mp, 0x38); + p->good_frames_sent += mib_read(mp, 0x40); + p->excessive_collision += mib_read(mp, 0x44); + p->multicast_frames_sent += mib_read(mp, 0x48); + p->broadcast_frames_sent += mib_read(mp, 0x4c); + p->unrec_mac_control_received += mib_read(mp, 0x50); + p->fc_sent += mib_read(mp, 0x54); + 
p->good_fc_received += mib_read(mp, 0x58); + p->bad_fc_received += mib_read(mp, 0x5c); + p->undersize_received += mib_read(mp, 0x60); + p->fragments_received += mib_read(mp, 0x64); + p->oversize_received += mib_read(mp, 0x68); + p->jabber_received += mib_read(mp, 0x6c); + p->mac_receive_error += mib_read(mp, 0x70); + p->bad_crc_event += mib_read(mp, 0x74); + p->collision += mib_read(mp, 0x78); + p->late_collision += mib_read(mp, 0x7c); + /* Non MIB hardware counters */ + p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT); + p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT); + spin_unlock_bh(&mp->mib_counters_lock); +} + +static void mib_counters_timer_wrapper(struct timer_list *t) +{ + struct mv643xx_eth_private *mp = from_timer(mp, t, mib_counters_timer); + mib_counters_update(mp); + mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ); +} + + +/* interrupt coalescing *****************************************************/ +/* + * Hardware coalescing parameters are set in units of 64 t_clk + * cycles. I.e.: + * + * coal_delay_in_usec = 64000000 * register_value / t_clk_rate + * + * register_value = coal_delay_in_usec * t_clk_rate / 64000000 + * + * In the ->set*() methods, we round the computed register value + * to the nearest integer. + */ +static unsigned int get_rx_coal(struct mv643xx_eth_private *mp) +{ + u32 val = rdlp(mp, SDMA_CONFIG); + u64 temp; + + if (mp->shared->extended_rx_coal_limit) + temp = ((val & 0x02000000) >> 10) | ((val & 0x003fff80) >> 7); + else + temp = (val & 0x003fff00) >> 8; + + temp *= 64000000; + temp += mp->t_clk / 2; + do_div(temp, mp->t_clk); + + return (unsigned int)temp; +} + +static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec) +{ + u64 temp; + u32 val; + + temp = (u64)usec * mp->t_clk; + temp += 31999999; + do_div(temp, 64000000); + + val = rdlp(mp, SDMA_CONFIG); + if (mp->shared->extended_rx_coal_limit) { + if (temp > 0xffff) + temp = 0xffff; + val &= ~0x023fff80; + val |= (temp & 0x8000) << 10; + val |= (temp & 0x7fff) << 7; + } else { + if (temp > 0x3fff) + temp = 0x3fff; + val &= ~0x003fff00; + val |= (temp & 0x3fff) << 8; + } + wrlp(mp, SDMA_CONFIG, val); +} + +static unsigned int get_tx_coal(struct mv643xx_eth_private *mp) +{ + u64 temp; + + temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4; + temp *= 64000000; + temp += mp->t_clk / 2; + do_div(temp, mp->t_clk); + + return (unsigned int)temp; +} + +static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec) +{ + u64 temp; + + temp = (u64)usec * mp->t_clk; + temp += 31999999; + do_div(temp, 64000000); + + if (temp > 0x3fff) + temp = 0x3fff; + + wrlp(mp, TX_FIFO_URGENT_THRESHOLD, temp << 4); +} + + +/* ethtool ******************************************************************/ +struct mv643xx_eth_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int netdev_off; + int mp_off; +}; + +#define SSTAT(m) \ + { #m, sizeof_field(struct net_device_stats, m), \ + offsetof(struct net_device, stats.m), -1 } + +#define MIBSTAT(m) \ + { #m, sizeof_field(struct mib_counters, m), \ + -1, offsetof(struct mv643xx_eth_private, mib_counters.m) } + +static const struct mv643xx_eth_stats mv643xx_eth_stats[] = { + SSTAT(rx_packets), + SSTAT(tx_packets), + SSTAT(rx_bytes), + SSTAT(tx_bytes), + SSTAT(rx_errors), + SSTAT(tx_errors), + SSTAT(rx_dropped), + SSTAT(tx_dropped), + MIBSTAT(good_octets_received), + MIBSTAT(bad_octets_received), + MIBSTAT(internal_mac_transmit_err), + MIBSTAT(good_frames_received), + MIBSTAT(bad_frames_received), + 
MIBSTAT(broadcast_frames_received), + MIBSTAT(multicast_frames_received), + MIBSTAT(frames_64_octets), + MIBSTAT(frames_65_to_127_octets), + MIBSTAT(frames_128_to_255_octets), + MIBSTAT(frames_256_to_511_octets), + MIBSTAT(frames_512_to_1023_octets), + MIBSTAT(frames_1024_to_max_octets), + MIBSTAT(good_octets_sent), + MIBSTAT(good_frames_sent), + MIBSTAT(excessive_collision), + MIBSTAT(multicast_frames_sent), + MIBSTAT(broadcast_frames_sent), + MIBSTAT(unrec_mac_control_received), + MIBSTAT(fc_sent), + MIBSTAT(good_fc_received), + MIBSTAT(bad_fc_received), + MIBSTAT(undersize_received), + MIBSTAT(fragments_received), + MIBSTAT(oversize_received), + MIBSTAT(jabber_received), + MIBSTAT(mac_receive_error), + MIBSTAT(bad_crc_event), + MIBSTAT(collision), + MIBSTAT(late_collision), + MIBSTAT(rx_discard), + MIBSTAT(rx_overrun), +}; + +static int +mv643xx_eth_get_link_ksettings_phy(struct mv643xx_eth_private *mp, + struct ethtool_link_ksettings *cmd) +{ + struct net_device *dev = mp->dev; + + phy_ethtool_ksettings_get(dev->phydev, cmd); + + /* + * The MAC does not support 1000baseT_Half. + */ + linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + cmd->link_modes.supported); + linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + cmd->link_modes.advertising); + + return 0; +} + +static int +mv643xx_eth_get_link_ksettings_phyless(struct mv643xx_eth_private *mp, + struct ethtool_link_ksettings *cmd) +{ + u32 port_status; + u32 supported, advertising; + + port_status = rdlp(mp, PORT_STATUS); + + supported = SUPPORTED_MII; + advertising = ADVERTISED_MII; + switch (port_status & PORT_SPEED_MASK) { + case PORT_SPEED_10: + cmd->base.speed = SPEED_10; + break; + case PORT_SPEED_100: + cmd->base.speed = SPEED_100; + break; + case PORT_SPEED_1000: + cmd->base.speed = SPEED_1000; + break; + default: + cmd->base.speed = -1; + break; + } + cmd->base.duplex = (port_status & FULL_DUPLEX) ? + DUPLEX_FULL : DUPLEX_HALF; + cmd->base.port = PORT_MII; + cmd->base.phy_address = 0; + cmd->base.autoneg = AUTONEG_DISABLE; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + + return 0; +} + +static void +mv643xx_eth_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + wol->supported = 0; + wol->wolopts = 0; + if (dev->phydev) + phy_ethtool_get_wol(dev->phydev, wol); +} + +static int +mv643xx_eth_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + int err; + + if (!dev->phydev) + return -EOPNOTSUPP; + + err = phy_ethtool_set_wol(dev->phydev, wol); + /* Given that mv643xx_eth works without the marvell-specific PHY driver, + * this debugging hint is useful to have. + */ + if (err == -EOPNOTSUPP) + netdev_info(dev, "The PHY does not support set_wol, was CONFIG_MARVELL_PHY enabled?\n"); + return err; +} + +static int +mv643xx_eth_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + + if (dev->phydev) + return mv643xx_eth_get_link_ksettings_phy(mp, cmd); + else + return mv643xx_eth_get_link_ksettings_phyless(mp, cmd); +} + +static int +mv643xx_eth_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) +{ + struct ethtool_link_ksettings c = *cmd; + u32 advertising; + int ret; + + if (!dev->phydev) + return -EINVAL; + + /* + * The MAC does not support 1000baseT_Half. 
+ */ + ethtool_convert_link_mode_to_legacy_u32(&advertising, + c.link_modes.advertising); + advertising &= ~ADVERTISED_1000baseT_Half; + ethtool_convert_legacy_u32_to_link_mode(c.link_modes.advertising, + advertising); + + ret = phy_ethtool_ksettings_set(dev->phydev, &c); + if (!ret) + mv643xx_eth_adjust_link(dev); + return ret; +} + +static void mv643xx_eth_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) +{ + strlcpy(drvinfo->driver, mv643xx_eth_driver_name, + sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, mv643xx_eth_driver_version, + sizeof(drvinfo->version)); + strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); + strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info)); +} + +static int +mv643xx_eth_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + + ec->rx_coalesce_usecs = get_rx_coal(mp); + ec->tx_coalesce_usecs = get_tx_coal(mp); + + return 0; +} + +static int +mv643xx_eth_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + + set_rx_coal(mp, ec->rx_coalesce_usecs); + set_tx_coal(mp, ec->tx_coalesce_usecs); + + return 0; +} + +static void +mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + + er->rx_max_pending = 4096; + er->tx_max_pending = 4096; + + er->rx_pending = mp->rx_ring_size; + er->tx_pending = mp->tx_ring_size; +} + +static int +mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + + if (er->rx_mini_pending || er->rx_jumbo_pending) + return -EINVAL; + + mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096; + mp->tx_ring_size = clamp_t(unsigned int, er->tx_pending, + MV643XX_MAX_SKB_DESCS * 2, 4096); + if (mp->tx_ring_size != er->tx_pending) + netdev_warn(dev, "TX queue size set to %u (requested %u)\n", + mp->tx_ring_size, er->tx_pending); + + if (netif_running(dev)) { + mv643xx_eth_stop(dev); + if (mv643xx_eth_open(dev)) { + netdev_err(dev, + "fatal error on re-opening device after ring param change\n"); + return -ENOMEM; + } + } + + return 0; +} + + +static int +mv643xx_eth_set_features(struct net_device *dev, netdev_features_t features) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + bool rx_csum = features & NETIF_F_RXCSUM; + + wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000); + + return 0; +} + +static void mv643xx_eth_get_strings(struct net_device *dev, + uint32_t stringset, uint8_t *data) +{ + int i; + + if (stringset == ETH_SS_STATS) { + for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) { + memcpy(data + i * ETH_GSTRING_LEN, + mv643xx_eth_stats[i].stat_string, + ETH_GSTRING_LEN); + } + } +} + +static void mv643xx_eth_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, + uint64_t *data) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + int i; + + mv643xx_eth_get_stats(dev); + mib_counters_update(mp); + + for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) { + const struct mv643xx_eth_stats *stat; + void *p; + + stat = mv643xx_eth_stats + i; + + if (stat->netdev_off >= 0) + p = ((void *)mp->dev) + stat->netdev_off; + else + p = ((void *)mp) + stat->mp_off; + + data[i] = (stat->sizeof_stat == 8) ? 
+ *(uint64_t *)p : *(uint32_t *)p; + } +} + +static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset) +{ + if (sset == ETH_SS_STATS) + return ARRAY_SIZE(mv643xx_eth_stats); + + return -EOPNOTSUPP; +} + +static const struct ethtool_ops mv643xx_eth_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS, + .get_drvinfo = mv643xx_eth_get_drvinfo, + .nway_reset = phy_ethtool_nway_reset, + .get_link = ethtool_op_get_link, + .get_coalesce = mv643xx_eth_get_coalesce, + .set_coalesce = mv643xx_eth_set_coalesce, + .get_ringparam = mv643xx_eth_get_ringparam, + .set_ringparam = mv643xx_eth_set_ringparam, + .get_strings = mv643xx_eth_get_strings, + .get_ethtool_stats = mv643xx_eth_get_ethtool_stats, + .get_sset_count = mv643xx_eth_get_sset_count, + .get_ts_info = ethtool_op_get_ts_info, + .get_wol = mv643xx_eth_get_wol, + .set_wol = mv643xx_eth_set_wol, + .get_link_ksettings = mv643xx_eth_get_link_ksettings, + .set_link_ksettings = mv643xx_eth_set_link_ksettings, +}; + + +/* address handling *********************************************************/ +static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr) +{ + unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH); + unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW); + + addr[0] = (mac_h >> 24) & 0xff; + addr[1] = (mac_h >> 16) & 0xff; + addr[2] = (mac_h >> 8) & 0xff; + addr[3] = mac_h & 0xff; + addr[4] = (mac_l >> 8) & 0xff; + addr[5] = mac_l & 0xff; +} + +static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr) +{ + wrlp(mp, MAC_ADDR_HIGH, + (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]); + wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]); +} + +static u32 uc_addr_filter_mask(struct net_device *dev) +{ + struct netdev_hw_addr *ha; + u32 nibbles; + + if (dev->flags & IFF_PROMISC) + return 0; + + nibbles = 1 << (dev->dev_addr[5] & 0x0f); + netdev_for_each_uc_addr(ha, dev) { + if (memcmp(dev->dev_addr, ha->addr, 5)) + return 0; + if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0) + return 0; + + nibbles |= 1 << (ha->addr[5] & 0x0f); + } + + return nibbles; +} + +static void mv643xx_eth_program_unicast_filter(struct net_device *dev) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + u32 port_config; + u32 nibbles; + int i; + + uc_addr_set(mp, dev->dev_addr); + + port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE; + + nibbles = uc_addr_filter_mask(dev); + if (!nibbles) { + port_config |= UNICAST_PROMISCUOUS_MODE; + nibbles = 0xffff; + } + + for (i = 0; i < 16; i += 4) { + int off = UNICAST_TABLE(mp->port_num) + i; + u32 v; + + v = 0; + if (nibbles & 1) + v |= 0x00000001; + if (nibbles & 2) + v |= 0x00000100; + if (nibbles & 4) + v |= 0x00010000; + if (nibbles & 8) + v |= 0x01000000; + nibbles >>= 4; + + wrl(mp, off, v); + } + + wrlp(mp, PORT_CONFIG, port_config); +} + +static int addr_crc(unsigned char *addr) +{ + int crc = 0; + int i; + + for (i = 0; i < 6; i++) { + int j; + + crc = (crc ^ addr[i]) << 8; + for (j = 7; j >= 0; j--) { + if (crc & (0x100 << j)) + crc ^= 0x107 << j; + } + } + + return crc; +} + +static void mv643xx_eth_program_multicast_filter(struct net_device *dev) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + u32 *mc_spec; + u32 *mc_other; + struct netdev_hw_addr *ha; + int i; + + if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) + goto promiscuous; + + /* Allocate both mc_spec and mc_other tables */ + mc_spec = kcalloc(128, sizeof(u32), GFP_ATOMIC); + if (!mc_spec) + goto promiscuous; + mc_other = &mc_spec[64]; + + netdev_for_each_mc_addr(ha, 
dev) { + u8 *a = ha->addr; + u32 *table; + u8 entry; + + if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) { + table = mc_spec; + entry = a[5]; + } else { + table = mc_other; + entry = addr_crc(a); + } + + table[entry >> 2] |= 1 << (8 * (entry & 3)); + } + + for (i = 0; i < 64; i++) { + wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i * sizeof(u32), + mc_spec[i]); + wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i * sizeof(u32), + mc_other[i]); + } + + kfree(mc_spec); + return; + +promiscuous: + for (i = 0; i < 64; i++) { + wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i * sizeof(u32), + 0x01010101u); + wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i * sizeof(u32), + 0x01010101u); + } +} + +static void mv643xx_eth_set_rx_mode(struct net_device *dev) +{ + mv643xx_eth_program_unicast_filter(dev); + mv643xx_eth_program_multicast_filter(dev); +} + +static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr) +{ + struct sockaddr *sa = addr; + + if (!is_valid_ether_addr(sa->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN); + + netif_addr_lock_bh(dev); + mv643xx_eth_program_unicast_filter(dev); + netif_addr_unlock_bh(dev); + + return 0; +} + + +/* rx/tx queue initialisation ***********************************************/ +static int rxq_init(struct mv643xx_eth_private *mp, int index) +{ + struct rx_queue *rxq = mp->rxq + index; + struct rx_desc *rx_desc; + int size; + int i; + + rxq->index = index; + + rxq->rx_ring_size = mp->rx_ring_size; + + rxq->rx_desc_count = 0; + rxq->rx_curr_desc = 0; + rxq->rx_used_desc = 0; + + size = rxq->rx_ring_size * sizeof(struct rx_desc); + + if (index == 0 && size <= mp->rx_desc_sram_size) { + rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr, + mp->rx_desc_sram_size); + rxq->rx_desc_dma = mp->rx_desc_sram_addr; + } else { + rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent, + size, &rxq->rx_desc_dma, + GFP_KERNEL); + } + + if (rxq->rx_desc_area == NULL) { + netdev_err(mp->dev, + "can't allocate rx ring (%d bytes)\n", size); + goto out; + } + memset(rxq->rx_desc_area, 0, size); + + rxq->rx_desc_area_size = size; + rxq->rx_skb = kcalloc(rxq->rx_ring_size, sizeof(*rxq->rx_skb), + GFP_KERNEL); + if (rxq->rx_skb == NULL) + goto out_free; + + rx_desc = rxq->rx_desc_area; + for (i = 0; i < rxq->rx_ring_size; i++) { + int nexti; + + nexti = i + 1; + if (nexti == rxq->rx_ring_size) + nexti = 0; + + rx_desc[i].next_desc_ptr = rxq->rx_desc_dma + + nexti * sizeof(struct rx_desc); + } + + return 0; + + +out_free: + if (index == 0 && size <= mp->rx_desc_sram_size) + iounmap(rxq->rx_desc_area); + else + dma_free_coherent(mp->dev->dev.parent, size, + rxq->rx_desc_area, + rxq->rx_desc_dma); + +out: + return -ENOMEM; +} + +static void rxq_deinit(struct rx_queue *rxq) +{ + struct mv643xx_eth_private *mp = rxq_to_mp(rxq); + int i; + + rxq_disable(rxq); + + for (i = 0; i < rxq->rx_ring_size; i++) { + if (rxq->rx_skb[i]) { + dev_consume_skb_any(rxq->rx_skb[i]); + rxq->rx_desc_count--; + } + } + + if (rxq->rx_desc_count) { + netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n", + rxq->rx_desc_count); + } + + if (rxq->index == 0 && + rxq->rx_desc_area_size <= mp->rx_desc_sram_size) + iounmap(rxq->rx_desc_area); + else + dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size, + rxq->rx_desc_area, rxq->rx_desc_dma); + + kfree(rxq->rx_skb); +} + +static int txq_init(struct mv643xx_eth_private *mp, int index) +{ + struct tx_queue *txq = mp->txq + index; + struct tx_desc *tx_desc; + int size; + int ret; + int i; + + txq->index = 
index; + + txq->tx_ring_size = mp->tx_ring_size; + + /* A queue must always have room for at least one skb. + * Therefore, stop the queue when the free entries reaches + * the maximum number of descriptors per skb. + */ + txq->tx_stop_threshold = txq->tx_ring_size - MV643XX_MAX_SKB_DESCS; + txq->tx_wake_threshold = txq->tx_stop_threshold / 2; + + txq->tx_desc_count = 0; + txq->tx_curr_desc = 0; + txq->tx_used_desc = 0; + + size = txq->tx_ring_size * sizeof(struct tx_desc); + + if (index == 0 && size <= mp->tx_desc_sram_size) { + txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr, + mp->tx_desc_sram_size); + txq->tx_desc_dma = mp->tx_desc_sram_addr; + } else { + txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent, + size, &txq->tx_desc_dma, + GFP_KERNEL); + } + + if (txq->tx_desc_area == NULL) { + netdev_err(mp->dev, + "can't allocate tx ring (%d bytes)\n", size); + return -ENOMEM; + } + memset(txq->tx_desc_area, 0, size); + + txq->tx_desc_area_size = size; + + tx_desc = txq->tx_desc_area; + for (i = 0; i < txq->tx_ring_size; i++) { + struct tx_desc *txd = tx_desc + i; + int nexti; + + nexti = i + 1; + if (nexti == txq->tx_ring_size) + nexti = 0; + + txd->cmd_sts = 0; + txd->next_desc_ptr = txq->tx_desc_dma + + nexti * sizeof(struct tx_desc); + } + + txq->tx_desc_mapping = kcalloc(txq->tx_ring_size, sizeof(char), + GFP_KERNEL); + if (!txq->tx_desc_mapping) { + ret = -ENOMEM; + goto err_free_desc_area; + } + + /* Allocate DMA buffers for TSO MAC/IP/TCP headers */ + txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent, + txq->tx_ring_size * TSO_HEADER_SIZE, + &txq->tso_hdrs_dma, GFP_KERNEL); + if (txq->tso_hdrs == NULL) { + ret = -ENOMEM; + goto err_free_desc_mapping; + } + skb_queue_head_init(&txq->tx_skb); + + return 0; + +err_free_desc_mapping: + kfree(txq->tx_desc_mapping); +err_free_desc_area: + if (index == 0 && size <= mp->tx_desc_sram_size) + iounmap(txq->tx_desc_area); + else + dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, + txq->tx_desc_area, txq->tx_desc_dma); + return ret; +} + +static void txq_deinit(struct tx_queue *txq) +{ + struct mv643xx_eth_private *mp = txq_to_mp(txq); + + txq_disable(txq); + txq_reclaim(txq, txq->tx_ring_size, 1); + + BUG_ON(txq->tx_used_desc != txq->tx_curr_desc); + + if (txq->index == 0 && + txq->tx_desc_area_size <= mp->tx_desc_sram_size) + iounmap(txq->tx_desc_area); + else + dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, + txq->tx_desc_area, txq->tx_desc_dma); + kfree(txq->tx_desc_mapping); + + if (txq->tso_hdrs) + dma_free_coherent(mp->dev->dev.parent, + txq->tx_ring_size * TSO_HEADER_SIZE, + txq->tso_hdrs, txq->tso_hdrs_dma); +} + + +/* netdev ops and related ***************************************************/ +static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp) +{ + u32 int_cause; + u32 int_cause_ext; + + int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask; + if (int_cause == 0) + return 0; + + int_cause_ext = 0; + if (int_cause & INT_EXT) { + int_cause &= ~INT_EXT; + int_cause_ext = rdlp(mp, INT_CAUSE_EXT); + } + + if (int_cause) { + wrlp(mp, INT_CAUSE, ~int_cause); + mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) & + ~(rdlp(mp, TXQ_COMMAND) & 0xff); + mp->work_rx |= (int_cause & INT_RX) >> 2; + } + + int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX; + if (int_cause_ext) { + wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext); + if (int_cause_ext & INT_EXT_LINK_PHY) + mp->work_link = 1; + mp->work_tx |= int_cause_ext & INT_EXT_TX; + } + + return 1; +} + +static irqreturn_t mv643xx_eth_irq(int irq, void 
*dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct mv643xx_eth_private *mp = netdev_priv(dev); + + if (unlikely(!mv643xx_eth_collect_events(mp))) + return IRQ_NONE; + + wrlp(mp, INT_MASK, 0); + napi_schedule(&mp->napi); + + return IRQ_HANDLED; +} + +static void handle_link_event(struct mv643xx_eth_private *mp) +{ + struct net_device *dev = mp->dev; + u32 port_status; + int speed; + int duplex; + int fc; + + port_status = rdlp(mp, PORT_STATUS); + if (!(port_status & LINK_UP)) { + if (netif_carrier_ok(dev)) { + int i; + + netdev_info(dev, "link down\n"); + + netif_carrier_off(dev); + + for (i = 0; i < mp->txq_count; i++) { + struct tx_queue *txq = mp->txq + i; + + txq_reclaim(txq, txq->tx_ring_size, 1); + txq_reset_hw_ptr(txq); + } + } + return; + } + + switch (port_status & PORT_SPEED_MASK) { + case PORT_SPEED_10: + speed = 10; + break; + case PORT_SPEED_100: + speed = 100; + break; + case PORT_SPEED_1000: + speed = 1000; + break; + default: + speed = -1; + break; + } + duplex = (port_status & FULL_DUPLEX) ? 1 : 0; + fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0; + + netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n", + speed, duplex ? "full" : "half", fc ? "en" : "dis"); + + if (!netif_carrier_ok(dev)) + netif_carrier_on(dev); +} + +static int mv643xx_eth_poll(struct napi_struct *napi, int budget) +{ + struct mv643xx_eth_private *mp; + int work_done; + + mp = container_of(napi, struct mv643xx_eth_private, napi); + + if (unlikely(mp->oom)) { + mp->oom = 0; + del_timer(&mp->rx_oom); + } + + work_done = 0; + while (work_done < budget) { + u8 queue_mask; + int queue; + int work_tbd; + + if (mp->work_link) { + mp->work_link = 0; + handle_link_event(mp); + work_done++; + continue; + } + + queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx; + if (likely(!mp->oom)) + queue_mask |= mp->work_rx_refill; + + if (!queue_mask) { + if (mv643xx_eth_collect_events(mp)) + continue; + break; + } + + queue = fls(queue_mask) - 1; + queue_mask = 1 << queue; + + work_tbd = budget - work_done; + if (work_tbd > 16) + work_tbd = 16; + + if (mp->work_tx_end & queue_mask) { + txq_kick(mp->txq + queue); + } else if (mp->work_tx & queue_mask) { + work_done += txq_reclaim(mp->txq + queue, work_tbd, 0); + txq_maybe_wake(mp->txq + queue); + } else if (mp->work_rx & queue_mask) { + work_done += rxq_process(mp->rxq + queue, work_tbd); + } else if (!mp->oom && (mp->work_rx_refill & queue_mask)) { + work_done += rxq_refill(mp->rxq + queue, work_tbd); + } else { + BUG(); + } + } + + if (work_done < budget) { + if (mp->oom) + mod_timer(&mp->rx_oom, jiffies + (HZ / 10)); + napi_complete_done(napi, work_done); + wrlp(mp, INT_MASK, mp->int_mask); + } + + return work_done; +} + +static inline void oom_timer_wrapper(struct timer_list *t) +{ + struct mv643xx_eth_private *mp = from_timer(mp, t, rx_oom); + + napi_schedule(&mp->napi); +} + +static void port_start(struct mv643xx_eth_private *mp) +{ + struct net_device *dev = mp->dev; + u32 pscr; + int i; + + /* + * Perform PHY reset, if there is a PHY. + */ + if (dev->phydev) { + struct ethtool_link_ksettings cmd; + + mv643xx_eth_get_link_ksettings(dev, &cmd); + phy_init_hw(dev->phydev); + mv643xx_eth_set_link_ksettings( + dev, (const struct ethtool_link_ksettings *)&cmd); + phy_start(dev->phydev); + } + + /* + * Configure basic link parameters. 
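+ * The code below enables the port first, then disables link-fail forcing;
+ * when no PHY is attached, FORCE_LINK_PASS is also set so the link is
+ * treated as up.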
+ */ + pscr = rdlp(mp, PORT_SERIAL_CONTROL); + + pscr |= SERIAL_PORT_ENABLE; + wrlp(mp, PORT_SERIAL_CONTROL, pscr); + + pscr |= DO_NOT_FORCE_LINK_FAIL; + if (!dev->phydev) + pscr |= FORCE_LINK_PASS; + wrlp(mp, PORT_SERIAL_CONTROL, pscr); + + /* + * Configure TX path and queues. + */ + tx_set_rate(mp, 1000000000, 16777216); + for (i = 0; i < mp->txq_count; i++) { + struct tx_queue *txq = mp->txq + i; + + txq_reset_hw_ptr(txq); + txq_set_rate(txq, 1000000000, 16777216); + txq_set_fixed_prio_mode(txq); + } + + /* + * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast + * frames to RX queue #0, and include the pseudo-header when + * calculating receive checksums. + */ + mv643xx_eth_set_features(mp->dev, mp->dev->features); + + /* + * Treat BPDUs as normal multicasts, and disable partition mode. + */ + wrlp(mp, PORT_CONFIG_EXT, 0x00000000); + + /* + * Add configured unicast addresses to address filter table. + */ + mv643xx_eth_program_unicast_filter(mp->dev); + + /* + * Enable the receive queues. + */ + for (i = 0; i < mp->rxq_count; i++) { + struct rx_queue *rxq = mp->rxq + i; + u32 addr; + + addr = (u32)rxq->rx_desc_dma; + addr += rxq->rx_curr_desc * sizeof(struct rx_desc); + wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr); + + rxq_enable(rxq); + } +} + +static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp) +{ + int skb_size; + + /* + * Reserve 2+14 bytes for an ethernet header (the hardware + * automatically prepends 2 bytes of dummy data to each + * received packet), 16 bytes for up to four VLAN tags, and + * 4 bytes for the trailing FCS -- 36 bytes total. + */ + skb_size = mp->dev->mtu + 36; + + /* + * Make sure that the skb size is a multiple of 8 bytes, as + * the lower three bits of the receive descriptor's buffer + * size field are ignored by the hardware. + */ + mp->skb_size = (skb_size + 7) & ~7; + + /* + * If NET_SKB_PAD is smaller than a cache line, + * netdev_alloc_skb() will cause skb->data to be misaligned + * to a cache line boundary. If this is the case, include + * some extra space to allow re-aligning the data area. 
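+ * (Illustrative numbers: with a 1500-byte MTU the size computed above is
+ * 1500 + 36 = 1536 bytes, already a multiple of 8, before this realignment
+ * slack is added.)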
+ */ + mp->skb_size += SKB_DMA_REALIGN; +} + +static int mv643xx_eth_open(struct net_device *dev) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + int err; + int i; + + wrlp(mp, INT_CAUSE, 0); + wrlp(mp, INT_CAUSE_EXT, 0); + rdlp(mp, INT_CAUSE_EXT); + + err = request_irq(dev->irq, mv643xx_eth_irq, + IRQF_SHARED, dev->name, dev); + if (err) { + netdev_err(dev, "can't assign irq\n"); + return -EAGAIN; + } + + mv643xx_eth_recalc_skb_size(mp); + + napi_enable(&mp->napi); + + mp->int_mask = INT_EXT; + + for (i = 0; i < mp->rxq_count; i++) { + err = rxq_init(mp, i); + if (err) { + while (--i >= 0) + rxq_deinit(mp->rxq + i); + goto out; + } + + rxq_refill(mp->rxq + i, INT_MAX); + mp->int_mask |= INT_RX_0 << i; + } + + if (mp->oom) { + mp->rx_oom.expires = jiffies + (HZ / 10); + add_timer(&mp->rx_oom); + } + + for (i = 0; i < mp->txq_count; i++) { + err = txq_init(mp, i); + if (err) { + while (--i >= 0) + txq_deinit(mp->txq + i); + goto out_free; + } + mp->int_mask |= INT_TX_END_0 << i; + } + + add_timer(&mp->mib_counters_timer); + port_start(mp); + + wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX); + wrlp(mp, INT_MASK, mp->int_mask); + + return 0; + + +out_free: + for (i = 0; i < mp->rxq_count; i++) + rxq_deinit(mp->rxq + i); +out: + napi_disable(&mp->napi); + free_irq(dev->irq, dev); + + return err; +} + +static void port_reset(struct mv643xx_eth_private *mp) +{ + unsigned int data; + int i; + + for (i = 0; i < mp->rxq_count; i++) + rxq_disable(mp->rxq + i); + for (i = 0; i < mp->txq_count; i++) + txq_disable(mp->txq + i); + + while (1) { + u32 ps = rdlp(mp, PORT_STATUS); + + if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY) + break; + udelay(10); + } + + /* Reset the Enable bit in the Configuration Register */ + data = rdlp(mp, PORT_SERIAL_CONTROL); + data &= ~(SERIAL_PORT_ENABLE | + DO_NOT_FORCE_LINK_FAIL | + FORCE_LINK_PASS); + wrlp(mp, PORT_SERIAL_CONTROL, data); +} + +static int mv643xx_eth_stop(struct net_device *dev) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + int i; + + wrlp(mp, INT_MASK_EXT, 0x00000000); + wrlp(mp, INT_MASK, 0x00000000); + rdlp(mp, INT_MASK); + + napi_disable(&mp->napi); + + del_timer_sync(&mp->rx_oom); + + netif_carrier_off(dev); + if (dev->phydev) + phy_stop(dev->phydev); + free_irq(dev->irq, dev); + + port_reset(mp); + mv643xx_eth_get_stats(dev); + mib_counters_update(mp); + del_timer_sync(&mp->mib_counters_timer); + + for (i = 0; i < mp->rxq_count; i++) + rxq_deinit(mp->rxq + i); + for (i = 0; i < mp->txq_count; i++) + txq_deinit(mp->txq + i); + + return 0; +} + +static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + int ret; + + if (!dev->phydev) + return -ENOTSUPP; + + ret = phy_mii_ioctl(dev->phydev, ifr, cmd); + if (!ret) + mv643xx_eth_adjust_link(dev); + return ret; +} + +static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + + dev->mtu = new_mtu; + mv643xx_eth_recalc_skb_size(mp); + tx_set_rate(mp, 1000000000, 16777216); + + if (!netif_running(dev)) + return 0; + + /* + * Stop and then re-open the interface. This will allocate RX + * skbs of the new MTU. + * There is a possible danger that the open will not succeed, + * due to memory being full. 
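+ * If the re-open fails, the error is only logged below and 0 is still
+ * returned, so the new MTU setting is kept while the port stays unusable
+ * until it is opened again.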
+ */ + mv643xx_eth_stop(dev); + if (mv643xx_eth_open(dev)) { + netdev_err(dev, + "fatal error on re-opening device after MTU change\n"); + } + + return 0; +} + +static void tx_timeout_task(struct work_struct *ugly) +{ + struct mv643xx_eth_private *mp; + + mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task); + if (netif_running(mp->dev)) { + netif_tx_stop_all_queues(mp->dev); + port_reset(mp); + port_start(mp); + netif_tx_wake_all_queues(mp->dev); + } +} + +static void mv643xx_eth_tx_timeout(struct net_device *dev, unsigned int txqueue) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + + netdev_info(dev, "tx timeout\n"); + + schedule_work(&mp->tx_timeout_task); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void mv643xx_eth_netpoll(struct net_device *dev) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + + wrlp(mp, INT_MASK, 0x00000000); + rdlp(mp, INT_MASK); + + mv643xx_eth_irq(dev->irq, dev); + + wrlp(mp, INT_MASK, mp->int_mask); +} +#endif + + +/* platform glue ************************************************************/ +static void +mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp, + const struct mbus_dram_target_info *dram) +{ + void __iomem *base = msp->base; + u32 win_enable; + u32 win_protect; + int i; + + for (i = 0; i < 6; i++) { + writel(0, base + WINDOW_BASE(i)); + writel(0, base + WINDOW_SIZE(i)); + if (i < 4) + writel(0, base + WINDOW_REMAP_HIGH(i)); + } + + win_enable = 0x3f; + win_protect = 0; + + for (i = 0; i < dram->num_cs; i++) { + const struct mbus_dram_window *cs = dram->cs + i; + + writel((cs->base & 0xffff0000) | + (cs->mbus_attr << 8) | + dram->mbus_dram_target_id, base + WINDOW_BASE(i)); + writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i)); + + win_enable &= ~(1 << i); + win_protect |= 3 << (2 * i); + } + + writel(win_enable, base + WINDOW_BAR_ENABLE); + msp->win_protect = win_protect; +} + +static void infer_hw_params(struct mv643xx_eth_shared_private *msp) +{ + /* + * Check whether we have a 14-bit coal limit field in bits + * [21:8], or a 16-bit coal limit in bits [25,21:7] of the + * SDMA config register. + */ + writel(0x02000000, msp->base + 0x0400 + SDMA_CONFIG); + if (readl(msp->base + 0x0400 + SDMA_CONFIG) & 0x02000000) + msp->extended_rx_coal_limit = 1; + else + msp->extended_rx_coal_limit = 0; + + /* + * Check whether the MAC supports TX rate control, and if + * yes, whether its associated registers are in the old or + * the new place. 
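+ * The probe below writes a test value and reads it back: if the bit
+ * sticks in TX_BW_MTU_MOVED the new register layout is assumed, otherwise
+ * TX_BW_RATE is tried to tell the old layout from no rate control at all.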
+ */ + writel(1, msp->base + 0x0400 + TX_BW_MTU_MOVED); + if (readl(msp->base + 0x0400 + TX_BW_MTU_MOVED) & 1) { + msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT; + } else { + writel(7, msp->base + 0x0400 + TX_BW_RATE); + if (readl(msp->base + 0x0400 + TX_BW_RATE) & 7) + msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT; + else + msp->tx_bw_control = TX_BW_CONTROL_ABSENT; + } +} + +#if defined(CONFIG_OF) +static const struct of_device_id mv643xx_eth_shared_ids[] = { + { .compatible = "marvell,orion-eth", }, + { .compatible = "marvell,kirkwood-eth", }, + { } +}; +MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids); +#endif + +#if defined(CONFIG_OF_IRQ) && !defined(CONFIG_MV64X60) +#define mv643xx_eth_property(_np, _name, _v) \ + do { \ + u32 tmp; \ + if (!of_property_read_u32(_np, "marvell," _name, &tmp)) \ + _v = tmp; \ + } while (0) + +static struct platform_device *port_platdev[3]; + +static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev, + struct device_node *pnp) +{ + struct platform_device *ppdev; + struct mv643xx_eth_platform_data ppd; + struct resource res; + const char *mac_addr; + int ret; + int dev_num = 0; + + memset(&ppd, 0, sizeof(ppd)); + ppd.shared = pdev; + + memset(&res, 0, sizeof(res)); + if (of_irq_to_resource(pnp, 0, &res) <= 0) { + dev_err(&pdev->dev, "missing interrupt on %pOFn\n", pnp); + return -EINVAL; + } + + if (of_property_read_u32(pnp, "reg", &ppd.port_number)) { + dev_err(&pdev->dev, "missing reg property on %pOFn\n", pnp); + return -EINVAL; + } + + if (ppd.port_number >= 3) { + dev_err(&pdev->dev, "invalid reg property on %pOFn\n", pnp); + return -EINVAL; + } + + while (dev_num < 3 && port_platdev[dev_num]) + dev_num++; + + if (dev_num == 3) { + dev_err(&pdev->dev, "too many ports registered\n"); + return -EINVAL; + } + + mac_addr = of_get_mac_address(pnp); + if (!IS_ERR(mac_addr)) + ether_addr_copy(ppd.mac_addr, mac_addr); + + mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size); + mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr); + mv643xx_eth_property(pnp, "tx-sram-size", ppd.tx_sram_size); + mv643xx_eth_property(pnp, "rx-queue-size", ppd.rx_queue_size); + mv643xx_eth_property(pnp, "rx-sram-addr", ppd.rx_sram_addr); + mv643xx_eth_property(pnp, "rx-sram-size", ppd.rx_sram_size); + + ppd.phy_node = of_parse_phandle(pnp, "phy-handle", 0); + if (!ppd.phy_node) { + ppd.phy_addr = MV643XX_ETH_PHY_NONE; + of_property_read_u32(pnp, "speed", &ppd.speed); + of_property_read_u32(pnp, "duplex", &ppd.duplex); + } + + ppdev = platform_device_alloc(MV643XX_ETH_NAME, dev_num); + if (!ppdev) + return -ENOMEM; + ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); + ppdev->dev.of_node = pnp; + + ret = platform_device_add_resources(ppdev, &res, 1); + if (ret) + goto port_err; + + ret = platform_device_add_data(ppdev, &ppd, sizeof(ppd)); + if (ret) + goto port_err; + + ret = platform_device_add(ppdev); + if (ret) + goto port_err; + + port_platdev[dev_num] = ppdev; + + return 0; + +port_err: + platform_device_put(ppdev); + return ret; +} + +static int mv643xx_eth_shared_of_probe(struct platform_device *pdev) +{ + struct mv643xx_eth_shared_platform_data *pd; + struct device_node *pnp, *np = pdev->dev.of_node; + int ret; + + /* bail out if not registered from DT */ + if (!np) + return 0; + + pd = devm_kzalloc(&pdev->dev, sizeof(*pd), GFP_KERNEL); + if (!pd) + return -ENOMEM; + pdev->dev.platform_data = pd; + + mv643xx_eth_property(np, "tx-checksum-limit", pd->tx_csum_limit); + + for_each_available_child_of_node(np, pnp) { + ret = 
mv643xx_eth_shared_of_add_port(pdev, pnp); + if (ret) { + of_node_put(pnp); + return ret; + } + } + return 0; +} + +static void mv643xx_eth_shared_of_remove(void) +{ + int n; + + for (n = 0; n < 3; n++) { + platform_device_del(port_platdev[n]); + port_platdev[n] = NULL; + } +} +#else +static inline int mv643xx_eth_shared_of_probe(struct platform_device *pdev) +{ + return 0; +} + +static inline void mv643xx_eth_shared_of_remove(void) +{ +} +#endif + +static int mv643xx_eth_shared_probe(struct platform_device *pdev) +{ + static int mv643xx_eth_version_printed; + struct mv643xx_eth_shared_platform_data *pd; + struct mv643xx_eth_shared_private *msp; + const struct mbus_dram_target_info *dram; + struct resource *res; + int ret; + + if (!mv643xx_eth_version_printed++) + pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n", + mv643xx_eth_driver_version); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res == NULL) + return -EINVAL; + + msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL); + if (msp == NULL) + return -ENOMEM; + platform_set_drvdata(pdev, msp); + + msp->base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); + if (msp->base == NULL) + return -ENOMEM; + + msp->clk = devm_clk_get(&pdev->dev, NULL); + if (!IS_ERR(msp->clk)) + clk_prepare_enable(msp->clk); + + /* + * (Re-)program MBUS remapping windows if we are asked to. + */ + dram = mv_mbus_dram_info(); + if (dram) + mv643xx_eth_conf_mbus_windows(msp, dram); + + ret = mv643xx_eth_shared_of_probe(pdev); + if (ret) + goto err_put_clk; + pd = dev_get_platdata(&pdev->dev); + + msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ? + pd->tx_csum_limit : 9 * 1024; + infer_hw_params(msp); + + return 0; + +err_put_clk: + if (!IS_ERR(msp->clk)) + clk_disable_unprepare(msp->clk); + return ret; +} + +static int mv643xx_eth_shared_remove(struct platform_device *pdev) +{ + struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev); + + mv643xx_eth_shared_of_remove(); + if (!IS_ERR(msp->clk)) + clk_disable_unprepare(msp->clk); + return 0; +} + +static struct platform_driver mv643xx_eth_shared_driver = { + .probe = mv643xx_eth_shared_probe, + .remove = mv643xx_eth_shared_remove, + .driver = { + .name = MV643XX_ETH_SHARED_NAME, + .of_match_table = of_match_ptr(mv643xx_eth_shared_ids), + }, +}; + +static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr) +{ + int addr_shift = 5 * mp->port_num; + u32 data; + + data = rdl(mp, PHY_ADDR); + data &= ~(0x1f << addr_shift); + data |= (phy_addr & 0x1f) << addr_shift; + wrl(mp, PHY_ADDR, data); +} + +static int phy_addr_get(struct mv643xx_eth_private *mp) +{ + unsigned int data; + + data = rdl(mp, PHY_ADDR); + + return (data >> (5 * mp->port_num)) & 0x1f; +} + +static void set_params(struct mv643xx_eth_private *mp, + struct mv643xx_eth_platform_data *pd) +{ + struct net_device *dev = mp->dev; + unsigned int tx_ring_size; + + if (is_valid_ether_addr(pd->mac_addr)) + memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN); + else + uc_addr_get(mp, dev->dev_addr); + + mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE; + if (pd->rx_queue_size) + mp->rx_ring_size = pd->rx_queue_size; + mp->rx_desc_sram_addr = pd->rx_sram_addr; + mp->rx_desc_sram_size = pd->rx_sram_size; + + mp->rxq_count = pd->rx_queue_count ? 
: 1; + + tx_ring_size = DEFAULT_TX_QUEUE_SIZE; + if (pd->tx_queue_size) + tx_ring_size = pd->tx_queue_size; + + mp->tx_ring_size = clamp_t(unsigned int, tx_ring_size, + MV643XX_MAX_SKB_DESCS * 2, 4096); + if (mp->tx_ring_size != tx_ring_size) + netdev_warn(dev, "TX queue size set to %u (requested %u)\n", + mp->tx_ring_size, tx_ring_size); + + mp->tx_desc_sram_addr = pd->tx_sram_addr; + mp->tx_desc_sram_size = pd->tx_sram_size; + + mp->txq_count = pd->tx_queue_count ? : 1; +} + +static int get_phy_mode(struct mv643xx_eth_private *mp) +{ + struct device *dev = mp->dev->dev.parent; + phy_interface_t iface; + int err; + + if (dev->of_node) + err = of_get_phy_mode(dev->of_node, &iface); + + /* Historical default if unspecified. We could also read/write + * the interface state in the PSC1 + */ + if (!dev->of_node || err) + iface = PHY_INTERFACE_MODE_GMII; + return iface; +} + +static struct phy_device *phy_scan(struct mv643xx_eth_private *mp, + int phy_addr) +{ + struct phy_device *phydev; + int start; + int num; + int i; + char phy_id[MII_BUS_ID_SIZE + 3]; + + if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) { + start = phy_addr_get(mp) & 0x1f; + num = 32; + } else { + start = phy_addr & 0x1f; + num = 1; + } + + /* Attempt to connect to the PHY using orion-mdio */ + phydev = ERR_PTR(-ENODEV); + for (i = 0; i < num; i++) { + int addr = (start + i) & 0x1f; + + snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, + "orion-mdio-mii", addr); + + phydev = phy_connect(mp->dev, phy_id, mv643xx_eth_adjust_link, + get_phy_mode(mp)); + if (!IS_ERR(phydev)) { + phy_addr_set(mp, addr); + break; + } + } + + return phydev; +} + +static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex) +{ + struct net_device *dev = mp->dev; + struct phy_device *phy = dev->phydev; + + if (speed == 0) { + phy->autoneg = AUTONEG_ENABLE; + phy->speed = 0; + phy->duplex = 0; + linkmode_copy(phy->advertising, phy->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + phy->advertising); + } else { + phy->autoneg = AUTONEG_DISABLE; + linkmode_zero(phy->advertising); + phy->speed = speed; + phy->duplex = duplex; + } + phy_start_aneg(phy); +} + +static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex) +{ + struct net_device *dev = mp->dev; + u32 pscr; + + pscr = rdlp(mp, PORT_SERIAL_CONTROL); + if (pscr & SERIAL_PORT_ENABLE) { + pscr &= ~SERIAL_PORT_ENABLE; + wrlp(mp, PORT_SERIAL_CONTROL, pscr); + } + + pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED; + if (!dev->phydev) { + pscr |= DISABLE_AUTO_NEG_SPEED_GMII; + if (speed == SPEED_1000) + pscr |= SET_GMII_SPEED_TO_1000; + else if (speed == SPEED_100) + pscr |= SET_MII_SPEED_TO_100; + + pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL; + + pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX; + if (duplex == DUPLEX_FULL) + pscr |= SET_FULL_DUPLEX_MODE; + } + + wrlp(mp, PORT_SERIAL_CONTROL, pscr); +} + +static const struct net_device_ops mv643xx_eth_netdev_ops = { + .ndo_open = mv643xx_eth_open, + .ndo_stop = mv643xx_eth_stop, + .ndo_start_xmit = mv643xx_eth_xmit, + .ndo_set_rx_mode = mv643xx_eth_set_rx_mode, + .ndo_set_mac_address = mv643xx_eth_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = mv643xx_eth_ioctl, + .ndo_change_mtu = mv643xx_eth_change_mtu, + .ndo_set_features = mv643xx_eth_set_features, + .ndo_tx_timeout = mv643xx_eth_tx_timeout, + .ndo_get_stats = mv643xx_eth_get_stats, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = mv643xx_eth_netpoll, +#endif +}; + +static int mv643xx_eth_probe(struct platform_device 
*pdev) +{ + struct mv643xx_eth_platform_data *pd; + struct mv643xx_eth_private *mp; + struct net_device *dev; + struct phy_device *phydev = NULL; + struct resource *res; + int err; + + pd = dev_get_platdata(&pdev->dev); + if (pd == NULL) { + dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n"); + return -ENODEV; + } + + if (pd->shared == NULL) { + dev_err(&pdev->dev, "no mv643xx_eth_platform_data->shared\n"); + return -ENODEV; + } + + dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8); + if (!dev) + return -ENOMEM; + + SET_NETDEV_DEV(dev, &pdev->dev); + mp = netdev_priv(dev); + platform_set_drvdata(pdev, mp); + + mp->shared = platform_get_drvdata(pd->shared); + mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10); + mp->port_num = pd->port_number; + + mp->dev = dev; + + /* Kirkwood resets some registers on gated clocks. Especially + * CLK125_BYPASS_EN must be cleared but is not available on + * all other SoCs/System Controllers using this driver. + */ + if (of_device_is_compatible(pdev->dev.of_node, + "marvell,kirkwood-eth-port")) + wrlp(mp, PORT_SERIAL_CONTROL1, + rdlp(mp, PORT_SERIAL_CONTROL1) & ~CLK125_BYPASS_EN); + + /* + * Start with a default rate, and if there is a clock, allow + * it to override the default. + */ + mp->t_clk = 133000000; + mp->clk = devm_clk_get(&pdev->dev, NULL); + if (!IS_ERR(mp->clk)) { + clk_prepare_enable(mp->clk); + mp->t_clk = clk_get_rate(mp->clk); + } else if (!IS_ERR(mp->shared->clk)) { + mp->t_clk = clk_get_rate(mp->shared->clk); + } + + set_params(mp, pd); + netif_set_real_num_tx_queues(dev, mp->txq_count); + netif_set_real_num_rx_queues(dev, mp->rxq_count); + + err = 0; + if (pd->phy_node) { + phydev = of_phy_connect(mp->dev, pd->phy_node, + mv643xx_eth_adjust_link, 0, + get_phy_mode(mp)); + if (!phydev) + err = -ENODEV; + else + phy_addr_set(mp, phydev->mdio.addr); + } else if (pd->phy_addr != MV643XX_ETH_PHY_NONE) { + phydev = phy_scan(mp, pd->phy_addr); + + if (IS_ERR(phydev)) + err = PTR_ERR(phydev); + else + phy_init(mp, pd->speed, pd->duplex); + } + if (err == -ENODEV) { + err = -EPROBE_DEFER; + goto out; + } + if (err) + goto out; + + dev->ethtool_ops = &mv643xx_eth_ethtool_ops; + + init_pscr(mp, pd->speed, pd->duplex); + + + mib_counters_clear(mp); + + timer_setup(&mp->mib_counters_timer, mib_counters_timer_wrapper, 0); + mp->mib_counters_timer.expires = jiffies + 30 * HZ; + + spin_lock_init(&mp->mib_counters_lock); + + INIT_WORK(&mp->tx_timeout_task, tx_timeout_task); + + netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, NAPI_POLL_WEIGHT); + + timer_setup(&mp->rx_oom, oom_timer_wrapper, 0); + + + res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + BUG_ON(!res); + dev->irq = res->start; + + dev->netdev_ops = &mv643xx_eth_netdev_ops; + + dev->watchdog_timeo = 2 * HZ; + dev->base_addr = 0; + + dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; + dev->vlan_features = dev->features; + + dev->features |= NETIF_F_RXCSUM; + dev->hw_features = dev->features; + + dev->priv_flags |= IFF_UNICAST_FLT; + dev->gso_max_segs = MV643XX_MAX_TSO_SEGS; + + /* MTU range: 64 - 9500 */ + dev->min_mtu = 64; + dev->max_mtu = 9500; + + if (mp->shared->win_protect) + wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect); + + netif_carrier_off(dev); + + wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE); + + set_rx_coal(mp, 250); + set_tx_coal(mp, 0); + + err = register_netdev(dev); + if (err) + goto out; + + netdev_notice(dev, "port %d with MAC address %pM\n", + mp->port_num, dev->dev_addr); + + if (mp->tx_desc_sram_size > 
0) + netdev_notice(dev, "configured with sram\n"); + + return 0; + +out: + if (!IS_ERR(mp->clk)) + clk_disable_unprepare(mp->clk); + free_netdev(dev); + + return err; +} + +static int mv643xx_eth_remove(struct platform_device *pdev) +{ + struct mv643xx_eth_private *mp = platform_get_drvdata(pdev); + struct net_device *dev = mp->dev; + + unregister_netdev(mp->dev); + if (dev->phydev) + phy_disconnect(dev->phydev); + cancel_work_sync(&mp->tx_timeout_task); + + if (!IS_ERR(mp->clk)) + clk_disable_unprepare(mp->clk); + + free_netdev(mp->dev); + + return 0; +} + +static void mv643xx_eth_shutdown(struct platform_device *pdev) +{ + struct mv643xx_eth_private *mp = platform_get_drvdata(pdev); + + /* Mask all interrupts on ethernet port */ + wrlp(mp, INT_MASK, 0); + rdlp(mp, INT_MASK); + + if (netif_running(mp->dev)) + port_reset(mp); +} + +static struct platform_driver mv643xx_eth_driver = { + .probe = mv643xx_eth_probe, + .remove = mv643xx_eth_remove, + .shutdown = mv643xx_eth_shutdown, + .driver = { + .name = MV643XX_ETH_NAME, + }, +}; + +static struct platform_driver * const drivers[] = { + &mv643xx_eth_shared_driver, + &mv643xx_eth_driver, +}; + +static int __init mv643xx_eth_init_module(void) +{ + return platform_register_drivers(drivers, ARRAY_SIZE(drivers)); +} +module_init(mv643xx_eth_init_module); + +static void __exit mv643xx_eth_cleanup_module(void) +{ + platform_unregister_drivers(drivers, ARRAY_SIZE(drivers)); +} +module_exit(mv643xx_eth_cleanup_module); + +MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, " + "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek"); +MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME); +MODULE_ALIAS("platform:" MV643XX_ETH_NAME); diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c new file mode 100644 index 000000000..d14762d93 --- /dev/null +++ b/drivers/net/ethernet/marvell/mvmdio.c @@ -0,0 +1,438 @@ +/* + * Driver for the MDIO interface of Marvell network interfaces. + * + * Since the MDIO interface of Marvell network interfaces is shared + * between all network interfaces, having a single driver allows to + * handle concurrent accesses properly (you may have four Ethernet + * ports, but they in fact share the same SMI interface to access + * the MDIO bus). This driver is currently used by the mvneta and + * mv643xx_eth drivers. + * + * Copyright (C) 2012 Marvell + * + * Thomas Petazzoni <thomas.petazzoni@free-electrons.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/of_mdio.h> +#include <linux/phy.h> +#include <linux/platform_device.h> +#include <linux/sched.h> +#include <linux/wait.h> + +#define MVMDIO_SMI_DATA_SHIFT 0 +#define MVMDIO_SMI_PHY_ADDR_SHIFT 16 +#define MVMDIO_SMI_PHY_REG_SHIFT 21 +#define MVMDIO_SMI_READ_OPERATION BIT(26) +#define MVMDIO_SMI_WRITE_OPERATION 0 +#define MVMDIO_SMI_READ_VALID BIT(27) +#define MVMDIO_SMI_BUSY BIT(28) +#define MVMDIO_ERR_INT_CAUSE 0x007C +#define MVMDIO_ERR_INT_SMI_DONE 0x00000010 +#define MVMDIO_ERR_INT_MASK 0x0080 + +#define MVMDIO_XSMI_MGNT_REG 0x0 +#define MVMDIO_XSMI_PHYADDR_SHIFT 16 +#define MVMDIO_XSMI_DEVADDR_SHIFT 21 +#define MVMDIO_XSMI_WRITE_OPERATION (0x5 << 26) +#define MVMDIO_XSMI_READ_OPERATION (0x7 << 26) +#define MVMDIO_XSMI_READ_VALID BIT(29) +#define MVMDIO_XSMI_BUSY BIT(30) +#define MVMDIO_XSMI_ADDR_REG 0x8 + +/* + * SMI Timeout measurements: + * - Kirkwood 88F6281 (Globalscale Dreamplug): 45us to 95us (Interrupt) + * - Armada 370 (Globalscale Mirabox): 41us to 43us (Polled) + */ +#define MVMDIO_SMI_TIMEOUT 1000 /* 1000us = 1ms */ +#define MVMDIO_SMI_POLL_INTERVAL_MIN 45 +#define MVMDIO_SMI_POLL_INTERVAL_MAX 55 + +#define MVMDIO_XSMI_POLL_INTERVAL_MIN 150 +#define MVMDIO_XSMI_POLL_INTERVAL_MAX 160 + +struct orion_mdio_dev { + void __iomem *regs; + struct clk *clk[4]; + /* + * If we have access to the error interrupt pin (which is + * somewhat misnamed as it not only reflects internal errors + * but also reflects SMI completion), use that to wait for + * SMI access completion instead of polling the SMI busy bit. + */ + int err_interrupt; + wait_queue_head_t smi_busy_wait; +}; + +enum orion_mdio_bus_type { + BUS_TYPE_SMI, + BUS_TYPE_XSMI +}; + +struct orion_mdio_ops { + int (*is_done)(struct orion_mdio_dev *); + unsigned int poll_interval_min; + unsigned int poll_interval_max; +}; + +/* Wait for the SMI unit to be ready for another operation + */ +static int orion_mdio_wait_ready(const struct orion_mdio_ops *ops, + struct mii_bus *bus) +{ + struct orion_mdio_dev *dev = bus->priv; + unsigned long timeout = usecs_to_jiffies(MVMDIO_SMI_TIMEOUT); + unsigned long end = jiffies + timeout; + int timedout = 0; + + while (1) { + if (ops->is_done(dev)) + return 0; + else if (timedout) + break; + + if (dev->err_interrupt <= 0) { + usleep_range(ops->poll_interval_min, + ops->poll_interval_max); + + if (time_is_before_jiffies(end)) + ++timedout; + } else { + /* wait_event_timeout does not guarantee a delay of at + * least one whole jiffie, so timeout must be no less + * than two. 
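+ * (For example, with HZ=100 the 1 ms MVMDIO_SMI_TIMEOUT converts to a
+ * single jiffy, which is why it is bumped to 2 here.)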
+ */ + if (timeout < 2) + timeout = 2; + wait_event_timeout(dev->smi_busy_wait, + ops->is_done(dev), timeout); + + ++timedout; + } + } + + dev_err(bus->parent, "Timeout: SMI busy for too long\n"); + return -ETIMEDOUT; +} + +static int orion_mdio_smi_is_done(struct orion_mdio_dev *dev) +{ + return !(readl(dev->regs) & MVMDIO_SMI_BUSY); +} + +static const struct orion_mdio_ops orion_mdio_smi_ops = { + .is_done = orion_mdio_smi_is_done, + .poll_interval_min = MVMDIO_SMI_POLL_INTERVAL_MIN, + .poll_interval_max = MVMDIO_SMI_POLL_INTERVAL_MAX, +}; + +static int orion_mdio_smi_read(struct mii_bus *bus, int mii_id, + int regnum) +{ + struct orion_mdio_dev *dev = bus->priv; + u32 val; + int ret; + + if (regnum & MII_ADDR_C45) + return -EOPNOTSUPP; + + ret = orion_mdio_wait_ready(&orion_mdio_smi_ops, bus); + if (ret < 0) + return ret; + + writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) | + (regnum << MVMDIO_SMI_PHY_REG_SHIFT) | + MVMDIO_SMI_READ_OPERATION), + dev->regs); + + ret = orion_mdio_wait_ready(&orion_mdio_smi_ops, bus); + if (ret < 0) + return ret; + + val = readl(dev->regs); + if (!(val & MVMDIO_SMI_READ_VALID)) { + dev_err(bus->parent, "SMI bus read not valid\n"); + return -ENODEV; + } + + return val & GENMASK(15, 0); +} + +static int orion_mdio_smi_write(struct mii_bus *bus, int mii_id, + int regnum, u16 value) +{ + struct orion_mdio_dev *dev = bus->priv; + int ret; + + if (regnum & MII_ADDR_C45) + return -EOPNOTSUPP; + + ret = orion_mdio_wait_ready(&orion_mdio_smi_ops, bus); + if (ret < 0) + return ret; + + writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) | + (regnum << MVMDIO_SMI_PHY_REG_SHIFT) | + MVMDIO_SMI_WRITE_OPERATION | + (value << MVMDIO_SMI_DATA_SHIFT)), + dev->regs); + + return 0; +} + +static int orion_mdio_xsmi_is_done(struct orion_mdio_dev *dev) +{ + return !(readl(dev->regs + MVMDIO_XSMI_MGNT_REG) & MVMDIO_XSMI_BUSY); +} + +static const struct orion_mdio_ops orion_mdio_xsmi_ops = { + .is_done = orion_mdio_xsmi_is_done, + .poll_interval_min = MVMDIO_XSMI_POLL_INTERVAL_MIN, + .poll_interval_max = MVMDIO_XSMI_POLL_INTERVAL_MAX, +}; + +static int orion_mdio_xsmi_read(struct mii_bus *bus, int mii_id, + int regnum) +{ + struct orion_mdio_dev *dev = bus->priv; + u16 dev_addr = (regnum >> 16) & GENMASK(4, 0); + int ret; + + if (!(regnum & MII_ADDR_C45)) + return -EOPNOTSUPP; + + ret = orion_mdio_wait_ready(&orion_mdio_xsmi_ops, bus); + if (ret < 0) + return ret; + + writel(regnum & GENMASK(15, 0), dev->regs + MVMDIO_XSMI_ADDR_REG); + writel((mii_id << MVMDIO_XSMI_PHYADDR_SHIFT) | + (dev_addr << MVMDIO_XSMI_DEVADDR_SHIFT) | + MVMDIO_XSMI_READ_OPERATION, + dev->regs + MVMDIO_XSMI_MGNT_REG); + + ret = orion_mdio_wait_ready(&orion_mdio_xsmi_ops, bus); + if (ret < 0) + return ret; + + if (!(readl(dev->regs + MVMDIO_XSMI_MGNT_REG) & + MVMDIO_XSMI_READ_VALID)) { + dev_err(bus->parent, "XSMI bus read not valid\n"); + return -ENODEV; + } + + return readl(dev->regs + MVMDIO_XSMI_MGNT_REG) & GENMASK(15, 0); +} + +static int orion_mdio_xsmi_write(struct mii_bus *bus, int mii_id, + int regnum, u16 value) +{ + struct orion_mdio_dev *dev = bus->priv; + u16 dev_addr = (regnum >> 16) & GENMASK(4, 0); + int ret; + + if (!(regnum & MII_ADDR_C45)) + return -EOPNOTSUPP; + + ret = orion_mdio_wait_ready(&orion_mdio_xsmi_ops, bus); + if (ret < 0) + return ret; + + writel(regnum & GENMASK(15, 0), dev->regs + MVMDIO_XSMI_ADDR_REG); + writel((mii_id << MVMDIO_XSMI_PHYADDR_SHIFT) | + (dev_addr << MVMDIO_XSMI_DEVADDR_SHIFT) | + MVMDIO_XSMI_WRITE_OPERATION | value, + dev->regs + MVMDIO_XSMI_MGNT_REG); + + return 
0; +} + +static irqreturn_t orion_mdio_err_irq(int irq, void *dev_id) +{ + struct orion_mdio_dev *dev = dev_id; + + if (readl(dev->regs + MVMDIO_ERR_INT_CAUSE) & + MVMDIO_ERR_INT_SMI_DONE) { + writel(~MVMDIO_ERR_INT_SMI_DONE, + dev->regs + MVMDIO_ERR_INT_CAUSE); + wake_up(&dev->smi_busy_wait); + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +static int orion_mdio_probe(struct platform_device *pdev) +{ + enum orion_mdio_bus_type type; + struct resource *r; + struct mii_bus *bus; + struct orion_mdio_dev *dev; + int i, ret; + + type = (enum orion_mdio_bus_type)of_device_get_match_data(&pdev->dev); + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r) { + dev_err(&pdev->dev, "No SMI register address given\n"); + return -ENODEV; + } + + bus = devm_mdiobus_alloc_size(&pdev->dev, + sizeof(struct orion_mdio_dev)); + if (!bus) + return -ENOMEM; + + switch (type) { + case BUS_TYPE_SMI: + bus->read = orion_mdio_smi_read; + bus->write = orion_mdio_smi_write; + break; + case BUS_TYPE_XSMI: + bus->read = orion_mdio_xsmi_read; + bus->write = orion_mdio_xsmi_write; + break; + } + + bus->name = "orion_mdio_bus"; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", + dev_name(&pdev->dev)); + bus->parent = &pdev->dev; + + dev = bus->priv; + dev->regs = devm_ioremap(&pdev->dev, r->start, resource_size(r)); + if (!dev->regs) { + dev_err(&pdev->dev, "Unable to remap SMI register\n"); + return -ENODEV; + } + + init_waitqueue_head(&dev->smi_busy_wait); + + if (pdev->dev.of_node) { + for (i = 0; i < ARRAY_SIZE(dev->clk); i++) { + dev->clk[i] = of_clk_get(pdev->dev.of_node, i); + if (PTR_ERR(dev->clk[i]) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto out_clk; + } + if (IS_ERR(dev->clk[i])) + break; + clk_prepare_enable(dev->clk[i]); + } + + if (!IS_ERR(of_clk_get(pdev->dev.of_node, + ARRAY_SIZE(dev->clk)))) + dev_warn(&pdev->dev, + "unsupported number of clocks, limiting to the first " + __stringify(ARRAY_SIZE(dev->clk)) "\n"); + } else { + dev->clk[0] = clk_get(&pdev->dev, NULL); + if (PTR_ERR(dev->clk[0]) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto out_clk; + } + if (!IS_ERR(dev->clk[0])) + clk_prepare_enable(dev->clk[0]); + } + + + dev->err_interrupt = platform_get_irq_optional(pdev, 0); + if (dev->err_interrupt > 0 && + resource_size(r) < MVMDIO_ERR_INT_MASK + 4) { + dev_err(&pdev->dev, + "disabling interrupt, resource size is too small\n"); + dev->err_interrupt = 0; + } + if (dev->err_interrupt > 0) { + ret = devm_request_irq(&pdev->dev, dev->err_interrupt, + orion_mdio_err_irq, + IRQF_SHARED, pdev->name, dev); + if (ret) + goto out_mdio; + + writel(MVMDIO_ERR_INT_SMI_DONE, + dev->regs + MVMDIO_ERR_INT_MASK); + + } else if (dev->err_interrupt == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto out_mdio; + } + + ret = of_mdiobus_register(bus, pdev->dev.of_node); + if (ret < 0) { + dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret); + goto out_mdio; + } + + platform_set_drvdata(pdev, bus); + + return 0; + +out_mdio: + if (dev->err_interrupt > 0) + writel(0, dev->regs + MVMDIO_ERR_INT_MASK); + +out_clk: + for (i = 0; i < ARRAY_SIZE(dev->clk); i++) { + if (IS_ERR(dev->clk[i])) + break; + clk_disable_unprepare(dev->clk[i]); + clk_put(dev->clk[i]); + } + + return ret; +} + +static int orion_mdio_remove(struct platform_device *pdev) +{ + struct mii_bus *bus = platform_get_drvdata(pdev); + struct orion_mdio_dev *dev = bus->priv; + int i; + + if (dev->err_interrupt > 0) + writel(0, dev->regs + MVMDIO_ERR_INT_MASK); + mdiobus_unregister(bus); + + for (i = 0; i < ARRAY_SIZE(dev->clk); i++) { + 
if (IS_ERR(dev->clk[i])) + break; + clk_disable_unprepare(dev->clk[i]); + clk_put(dev->clk[i]); + } + + return 0; +} + +static const struct of_device_id orion_mdio_match[] = { + { .compatible = "marvell,orion-mdio", .data = (void *)BUS_TYPE_SMI }, + { .compatible = "marvell,xmdio", .data = (void *)BUS_TYPE_XSMI }, + { } +}; +MODULE_DEVICE_TABLE(of, orion_mdio_match); + +static struct platform_driver orion_mdio_driver = { + .probe = orion_mdio_probe, + .remove = orion_mdio_remove, + .driver = { + .name = "orion-mdio", + .of_match_table = orion_mdio_match, + }, +}; + +module_platform_driver(orion_mdio_driver); + +MODULE_DESCRIPTION("Marvell MDIO interface driver"); +MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:orion-mdio"); diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c new file mode 100644 index 000000000..3656a3937 --- /dev/null +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -0,0 +1,5506 @@ +/* + * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs. + * + * Copyright (C) 2012 Marvell + * + * Rami Rosen <rosenr@marvell.com> + * Thomas Petazzoni <thomas.petazzoni@free-electrons.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include <linux/clk.h> +#include <linux/cpu.h> +#include <linux/etherdevice.h> +#include <linux/if_vlan.h> +#include <linux/inetdevice.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/mbus.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> +#include <linux/phy/phy.h> +#include <linux/phy.h> +#include <linux/phylink.h> +#include <linux/platform_device.h> +#include <linux/skbuff.h> +#include <net/hwbm.h> +#include "mvneta_bm.h" +#include <net/ip.h> +#include <net/ipv6.h> +#include <net/tso.h> +#include <net/page_pool.h> +#include <linux/bpf_trace.h> + +/* Registers */ +#define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2)) +#define MVNETA_RXQ_HW_BUF_ALLOC BIT(0) +#define MVNETA_RXQ_SHORT_POOL_ID_SHIFT 4 +#define MVNETA_RXQ_SHORT_POOL_ID_MASK 0x30 +#define MVNETA_RXQ_LONG_POOL_ID_SHIFT 6 +#define MVNETA_RXQ_LONG_POOL_ID_MASK 0xc0 +#define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8) +#define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8) +#define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2)) +#define MVNETA_RXQ_NON_OCCUPIED(v) ((v) << 16) +#define MVNETA_RXQ_BASE_ADDR_REG(q) (0x1480 + ((q) << 2)) +#define MVNETA_RXQ_SIZE_REG(q) (0x14a0 + ((q) << 2)) +#define MVNETA_RXQ_BUF_SIZE_SHIFT 19 +#define MVNETA_RXQ_BUF_SIZE_MASK (0x1fff << 19) +#define MVNETA_RXQ_STATUS_REG(q) (0x14e0 + ((q) << 2)) +#define MVNETA_RXQ_OCCUPIED_ALL_MASK 0x3fff +#define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2)) +#define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16 +#define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255 +#define MVNETA_PORT_POOL_BUFFER_SZ_REG(pool) (0x1700 + ((pool) << 2)) +#define MVNETA_PORT_POOL_BUFFER_SZ_SHIFT 3 +#define MVNETA_PORT_POOL_BUFFER_SZ_MASK 0xfff8 +#define MVNETA_PORT_RX_RESET 0x1cc0 +#define MVNETA_PORT_RX_DMA_RESET BIT(0) +#define MVNETA_PHY_ADDR 0x2000 +#define MVNETA_PHY_ADDR_MASK 0x1f +#define MVNETA_MBUS_RETRY 0x2010 +#define MVNETA_UNIT_INTR_CAUSE 0x2080 +#define MVNETA_UNIT_CONTROL 
0x20B0 +#define MVNETA_PHY_POLLING_ENABLE BIT(1) +#define MVNETA_WIN_BASE(w) (0x2200 + ((w) << 3)) +#define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3)) +#define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2)) +#define MVNETA_BASE_ADDR_ENABLE 0x2290 +#define MVNETA_ACCESS_PROTECT_ENABLE 0x2294 +#define MVNETA_PORT_CONFIG 0x2400 +#define MVNETA_UNI_PROMISC_MODE BIT(0) +#define MVNETA_DEF_RXQ(q) ((q) << 1) +#define MVNETA_DEF_RXQ_ARP(q) ((q) << 4) +#define MVNETA_TX_UNSET_ERR_SUM BIT(12) +#define MVNETA_DEF_RXQ_TCP(q) ((q) << 16) +#define MVNETA_DEF_RXQ_UDP(q) ((q) << 19) +#define MVNETA_DEF_RXQ_BPDU(q) ((q) << 22) +#define MVNETA_RX_CSUM_WITH_PSEUDO_HDR BIT(25) +#define MVNETA_PORT_CONFIG_DEFL_VALUE(q) (MVNETA_DEF_RXQ(q) | \ + MVNETA_DEF_RXQ_ARP(q) | \ + MVNETA_DEF_RXQ_TCP(q) | \ + MVNETA_DEF_RXQ_UDP(q) | \ + MVNETA_DEF_RXQ_BPDU(q) | \ + MVNETA_TX_UNSET_ERR_SUM | \ + MVNETA_RX_CSUM_WITH_PSEUDO_HDR) +#define MVNETA_PORT_CONFIG_EXTEND 0x2404 +#define MVNETA_MAC_ADDR_LOW 0x2414 +#define MVNETA_MAC_ADDR_HIGH 0x2418 +#define MVNETA_SDMA_CONFIG 0x241c +#define MVNETA_SDMA_BRST_SIZE_16 4 +#define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1) +#define MVNETA_RX_NO_DATA_SWAP BIT(4) +#define MVNETA_TX_NO_DATA_SWAP BIT(5) +#define MVNETA_DESC_SWAP BIT(6) +#define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22) +#define MVNETA_PORT_STATUS 0x2444 +#define MVNETA_TX_IN_PRGRS BIT(0) +#define MVNETA_TX_FIFO_EMPTY BIT(8) +#define MVNETA_RX_MIN_FRAME_SIZE 0x247c +/* Only exists on Armada XP and Armada 370 */ +#define MVNETA_SERDES_CFG 0x24A0 +#define MVNETA_SGMII_SERDES_PROTO 0x0cc7 +#define MVNETA_QSGMII_SERDES_PROTO 0x0667 +#define MVNETA_HSGMII_SERDES_PROTO 0x1107 +#define MVNETA_TYPE_PRIO 0x24bc +#define MVNETA_FORCE_UNI BIT(21) +#define MVNETA_TXQ_CMD_1 0x24e4 +#define MVNETA_TXQ_CMD 0x2448 +#define MVNETA_TXQ_DISABLE_SHIFT 8 +#define MVNETA_TXQ_ENABLE_MASK 0x000000ff +#define MVNETA_RX_DISCARD_FRAME_COUNT 0x2484 +#define MVNETA_OVERRUN_FRAME_COUNT 0x2488 +#define MVNETA_GMAC_CLOCK_DIVIDER 0x24f4 +#define MVNETA_GMAC_1MS_CLOCK_ENABLE BIT(31) +#define MVNETA_ACC_MODE 0x2500 +#define MVNETA_BM_ADDRESS 0x2504 +#define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2)) +#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff +#define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00 +#define MVNETA_CPU_RXQ_ACCESS(rxq) BIT(rxq) +#define MVNETA_CPU_TXQ_ACCESS(txq) BIT(txq + 8) +#define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2)) + +/* Exception Interrupt Port/Queue Cause register + * + * Their behavior depend of the mapping done using the PCPX2Q + * registers. For a given CPU if the bit associated to a queue is not + * set, then for the register a read from this CPU will always return + * 0 and a write won't do anything + */ + +#define MVNETA_INTR_NEW_CAUSE 0x25a0 +#define MVNETA_INTR_NEW_MASK 0x25a4 + +/* bits 0..7 = TXQ SENT, one bit per queue. + * bits 8..15 = RXQ OCCUP, one bit per queue. + * bits 16..23 = RXQ FREE, one bit per queue. + * bit 29 = OLD_REG_SUM, see old reg ? 
+ * bit 30 = TX_ERR_SUM, one bit for 4 ports + * bit 31 = MISC_SUM, one bit for 4 ports + */ +#define MVNETA_TX_INTR_MASK(nr_txqs) (((1 << nr_txqs) - 1) << 0) +#define MVNETA_TX_INTR_MASK_ALL (0xff << 0) +#define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8) +#define MVNETA_RX_INTR_MASK_ALL (0xff << 8) +#define MVNETA_MISCINTR_INTR_MASK BIT(31) + +#define MVNETA_INTR_OLD_CAUSE 0x25a8 +#define MVNETA_INTR_OLD_MASK 0x25ac + +/* Data Path Port/Queue Cause Register */ +#define MVNETA_INTR_MISC_CAUSE 0x25b0 +#define MVNETA_INTR_MISC_MASK 0x25b4 + +#define MVNETA_CAUSE_PHY_STATUS_CHANGE BIT(0) +#define MVNETA_CAUSE_LINK_CHANGE BIT(1) +#define MVNETA_CAUSE_PTP BIT(4) + +#define MVNETA_CAUSE_INTERNAL_ADDR_ERR BIT(7) +#define MVNETA_CAUSE_RX_OVERRUN BIT(8) +#define MVNETA_CAUSE_RX_CRC_ERROR BIT(9) +#define MVNETA_CAUSE_RX_LARGE_PKT BIT(10) +#define MVNETA_CAUSE_TX_UNDERUN BIT(11) +#define MVNETA_CAUSE_PRBS_ERR BIT(12) +#define MVNETA_CAUSE_PSC_SYNC_CHANGE BIT(13) +#define MVNETA_CAUSE_SERDES_SYNC_ERR BIT(14) + +#define MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT 16 +#define MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT) +#define MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool))) + +#define MVNETA_CAUSE_TXQ_ERROR_SHIFT 24 +#define MVNETA_CAUSE_TXQ_ERROR_ALL_MASK (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT) +#define MVNETA_CAUSE_TXQ_ERROR_MASK(q) (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q))) + +#define MVNETA_INTR_ENABLE 0x25b8 +#define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00 +#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0x000000ff + +#define MVNETA_RXQ_CMD 0x2680 +#define MVNETA_RXQ_DISABLE_SHIFT 8 +#define MVNETA_RXQ_ENABLE_MASK 0x000000ff +#define MVETH_TXQ_TOKEN_COUNT_REG(q) (0x2700 + ((q) << 4)) +#define MVETH_TXQ_TOKEN_CFG_REG(q) (0x2704 + ((q) << 4)) +#define MVNETA_GMAC_CTRL_0 0x2c00 +#define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2 +#define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc +#define MVNETA_GMAC0_PORT_1000BASE_X BIT(1) +#define MVNETA_GMAC0_PORT_ENABLE BIT(0) +#define MVNETA_GMAC_CTRL_2 0x2c08 +#define MVNETA_GMAC2_INBAND_AN_ENABLE BIT(0) +#define MVNETA_GMAC2_PCS_ENABLE BIT(3) +#define MVNETA_GMAC2_PORT_RGMII BIT(4) +#define MVNETA_GMAC2_PORT_RESET BIT(6) +#define MVNETA_GMAC_STATUS 0x2c10 +#define MVNETA_GMAC_LINK_UP BIT(0) +#define MVNETA_GMAC_SPEED_1000 BIT(1) +#define MVNETA_GMAC_SPEED_100 BIT(2) +#define MVNETA_GMAC_FULL_DUPLEX BIT(3) +#define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE BIT(4) +#define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5) +#define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6) +#define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7) +#define MVNETA_GMAC_AN_COMPLETE BIT(11) +#define MVNETA_GMAC_SYNC_OK BIT(14) +#define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c +#define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0) +#define MVNETA_GMAC_FORCE_LINK_PASS BIT(1) +#define MVNETA_GMAC_INBAND_AN_ENABLE BIT(2) +#define MVNETA_GMAC_AN_BYPASS_ENABLE BIT(3) +#define MVNETA_GMAC_INBAND_RESTART_AN BIT(4) +#define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5) +#define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6) +#define MVNETA_GMAC_AN_SPEED_EN BIT(7) +#define MVNETA_GMAC_CONFIG_FLOW_CTRL BIT(8) +#define MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL BIT(9) +#define MVNETA_GMAC_AN_FLOW_CTRL_EN BIT(11) +#define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12) +#define MVNETA_GMAC_AN_DUPLEX_EN BIT(13) +#define MVNETA_GMAC_CTRL_4 0x2c90 +#define MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE BIT(1) +#define MVNETA_MIB_COUNTERS_BASE 0x3000 +#define MVNETA_MIB_LATE_COLLISION 0x7c +#define MVNETA_DA_FILT_SPEC_MCAST 0x3400 +#define 
MVNETA_DA_FILT_OTH_MCAST 0x3500 +#define MVNETA_DA_FILT_UCAST_BASE 0x3600 +#define MVNETA_TXQ_BASE_ADDR_REG(q) (0x3c00 + ((q) << 2)) +#define MVNETA_TXQ_SIZE_REG(q) (0x3c20 + ((q) << 2)) +#define MVNETA_TXQ_SENT_THRESH_ALL_MASK 0x3fff0000 +#define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16) +#define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2)) +#define MVNETA_TXQ_DEC_SENT_SHIFT 16 +#define MVNETA_TXQ_DEC_SENT_MASK 0xff +#define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2)) +#define MVNETA_TXQ_SENT_DESC_SHIFT 16 +#define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000 +#define MVNETA_PORT_TX_RESET 0x3cf0 +#define MVNETA_PORT_TX_DMA_RESET BIT(0) +#define MVNETA_TX_MTU 0x3e0c +#define MVNETA_TX_TOKEN_SIZE 0x3e14 +#define MVNETA_TX_TOKEN_SIZE_MAX 0xffffffff +#define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2)) +#define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff + +#define MVNETA_LPI_CTRL_0 0x2cc0 +#define MVNETA_LPI_CTRL_1 0x2cc4 +#define MVNETA_LPI_REQUEST_ENABLE BIT(0) +#define MVNETA_LPI_CTRL_2 0x2cc8 +#define MVNETA_LPI_STATUS 0x2ccc + +#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff + +/* Descriptor ring Macros */ +#define MVNETA_QUEUE_NEXT_DESC(q, index) \ + (((index) < (q)->last_desc) ? ((index) + 1) : 0) + +/* Various constants */ + +/* Coalescing */ +#define MVNETA_TXDONE_COAL_PKTS 0 /* interrupt per packet */ +#define MVNETA_RX_COAL_PKTS 32 +#define MVNETA_RX_COAL_USEC 100 + +/* The two bytes Marvell header. Either contains a special value used + * by Marvell switches when a specific hardware mode is enabled (not + * supported by this driver) or is filled automatically by zeroes on + * the RX side. Those two bytes being at the front of the Ethernet + * header, they allow to have the IP header aligned on a 4 bytes + * boundary automatically: the hardware skips those two bytes on its + * own. + */ +#define MVNETA_MH_SIZE 2 + +#define MVNETA_VLAN_TAG_LEN 4 + +#define MVNETA_TX_CSUM_DEF_SIZE 1600 +#define MVNETA_TX_CSUM_MAX_SIZE 9800 +#define MVNETA_ACC_MODE_EXT1 1 +#define MVNETA_ACC_MODE_EXT2 2 + +#define MVNETA_MAX_DECODE_WIN 6 + +/* Timeout constants */ +#define MVNETA_TX_DISABLE_TIMEOUT_MSEC 1000 +#define MVNETA_RX_DISABLE_TIMEOUT_MSEC 1000 +#define MVNETA_TX_FIFO_EMPTY_TIMEOUT 10000 + +#define MVNETA_TX_MTU_MAX 0x3ffff + +/* The RSS lookup table actually has 256 entries but we do not use + * them yet + */ +#define MVNETA_RSS_LU_TABLE_SIZE 1 + +/* Max number of Rx descriptors */ +#define MVNETA_MAX_RXD 512 + +/* Max number of Tx descriptors */ +#define MVNETA_MAX_TXD 1024 + +/* Max number of allowed TCP segments for software TSO */ +#define MVNETA_MAX_TSO_SEGS 100 + +#define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS) + +/* descriptor aligned size */ +#define MVNETA_DESC_ALIGNED_SIZE 32 + +/* Number of bytes to be taken into account by HW when putting incoming data + * to the buffers. It is needed in case NET_SKB_PAD exceeds maximum packet + * offset supported in MVNETA_RXQ_CONFIG_REG(q) registers. 
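+ * (The per-queue packet-offset field, see MVNETA_RXQ_PKT_OFFSET_MASK
+ * above, is only 4 bits wide, which is why a separate 64-byte correction
+ * value is defined below.)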
+ */ +#define MVNETA_RX_PKT_OFFSET_CORRECTION 64 + +#define MVNETA_RX_PKT_SIZE(mtu) \ + ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \ + ETH_HLEN + ETH_FCS_LEN, \ + cache_line_size()) + +/* Driver assumes that the last 3 bits are 0 */ +#define MVNETA_SKB_HEADROOM ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +#define MVNETA_SKB_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info) + \ + MVNETA_SKB_HEADROOM)) +#define MVNETA_MAX_RX_BUF_SIZE (PAGE_SIZE - MVNETA_SKB_PAD) + +#define IS_TSO_HEADER(txq, addr) \ + ((addr >= txq->tso_hdrs_phys) && \ + (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE)) + +#define MVNETA_RX_GET_BM_POOL_ID(rxd) \ + (((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT) + +enum { + ETHTOOL_STAT_EEE_WAKEUP, + ETHTOOL_STAT_SKB_ALLOC_ERR, + ETHTOOL_STAT_REFILL_ERR, + ETHTOOL_XDP_REDIRECT, + ETHTOOL_XDP_PASS, + ETHTOOL_XDP_DROP, + ETHTOOL_XDP_TX, + ETHTOOL_XDP_TX_ERR, + ETHTOOL_XDP_XMIT, + ETHTOOL_XDP_XMIT_ERR, + ETHTOOL_MAX_STATS, +}; + +struct mvneta_statistic { + unsigned short offset; + unsigned short type; + const char name[ETH_GSTRING_LEN]; +}; + +#define T_REG_32 32 +#define T_REG_64 64 +#define T_SW 1 + +#define MVNETA_XDP_PASS 0 +#define MVNETA_XDP_DROPPED BIT(0) +#define MVNETA_XDP_TX BIT(1) +#define MVNETA_XDP_REDIR BIT(2) + +static const struct mvneta_statistic mvneta_statistics[] = { + { 0x3000, T_REG_64, "good_octets_received", }, + { 0x3010, T_REG_32, "good_frames_received", }, + { 0x3008, T_REG_32, "bad_octets_received", }, + { 0x3014, T_REG_32, "bad_frames_received", }, + { 0x3018, T_REG_32, "broadcast_frames_received", }, + { 0x301c, T_REG_32, "multicast_frames_received", }, + { 0x3050, T_REG_32, "unrec_mac_control_received", }, + { 0x3058, T_REG_32, "good_fc_received", }, + { 0x305c, T_REG_32, "bad_fc_received", }, + { 0x3060, T_REG_32, "undersize_received", }, + { 0x3064, T_REG_32, "fragments_received", }, + { 0x3068, T_REG_32, "oversize_received", }, + { 0x306c, T_REG_32, "jabber_received", }, + { 0x3070, T_REG_32, "mac_receive_error", }, + { 0x3074, T_REG_32, "bad_crc_event", }, + { 0x3078, T_REG_32, "collision", }, + { 0x307c, T_REG_32, "late_collision", }, + { 0x2484, T_REG_32, "rx_discard", }, + { 0x2488, T_REG_32, "rx_overrun", }, + { 0x3020, T_REG_32, "frames_64_octets", }, + { 0x3024, T_REG_32, "frames_65_to_127_octets", }, + { 0x3028, T_REG_32, "frames_128_to_255_octets", }, + { 0x302c, T_REG_32, "frames_256_to_511_octets", }, + { 0x3030, T_REG_32, "frames_512_to_1023_octets", }, + { 0x3034, T_REG_32, "frames_1024_to_max_octets", }, + { 0x3038, T_REG_64, "good_octets_sent", }, + { 0x3040, T_REG_32, "good_frames_sent", }, + { 0x3044, T_REG_32, "excessive_collision", }, + { 0x3048, T_REG_32, "multicast_frames_sent", }, + { 0x304c, T_REG_32, "broadcast_frames_sent", }, + { 0x3054, T_REG_32, "fc_sent", }, + { 0x300c, T_REG_32, "internal_mac_transmit_err", }, + { ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", }, + { ETHTOOL_STAT_SKB_ALLOC_ERR, T_SW, "skb_alloc_errors", }, + { ETHTOOL_STAT_REFILL_ERR, T_SW, "refill_errors", }, + { ETHTOOL_XDP_REDIRECT, T_SW, "rx_xdp_redirect", }, + { ETHTOOL_XDP_PASS, T_SW, "rx_xdp_pass", }, + { ETHTOOL_XDP_DROP, T_SW, "rx_xdp_drop", }, + { ETHTOOL_XDP_TX, T_SW, "rx_xdp_tx", }, + { ETHTOOL_XDP_TX_ERR, T_SW, "rx_xdp_tx_errors", }, + { ETHTOOL_XDP_XMIT, T_SW, "tx_xdp_xmit", }, + { ETHTOOL_XDP_XMIT_ERR, T_SW, "tx_xdp_xmit_errors", }, +}; + +struct mvneta_stats { + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + /* xdp */ + u64 xdp_redirect; + u64 xdp_pass; + u64 
xdp_drop; + u64 xdp_xmit; + u64 xdp_xmit_err; + u64 xdp_tx; + u64 xdp_tx_err; +}; + +struct mvneta_ethtool_stats { + struct mvneta_stats ps; + u64 skb_alloc_error; + u64 refill_error; +}; + +struct mvneta_pcpu_stats { + struct u64_stats_sync syncp; + + struct mvneta_ethtool_stats es; + u64 rx_dropped; + u64 rx_errors; +}; + +struct mvneta_pcpu_port { + /* Pointer to the shared port */ + struct mvneta_port *pp; + + /* Pointer to the CPU-local NAPI struct */ + struct napi_struct napi; + + /* Cause of the previous interrupt */ + u32 cause_rx_tx; +}; + +enum { + __MVNETA_DOWN, +}; + +struct mvneta_port { + u8 id; + struct mvneta_pcpu_port __percpu *ports; + struct mvneta_pcpu_stats __percpu *stats; + + unsigned long state; + + int pkt_size; + void __iomem *base; + struct mvneta_rx_queue *rxqs; + struct mvneta_tx_queue *txqs; + struct net_device *dev; + struct hlist_node node_online; + struct hlist_node node_dead; + int rxq_def; + /* Protect the access to the percpu interrupt registers, + * ensuring that the configuration remains coherent. + */ + spinlock_t lock; + bool is_stopped; + + u32 cause_rx_tx; + struct napi_struct napi; + + struct bpf_prog *xdp_prog; + + /* Core clock */ + struct clk *clk; + /* AXI clock */ + struct clk *clk_bus; + u8 mcast_count[256]; + u16 tx_ring_size; + u16 rx_ring_size; + + phy_interface_t phy_interface; + struct device_node *dn; + unsigned int tx_csum_limit; + struct phylink *phylink; + struct phylink_config phylink_config; + struct phy *comphy; + + struct mvneta_bm *bm_priv; + struct mvneta_bm_pool *pool_long; + struct mvneta_bm_pool *pool_short; + int bm_win_id; + + bool eee_enabled; + bool eee_active; + bool tx_lpi_enabled; + + u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)]; + + u32 indir[MVNETA_RSS_LU_TABLE_SIZE]; + + /* Flags for special SoC configurations */ + bool neta_armada3700; + u16 rx_offset_correction; + const struct mbus_dram_target_info *dram_target_info; +}; + +/* The mvneta_tx_desc and mvneta_rx_desc structures describe the + * layout of the transmit and reception DMA descriptors, and their + * layout is therefore defined by the hardware design + */ + +#define MVNETA_TX_L3_OFF_SHIFT 0 +#define MVNETA_TX_IP_HLEN_SHIFT 8 +#define MVNETA_TX_L4_UDP BIT(16) +#define MVNETA_TX_L3_IP6 BIT(17) +#define MVNETA_TXD_IP_CSUM BIT(18) +#define MVNETA_TXD_Z_PAD BIT(19) +#define MVNETA_TXD_L_DESC BIT(20) +#define MVNETA_TXD_F_DESC BIT(21) +#define MVNETA_TXD_FLZ_DESC (MVNETA_TXD_Z_PAD | \ + MVNETA_TXD_L_DESC | \ + MVNETA_TXD_F_DESC) +#define MVNETA_TX_L4_CSUM_FULL BIT(30) +#define MVNETA_TX_L4_CSUM_NOT BIT(31) + +#define MVNETA_RXD_ERR_CRC 0x0 +#define MVNETA_RXD_BM_POOL_SHIFT 13 +#define MVNETA_RXD_BM_POOL_MASK (BIT(13) | BIT(14)) +#define MVNETA_RXD_ERR_SUMMARY BIT(16) +#define MVNETA_RXD_ERR_OVERRUN BIT(17) +#define MVNETA_RXD_ERR_LEN BIT(18) +#define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18)) +#define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18)) +#define MVNETA_RXD_L3_IP4 BIT(25) +#define MVNETA_RXD_LAST_DESC BIT(26) +#define MVNETA_RXD_FIRST_DESC BIT(27) +#define MVNETA_RXD_FIRST_LAST_DESC (MVNETA_RXD_FIRST_DESC | \ + MVNETA_RXD_LAST_DESC) +#define MVNETA_RXD_L4_CSUM_OK BIT(30) + +#if defined(__LITTLE_ENDIAN) +struct mvneta_tx_desc { + u32 command; /* Options used by HW for packet transmitting.*/ + u16 reserved1; /* csum_l4 (for future use) */ + u16 data_size; /* Data size of transmitted packet in bytes */ + u32 buf_phys_addr; /* Physical addr of transmitted buffer */ + u32 reserved2; /* hw_cmd - (for future use, PMT) */ + u32 reserved3[4]; /* Reserved - 
(for future use) */ +}; + +struct mvneta_rx_desc { + u32 status; /* Info about received packet */ + u16 reserved1; /* pnc_info - (for future use, PnC) */ + u16 data_size; /* Size of received packet in bytes */ + + u32 buf_phys_addr; /* Physical address of the buffer */ + u32 reserved2; /* pnc_flow_id (for future use, PnC) */ + + u32 buf_cookie; /* cookie for access to RX buffer in rx path */ + u16 reserved3; /* prefetch_cmd, for future use */ + u16 reserved4; /* csum_l4 - (for future use, PnC) */ + + u32 reserved5; /* pnc_extra PnC (for future use, PnC) */ + u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */ +}; +#else +struct mvneta_tx_desc { + u16 data_size; /* Data size of transmitted packet in bytes */ + u16 reserved1; /* csum_l4 (for future use) */ + u32 command; /* Options used by HW for packet transmitting.*/ + u32 reserved2; /* hw_cmd - (for future use, PMT) */ + u32 buf_phys_addr; /* Physical addr of transmitted buffer */ + u32 reserved3[4]; /* Reserved - (for future use) */ +}; + +struct mvneta_rx_desc { + u16 data_size; /* Size of received packet in bytes */ + u16 reserved1; /* pnc_info - (for future use, PnC) */ + u32 status; /* Info about received packet */ + + u32 reserved2; /* pnc_flow_id (for future use, PnC) */ + u32 buf_phys_addr; /* Physical address of the buffer */ + + u16 reserved4; /* csum_l4 - (for future use, PnC) */ + u16 reserved3; /* prefetch_cmd, for future use */ + u32 buf_cookie; /* cookie for access to RX buffer in rx path */ + + u32 reserved5; /* pnc_extra PnC (for future use, PnC) */ + u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */ +}; +#endif + +enum mvneta_tx_buf_type { + MVNETA_TYPE_SKB, + MVNETA_TYPE_XDP_TX, + MVNETA_TYPE_XDP_NDO, +}; + +struct mvneta_tx_buf { + enum mvneta_tx_buf_type type; + union { + struct xdp_frame *xdpf; + struct sk_buff *skb; + }; +}; + +struct mvneta_tx_queue { + /* Number of this TX queue, in the range 0-7 */ + u8 id; + + /* Number of TX DMA descriptors in the descriptor ring */ + int size; + + /* Number of currently used TX DMA descriptor in the + * descriptor ring + */ + int count; + int pending; + int tx_stop_threshold; + int tx_wake_threshold; + + /* Array of transmitted buffers */ + struct mvneta_tx_buf *buf; + + /* Index of last TX DMA descriptor that was inserted */ + int txq_put_index; + + /* Index of the TX DMA descriptor to be cleaned up */ + int txq_get_index; + + u32 done_pkts_coal; + + /* Virtual address of the TX DMA descriptors array */ + struct mvneta_tx_desc *descs; + + /* DMA address of the TX DMA descriptors array */ + dma_addr_t descs_phys; + + /* Index of the last TX DMA descriptor */ + int last_desc; + + /* Index of the next TX DMA descriptor to process */ + int next_desc_to_proc; + + /* DMA buffers for TSO headers */ + char *tso_hdrs; + + /* DMA address of TSO headers */ + dma_addr_t tso_hdrs_phys; + + /* Affinity mask for CPUs*/ + cpumask_t affinity_mask; +}; + +struct mvneta_rx_queue { + /* rx queue number, in the range 0-7 */ + u8 id; + + /* num of rx descriptors in the rx descriptor ring */ + int size; + + u32 pkts_coal; + u32 time_coal; + + /* page_pool */ + struct page_pool *page_pool; + struct xdp_rxq_info xdp_rxq; + + /* Virtual address of the RX buffer */ + void **buf_virt_addr; + + /* Virtual address of the RX DMA descriptors array */ + struct mvneta_rx_desc *descs; + + /* DMA address of the RX DMA descriptors array */ + dma_addr_t descs_phys; + + /* Index of the last RX DMA descriptor */ + int last_desc; + + /* Index of the next RX DMA descriptor to process */ + int 
next_desc_to_proc; + + /* Index of first RX DMA descriptor to refill */ + int first_to_refill; + u32 refill_num; +}; + +static enum cpuhp_state online_hpstate; +/* The hardware supports eight (8) rx queues and eight (8) tx queues; all of + * them are allocated, and rxq_def selects the default receive queue. + */ +static int rxq_number = 8; +static int txq_number = 8; + +static int rxq_def; + +static int rx_copybreak __read_mostly = 256; + +/* HW BM requires that each port be identified by a unique ID */ +static int global_port_id; + +#define MVNETA_DRIVER_NAME "mvneta" +#define MVNETA_DRIVER_VERSION "1.0" + +/* Utility/helper methods */ + +/* Write helper method */ +static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data) +{ + writel(data, pp->base + offset); +} + +/* Read helper method */ +static u32 mvreg_read(struct mvneta_port *pp, u32 offset) +{ + return readl(pp->base + offset); +} + +/* Increment txq get counter */ +static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq) +{ + txq->txq_get_index++; + if (txq->txq_get_index == txq->size) + txq->txq_get_index = 0; +} + +/* Increment txq put counter */ +static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq) +{ + txq->txq_put_index++; + if (txq->txq_put_index == txq->size) + txq->txq_put_index = 0; +} + + +/* Clear all MIB counters */ +static void mvneta_mib_counters_clear(struct mvneta_port *pp) +{ + int i; + + /* Perform dummy reads from MIB counters */ + for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4) + mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i)); + mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT); + mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT); +} + +/* Get System Network Statistics */ +static void +mvneta_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct mvneta_port *pp = netdev_priv(dev); + unsigned int start; + int cpu; + + for_each_possible_cpu(cpu) { + struct mvneta_pcpu_stats *cpu_stats; + u64 rx_packets; + u64 rx_bytes; + u64 rx_dropped; + u64 rx_errors; + u64 tx_packets; + u64 tx_bytes; + + cpu_stats = per_cpu_ptr(pp->stats, cpu); + do { + start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); + rx_packets = cpu_stats->es.ps.rx_packets; + rx_bytes = cpu_stats->es.ps.rx_bytes; + rx_dropped = cpu_stats->rx_dropped; + rx_errors = cpu_stats->rx_errors; + tx_packets = cpu_stats->es.ps.tx_packets; + tx_bytes = cpu_stats->es.ps.tx_bytes; + } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); + + stats->rx_packets += rx_packets; + stats->rx_bytes += rx_bytes; + stats->rx_dropped += rx_dropped; + stats->rx_errors += rx_errors; + stats->tx_packets += tx_packets; + stats->tx_bytes += tx_bytes; + } + + stats->tx_dropped = dev->stats.tx_dropped; +} + +/* Rx descriptors helper methods */ + +/* Checks whether the RX descriptor having this status is both the first + * and the last descriptor for the RX packet. 
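+ * (i.e. both MVNETA_RXD_FIRST_DESC and MVNETA_RXD_LAST_DESC are set).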
Each RX packet is currently + * received through a single RX descriptor, so not having each RX + * descriptor with its first and last bits set is an error + */ +static int mvneta_rxq_desc_is_first_last(u32 status) +{ + return (status & MVNETA_RXD_FIRST_LAST_DESC) == + MVNETA_RXD_FIRST_LAST_DESC; +} + +/* Add number of descriptors ready to receive new packets */ +static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp, + struct mvneta_rx_queue *rxq, + int ndescs) +{ + /* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can + * be added at once + */ + while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) { + mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), + (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX << + MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT)); + ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX; + } + + mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), + (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT)); +} + +/* Get number of RX descriptors occupied by received packets */ +static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp, + struct mvneta_rx_queue *rxq) +{ + u32 val; + + val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id)); + return val & MVNETA_RXQ_OCCUPIED_ALL_MASK; +} + +/* Update num of rx desc called upon return from rx path or + * from mvneta_rxq_drop_pkts(). + */ +static void mvneta_rxq_desc_num_update(struct mvneta_port *pp, + struct mvneta_rx_queue *rxq, + int rx_done, int rx_filled) +{ + u32 val; + + if ((rx_done <= 0xff) && (rx_filled <= 0xff)) { + val = rx_done | + (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT); + mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val); + return; + } + + /* Only 255 descriptors can be added at once */ + while ((rx_done > 0) || (rx_filled > 0)) { + if (rx_done <= 0xff) { + val = rx_done; + rx_done = 0; + } else { + val = 0xff; + rx_done -= 0xff; + } + if (rx_filled <= 0xff) { + val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT; + rx_filled = 0; + } else { + val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT; + rx_filled -= 0xff; + } + mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val); + } +} + +/* Get pointer to next RX descriptor to be processed by SW */ +static struct mvneta_rx_desc * +mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq) +{ + int rx_desc = rxq->next_desc_to_proc; + + rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc); + prefetch(rxq->descs + rxq->next_desc_to_proc); + return rxq->descs + rx_desc; +} + +/* Change maximum receive size of the port. 
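+ * The limit is programmed into GMAC control register 0 as + * (max_rx_size - MVNETA_MH_SIZE) / 2.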
*/ +static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size) +{ + u32 val; + + val = mvreg_read(pp, MVNETA_GMAC_CTRL_0); + val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK; + val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) << + MVNETA_GMAC_MAX_RX_SIZE_SHIFT; + mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); +} + + +/* Set rx queue offset */ +static void mvneta_rxq_offset_set(struct mvneta_port *pp, + struct mvneta_rx_queue *rxq, + int offset) +{ + u32 val; + + val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); + val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK; + + /* Offset is in */ + val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3); + mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); +} + + +/* Tx descriptors helper methods */ + +/* Update HW with number of TX descriptors to be sent */ +static void mvneta_txq_pend_desc_add(struct mvneta_port *pp, + struct mvneta_tx_queue *txq, + int pend_desc) +{ + u32 val; + + pend_desc += txq->pending; + + /* Only 255 Tx descriptors can be added at once */ + do { + val = min(pend_desc, 255); + mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); + pend_desc -= val; + } while (pend_desc > 0); + txq->pending = 0; +} + +/* Get pointer to next TX descriptor to be processed (send) by HW */ +static struct mvneta_tx_desc * +mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq) +{ + int tx_desc = txq->next_desc_to_proc; + + txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc); + return txq->descs + tx_desc; +} + +/* Release the last allocated TX descriptor. Useful to handle DMA + * mapping failures in the TX path. + */ +static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq) +{ + if (txq->next_desc_to_proc == 0) + txq->next_desc_to_proc = txq->last_desc - 1; + else + txq->next_desc_to_proc--; +} + +/* Set rxq buf size */ +static void mvneta_rxq_buf_size_set(struct mvneta_port *pp, + struct mvneta_rx_queue *rxq, + int buf_size) +{ + u32 val; + + val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id)); + + val &= ~MVNETA_RXQ_BUF_SIZE_MASK; + val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT); + + mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val); +} + +/* Disable buffer management (BM) */ +static void mvneta_rxq_bm_disable(struct mvneta_port *pp, + struct mvneta_rx_queue *rxq) +{ + u32 val; + + val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); + val &= ~MVNETA_RXQ_HW_BUF_ALLOC; + mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); +} + +/* Enable buffer management (BM) */ +static void mvneta_rxq_bm_enable(struct mvneta_port *pp, + struct mvneta_rx_queue *rxq) +{ + u32 val; + + val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); + val |= MVNETA_RXQ_HW_BUF_ALLOC; + mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); +} + +/* Notify HW about port's assignment of pool for bigger packets */ +static void mvneta_rxq_long_pool_set(struct mvneta_port *pp, + struct mvneta_rx_queue *rxq) +{ + u32 val; + + val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); + val &= ~MVNETA_RXQ_LONG_POOL_ID_MASK; + val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT); + + mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); +} + +/* Notify HW about port's assignment of pool for smaller packets */ +static void mvneta_rxq_short_pool_set(struct mvneta_port *pp, + struct mvneta_rx_queue *rxq) +{ + u32 val; + + val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); + val &= ~MVNETA_RXQ_SHORT_POOL_ID_MASK; + val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT); + + mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); +} + +/* Set port's receive buffer size for 
assigned BM pool */ +static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp, + int buf_size, + u8 pool_id) +{ + u32 val; + + if (!IS_ALIGNED(buf_size, 8)) { + dev_warn(pp->dev->dev.parent, + "illegal buf_size value %d, round to %d\n", + buf_size, ALIGN(buf_size, 8)); + buf_size = ALIGN(buf_size, 8); + } + + val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id)); + val |= buf_size & MVNETA_PORT_POOL_BUFFER_SZ_MASK; + mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val); +} + +/* Configure MBUS window in order to enable access BM internal SRAM */ +static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize, + u8 target, u8 attr) +{ + u32 win_enable, win_protect; + int i; + + win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE); + + if (pp->bm_win_id < 0) { + /* Find first not occupied window */ + for (i = 0; i < MVNETA_MAX_DECODE_WIN; i++) { + if (win_enable & (1 << i)) { + pp->bm_win_id = i; + break; + } + } + if (i == MVNETA_MAX_DECODE_WIN) + return -ENOMEM; + } else { + i = pp->bm_win_id; + } + + mvreg_write(pp, MVNETA_WIN_BASE(i), 0); + mvreg_write(pp, MVNETA_WIN_SIZE(i), 0); + + if (i < 4) + mvreg_write(pp, MVNETA_WIN_REMAP(i), 0); + + mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) | + (attr << 8) | target); + + mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000); + + win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE); + win_protect |= 3 << (2 * i); + mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect); + + win_enable &= ~(1 << i); + mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable); + + return 0; +} + +static int mvneta_bm_port_mbus_init(struct mvneta_port *pp) +{ + u32 wsize; + u8 target, attr; + int err; + + /* Get BM window information */ + err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize, + &target, &attr); + if (err < 0) + return err; + + pp->bm_win_id = -1; + + /* Open NETA -> BM window */ + err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize, + target, attr); + if (err < 0) { + netdev_info(pp->dev, "fail to configure mbus window to BM\n"); + return err; + } + return 0; +} + +/* Assign and initialize pools for port. In case of fail + * buffer manager will remain disabled for current port. 
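+ * The long pool id is taken from the "bm,pool-long" DT property; if + * "bm,pool-short" is absent, the long pool is reused for short packets.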
+ */ +static int mvneta_bm_port_init(struct platform_device *pdev, + struct mvneta_port *pp) +{ + struct device_node *dn = pdev->dev.of_node; + u32 long_pool_id, short_pool_id; + + if (!pp->neta_armada3700) { + int ret; + + ret = mvneta_bm_port_mbus_init(pp); + if (ret) + return ret; + } + + if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id)) { + netdev_info(pp->dev, "missing long pool id\n"); + return -EINVAL; + } + + /* Create port's long pool depending on mtu */ + pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id, + MVNETA_BM_LONG, pp->id, + MVNETA_RX_PKT_SIZE(pp->dev->mtu)); + if (!pp->pool_long) { + netdev_info(pp->dev, "fail to obtain long pool for port\n"); + return -ENOMEM; + } + + pp->pool_long->port_map |= 1 << pp->id; + + mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size, + pp->pool_long->id); + + /* If short pool id is not defined, assume using single pool */ + if (of_property_read_u32(dn, "bm,pool-short", &short_pool_id)) + short_pool_id = long_pool_id; + + /* Create port's short pool */ + pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id, + MVNETA_BM_SHORT, pp->id, + MVNETA_BM_SHORT_PKT_SIZE); + if (!pp->pool_short) { + netdev_info(pp->dev, "fail to obtain short pool for port\n"); + mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); + return -ENOMEM; + } + + if (short_pool_id != long_pool_id) { + pp->pool_short->port_map |= 1 << pp->id; + mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size, + pp->pool_short->id); + } + + return 0; +} + +/* Update settings of a pool for bigger packets */ +static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu) +{ + struct mvneta_bm_pool *bm_pool = pp->pool_long; + struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool; + int num; + + /* Release all buffers from long pool */ + mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id); + if (hwbm_pool->buf_num) { + WARN(1, "cannot free all buffers in pool %d\n", + bm_pool->id); + goto bm_mtu_err; + } + + bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu); + bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size); + hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size)); + + /* Fill entire long pool */ + num = hwbm_pool_add(hwbm_pool, hwbm_pool->size); + if (num != hwbm_pool->size) { + WARN(1, "pool %d: %d of %d allocated\n", + bm_pool->id, num, hwbm_pool->size); + goto bm_mtu_err; + } + mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id); + + return; + +bm_mtu_err: + mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); + mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id); + + pp->bm_priv = NULL; + pp->rx_offset_correction = MVNETA_SKB_HEADROOM; + mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1); + netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n"); +} + +/* Start the Ethernet port RX and TX activity */ +static void mvneta_port_up(struct mvneta_port *pp) +{ + int queue; + u32 q_map; + + /* Enable all initialized TXs. */ + q_map = 0; + for (queue = 0; queue < txq_number; queue++) { + struct mvneta_tx_queue *txq = &pp->txqs[queue]; + if (txq->descs) + q_map |= (1 << queue); + } + mvreg_write(pp, MVNETA_TXQ_CMD, q_map); + + q_map = 0; + /* Enable all initialized RXQs. 
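+ * Only queues whose descriptor rings were allocated are included in the + * bitmap written to MVNETA_RXQ_CMD.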
*/ + for (queue = 0; queue < rxq_number; queue++) { + struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; + + if (rxq->descs) + q_map |= (1 << queue); + } + mvreg_write(pp, MVNETA_RXQ_CMD, q_map); +} + +/* Stop the Ethernet port activity */ +static void mvneta_port_down(struct mvneta_port *pp) +{ + u32 val; + int count; + + /* Stop Rx port activity. Check port Rx activity. */ + val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK; + + /* Issue stop command for active channels only */ + if (val != 0) + mvreg_write(pp, MVNETA_RXQ_CMD, + val << MVNETA_RXQ_DISABLE_SHIFT); + + /* Wait for all Rx activity to terminate. */ + count = 0; + do { + if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) { + netdev_warn(pp->dev, + "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n", + val); + break; + } + mdelay(1); + + val = mvreg_read(pp, MVNETA_RXQ_CMD); + } while (val & MVNETA_RXQ_ENABLE_MASK); + + /* Stop Tx port activity. Check port Tx activity. Issue stop + * command for active channels only + */ + val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK; + + if (val != 0) + mvreg_write(pp, MVNETA_TXQ_CMD, + (val << MVNETA_TXQ_DISABLE_SHIFT)); + + /* Wait for all Tx activity to terminate. */ + count = 0; + do { + if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) { + netdev_warn(pp->dev, + "TIMEOUT for TX stopped status=0x%08x\n", + val); + break; + } + mdelay(1); + + /* Check TX Command reg that all Txqs are stopped */ + val = mvreg_read(pp, MVNETA_TXQ_CMD); + + } while (val & MVNETA_TXQ_ENABLE_MASK); + + /* Double check to verify that TX FIFO is empty */ + count = 0; + do { + if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) { + netdev_warn(pp->dev, + "TX FIFO empty timeout status=0x%08x\n", + val); + break; + } + mdelay(1); + + val = mvreg_read(pp, MVNETA_PORT_STATUS); + } while (!(val & MVNETA_TX_FIFO_EMPTY) && + (val & MVNETA_TX_IN_PRGRS)); + + udelay(200); +} + +/* Enable the port by setting the port enable bit of the MAC control register */ +static void mvneta_port_enable(struct mvneta_port *pp) +{ + u32 val; + + /* Enable port */ + val = mvreg_read(pp, MVNETA_GMAC_CTRL_0); + val |= MVNETA_GMAC0_PORT_ENABLE; + mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); +} + +/* Disable the port and wait for about 200 usec before retuning */ +static void mvneta_port_disable(struct mvneta_port *pp) +{ + u32 val; + + /* Reset the Enable bit in the Serial Control Register */ + val = mvreg_read(pp, MVNETA_GMAC_CTRL_0); + val &= ~MVNETA_GMAC0_PORT_ENABLE; + mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); + + udelay(200); +} + +/* Multicast tables methods */ + +/* Set all entries in Unicast MAC Table; queue==-1 means reject all */ +static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue) +{ + int offset; + u32 val; + + if (queue == -1) { + val = 0; + } else { + val = 0x1 | (queue << 1); + val |= (val << 24) | (val << 16) | (val << 8); + } + + for (offset = 0; offset <= 0xc; offset += 4) + mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val); +} + +/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */ +static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue) +{ + int offset; + u32 val; + + if (queue == -1) { + val = 0; + } else { + val = 0x1 | (queue << 1); + val |= (val << 24) | (val << 16) | (val << 8); + } + + for (offset = 0; offset <= 0xfc; offset += 4) + mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val); + +} + +/* Set all entries in Other Multicast MAC Table. 
queue==-1 means reject all */ +static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue) +{ + int offset; + u32 val; + + if (queue == -1) { + memset(pp->mcast_count, 0, sizeof(pp->mcast_count)); + val = 0; + } else { + memset(pp->mcast_count, 1, sizeof(pp->mcast_count)); + val = 0x1 | (queue << 1); + val |= (val << 24) | (val << 16) | (val << 8); + } + + for (offset = 0; offset <= 0xfc; offset += 4) + mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val); +} + +static void mvneta_percpu_unmask_interrupt(void *arg) +{ + struct mvneta_port *pp = arg; + + /* All the queue are unmasked, but actually only the ones + * mapped to this CPU will be unmasked + */ + mvreg_write(pp, MVNETA_INTR_NEW_MASK, + MVNETA_RX_INTR_MASK_ALL | + MVNETA_TX_INTR_MASK_ALL | + MVNETA_MISCINTR_INTR_MASK); +} + +static void mvneta_percpu_mask_interrupt(void *arg) +{ + struct mvneta_port *pp = arg; + + /* All the queue are masked, but actually only the ones + * mapped to this CPU will be masked + */ + mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); + mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); + mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); +} + +static void mvneta_percpu_clear_intr_cause(void *arg) +{ + struct mvneta_port *pp = arg; + + /* All the queue are cleared, but actually only the ones + * mapped to this CPU will be cleared + */ + mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0); + mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); + mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0); +} + +/* This method sets defaults to the NETA port: + * Clears interrupt Cause and Mask registers. + * Clears all MAC tables. + * Sets defaults to all registers. + * Resets RX and TX descriptor rings. + * Resets PHY. + * This method can be called after mvneta_port_down() to return the port + * settings to defaults. + */ +static void mvneta_defaults_set(struct mvneta_port *pp) +{ + int cpu; + int queue; + u32 val; + int max_cpu = num_present_cpus(); + + /* Clear all Cause registers */ + on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true); + + /* Mask all interrupts */ + on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); + mvreg_write(pp, MVNETA_INTR_ENABLE, 0); + + /* Enable MBUS Retry bit16 */ + mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20); + + /* Set CPU queue access map. CPUs are assigned to the RX and + * TX queues modulo their number. If there is only one TX + * queue then it is assigned to the CPU associated to the + * default RX queue. + */ + for_each_present_cpu(cpu) { + int rxq_map = 0, txq_map = 0; + int rxq, txq; + if (!pp->neta_armada3700) { + for (rxq = 0; rxq < rxq_number; rxq++) + if ((rxq % max_cpu) == cpu) + rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq); + + for (txq = 0; txq < txq_number; txq++) + if ((txq % max_cpu) == cpu) + txq_map |= MVNETA_CPU_TXQ_ACCESS(txq); + + /* With only one TX queue we configure a special case + * which will allow to get all the irq on a single + * CPU + */ + if (txq_number == 1) + txq_map = (cpu == pp->rxq_def) ? 
+ MVNETA_CPU_TXQ_ACCESS(0) : 0; + + } else { + txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK; + rxq_map = MVNETA_CPU_RXQ_ACCESS_ALL_MASK; + } + + mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map); + } + + /* Reset RX and TX DMAs */ + mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET); + mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET); + + /* Disable Legacy WRR, Disable EJP, Release from reset */ + mvreg_write(pp, MVNETA_TXQ_CMD_1, 0); + for (queue = 0; queue < txq_number; queue++) { + mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0); + mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0); + } + + mvreg_write(pp, MVNETA_PORT_TX_RESET, 0); + mvreg_write(pp, MVNETA_PORT_RX_RESET, 0); + + /* Set Port Acceleration Mode */ + if (pp->bm_priv) + /* HW buffer management + legacy parser */ + val = MVNETA_ACC_MODE_EXT2; + else + /* SW buffer management + legacy parser */ + val = MVNETA_ACC_MODE_EXT1; + mvreg_write(pp, MVNETA_ACC_MODE, val); + + if (pp->bm_priv) + mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr); + + /* Update val of portCfg register accordingly with all RxQueue types */ + val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def); + mvreg_write(pp, MVNETA_PORT_CONFIG, val); + + val = 0; + mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val); + mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64); + + /* Build PORT_SDMA_CONFIG_REG */ + val = 0; + + /* Default burst size */ + val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16); + val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16); + val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP; + +#if defined(__BIG_ENDIAN) + val |= MVNETA_DESC_SWAP; +#endif + + /* Assign port SDMA configuration */ + mvreg_write(pp, MVNETA_SDMA_CONFIG, val); + + /* Disable PHY polling in hardware, since we're using the + * kernel phylib to do this. 
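+ * Polling is disabled by clearing MVNETA_PHY_POLLING_ENABLE in the unit + * control register.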
+ */ + val = mvreg_read(pp, MVNETA_UNIT_CONTROL); + val &= ~MVNETA_PHY_POLLING_ENABLE; + mvreg_write(pp, MVNETA_UNIT_CONTROL, val); + + mvneta_set_ucast_table(pp, -1); + mvneta_set_special_mcast_table(pp, -1); + mvneta_set_other_mcast_table(pp, -1); + + /* Set port interrupt enable register - default enable all */ + mvreg_write(pp, MVNETA_INTR_ENABLE, + (MVNETA_RXQ_INTR_ENABLE_ALL_MASK + | MVNETA_TXQ_INTR_ENABLE_ALL_MASK)); + + mvneta_mib_counters_clear(pp); +} + +/* Set max sizes for tx queues */ +static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size) + +{ + u32 val, size, mtu; + int queue; + + mtu = max_tx_size * 8; + if (mtu > MVNETA_TX_MTU_MAX) + mtu = MVNETA_TX_MTU_MAX; + + /* Set MTU */ + val = mvreg_read(pp, MVNETA_TX_MTU); + val &= ~MVNETA_TX_MTU_MAX; + val |= mtu; + mvreg_write(pp, MVNETA_TX_MTU, val); + + /* TX token size and all TXQs token size must be larger that MTU */ + val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE); + + size = val & MVNETA_TX_TOKEN_SIZE_MAX; + if (size < mtu) { + size = mtu; + val &= ~MVNETA_TX_TOKEN_SIZE_MAX; + val |= size; + mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val); + } + for (queue = 0; queue < txq_number; queue++) { + val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue)); + + size = val & MVNETA_TXQ_TOKEN_SIZE_MAX; + if (size < mtu) { + size = mtu; + val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX; + val |= size; + mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val); + } + } +} + +/* Set unicast address */ +static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble, + int queue) +{ + unsigned int unicast_reg; + unsigned int tbl_offset; + unsigned int reg_offset; + + /* Locate the Unicast table entry */ + last_nibble = (0xf & last_nibble); + + /* offset from unicast tbl base */ + tbl_offset = (last_nibble / 4) * 4; + + /* offset within the above reg */ + reg_offset = last_nibble % 4; + + unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset)); + + if (queue == -1) { + /* Clear accepts frame bit at specified unicast DA tbl entry */ + unicast_reg &= ~(0xff << (8 * reg_offset)); + } else { + unicast_reg &= ~(0xff << (8 * reg_offset)); + unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset)); + } + + mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg); +} + +/* Set mac address */ +static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr, + int queue) +{ + unsigned int mac_h; + unsigned int mac_l; + + if (queue != -1) { + mac_l = (addr[4] << 8) | (addr[5]); + mac_h = (addr[0] << 24) | (addr[1] << 16) | + (addr[2] << 8) | (addr[3] << 0); + + mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l); + mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h); + } + + /* Accept frames of this address */ + mvneta_set_ucast_addr(pp, addr[5], queue); +} + +/* Set the number of packets that will be received before RX interrupt + * will be generated by HW. + */ +static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp, + struct mvneta_rx_queue *rxq, u32 value) +{ + mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id), + value | MVNETA_RXQ_NON_OCCUPIED(0)); +} + +/* Set the time delay in usec before RX interrupt will be generated by + * HW. 
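+ * The value is converted to core clock cycles (clk_rate / 1000000 * usec) + * before being written to the per-queue time coalescing register.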
+ */ +static void mvneta_rx_time_coal_set(struct mvneta_port *pp, + struct mvneta_rx_queue *rxq, u32 value) +{ + u32 val; + unsigned long clk_rate; + + clk_rate = clk_get_rate(pp->clk); + val = (clk_rate / 1000000) * value; + + mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val); +} + +/* Set threshold for TX_DONE pkts coalescing */ +static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp, + struct mvneta_tx_queue *txq, u32 value) +{ + u32 val; + + val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id)); + + val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK; + val |= MVNETA_TXQ_SENT_THRESH_MASK(value); + + mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val); +} + +/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */ +static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc, + u32 phys_addr, void *virt_addr, + struct mvneta_rx_queue *rxq) +{ + int i; + + rx_desc->buf_phys_addr = phys_addr; + i = rx_desc - rxq->descs; + rxq->buf_virt_addr[i] = virt_addr; +} + +/* Decrement sent descriptors counter */ +static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp, + struct mvneta_tx_queue *txq, + int sent_desc) +{ + u32 val; + + /* Only 255 TX descriptors can be updated at once */ + while (sent_desc > 0xff) { + val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT; + mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); + sent_desc = sent_desc - 0xff; + } + + val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT; + mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); +} + +/* Get number of TX descriptors already sent by HW */ +static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp, + struct mvneta_tx_queue *txq) +{ + u32 val; + int sent_desc; + + val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id)); + sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >> + MVNETA_TXQ_SENT_DESC_SHIFT; + + return sent_desc; +} + +/* Get number of sent descriptors and decrement counter. + * The number of sent descriptors is returned. 
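+ * The hardware counter is decremented in chunks of at most 255 descriptors + * per write (see mvneta_txq_sent_desc_dec()).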
+ */ +static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp, + struct mvneta_tx_queue *txq) +{ + int sent_desc; + + /* Get number of sent descriptors */ + sent_desc = mvneta_txq_sent_desc_num_get(pp, txq); + + /* Decrement sent descriptors counter */ + if (sent_desc) + mvneta_txq_sent_desc_dec(pp, txq, sent_desc); + + return sent_desc; +} + +/* Set TXQ descriptors fields relevant for CSUM calculation */ +static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto, + int ip_hdr_len, int l4_proto) +{ + u32 command; + + /* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk, + * G_L4_chk, L4_type; required only for checksum + * calculation + */ + command = l3_offs << MVNETA_TX_L3_OFF_SHIFT; + command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT; + + if (l3_proto == htons(ETH_P_IP)) + command |= MVNETA_TXD_IP_CSUM; + else + command |= MVNETA_TX_L3_IP6; + + if (l4_proto == IPPROTO_TCP) + command |= MVNETA_TX_L4_CSUM_FULL; + else if (l4_proto == IPPROTO_UDP) + command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL; + else + command |= MVNETA_TX_L4_CSUM_NOT; + + return command; +} + + +/* Display more error info */ +static void mvneta_rx_error(struct mvneta_port *pp, + struct mvneta_rx_desc *rx_desc) +{ + struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); + u32 status = rx_desc->status; + + /* update per-cpu counter */ + u64_stats_update_begin(&stats->syncp); + stats->rx_errors++; + u64_stats_update_end(&stats->syncp); + + switch (status & MVNETA_RXD_ERR_CODE_MASK) { + case MVNETA_RXD_ERR_CRC: + netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n", + status, rx_desc->data_size); + break; + case MVNETA_RXD_ERR_OVERRUN: + netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n", + status, rx_desc->data_size); + break; + case MVNETA_RXD_ERR_LEN: + netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n", + status, rx_desc->data_size); + break; + case MVNETA_RXD_ERR_RESOURCE: + netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n", + status, rx_desc->data_size); + break; + } +} + +/* Handle RX checksum offload based on the descriptor's status */ +static void mvneta_rx_csum(struct mvneta_port *pp, u32 status, + struct sk_buff *skb) +{ + if ((pp->dev->features & NETIF_F_RXCSUM) && + (status & MVNETA_RXD_L3_IP4) && + (status & MVNETA_RXD_L4_CSUM_OK)) { + skb->csum = 0; + skb->ip_summed = CHECKSUM_UNNECESSARY; + return; + } + + skb->ip_summed = CHECKSUM_NONE; +} + +/* Return tx queue pointer (find last set bit) according to <cause> returned + * form tx_done reg. <cause> must not be null. The return value is always a + * valid queue for matching the first one found in <cause>. 
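+ * fls(cause) - 1 gives the index of the highest set bit, so the + * highest-numbered queue with pending work is handled first.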
+ */ +static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp, + u32 cause) +{ + int queue = fls(cause) - 1; + + return &pp->txqs[queue]; +} + +/* Free tx queue skbuffs */ +static void mvneta_txq_bufs_free(struct mvneta_port *pp, + struct mvneta_tx_queue *txq, int num, + struct netdev_queue *nq, bool napi) +{ + unsigned int bytes_compl = 0, pkts_compl = 0; + int i; + + for (i = 0; i < num; i++) { + struct mvneta_tx_buf *buf = &txq->buf[txq->txq_get_index]; + struct mvneta_tx_desc *tx_desc = txq->descs + + txq->txq_get_index; + + mvneta_txq_inc_get(txq); + + if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr) && + buf->type != MVNETA_TYPE_XDP_TX) + dma_unmap_single(pp->dev->dev.parent, + tx_desc->buf_phys_addr, + tx_desc->data_size, DMA_TO_DEVICE); + if (buf->type == MVNETA_TYPE_SKB && buf->skb) { + bytes_compl += buf->skb->len; + pkts_compl++; + dev_kfree_skb_any(buf->skb); + } else if (buf->type == MVNETA_TYPE_XDP_TX || + buf->type == MVNETA_TYPE_XDP_NDO) { + if (napi && buf->type == MVNETA_TYPE_XDP_TX) + xdp_return_frame_rx_napi(buf->xdpf); + else + xdp_return_frame(buf->xdpf); + } + } + + netdev_tx_completed_queue(nq, pkts_compl, bytes_compl); +} + +/* Handle end of transmission */ +static void mvneta_txq_done(struct mvneta_port *pp, + struct mvneta_tx_queue *txq) +{ + struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); + int tx_done; + + tx_done = mvneta_txq_sent_desc_proc(pp, txq); + if (!tx_done) + return; + + mvneta_txq_bufs_free(pp, txq, tx_done, nq, true); + + txq->count -= tx_done; + + if (netif_tx_queue_stopped(nq)) { + if (txq->count <= txq->tx_wake_threshold) + netif_tx_wake_queue(nq); + } +} + +/* Refill processing for SW buffer management */ +/* Allocate page per descriptor */ +static int mvneta_rx_refill(struct mvneta_port *pp, + struct mvneta_rx_desc *rx_desc, + struct mvneta_rx_queue *rxq, + gfp_t gfp_mask) +{ + dma_addr_t phys_addr; + struct page *page; + + page = page_pool_alloc_pages(rxq->page_pool, + gfp_mask | __GFP_NOWARN); + if (!page) + return -ENOMEM; + + phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction; + mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq); + + return 0; +} + +/* Handle tx checksum */ +static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb) +{ + if (skb->ip_summed == CHECKSUM_PARTIAL) { + int ip_hdr_len = 0; + __be16 l3_proto = vlan_get_protocol(skb); + u8 l4_proto; + + if (l3_proto == htons(ETH_P_IP)) { + struct iphdr *ip4h = ip_hdr(skb); + + /* Calculate IPv4 checksum and L4 checksum */ + ip_hdr_len = ip4h->ihl; + l4_proto = ip4h->protocol; + } else if (l3_proto == htons(ETH_P_IPV6)) { + struct ipv6hdr *ip6h = ipv6_hdr(skb); + + /* Read l4_protocol from one of IPv6 extra headers */ + if (skb_network_header_len(skb) > 0) + ip_hdr_len = (skb_network_header_len(skb) >> 2); + l4_proto = ip6h->nexthdr; + } else + return MVNETA_TX_L4_CSUM_NOT; + + return mvneta_txq_desc_csum(skb_network_offset(skb), + l3_proto, ip_hdr_len, l4_proto); + } + + return MVNETA_TX_L4_CSUM_NOT; +} + +/* Drop packets received by the RXQ and free buffers */ +static void mvneta_rxq_drop_pkts(struct mvneta_port *pp, + struct mvneta_rx_queue *rxq) +{ + int rx_done, i; + + rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); + if (rx_done) + mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); + + if (pp->bm_priv) { + for (i = 0; i < rx_done; i++) { + struct mvneta_rx_desc *rx_desc = + mvneta_rxq_next_desc_get(rxq); + u8 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc); + struct mvneta_bm_pool *bm_pool; + + bm_pool = 
&pp->bm_priv->bm_pools[pool_id]; + /* Return dropped buffer to the pool */ + mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, + rx_desc->buf_phys_addr); + } + return; + } + + for (i = 0; i < rxq->size; i++) { + struct mvneta_rx_desc *rx_desc = rxq->descs + i; + void *data = rxq->buf_virt_addr[i]; + if (!data || !(rx_desc->buf_phys_addr)) + continue; + + page_pool_put_full_page(rxq->page_pool, data, false); + } + if (xdp_rxq_info_is_reg(&rxq->xdp_rxq)) + xdp_rxq_info_unreg(&rxq->xdp_rxq); + page_pool_destroy(rxq->page_pool); + rxq->page_pool = NULL; +} + +static void +mvneta_update_stats(struct mvneta_port *pp, + struct mvneta_stats *ps) +{ + struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); + + u64_stats_update_begin(&stats->syncp); + stats->es.ps.rx_packets += ps->rx_packets; + stats->es.ps.rx_bytes += ps->rx_bytes; + /* xdp */ + stats->es.ps.xdp_redirect += ps->xdp_redirect; + stats->es.ps.xdp_pass += ps->xdp_pass; + stats->es.ps.xdp_drop += ps->xdp_drop; + u64_stats_update_end(&stats->syncp); +} + +static inline +int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) +{ + struct mvneta_rx_desc *rx_desc; + int curr_desc = rxq->first_to_refill; + int i; + + for (i = 0; (i < rxq->refill_num) && (i < 64); i++) { + rx_desc = rxq->descs + curr_desc; + if (!(rx_desc->buf_phys_addr)) { + if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) { + struct mvneta_pcpu_stats *stats; + + pr_err("Can't refill queue %d. Done %d from %d\n", + rxq->id, i, rxq->refill_num); + + stats = this_cpu_ptr(pp->stats); + u64_stats_update_begin(&stats->syncp); + stats->es.refill_error++; + u64_stats_update_end(&stats->syncp); + break; + } + } + curr_desc = MVNETA_QUEUE_NEXT_DESC(rxq, curr_desc); + } + rxq->refill_num -= i; + rxq->first_to_refill = curr_desc; + + return i; +} + +static void +mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, + struct xdp_buff *xdp, int sync_len, bool napi) +{ + struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); + int i; + + for (i = 0; i < sinfo->nr_frags; i++) + page_pool_put_full_page(rxq->page_pool, + skb_frag_page(&sinfo->frags[i]), napi); + page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data), + sync_len, napi); +} + +static int +mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq, + struct xdp_frame *xdpf, bool dma_map) +{ + struct mvneta_tx_desc *tx_desc; + struct mvneta_tx_buf *buf; + dma_addr_t dma_addr; + + if (txq->count >= txq->tx_stop_threshold) + return MVNETA_XDP_DROPPED; + + tx_desc = mvneta_txq_next_desc_get(txq); + + buf = &txq->buf[txq->txq_put_index]; + if (dma_map) { + /* ndo_xdp_xmit */ + dma_addr = dma_map_single(pp->dev->dev.parent, xdpf->data, + xdpf->len, DMA_TO_DEVICE); + if (dma_mapping_error(pp->dev->dev.parent, dma_addr)) { + mvneta_txq_desc_put(txq); + return MVNETA_XDP_DROPPED; + } + buf->type = MVNETA_TYPE_XDP_NDO; + } else { + struct page *page = virt_to_page(xdpf->data); + + dma_addr = page_pool_get_dma_addr(page) + + sizeof(*xdpf) + xdpf->headroom; + dma_sync_single_for_device(pp->dev->dev.parent, dma_addr, + xdpf->len, DMA_BIDIRECTIONAL); + buf->type = MVNETA_TYPE_XDP_TX; + } + buf->xdpf = xdpf; + + tx_desc->command = MVNETA_TXD_FLZ_DESC; + tx_desc->buf_phys_addr = dma_addr; + tx_desc->data_size = xdpf->len; + + mvneta_txq_inc_put(txq); + txq->pending++; + txq->count++; + + return MVNETA_XDP_TX; +} + +static int +mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp) +{ + struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); + 
struct mvneta_tx_queue *txq; + struct netdev_queue *nq; + struct xdp_frame *xdpf; + int cpu; + u32 ret; + + xdpf = xdp_convert_buff_to_frame(xdp); + if (unlikely(!xdpf)) + return MVNETA_XDP_DROPPED; + + cpu = smp_processor_id(); + txq = &pp->txqs[cpu % txq_number]; + nq = netdev_get_tx_queue(pp->dev, txq->id); + + __netif_tx_lock(nq, cpu); + ret = mvneta_xdp_submit_frame(pp, txq, xdpf, false); + if (ret == MVNETA_XDP_TX) { + u64_stats_update_begin(&stats->syncp); + stats->es.ps.tx_bytes += xdpf->len; + stats->es.ps.tx_packets++; + stats->es.ps.xdp_tx++; + u64_stats_update_end(&stats->syncp); + + mvneta_txq_pend_desc_add(pp, txq, 0); + } else { + u64_stats_update_begin(&stats->syncp); + stats->es.ps.xdp_tx_err++; + u64_stats_update_end(&stats->syncp); + } + __netif_tx_unlock(nq); + + return ret; +} + +static int +mvneta_xdp_xmit(struct net_device *dev, int num_frame, + struct xdp_frame **frames, u32 flags) +{ + struct mvneta_port *pp = netdev_priv(dev); + struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); + int i, nxmit_byte = 0, nxmit = num_frame; + int cpu = smp_processor_id(); + struct mvneta_tx_queue *txq; + struct netdev_queue *nq; + u32 ret; + + if (unlikely(test_bit(__MVNETA_DOWN, &pp->state))) + return -ENETDOWN; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + txq = &pp->txqs[cpu % txq_number]; + nq = netdev_get_tx_queue(pp->dev, txq->id); + + __netif_tx_lock(nq, cpu); + for (i = 0; i < num_frame; i++) { + ret = mvneta_xdp_submit_frame(pp, txq, frames[i], true); + if (ret == MVNETA_XDP_TX) { + nxmit_byte += frames[i]->len; + } else { + xdp_return_frame_rx_napi(frames[i]); + nxmit--; + } + } + + if (unlikely(flags & XDP_XMIT_FLUSH)) + mvneta_txq_pend_desc_add(pp, txq, 0); + __netif_tx_unlock(nq); + + u64_stats_update_begin(&stats->syncp); + stats->es.ps.tx_bytes += nxmit_byte; + stats->es.ps.tx_packets += nxmit; + stats->es.ps.xdp_xmit += nxmit; + stats->es.ps.xdp_xmit_err += num_frame - nxmit; + u64_stats_update_end(&stats->syncp); + + return nxmit; +} + +static int +mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, + struct bpf_prog *prog, struct xdp_buff *xdp, + u32 frame_sz, struct mvneta_stats *stats) +{ + unsigned int len, data_len, sync; + u32 ret, act; + + len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction; + data_len = xdp->data_end - xdp->data; + act = bpf_prog_run_xdp(prog, xdp); + + /* Due xdp_adjust_tail: DMA sync for_device cover max len CPU touch */ + sync = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction; + sync = max(sync, len); + + switch (act) { + case XDP_PASS: + stats->xdp_pass++; + return MVNETA_XDP_PASS; + case XDP_REDIRECT: { + int err; + + err = xdp_do_redirect(pp->dev, xdp, prog); + if (unlikely(err)) { + mvneta_xdp_put_buff(pp, rxq, xdp, sync, true); + ret = MVNETA_XDP_DROPPED; + } else { + ret = MVNETA_XDP_REDIR; + stats->xdp_redirect++; + } + break; + } + case XDP_TX: + ret = mvneta_xdp_xmit_back(pp, xdp); + if (ret != MVNETA_XDP_TX) + mvneta_xdp_put_buff(pp, rxq, xdp, sync, true); + break; + default: + bpf_warn_invalid_xdp_action(act); + fallthrough; + case XDP_ABORTED: + trace_xdp_exception(pp->dev, prog, act); + fallthrough; + case XDP_DROP: + mvneta_xdp_put_buff(pp, rxq, xdp, sync, true); + ret = MVNETA_XDP_DROPPED; + stats->xdp_drop++; + break; + } + + stats->rx_bytes += frame_sz + xdp->data_end - xdp->data - data_len; + stats->rx_packets++; + + return ret; +} + +static void +mvneta_swbm_rx_frame(struct mvneta_port *pp, + struct mvneta_rx_desc *rx_desc, + struct 
mvneta_rx_queue *rxq, + struct xdp_buff *xdp, int *size, + struct page *page) +{ + unsigned char *data = page_address(page); + int data_len = -MVNETA_MH_SIZE, len; + struct net_device *dev = pp->dev; + enum dma_data_direction dma_dir; + struct skb_shared_info *sinfo; + + if (*size > MVNETA_MAX_RX_BUF_SIZE) { + len = MVNETA_MAX_RX_BUF_SIZE; + data_len += len; + } else { + len = *size; + data_len += len - ETH_FCS_LEN; + } + *size = *size - len; + + dma_dir = page_pool_get_dma_dir(rxq->page_pool); + dma_sync_single_for_cpu(dev->dev.parent, + rx_desc->buf_phys_addr, + len, dma_dir); + + rx_desc->buf_phys_addr = 0; + + /* Prefetch header */ + prefetch(data); + + xdp->data_hard_start = data; + xdp->data = data + pp->rx_offset_correction + MVNETA_MH_SIZE; + xdp->data_end = xdp->data + data_len; + xdp_set_data_meta_invalid(xdp); + + sinfo = xdp_get_shared_info_from_buff(xdp); + sinfo->nr_frags = 0; +} + +static void +mvneta_swbm_add_rx_fragment(struct mvneta_port *pp, + struct mvneta_rx_desc *rx_desc, + struct mvneta_rx_queue *rxq, + struct xdp_buff *xdp, int *size, + struct page *page) +{ + struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); + struct net_device *dev = pp->dev; + enum dma_data_direction dma_dir; + int data_len, len; + + if (*size > MVNETA_MAX_RX_BUF_SIZE) { + len = MVNETA_MAX_RX_BUF_SIZE; + data_len = len; + } else { + len = *size; + data_len = len - ETH_FCS_LEN; + } + dma_dir = page_pool_get_dma_dir(rxq->page_pool); + dma_sync_single_for_cpu(dev->dev.parent, + rx_desc->buf_phys_addr, + len, dma_dir); + rx_desc->buf_phys_addr = 0; + + if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) { + skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags]; + + skb_frag_off_set(frag, pp->rx_offset_correction); + skb_frag_size_set(frag, data_len); + __skb_frag_set_page(frag, page); + sinfo->nr_frags++; + } else { + page_pool_put_full_page(rxq->page_pool, page, true); + } + *size -= len; +} + +static struct sk_buff * +mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, + struct xdp_buff *xdp, u32 desc_status) +{ + struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); + int i, num_frags = sinfo->nr_frags; + struct sk_buff *skb; + + skb = build_skb(xdp->data_hard_start, PAGE_SIZE); + if (!skb) + return ERR_PTR(-ENOMEM); + + page_pool_release_page(rxq->page_pool, virt_to_page(xdp->data)); + + skb_reserve(skb, xdp->data - xdp->data_hard_start); + skb_put(skb, xdp->data_end - xdp->data); + mvneta_rx_csum(pp, desc_status, skb); + + for (i = 0; i < num_frags; i++) { + skb_frag_t *frag = &sinfo->frags[i]; + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + skb_frag_page(frag), skb_frag_off(frag), + skb_frag_size(frag), PAGE_SIZE); + page_pool_release_page(rxq->page_pool, skb_frag_page(frag)); + } + + return skb; +} + +/* Main rx processing when using software buffer management */ +static int mvneta_rx_swbm(struct napi_struct *napi, + struct mvneta_port *pp, int budget, + struct mvneta_rx_queue *rxq) +{ + int rx_proc = 0, rx_todo, refill, size = 0; + struct net_device *dev = pp->dev; + struct xdp_buff xdp_buf = { + .frame_sz = PAGE_SIZE, + .rxq = &rxq->xdp_rxq, + }; + struct mvneta_stats ps = {}; + struct bpf_prog *xdp_prog; + u32 desc_status, frame_sz; + + /* Get number of received packets */ + rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq); + + rcu_read_lock(); + xdp_prog = READ_ONCE(pp->xdp_prog); + + /* Fairness NAPI loop */ + while (rx_proc < budget && rx_proc < rx_todo) { + struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); + u32 
rx_status, index; + struct sk_buff *skb; + struct page *page; + + index = rx_desc - rxq->descs; + page = (struct page *)rxq->buf_virt_addr[index]; + + rx_status = rx_desc->status; + rx_proc++; + rxq->refill_num++; + + if (rx_status & MVNETA_RXD_FIRST_DESC) { + /* Check errors only for FIRST descriptor */ + if (rx_status & MVNETA_RXD_ERR_SUMMARY) { + mvneta_rx_error(pp, rx_desc); + goto next; + } + + size = rx_desc->data_size; + frame_sz = size - ETH_FCS_LEN; + desc_status = rx_status; + + mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf, + &size, page); + } else { + if (unlikely(!xdp_buf.data_hard_start)) { + rx_desc->buf_phys_addr = 0; + page_pool_put_full_page(rxq->page_pool, page, + true); + continue; + } + + mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf, + &size, page); + } /* Middle or Last descriptor */ + + if (!(rx_status & MVNETA_RXD_LAST_DESC)) + /* no last descriptor this time */ + continue; + + if (size) { + mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true); + goto next; + } + + if (xdp_prog && + mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, frame_sz, &ps)) + goto next; + + skb = mvneta_swbm_build_skb(pp, rxq, &xdp_buf, desc_status); + if (IS_ERR(skb)) { + struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); + + mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true); + + u64_stats_update_begin(&stats->syncp); + stats->es.skb_alloc_error++; + stats->rx_dropped++; + u64_stats_update_end(&stats->syncp); + + goto next; + } + + ps.rx_bytes += skb->len; + ps.rx_packets++; + + skb->protocol = eth_type_trans(skb, dev); + napi_gro_receive(napi, skb); +next: + xdp_buf.data_hard_start = NULL; + } + rcu_read_unlock(); + + if (xdp_buf.data_hard_start) + mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true); + + if (ps.xdp_redirect) + xdp_do_flush_map(); + + if (ps.rx_packets) + mvneta_update_stats(pp, &ps); + + /* return some buffers to hardware queue, one at a time is too slow */ + refill = mvneta_rx_refill_queue(pp, rxq); + + /* Update rxq management counters */ + mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill); + + return ps.rx_packets; +} + +/* Main rx processing when using hardware buffer management */ +static int mvneta_rx_hwbm(struct napi_struct *napi, + struct mvneta_port *pp, int rx_todo, + struct mvneta_rx_queue *rxq) +{ + struct net_device *dev = pp->dev; + int rx_done; + u32 rcvd_pkts = 0; + u32 rcvd_bytes = 0; + + /* Get number of received packets */ + rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); + + if (rx_todo > rx_done) + rx_todo = rx_done; + + rx_done = 0; + + /* Fairness NAPI loop */ + while (rx_done < rx_todo) { + struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); + struct mvneta_bm_pool *bm_pool = NULL; + struct sk_buff *skb; + unsigned char *data; + dma_addr_t phys_addr; + u32 rx_status, frag_size; + int rx_bytes, err; + u8 pool_id; + + rx_done++; + rx_status = rx_desc->status; + rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); + data = (u8 *)(uintptr_t)rx_desc->buf_cookie; + phys_addr = rx_desc->buf_phys_addr; + pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc); + bm_pool = &pp->bm_priv->bm_pools[pool_id]; + + if (!mvneta_rxq_desc_is_first_last(rx_status) || + (rx_status & MVNETA_RXD_ERR_SUMMARY)) { +err_drop_frame_ret_pool: + /* Return the buffer to the pool */ + mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, + rx_desc->buf_phys_addr); +err_drop_frame: + mvneta_rx_error(pp, rx_desc); + /* leave the descriptor untouched */ + continue; + } + + if (rx_bytes <= rx_copybreak) { + /* better copy a small frame and not unmap the DMA region */ + skb 
= netdev_alloc_skb_ip_align(dev, rx_bytes); + if (unlikely(!skb)) + goto err_drop_frame_ret_pool; + + dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev, + rx_desc->buf_phys_addr, + MVNETA_MH_SIZE + NET_SKB_PAD, + rx_bytes, + DMA_FROM_DEVICE); + skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD, + rx_bytes); + + skb->protocol = eth_type_trans(skb, dev); + mvneta_rx_csum(pp, rx_status, skb); + napi_gro_receive(napi, skb); + + rcvd_pkts++; + rcvd_bytes += rx_bytes; + + /* Return the buffer to the pool */ + mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, + rx_desc->buf_phys_addr); + + /* leave the descriptor and buffer untouched */ + continue; + } + + /* Refill processing */ + err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC); + if (err) { + struct mvneta_pcpu_stats *stats; + + netdev_err(dev, "Linux processing - Can't refill\n"); + + stats = this_cpu_ptr(pp->stats); + u64_stats_update_begin(&stats->syncp); + stats->es.refill_error++; + u64_stats_update_end(&stats->syncp); + + goto err_drop_frame_ret_pool; + } + + frag_size = bm_pool->hwbm_pool.frag_size; + + skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size); + + /* After refill old buffer has to be unmapped regardless + * the skb is successfully built or not. + */ + dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr, + bm_pool->buf_size, DMA_FROM_DEVICE); + if (!skb) + goto err_drop_frame; + + rcvd_pkts++; + rcvd_bytes += rx_bytes; + + /* Linux processing */ + skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD); + skb_put(skb, rx_bytes); + + skb->protocol = eth_type_trans(skb, dev); + + mvneta_rx_csum(pp, rx_status, skb); + + napi_gro_receive(napi, skb); + } + + if (rcvd_pkts) { + struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); + + u64_stats_update_begin(&stats->syncp); + stats->es.ps.rx_packets += rcvd_pkts; + stats->es.ps.rx_bytes += rcvd_bytes; + u64_stats_update_end(&stats->syncp); + } + + /* Update rxq management counters */ + mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); + + return rx_done; +} + +static inline void +mvneta_tso_put_hdr(struct sk_buff *skb, + struct mvneta_port *pp, struct mvneta_tx_queue *txq) +{ + int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; + struct mvneta_tx_desc *tx_desc; + + tx_desc = mvneta_txq_next_desc_get(txq); + tx_desc->data_size = hdr_len; + tx_desc->command = mvneta_skb_tx_csum(pp, skb); + tx_desc->command |= MVNETA_TXD_F_DESC; + tx_desc->buf_phys_addr = txq->tso_hdrs_phys + + txq->txq_put_index * TSO_HEADER_SIZE; + buf->type = MVNETA_TYPE_SKB; + buf->skb = NULL; + + mvneta_txq_inc_put(txq); +} + +static inline int +mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq, + struct sk_buff *skb, char *data, int size, + bool last_tcp, bool is_last) +{ + struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; + struct mvneta_tx_desc *tx_desc; + + tx_desc = mvneta_txq_next_desc_get(txq); + tx_desc->data_size = size; + tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data, + size, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev->dev.parent, + tx_desc->buf_phys_addr))) { + mvneta_txq_desc_put(txq); + return -ENOMEM; + } + + tx_desc->command = 0; + buf->type = MVNETA_TYPE_SKB; + buf->skb = NULL; + + if (last_tcp) { + /* last descriptor in the TCP packet */ + tx_desc->command = MVNETA_TXD_L_DESC; + + /* last descriptor in SKB */ + if (is_last) + buf->skb = skb; + } + mvneta_txq_inc_put(txq); + return 0; +} + +static int mvneta_tx_tso(struct sk_buff *skb, struct net_device 
*dev, + struct mvneta_tx_queue *txq) +{ + int hdr_len, total_len, data_left; + int desc_count = 0; + struct mvneta_port *pp = netdev_priv(dev); + struct tso_t tso; + int i; + + /* Count needed descriptors */ + if ((txq->count + tso_count_descs(skb)) >= txq->size) + return 0; + + if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) { + pr_info("*** Is this even possible???!?!?\n"); + return 0; + } + + /* Initialize the TSO handler, and prepare the first payload */ + hdr_len = tso_start(skb, &tso); + + total_len = skb->len - hdr_len; + while (total_len > 0) { + char *hdr; + + data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); + total_len -= data_left; + desc_count++; + + /* prepare packet headers: MAC + IP + TCP */ + hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE; + tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0); + + mvneta_tso_put_hdr(skb, pp, txq); + + while (data_left > 0) { + int size; + desc_count++; + + size = min_t(int, tso.size, data_left); + + if (mvneta_tso_put_data(dev, txq, skb, + tso.data, size, + size == data_left, + total_len == 0)) + goto err_release; + data_left -= size; + + tso_build_data(skb, &tso, size); + } + } + + return desc_count; + +err_release: + /* Release all used data descriptors; header descriptors must not + * be DMA-unmapped. + */ + for (i = desc_count - 1; i >= 0; i--) { + struct mvneta_tx_desc *tx_desc = txq->descs + i; + if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr)) + dma_unmap_single(pp->dev->dev.parent, + tx_desc->buf_phys_addr, + tx_desc->data_size, + DMA_TO_DEVICE); + mvneta_txq_desc_put(txq); + } + return 0; +} + +/* Handle tx fragmentation processing */ +static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb, + struct mvneta_tx_queue *txq) +{ + struct mvneta_tx_desc *tx_desc; + int i, nr_frags = skb_shinfo(skb)->nr_frags; + + for (i = 0; i < nr_frags; i++) { + struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + void *addr = skb_frag_address(frag); + + tx_desc = mvneta_txq_next_desc_get(txq); + tx_desc->data_size = skb_frag_size(frag); + + tx_desc->buf_phys_addr = + dma_map_single(pp->dev->dev.parent, addr, + tx_desc->data_size, DMA_TO_DEVICE); + + if (dma_mapping_error(pp->dev->dev.parent, + tx_desc->buf_phys_addr)) { + mvneta_txq_desc_put(txq); + goto error; + } + + if (i == nr_frags - 1) { + /* Last descriptor */ + tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD; + buf->skb = skb; + } else { + /* Descriptor in the middle: Not First, Not Last */ + tx_desc->command = 0; + buf->skb = NULL; + } + buf->type = MVNETA_TYPE_SKB; + mvneta_txq_inc_put(txq); + } + + return 0; + +error: + /* Release all descriptors that were used to map fragments of + * this packet, as well as the corresponding DMA mappings + */ + for (i = i - 1; i >= 0; i--) { + tx_desc = txq->descs + i; + dma_unmap_single(pp->dev->dev.parent, + tx_desc->buf_phys_addr, + tx_desc->data_size, + DMA_TO_DEVICE); + mvneta_txq_desc_put(txq); + } + + return -ENOMEM; +} + +/* Main tx processing */ +static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev) +{ + struct mvneta_port *pp = netdev_priv(dev); + u16 txq_id = skb_get_queue_mapping(skb); + struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; + struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; + struct mvneta_tx_desc *tx_desc; + int len = skb->len; + int frags = 0; + u32 tx_cmd; + + if (!netif_running(dev)) + goto out; + + if (skb_is_gso(skb)) { + frags = mvneta_tx_tso(skb, dev, txq); 
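+		/* mvneta_tx_tso() returns the number of descriptors it
+		 * consumed, or 0 if the frame could not be queued; a zero
+		 * return falls through to the drop path at the "out" label.
+		 */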
+ goto out; + } + + frags = skb_shinfo(skb)->nr_frags + 1; + + /* Get a descriptor for the first part of the packet */ + tx_desc = mvneta_txq_next_desc_get(txq); + + tx_cmd = mvneta_skb_tx_csum(pp, skb); + + tx_desc->data_size = skb_headlen(skb); + + tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data, + tx_desc->data_size, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev->dev.parent, + tx_desc->buf_phys_addr))) { + mvneta_txq_desc_put(txq); + frags = 0; + goto out; + } + + buf->type = MVNETA_TYPE_SKB; + if (frags == 1) { + /* First and Last descriptor */ + tx_cmd |= MVNETA_TXD_FLZ_DESC; + tx_desc->command = tx_cmd; + buf->skb = skb; + mvneta_txq_inc_put(txq); + } else { + /* First but not Last */ + tx_cmd |= MVNETA_TXD_F_DESC; + buf->skb = NULL; + mvneta_txq_inc_put(txq); + tx_desc->command = tx_cmd; + /* Continue with other skb fragments */ + if (mvneta_tx_frag_process(pp, skb, txq)) { + dma_unmap_single(dev->dev.parent, + tx_desc->buf_phys_addr, + tx_desc->data_size, + DMA_TO_DEVICE); + mvneta_txq_desc_put(txq); + frags = 0; + goto out; + } + } + +out: + if (frags > 0) { + struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id); + struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); + + netdev_tx_sent_queue(nq, len); + + txq->count += frags; + if (txq->count >= txq->tx_stop_threshold) + netif_tx_stop_queue(nq); + + if (!netdev_xmit_more() || netif_xmit_stopped(nq) || + txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK) + mvneta_txq_pend_desc_add(pp, txq, frags); + else + txq->pending += frags; + + u64_stats_update_begin(&stats->syncp); + stats->es.ps.tx_bytes += len; + stats->es.ps.tx_packets++; + u64_stats_update_end(&stats->syncp); + } else { + dev->stats.tx_dropped++; + dev_kfree_skb_any(skb); + } + + return NETDEV_TX_OK; +} + + +/* Free tx resources, when resetting a port */ +static void mvneta_txq_done_force(struct mvneta_port *pp, + struct mvneta_tx_queue *txq) + +{ + struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); + int tx_done = txq->count; + + mvneta_txq_bufs_free(pp, txq, tx_done, nq, false); + + /* reset txq */ + txq->count = 0; + txq->txq_put_index = 0; + txq->txq_get_index = 0; +} + +/* Handle tx done - called in softirq context. The <cause_tx_done> argument + * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL. + */ +static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done) +{ + struct mvneta_tx_queue *txq; + struct netdev_queue *nq; + int cpu = smp_processor_id(); + + while (cause_tx_done) { + txq = mvneta_tx_done_policy(pp, cause_tx_done); + + nq = netdev_get_tx_queue(pp->dev, txq->id); + __netif_tx_lock(nq, cpu); + + if (txq->count) + mvneta_txq_done(pp, txq); + + __netif_tx_unlock(nq); + cause_tx_done &= ~((1 << txq->id)); + } +} + +/* Compute crc8 of the specified address, using a unique algorithm , + * according to hw spec, different than generic crc8 algorithm + */ +static int mvneta_addr_crc(unsigned char *addr) +{ + int crc = 0; + int i; + + for (i = 0; i < ETH_ALEN; i++) { + int j; + + crc = (crc ^ addr[i]) << 8; + for (j = 7; j >= 0; j--) { + if (crc & (0x100 << j)) + crc ^= 0x107 << j; + } + } + + return crc; +} + +/* This method controls the net device special MAC multicast support. + * The Special Multicast Table for MAC addresses supports MAC of the form + * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF). + * The MAC DA[7:0] bits are used as a pointer to the Special Multicast + * Table entries in the DA-Filter table. 
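+ * For example, 01:00:5e:00:00:2a has DA[7:0] = 0x2a, so it is steered by
+ * Special Multicast register 0x2a / 4 = 10, byte lane 0x2a % 4 = 2.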
This method sets the appropriate Special + * Multicast Table entry. + */ +static void mvneta_set_special_mcast_addr(struct mvneta_port *pp, + unsigned char last_byte, + int queue) +{ + unsigned int smc_table_reg; + unsigned int tbl_offset; + unsigned int reg_offset; + + /* Register offset from SMC table base */ + tbl_offset = (last_byte / 4); + /* Entry offset within the above reg */ + reg_offset = last_byte % 4; + + smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST + + tbl_offset * 4)); + + if (queue == -1) + smc_table_reg &= ~(0xff << (8 * reg_offset)); + else { + smc_table_reg &= ~(0xff << (8 * reg_offset)); + smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset)); + } + + mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4, + smc_table_reg); +} + +/* This method controls the network device Other MAC multicast support. + * The Other Multicast Table is used for multicast of another type. + * A CRC-8 is used as an index to the Other Multicast Table entries + * in the DA-Filter table. + * The method gets the CRC-8 value from the calling routine and + * sets the appropriate Other Multicast Table entry according to the + * specified CRC-8. + */ +static void mvneta_set_other_mcast_addr(struct mvneta_port *pp, + unsigned char crc8, + int queue) +{ + unsigned int omc_table_reg; + unsigned int tbl_offset; + unsigned int reg_offset; + + tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */ + reg_offset = crc8 % 4; /* Entry offset within the above reg */ + + omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset); + + if (queue == -1) { + /* Clear accepts frame bit at specified Other DA table entry */ + omc_table_reg &= ~(0xff << (8 * reg_offset)); + } else { + omc_table_reg &= ~(0xff << (8 * reg_offset)); + omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset)); + } + + mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg); +} + +/* The network device supports multicast using two tables: + * 1) Special Multicast Table for MAC addresses of the form + * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF). + * The MAC DA[7:0] bits are used as a pointer to the Special Multicast + * Table entries in the DA-Filter table. + * 2) Other Multicast Table for multicast of another type. A CRC-8 value + * is used as an index to the Other Multicast Table entries in the + * DA-Filter table.
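+ * In both cases the byte written for a matching entry is
+ * 0x01 | (rxq << 1), i.e. an "accept" bit plus the target receive queue.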
+ */ +static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr, + int queue) +{ + unsigned char crc_result = 0; + + if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) { + mvneta_set_special_mcast_addr(pp, p_addr[5], queue); + return 0; + } + + crc_result = mvneta_addr_crc(p_addr); + if (queue == -1) { + if (pp->mcast_count[crc_result] == 0) { + netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n", + crc_result); + return -EINVAL; + } + + pp->mcast_count[crc_result]--; + if (pp->mcast_count[crc_result] != 0) { + netdev_info(pp->dev, + "After delete there are %d valid Mcast for crc8=0x%02x\n", + pp->mcast_count[crc_result], crc_result); + return -EINVAL; + } + } else + pp->mcast_count[crc_result]++; + + mvneta_set_other_mcast_addr(pp, crc_result, queue); + + return 0; +} + +/* Configure Fitering mode of Ethernet port */ +static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp, + int is_promisc) +{ + u32 port_cfg_reg, val; + + port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG); + + val = mvreg_read(pp, MVNETA_TYPE_PRIO); + + /* Set / Clear UPM bit in port configuration register */ + if (is_promisc) { + /* Accept all Unicast addresses */ + port_cfg_reg |= MVNETA_UNI_PROMISC_MODE; + val |= MVNETA_FORCE_UNI; + mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff); + mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff); + } else { + /* Reject all Unicast addresses */ + port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE; + val &= ~MVNETA_FORCE_UNI; + } + + mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg); + mvreg_write(pp, MVNETA_TYPE_PRIO, val); +} + +/* register unicast and multicast addresses */ +static void mvneta_set_rx_mode(struct net_device *dev) +{ + struct mvneta_port *pp = netdev_priv(dev); + struct netdev_hw_addr *ha; + + if (dev->flags & IFF_PROMISC) { + /* Accept all: Multicast + Unicast */ + mvneta_rx_unicast_promisc_set(pp, 1); + mvneta_set_ucast_table(pp, pp->rxq_def); + mvneta_set_special_mcast_table(pp, pp->rxq_def); + mvneta_set_other_mcast_table(pp, pp->rxq_def); + } else { + /* Accept single Unicast */ + mvneta_rx_unicast_promisc_set(pp, 0); + mvneta_set_ucast_table(pp, -1); + mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def); + + if (dev->flags & IFF_ALLMULTI) { + /* Accept all multicast */ + mvneta_set_special_mcast_table(pp, pp->rxq_def); + mvneta_set_other_mcast_table(pp, pp->rxq_def); + } else { + /* Accept only initialized multicast */ + mvneta_set_special_mcast_table(pp, -1); + mvneta_set_other_mcast_table(pp, -1); + + if (!netdev_mc_empty(dev)) { + netdev_for_each_mc_addr(ha, dev) { + mvneta_mcast_addr_set(pp, ha->addr, + pp->rxq_def); + } + } + } + } +} + +/* Interrupt handling - the callback for request_irq() */ +static irqreturn_t mvneta_isr(int irq, void *dev_id) +{ + struct mvneta_port *pp = (struct mvneta_port *)dev_id; + + mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); + napi_schedule(&pp->napi); + + return IRQ_HANDLED; +} + +/* Interrupt handling - the callback for request_percpu_irq() */ +static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id) +{ + struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id; + + disable_percpu_irq(port->pp->dev->irq); + napi_schedule(&port->napi); + + return IRQ_HANDLED; +} + +static void mvneta_link_change(struct mvneta_port *pp) +{ + u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS); + + phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP)); +} + +/* NAPI handler + * Bits 0 - 7 of the causeRxTx register indicate that are transmitted + * packets on the corresponding TXQ (Bit 0 is for TX 
queue 1). + * Bits 8 -15 of the cause Rx Tx register indicate that are received + * packets on the corresponding RXQ (Bit 8 is for RX queue 0). + * Each CPU has its own causeRxTx register + */ +static int mvneta_poll(struct napi_struct *napi, int budget) +{ + int rx_done = 0; + u32 cause_rx_tx; + int rx_queue; + struct mvneta_port *pp = netdev_priv(napi->dev); + struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports); + + if (!netif_running(pp->dev)) { + napi_complete(napi); + return rx_done; + } + + /* Read cause register */ + cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE); + if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) { + u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE); + + mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); + + if (cause_misc & (MVNETA_CAUSE_PHY_STATUS_CHANGE | + MVNETA_CAUSE_LINK_CHANGE)) + mvneta_link_change(pp); + } + + /* Release Tx descriptors */ + if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) { + mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL)); + cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL; + } + + /* For the case where the last mvneta_poll did not process all + * RX packets + */ + cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx : + port->cause_rx_tx; + + rx_queue = fls(((cause_rx_tx >> 8) & 0xff)); + if (rx_queue) { + rx_queue = rx_queue - 1; + if (pp->bm_priv) + rx_done = mvneta_rx_hwbm(napi, pp, budget, + &pp->rxqs[rx_queue]); + else + rx_done = mvneta_rx_swbm(napi, pp, budget, + &pp->rxqs[rx_queue]); + } + + if (rx_done < budget) { + cause_rx_tx = 0; + napi_complete_done(napi, rx_done); + + if (pp->neta_armada3700) { + unsigned long flags; + + local_irq_save(flags); + mvreg_write(pp, MVNETA_INTR_NEW_MASK, + MVNETA_RX_INTR_MASK(rxq_number) | + MVNETA_TX_INTR_MASK(txq_number) | + MVNETA_MISCINTR_INTR_MASK); + local_irq_restore(flags); + } else { + enable_percpu_irq(pp->dev->irq, 0); + } + } + + if (pp->neta_armada3700) + pp->cause_rx_tx = cause_rx_tx; + else + port->cause_rx_tx = cause_rx_tx; + + return rx_done; +} + +static int mvneta_create_page_pool(struct mvneta_port *pp, + struct mvneta_rx_queue *rxq, int size) +{ + struct bpf_prog *xdp_prog = READ_ONCE(pp->xdp_prog); + struct page_pool_params pp_params = { + .order = 0, + .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, + .pool_size = size, + .nid = NUMA_NO_NODE, + .dev = pp->dev->dev.parent, + .dma_dir = xdp_prog ? 
DMA_BIDIRECTIONAL : DMA_FROM_DEVICE, + .offset = pp->rx_offset_correction, + .max_len = MVNETA_MAX_RX_BUF_SIZE, + }; + int err; + + rxq->page_pool = page_pool_create(&pp_params); + if (IS_ERR(rxq->page_pool)) { + err = PTR_ERR(rxq->page_pool); + rxq->page_pool = NULL; + return err; + } + + err = xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id); + if (err < 0) + goto err_free_pp; + + err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL, + rxq->page_pool); + if (err) + goto err_unregister_rxq; + + return 0; + +err_unregister_rxq: + xdp_rxq_info_unreg(&rxq->xdp_rxq); +err_free_pp: + page_pool_destroy(rxq->page_pool); + rxq->page_pool = NULL; + return err; +} + +/* Handle rxq fill: allocates rxq skbs; called when initializing a port */ +static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, + int num) +{ + int i, err; + + err = mvneta_create_page_pool(pp, rxq, num); + if (err < 0) + return err; + + for (i = 0; i < num; i++) { + memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc)); + if (mvneta_rx_refill(pp, rxq->descs + i, rxq, + GFP_KERNEL) != 0) { + netdev_err(pp->dev, + "%s:rxq %d, %d of %d buffs filled\n", + __func__, rxq->id, i, num); + break; + } + } + + /* Add this number of RX descriptors as non occupied (ready to + * get packets) + */ + mvneta_rxq_non_occup_desc_add(pp, rxq, i); + + return i; +} + +/* Free all packets pending transmit from all TXQs and reset TX port */ +static void mvneta_tx_reset(struct mvneta_port *pp) +{ + int queue; + + /* free the skb's in the tx ring */ + for (queue = 0; queue < txq_number; queue++) + mvneta_txq_done_force(pp, &pp->txqs[queue]); + + mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET); + mvreg_write(pp, MVNETA_PORT_TX_RESET, 0); +} + +static void mvneta_rx_reset(struct mvneta_port *pp) +{ + mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET); + mvreg_write(pp, MVNETA_PORT_RX_RESET, 0); +} + +/* Rx/Tx queue initialization/cleanup methods */ + +static int mvneta_rxq_sw_init(struct mvneta_port *pp, + struct mvneta_rx_queue *rxq) +{ + rxq->size = pp->rx_ring_size; + + /* Allocate memory for RX descriptors */ + rxq->descs = dma_alloc_coherent(pp->dev->dev.parent, + rxq->size * MVNETA_DESC_ALIGNED_SIZE, + &rxq->descs_phys, GFP_KERNEL); + if (!rxq->descs) + return -ENOMEM; + + rxq->last_desc = rxq->size - 1; + + return 0; +} + +static void mvneta_rxq_hw_init(struct mvneta_port *pp, + struct mvneta_rx_queue *rxq) +{ + /* Set Rx descriptors queue starting address */ + mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys); + mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size); + + /* Set coalescing pkts and time */ + mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); + mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); + + if (!pp->bm_priv) { + /* Set Offset */ + mvneta_rxq_offset_set(pp, rxq, 0); + mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ? 
+ MVNETA_MAX_RX_BUF_SIZE : + MVNETA_RX_BUF_SIZE(pp->pkt_size)); + mvneta_rxq_bm_disable(pp, rxq); + mvneta_rxq_fill(pp, rxq, rxq->size); + } else { + /* Set Offset */ + mvneta_rxq_offset_set(pp, rxq, + NET_SKB_PAD - pp->rx_offset_correction); + + mvneta_rxq_bm_enable(pp, rxq); + /* Fill RXQ with buffers from RX pool */ + mvneta_rxq_long_pool_set(pp, rxq); + mvneta_rxq_short_pool_set(pp, rxq); + mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size); + } +} + +/* Create a specified RX queue */ +static int mvneta_rxq_init(struct mvneta_port *pp, + struct mvneta_rx_queue *rxq) + +{ + int ret; + + ret = mvneta_rxq_sw_init(pp, rxq); + if (ret < 0) + return ret; + + mvneta_rxq_hw_init(pp, rxq); + + return 0; +} + +/* Cleanup Rx queue */ +static void mvneta_rxq_deinit(struct mvneta_port *pp, + struct mvneta_rx_queue *rxq) +{ + mvneta_rxq_drop_pkts(pp, rxq); + + if (rxq->descs) + dma_free_coherent(pp->dev->dev.parent, + rxq->size * MVNETA_DESC_ALIGNED_SIZE, + rxq->descs, + rxq->descs_phys); + + rxq->descs = NULL; + rxq->last_desc = 0; + rxq->next_desc_to_proc = 0; + rxq->descs_phys = 0; + rxq->first_to_refill = 0; + rxq->refill_num = 0; +} + +static int mvneta_txq_sw_init(struct mvneta_port *pp, + struct mvneta_tx_queue *txq) +{ + int cpu; + + txq->size = pp->tx_ring_size; + + /* A queue must always have room for at least one skb. + * Therefore, stop the queue when the free entries reaches + * the maximum number of descriptors per skb. + */ + txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS; + txq->tx_wake_threshold = txq->tx_stop_threshold / 2; + + /* Allocate memory for TX descriptors */ + txq->descs = dma_alloc_coherent(pp->dev->dev.parent, + txq->size * MVNETA_DESC_ALIGNED_SIZE, + &txq->descs_phys, GFP_KERNEL); + if (!txq->descs) + return -ENOMEM; + + txq->last_desc = txq->size - 1; + + txq->buf = kmalloc_array(txq->size, sizeof(*txq->buf), GFP_KERNEL); + if (!txq->buf) + return -ENOMEM; + + /* Allocate DMA buffers for TSO MAC/IP/TCP headers */ + txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent, + txq->size * TSO_HEADER_SIZE, + &txq->tso_hdrs_phys, GFP_KERNEL); + if (!txq->tso_hdrs) + return -ENOMEM; + + /* Setup XPS mapping */ + if (pp->neta_armada3700) + cpu = 0; + else if (txq_number > 1) + cpu = txq->id % num_present_cpus(); + else + cpu = pp->rxq_def % num_present_cpus(); + cpumask_set_cpu(cpu, &txq->affinity_mask); + netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id); + + return 0; +} + +static void mvneta_txq_hw_init(struct mvneta_port *pp, + struct mvneta_tx_queue *txq) +{ + /* Set maximum bandwidth for enabled TXQs */ + mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff); + mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff); + + /* Set Tx descriptors queue starting address */ + mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys); + mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size); + + mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); +} + +/* Create and initialize a tx queue */ +static int mvneta_txq_init(struct mvneta_port *pp, + struct mvneta_tx_queue *txq) +{ + int ret; + + ret = mvneta_txq_sw_init(pp, txq); + if (ret < 0) + return ret; + + mvneta_txq_hw_init(pp, txq); + + return 0; +} + +/* Free allocated resources when mvneta_txq_init() fails to allocate memory*/ +static void mvneta_txq_sw_deinit(struct mvneta_port *pp, + struct mvneta_tx_queue *txq) +{ + struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); + + kfree(txq->buf); + + if (txq->tso_hdrs) + dma_free_coherent(pp->dev->dev.parent, + 
txq->size * TSO_HEADER_SIZE, + txq->tso_hdrs, txq->tso_hdrs_phys); + if (txq->descs) + dma_free_coherent(pp->dev->dev.parent, + txq->size * MVNETA_DESC_ALIGNED_SIZE, + txq->descs, txq->descs_phys); + + netdev_tx_reset_queue(nq); + + txq->descs = NULL; + txq->last_desc = 0; + txq->next_desc_to_proc = 0; + txq->descs_phys = 0; +} + +static void mvneta_txq_hw_deinit(struct mvneta_port *pp, + struct mvneta_tx_queue *txq) +{ + /* Set minimum bandwidth for disabled TXQs */ + mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0); + mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0); + + /* Set Tx descriptors queue starting address and size */ + mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0); + mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0); +} + +static void mvneta_txq_deinit(struct mvneta_port *pp, + struct mvneta_tx_queue *txq) +{ + mvneta_txq_sw_deinit(pp, txq); + mvneta_txq_hw_deinit(pp, txq); +} + +/* Cleanup all Tx queues */ +static void mvneta_cleanup_txqs(struct mvneta_port *pp) +{ + int queue; + + for (queue = 0; queue < txq_number; queue++) + mvneta_txq_deinit(pp, &pp->txqs[queue]); +} + +/* Cleanup all Rx queues */ +static void mvneta_cleanup_rxqs(struct mvneta_port *pp) +{ + int queue; + + for (queue = 0; queue < rxq_number; queue++) + mvneta_rxq_deinit(pp, &pp->rxqs[queue]); +} + + +/* Init all Rx queues */ +static int mvneta_setup_rxqs(struct mvneta_port *pp) +{ + int queue; + + for (queue = 0; queue < rxq_number; queue++) { + int err = mvneta_rxq_init(pp, &pp->rxqs[queue]); + + if (err) { + netdev_err(pp->dev, "%s: can't create rxq=%d\n", + __func__, queue); + mvneta_cleanup_rxqs(pp); + return err; + } + } + + return 0; +} + +/* Init all tx queues */ +static int mvneta_setup_txqs(struct mvneta_port *pp) +{ + int queue; + + for (queue = 0; queue < txq_number; queue++) { + int err = mvneta_txq_init(pp, &pp->txqs[queue]); + if (err) { + netdev_err(pp->dev, "%s: can't create txq=%d\n", + __func__, queue); + mvneta_cleanup_txqs(pp); + return err; + } + } + + return 0; +} + +static int mvneta_comphy_init(struct mvneta_port *pp, phy_interface_t interface) +{ + int ret; + + ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, interface); + if (ret) + return ret; + + return phy_power_on(pp->comphy); +} + +static int mvneta_config_interface(struct mvneta_port *pp, + phy_interface_t interface) +{ + int ret = 0; + + if (pp->comphy) { + if (interface == PHY_INTERFACE_MODE_SGMII || + interface == PHY_INTERFACE_MODE_1000BASEX || + interface == PHY_INTERFACE_MODE_2500BASEX) { + ret = mvneta_comphy_init(pp, interface); + } + } else { + switch (interface) { + case PHY_INTERFACE_MODE_QSGMII: + mvreg_write(pp, MVNETA_SERDES_CFG, + MVNETA_QSGMII_SERDES_PROTO); + break; + + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: + mvreg_write(pp, MVNETA_SERDES_CFG, + MVNETA_SGMII_SERDES_PROTO); + break; + + case PHY_INTERFACE_MODE_2500BASEX: + mvreg_write(pp, MVNETA_SERDES_CFG, + MVNETA_HSGMII_SERDES_PROTO); + break; + default: + break; + } + } + + pp->phy_interface = interface; + + return ret; +} + +static void mvneta_start_dev(struct mvneta_port *pp) +{ + int cpu; + + WARN_ON(mvneta_config_interface(pp, pp->phy_interface)); + + mvneta_max_rx_size_set(pp, pp->pkt_size); + mvneta_txq_max_tx_size_set(pp, pp->pkt_size); + + /* start the Rx/Tx activity */ + mvneta_port_enable(pp); + + if (!pp->neta_armada3700) { + /* Enable polling on the port */ + for_each_online_cpu(cpu) { + struct mvneta_pcpu_port *port = + per_cpu_ptr(pp->ports, cpu); + + napi_enable(&port->napi); + } + } else 
{ + napi_enable(&pp->napi); + } + + /* Unmask interrupts. It has to be done from each CPU */ + on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); + + mvreg_write(pp, MVNETA_INTR_MISC_MASK, + MVNETA_CAUSE_PHY_STATUS_CHANGE | + MVNETA_CAUSE_LINK_CHANGE); + + phylink_start(pp->phylink); + + /* We may have called phylink_speed_down before */ + phylink_speed_up(pp->phylink); + + netif_tx_start_all_queues(pp->dev); + + clear_bit(__MVNETA_DOWN, &pp->state); +} + +static void mvneta_stop_dev(struct mvneta_port *pp) +{ + unsigned int cpu; + + set_bit(__MVNETA_DOWN, &pp->state); + + if (device_may_wakeup(&pp->dev->dev)) + phylink_speed_down(pp->phylink, false); + + phylink_stop(pp->phylink); + + if (!pp->neta_armada3700) { + for_each_online_cpu(cpu) { + struct mvneta_pcpu_port *port = + per_cpu_ptr(pp->ports, cpu); + + napi_disable(&port->napi); + } + } else { + napi_disable(&pp->napi); + } + + netif_carrier_off(pp->dev); + + mvneta_port_down(pp); + netif_tx_stop_all_queues(pp->dev); + + /* Stop the port activity */ + mvneta_port_disable(pp); + + /* Clear all ethernet port interrupts */ + on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true); + + /* Mask all ethernet port interrupts */ + on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); + + mvneta_tx_reset(pp); + mvneta_rx_reset(pp); + + WARN_ON(phy_power_off(pp->comphy)); +} + +static void mvneta_percpu_enable(void *arg) +{ + struct mvneta_port *pp = arg; + + enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE); +} + +static void mvneta_percpu_disable(void *arg) +{ + struct mvneta_port *pp = arg; + + disable_percpu_irq(pp->dev->irq); +} + +/* Change the device mtu */ +static int mvneta_change_mtu(struct net_device *dev, int mtu) +{ + struct mvneta_port *pp = netdev_priv(dev); + int ret; + + if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) { + netdev_info(dev, "Illegal MTU value %d, rounding to %d\n", + mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8)); + mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8); + } + + if (pp->xdp_prog && mtu > MVNETA_MAX_RX_BUF_SIZE) { + netdev_info(dev, "Illegal MTU value %d for XDP mode\n", mtu); + return -EINVAL; + } + + dev->mtu = mtu; + + if (!netif_running(dev)) { + if (pp->bm_priv) + mvneta_bm_update_mtu(pp, mtu); + + netdev_update_features(dev); + return 0; + } + + /* The interface is running, so we have to force a + * reallocation of the queues + */ + mvneta_stop_dev(pp); + on_each_cpu(mvneta_percpu_disable, pp, true); + + mvneta_cleanup_txqs(pp); + mvneta_cleanup_rxqs(pp); + + if (pp->bm_priv) + mvneta_bm_update_mtu(pp, mtu); + + pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu); + + ret = mvneta_setup_rxqs(pp); + if (ret) { + netdev_err(dev, "unable to setup rxqs after MTU change\n"); + return ret; + } + + ret = mvneta_setup_txqs(pp); + if (ret) { + netdev_err(dev, "unable to setup txqs after MTU change\n"); + return ret; + } + + on_each_cpu(mvneta_percpu_enable, pp, true); + mvneta_start_dev(pp); + + netdev_update_features(dev); + + return 0; +} + +static netdev_features_t mvneta_fix_features(struct net_device *dev, + netdev_features_t features) +{ + struct mvneta_port *pp = netdev_priv(dev); + + if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) { + features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO); + netdev_info(dev, + "Disable IP checksum for MTU greater than %dB\n", + pp->tx_csum_limit); + } + + return features; +} + +/* Get mac address */ +static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr) +{ + u32 mac_addr_l, mac_addr_h; + + mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW); + mac_addr_h = mvreg_read(pp, 
MVNETA_MAC_ADDR_HIGH); + addr[0] = (mac_addr_h >> 24) & 0xFF; + addr[1] = (mac_addr_h >> 16) & 0xFF; + addr[2] = (mac_addr_h >> 8) & 0xFF; + addr[3] = mac_addr_h & 0xFF; + addr[4] = (mac_addr_l >> 8) & 0xFF; + addr[5] = mac_addr_l & 0xFF; +} + +/* Handle setting mac address */ +static int mvneta_set_mac_addr(struct net_device *dev, void *addr) +{ + struct mvneta_port *pp = netdev_priv(dev); + struct sockaddr *sockaddr = addr; + int ret; + + ret = eth_prepare_mac_addr_change(dev, addr); + if (ret < 0) + return ret; + /* Remove previous address table entry */ + mvneta_mac_addr_set(pp, dev->dev_addr, -1); + + /* Set new addr in hw */ + mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def); + + eth_commit_mac_addr_change(dev, addr); + return 0; +} + +static void mvneta_validate(struct phylink_config *config, + unsigned long *supported, + struct phylink_link_state *state) +{ + struct net_device *ndev = to_net_dev(config->dev); + struct mvneta_port *pp = netdev_priv(ndev); + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + + /* We only support QSGMII, SGMII, 802.3z and RGMII modes */ + if (state->interface != PHY_INTERFACE_MODE_NA && + state->interface != PHY_INTERFACE_MODE_QSGMII && + state->interface != PHY_INTERFACE_MODE_SGMII && + !phy_interface_mode_is_8023z(state->interface) && + !phy_interface_mode_is_rgmii(state->interface)) { + bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + return; + } + + /* Allow all the expected bits */ + phylink_set(mask, Autoneg); + phylink_set_port_modes(mask); + + /* Asymmetric pause is unsupported */ + phylink_set(mask, Pause); + + /* Half-duplex at speeds higher than 100Mbit is unsupported */ + if (pp->comphy || state->interface != PHY_INTERFACE_MODE_2500BASEX) { + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseX_Full); + } + if (pp->comphy || state->interface == PHY_INTERFACE_MODE_2500BASEX) { + phylink_set(mask, 2500baseT_Full); + phylink_set(mask, 2500baseX_Full); + } + + if (!phy_interface_mode_is_8023z(state->interface)) { + /* 10M and 100M are only supported in non-802.3z mode */ + phylink_set(mask, 10baseT_Half); + phylink_set(mask, 10baseT_Full); + phylink_set(mask, 100baseT_Half); + phylink_set(mask, 100baseT_Full); + } + + bitmap_and(supported, supported, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_and(state->advertising, state->advertising, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); + + /* We can only operate at 2500BaseX or 1000BaseX. If requested + * to advertise both, only report advertising at 2500BaseX. + */ + phylink_helper_basex_speed(state); +} + +static void mvneta_mac_pcs_get_state(struct phylink_config *config, + struct phylink_link_state *state) +{ + struct net_device *ndev = to_net_dev(config->dev); + struct mvneta_port *pp = netdev_priv(ndev); + u32 gmac_stat; + + gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS); + + if (gmac_stat & MVNETA_GMAC_SPEED_1000) + state->speed = + state->interface == PHY_INTERFACE_MODE_2500BASEX ? 
+ SPEED_2500 : SPEED_1000; + else if (gmac_stat & MVNETA_GMAC_SPEED_100) + state->speed = SPEED_100; + else + state->speed = SPEED_10; + + state->an_complete = !!(gmac_stat & MVNETA_GMAC_AN_COMPLETE); + state->link = !!(gmac_stat & MVNETA_GMAC_LINK_UP); + state->duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX); + + state->pause = 0; + if (gmac_stat & MVNETA_GMAC_RX_FLOW_CTRL_ENABLE) + state->pause |= MLO_PAUSE_RX; + if (gmac_stat & MVNETA_GMAC_TX_FLOW_CTRL_ENABLE) + state->pause |= MLO_PAUSE_TX; +} + +static void mvneta_mac_an_restart(struct phylink_config *config) +{ + struct net_device *ndev = to_net_dev(config->dev); + struct mvneta_port *pp = netdev_priv(ndev); + u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); + + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, + gmac_an | MVNETA_GMAC_INBAND_RESTART_AN); + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, + gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN); +} + +static void mvneta_mac_config(struct phylink_config *config, unsigned int mode, + const struct phylink_link_state *state) +{ + struct net_device *ndev = to_net_dev(config->dev); + struct mvneta_port *pp = netdev_priv(ndev); + u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0); + u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2); + u32 new_ctrl4, gmac_ctrl4 = mvreg_read(pp, MVNETA_GMAC_CTRL_4); + u32 new_clk, gmac_clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER); + u32 new_an, gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); + + new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X; + new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE | + MVNETA_GMAC2_PORT_RESET); + new_ctrl4 = gmac_ctrl4 & ~(MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE); + new_clk = gmac_clk & ~MVNETA_GMAC_1MS_CLOCK_ENABLE; + new_an = gmac_an & ~(MVNETA_GMAC_INBAND_AN_ENABLE | + MVNETA_GMAC_INBAND_RESTART_AN | + MVNETA_GMAC_AN_SPEED_EN | + MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL | + MVNETA_GMAC_AN_FLOW_CTRL_EN | + MVNETA_GMAC_AN_DUPLEX_EN); + + /* Even though it might look weird, when we're configured in + * SGMII or QSGMII mode, the RGMII bit needs to be set. + */ + new_ctrl2 |= MVNETA_GMAC2_PORT_RGMII; + + if (state->interface == PHY_INTERFACE_MODE_QSGMII || + state->interface == PHY_INTERFACE_MODE_SGMII || + phy_interface_mode_is_8023z(state->interface)) + new_ctrl2 |= MVNETA_GMAC2_PCS_ENABLE; + + if (phylink_test(state->advertising, Pause)) + new_an |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL; + + if (!phylink_autoneg_inband(mode)) { + /* Phy or fixed speed - nothing to do, leave the + * configured speed, duplex and flow control as-is. 
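+		 * mvneta_mac_link_up() programs the forced speed, duplex and
+		 * pause bits in MVNETA_GMAC_AUTONEG_CONFIG for this case.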
+ */ + } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { + /* SGMII mode receives the state from the PHY */ + new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE; + new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE; + new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN | + MVNETA_GMAC_FORCE_LINK_PASS | + MVNETA_GMAC_CONFIG_MII_SPEED | + MVNETA_GMAC_CONFIG_GMII_SPEED | + MVNETA_GMAC_CONFIG_FULL_DUPLEX)) | + MVNETA_GMAC_INBAND_AN_ENABLE | + MVNETA_GMAC_AN_SPEED_EN | + MVNETA_GMAC_AN_DUPLEX_EN; + } else { + /* 802.3z negotiation - only 1000base-X */ + new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X; + new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE; + new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN | + MVNETA_GMAC_FORCE_LINK_PASS | + MVNETA_GMAC_CONFIG_MII_SPEED)) | + MVNETA_GMAC_INBAND_AN_ENABLE | + MVNETA_GMAC_CONFIG_GMII_SPEED | + /* The MAC only supports FD mode */ + MVNETA_GMAC_CONFIG_FULL_DUPLEX; + + if (state->pause & MLO_PAUSE_AN && state->an_enabled) + new_an |= MVNETA_GMAC_AN_FLOW_CTRL_EN; + } + + /* Armada 370 documentation says we can only change the port mode + * and in-band enable when the link is down, so force it down + * while making these changes. We also do this for GMAC_CTRL2 */ + if ((new_ctrl0 ^ gmac_ctrl0) & MVNETA_GMAC0_PORT_1000BASE_X || + (new_ctrl2 ^ gmac_ctrl2) & MVNETA_GMAC2_INBAND_AN_ENABLE || + (new_an ^ gmac_an) & MVNETA_GMAC_INBAND_AN_ENABLE) { + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, + (gmac_an & ~MVNETA_GMAC_FORCE_LINK_PASS) | + MVNETA_GMAC_FORCE_LINK_DOWN); + } + + + /* When at 2.5G, the link partner can send frames with shortened + * preambles. + */ + if (state->interface == PHY_INTERFACE_MODE_2500BASEX) + new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE; + + if (pp->phy_interface != state->interface) { + if (pp->comphy) + WARN_ON(phy_power_off(pp->comphy)); + WARN_ON(mvneta_config_interface(pp, state->interface)); + } + + if (new_ctrl0 != gmac_ctrl0) + mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0); + if (new_ctrl2 != gmac_ctrl2) + mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2); + if (new_ctrl4 != gmac_ctrl4) + mvreg_write(pp, MVNETA_GMAC_CTRL_4, new_ctrl4); + if (new_clk != gmac_clk) + mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, new_clk); + if (new_an != gmac_an) + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, new_an); + + if (gmac_ctrl2 & MVNETA_GMAC2_PORT_RESET) { + while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) & + MVNETA_GMAC2_PORT_RESET) != 0) + continue; + } +} + +static void mvneta_set_eee(struct mvneta_port *pp, bool enable) +{ + u32 lpi_ctl1; + + lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1); + if (enable) + lpi_ctl1 |= MVNETA_LPI_REQUEST_ENABLE; + else + lpi_ctl1 &= ~MVNETA_LPI_REQUEST_ENABLE; + mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1); +} + +static void mvneta_mac_link_down(struct phylink_config *config, + unsigned int mode, phy_interface_t interface) +{ + struct net_device *ndev = to_net_dev(config->dev); + struct mvneta_port *pp = netdev_priv(ndev); + u32 val; + + mvneta_port_down(pp); + + if (!phylink_autoneg_inband(mode)) { + val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); + val &= ~MVNETA_GMAC_FORCE_LINK_PASS; + val |= MVNETA_GMAC_FORCE_LINK_DOWN; + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); + } + + pp->eee_active = false; + mvneta_set_eee(pp, false); +} + +static void mvneta_mac_link_up(struct phylink_config *config, + struct phy_device *phy, + unsigned int mode, phy_interface_t interface, + int speed, int duplex, + bool tx_pause, bool rx_pause) +{ + struct net_device *ndev = to_net_dev(config->dev); + struct mvneta_port *pp = netdev_priv(ndev); + u32 
val; + + if (!phylink_autoneg_inband(mode)) { + val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); + val &= ~(MVNETA_GMAC_FORCE_LINK_DOWN | + MVNETA_GMAC_CONFIG_MII_SPEED | + MVNETA_GMAC_CONFIG_GMII_SPEED | + MVNETA_GMAC_CONFIG_FLOW_CTRL | + MVNETA_GMAC_CONFIG_FULL_DUPLEX); + val |= MVNETA_GMAC_FORCE_LINK_PASS; + + if (speed == SPEED_1000 || speed == SPEED_2500) + val |= MVNETA_GMAC_CONFIG_GMII_SPEED; + else if (speed == SPEED_100) + val |= MVNETA_GMAC_CONFIG_MII_SPEED; + + if (duplex == DUPLEX_FULL) + val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; + + if (tx_pause || rx_pause) + val |= MVNETA_GMAC_CONFIG_FLOW_CTRL; + + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); + } else { + /* When inband doesn't cover flow control or flow control is + * disabled, we need to manually configure it. This bit will + * only have effect if MVNETA_GMAC_AN_FLOW_CTRL_EN is unset. + */ + val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); + val &= ~MVNETA_GMAC_CONFIG_FLOW_CTRL; + + if (tx_pause || rx_pause) + val |= MVNETA_GMAC_CONFIG_FLOW_CTRL; + + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); + } + + mvneta_port_up(pp); + + if (phy && pp->eee_enabled) { + pp->eee_active = phy_init_eee(phy, 0) >= 0; + mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled); + } +} + +static const struct phylink_mac_ops mvneta_phylink_ops = { + .validate = mvneta_validate, + .mac_pcs_get_state = mvneta_mac_pcs_get_state, + .mac_an_restart = mvneta_mac_an_restart, + .mac_config = mvneta_mac_config, + .mac_link_down = mvneta_mac_link_down, + .mac_link_up = mvneta_mac_link_up, +}; + +static int mvneta_mdio_probe(struct mvneta_port *pp) +{ + struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; + int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0); + + if (err) + netdev_err(pp->dev, "could not attach PHY: %d\n", err); + + phylink_ethtool_get_wol(pp->phylink, &wol); + device_set_wakeup_capable(&pp->dev->dev, !!wol.supported); + + /* PHY WoL may be enabled but device wakeup disabled */ + if (wol.supported) + device_set_wakeup_enable(&pp->dev->dev, !!wol.wolopts); + + return err; +} + +static void mvneta_mdio_remove(struct mvneta_port *pp) +{ + phylink_disconnect_phy(pp->phylink); +} + +/* Electing a CPU must be done in an atomic way: it should be done + * after or before the removal/insertion of a CPU and this function is + * not reentrant. + */ +static void mvneta_percpu_elect(struct mvneta_port *pp) +{ + int elected_cpu = 0, max_cpu, cpu, i = 0; + + /* Use the cpu associated to the rxq when it is online, in all + * the other cases, use the cpu 0 which can't be offline. + */ + if (pp->rxq_def < nr_cpu_ids && cpu_online(pp->rxq_def)) + elected_cpu = pp->rxq_def; + + max_cpu = num_present_cpus(); + + for_each_online_cpu(cpu) { + int rxq_map = 0, txq_map = 0; + int rxq; + + for (rxq = 0; rxq < rxq_number; rxq++) + if ((rxq % max_cpu) == cpu) + rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq); + + if (cpu == elected_cpu) + /* Map the default receive queue queue to the + * elected CPU + */ + rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def); + + /* We update the TX queue map only if we have one + * queue. In this case we associate the TX queue to + * the CPU bound to the default RX queue + */ + if (txq_number == 1) + txq_map = (cpu == elected_cpu) ? 
+ MVNETA_CPU_TXQ_ACCESS(0) : 0; + else + txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) & + MVNETA_CPU_TXQ_ACCESS_ALL_MASK; + + mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map); + + /* Update the interrupt mask on each CPU according to the + * new mapping + */ + smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt, + pp, true); + i++; + + } +}; + +static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node) +{ + int other_cpu; + struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port, + node_online); + struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); + + /* Armada 3700's per-cpu interrupt for mvneta is broken, all interrupts + * are routed to CPU 0, so we don't need all the cpu-hotplug support + */ + if (pp->neta_armada3700) + return 0; + + spin_lock(&pp->lock); + /* + * Configuring the driver for a new CPU while the driver is + * stopping is racy, so just avoid it. + */ + if (pp->is_stopped) { + spin_unlock(&pp->lock); + return 0; + } + netif_tx_stop_all_queues(pp->dev); + + /* + * We have to synchronise on the napi of each CPU except the one + * just being woken up + */ + for_each_online_cpu(other_cpu) { + if (other_cpu != cpu) { + struct mvneta_pcpu_port *other_port = + per_cpu_ptr(pp->ports, other_cpu); + + napi_synchronize(&other_port->napi); + } + } + + /* Mask all ethernet port interrupts */ + on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); + napi_enable(&port->napi); + + /* + * Enable per-CPU interrupts on the CPU that is + * brought up. + */ + mvneta_percpu_enable(pp); + + /* + * Enable per-CPU interrupt on the one CPU we care + * about. + */ + mvneta_percpu_elect(pp); + + /* Unmask all ethernet port interrupts */ + on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); + mvreg_write(pp, MVNETA_INTR_MISC_MASK, + MVNETA_CAUSE_PHY_STATUS_CHANGE | + MVNETA_CAUSE_LINK_CHANGE); + netif_tx_start_all_queues(pp->dev); + spin_unlock(&pp->lock); + return 0; +} + +static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node) +{ + struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port, + node_online); + struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); + + /* + * Thanks to this lock we are sure that any pending cpu election is + * done. + */ + spin_lock(&pp->lock); + /* Mask all ethernet port interrupts */ + on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); + spin_unlock(&pp->lock); + + napi_synchronize(&port->napi); + napi_disable(&port->napi); + /* Disable per-CPU interrupts on the CPU that is brought down. 
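+	 * mvneta_cpu_dead() will elect a new CPU for the default RX queue
+	 * once this CPU has gone fully offline.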
*/ + mvneta_percpu_disable(pp); + return 0; +} + +static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node) +{ + struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port, + node_dead); + + /* Check if a new CPU must be elected now this on is down */ + spin_lock(&pp->lock); + mvneta_percpu_elect(pp); + spin_unlock(&pp->lock); + /* Unmask all ethernet port interrupts */ + on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); + mvreg_write(pp, MVNETA_INTR_MISC_MASK, + MVNETA_CAUSE_PHY_STATUS_CHANGE | + MVNETA_CAUSE_LINK_CHANGE); + netif_tx_start_all_queues(pp->dev); + return 0; +} + +static int mvneta_open(struct net_device *dev) +{ + struct mvneta_port *pp = netdev_priv(dev); + int ret; + + pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); + + ret = mvneta_setup_rxqs(pp); + if (ret) + return ret; + + ret = mvneta_setup_txqs(pp); + if (ret) + goto err_cleanup_rxqs; + + /* Connect to port interrupt line */ + if (pp->neta_armada3700) + ret = request_irq(pp->dev->irq, mvneta_isr, 0, + dev->name, pp); + else + ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr, + dev->name, pp->ports); + if (ret) { + netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq); + goto err_cleanup_txqs; + } + + if (!pp->neta_armada3700) { + /* Enable per-CPU interrupt on all the CPU to handle our RX + * queue interrupts + */ + on_each_cpu(mvneta_percpu_enable, pp, true); + + pp->is_stopped = false; + /* Register a CPU notifier to handle the case where our CPU + * might be taken offline. + */ + ret = cpuhp_state_add_instance_nocalls(online_hpstate, + &pp->node_online); + if (ret) + goto err_free_irq; + + ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD, + &pp->node_dead); + if (ret) + goto err_free_online_hp; + } + + ret = mvneta_mdio_probe(pp); + if (ret < 0) { + netdev_err(dev, "cannot probe MDIO bus\n"); + goto err_free_dead_hp; + } + + mvneta_start_dev(pp); + + return 0; + +err_free_dead_hp: + if (!pp->neta_armada3700) + cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD, + &pp->node_dead); +err_free_online_hp: + if (!pp->neta_armada3700) + cpuhp_state_remove_instance_nocalls(online_hpstate, + &pp->node_online); +err_free_irq: + if (pp->neta_armada3700) { + free_irq(pp->dev->irq, pp); + } else { + on_each_cpu(mvneta_percpu_disable, pp, true); + free_percpu_irq(pp->dev->irq, pp->ports); + } +err_cleanup_txqs: + mvneta_cleanup_txqs(pp); +err_cleanup_rxqs: + mvneta_cleanup_rxqs(pp); + return ret; +} + +/* Stop the port, free port interrupt line */ +static int mvneta_stop(struct net_device *dev) +{ + struct mvneta_port *pp = netdev_priv(dev); + + if (!pp->neta_armada3700) { + /* Inform that we are stopping so we don't want to setup the + * driver for new CPUs in the notifiers. The code of the + * notifier for CPU online is protected by the same spinlock, + * so when we get the lock, the notifer work is done. 
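+		 * mvneta_cpu_online() checks pp->is_stopped under the same
+		 * lock and returns early once it is set.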
+ */ + spin_lock(&pp->lock); + pp->is_stopped = true; + spin_unlock(&pp->lock); + + mvneta_stop_dev(pp); + mvneta_mdio_remove(pp); + + cpuhp_state_remove_instance_nocalls(online_hpstate, + &pp->node_online); + cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD, + &pp->node_dead); + on_each_cpu(mvneta_percpu_disable, pp, true); + free_percpu_irq(dev->irq, pp->ports); + } else { + mvneta_stop_dev(pp); + mvneta_mdio_remove(pp); + free_irq(dev->irq, pp); + } + + mvneta_cleanup_rxqs(pp); + mvneta_cleanup_txqs(pp); + + return 0; +} + +static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct mvneta_port *pp = netdev_priv(dev); + + return phylink_mii_ioctl(pp->phylink, ifr, cmd); +} + +static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + bool need_update, running = netif_running(dev); + struct mvneta_port *pp = netdev_priv(dev); + struct bpf_prog *old_prog; + + if (prog && dev->mtu > MVNETA_MAX_RX_BUF_SIZE) { + NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP"); + return -EOPNOTSUPP; + } + + if (pp->bm_priv) { + NL_SET_ERR_MSG_MOD(extack, + "Hardware Buffer Management not supported on XDP"); + return -EOPNOTSUPP; + } + + need_update = !!pp->xdp_prog != !!prog; + if (running && need_update) + mvneta_stop(dev); + + old_prog = xchg(&pp->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + + if (running && need_update) + return mvneta_open(dev); + + return 0; +} + +static int mvneta_xdp(struct net_device *dev, struct netdev_bpf *xdp) +{ + switch (xdp->command) { + case XDP_SETUP_PROG: + return mvneta_xdp_setup(dev, xdp->prog, xdp->extack); + default: + return -EINVAL; + } +} + +/* Ethtool methods */ + +/* Set link ksettings (phy address, speed) for ethtools */ +static int +mvneta_ethtool_set_link_ksettings(struct net_device *ndev, + const struct ethtool_link_ksettings *cmd) +{ + struct mvneta_port *pp = netdev_priv(ndev); + + return phylink_ethtool_ksettings_set(pp->phylink, cmd); +} + +/* Get link ksettings for ethtools */ +static int +mvneta_ethtool_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *cmd) +{ + struct mvneta_port *pp = netdev_priv(ndev); + + return phylink_ethtool_ksettings_get(pp->phylink, cmd); +} + +static int mvneta_ethtool_nway_reset(struct net_device *dev) +{ + struct mvneta_port *pp = netdev_priv(dev); + + return phylink_ethtool_nway_reset(pp->phylink); +} + +/* Set interrupt coalescing for ethtools */ +static int mvneta_ethtool_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *c) +{ + struct mvneta_port *pp = netdev_priv(dev); + int queue; + + for (queue = 0; queue < rxq_number; queue++) { + struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; + rxq->time_coal = c->rx_coalesce_usecs; + rxq->pkts_coal = c->rx_max_coalesced_frames; + mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); + mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); + } + + for (queue = 0; queue < txq_number; queue++) { + struct mvneta_tx_queue *txq = &pp->txqs[queue]; + txq->done_pkts_coal = c->tx_max_coalesced_frames; + mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); + } + + return 0; +} + +/* get coalescing for ethtools */ +static int mvneta_ethtool_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *c) +{ + struct mvneta_port *pp = netdev_priv(dev); + + c->rx_coalesce_usecs = pp->rxqs[0].time_coal; + c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal; + + c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal; + return 0; +} + + +static void 
mvneta_ethtool_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) +{ + strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME, + sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION, + sizeof(drvinfo->version)); + strlcpy(drvinfo->bus_info, dev_name(&dev->dev), + sizeof(drvinfo->bus_info)); +} + + +static void mvneta_ethtool_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct mvneta_port *pp = netdev_priv(netdev); + + ring->rx_max_pending = MVNETA_MAX_RXD; + ring->tx_max_pending = MVNETA_MAX_TXD; + ring->rx_pending = pp->rx_ring_size; + ring->tx_pending = pp->tx_ring_size; +} + +static int mvneta_ethtool_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ring) +{ + struct mvneta_port *pp = netdev_priv(dev); + + if ((ring->rx_pending == 0) || (ring->tx_pending == 0)) + return -EINVAL; + pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ? + ring->rx_pending : MVNETA_MAX_RXD; + + pp->tx_ring_size = clamp_t(u16, ring->tx_pending, + MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD); + if (pp->tx_ring_size != ring->tx_pending) + netdev_warn(dev, "TX queue size set to %u (requested %u)\n", + pp->tx_ring_size, ring->tx_pending); + + if (netif_running(dev)) { + mvneta_stop(dev); + if (mvneta_open(dev)) { + netdev_err(dev, + "error on opening device after ring param change\n"); + return -ENOMEM; + } + } + + return 0; +} + +static void mvneta_ethtool_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct mvneta_port *pp = netdev_priv(dev); + + phylink_ethtool_get_pauseparam(pp->phylink, pause); +} + +static int mvneta_ethtool_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct mvneta_port *pp = netdev_priv(dev); + + return phylink_ethtool_set_pauseparam(pp->phylink, pause); +} + +static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset, + u8 *data) +{ + if (sset == ETH_SS_STATS) { + int i; + + for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++) + memcpy(data + i * ETH_GSTRING_LEN, + mvneta_statistics[i].name, ETH_GSTRING_LEN); + } +} + +static void +mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp, + struct mvneta_ethtool_stats *es) +{ + unsigned int start; + int cpu; + + for_each_possible_cpu(cpu) { + struct mvneta_pcpu_stats *stats; + u64 skb_alloc_error; + u64 refill_error; + u64 xdp_redirect; + u64 xdp_xmit_err; + u64 xdp_tx_err; + u64 xdp_pass; + u64 xdp_drop; + u64 xdp_xmit; + u64 xdp_tx; + + stats = per_cpu_ptr(pp->stats, cpu); + do { + start = u64_stats_fetch_begin_irq(&stats->syncp); + skb_alloc_error = stats->es.skb_alloc_error; + refill_error = stats->es.refill_error; + xdp_redirect = stats->es.ps.xdp_redirect; + xdp_pass = stats->es.ps.xdp_pass; + xdp_drop = stats->es.ps.xdp_drop; + xdp_xmit = stats->es.ps.xdp_xmit; + xdp_xmit_err = stats->es.ps.xdp_xmit_err; + xdp_tx = stats->es.ps.xdp_tx; + xdp_tx_err = stats->es.ps.xdp_tx_err; + } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + + es->skb_alloc_error += skb_alloc_error; + es->refill_error += refill_error; + es->ps.xdp_redirect += xdp_redirect; + es->ps.xdp_pass += xdp_pass; + es->ps.xdp_drop += xdp_drop; + es->ps.xdp_xmit += xdp_xmit; + es->ps.xdp_xmit_err += xdp_xmit_err; + es->ps.xdp_tx += xdp_tx; + es->ps.xdp_tx_err += xdp_tx_err; + } +} + +static void mvneta_ethtool_update_stats(struct mvneta_port *pp) +{ + struct mvneta_ethtool_stats stats = {}; + const struct mvneta_statistic *s; + void __iomem *base = pp->base; + u32 high, low; + u64 val; + int i; + + 
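+	/* Fold the per-CPU software counters into one snapshot first; the
+	 * loop below then accumulates the hardware MIB registers and fills
+	 * in the software (T_SW) statistics.
+	 */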
mvneta_ethtool_update_pcpu_stats(pp, &stats); + for (i = 0, s = mvneta_statistics; + s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics); + s++, i++) { + switch (s->type) { + case T_REG_32: + val = readl_relaxed(base + s->offset); + pp->ethtool_stats[i] += val; + break; + case T_REG_64: + /* Docs say to read low 32-bit then high */ + low = readl_relaxed(base + s->offset); + high = readl_relaxed(base + s->offset + 4); + val = (u64)high << 32 | low; + pp->ethtool_stats[i] += val; + break; + case T_SW: + switch (s->offset) { + case ETHTOOL_STAT_EEE_WAKEUP: + val = phylink_get_eee_err(pp->phylink); + pp->ethtool_stats[i] += val; + break; + case ETHTOOL_STAT_SKB_ALLOC_ERR: + pp->ethtool_stats[i] = stats.skb_alloc_error; + break; + case ETHTOOL_STAT_REFILL_ERR: + pp->ethtool_stats[i] = stats.refill_error; + break; + case ETHTOOL_XDP_REDIRECT: + pp->ethtool_stats[i] = stats.ps.xdp_redirect; + break; + case ETHTOOL_XDP_PASS: + pp->ethtool_stats[i] = stats.ps.xdp_pass; + break; + case ETHTOOL_XDP_DROP: + pp->ethtool_stats[i] = stats.ps.xdp_drop; + break; + case ETHTOOL_XDP_TX: + pp->ethtool_stats[i] = stats.ps.xdp_tx; + break; + case ETHTOOL_XDP_TX_ERR: + pp->ethtool_stats[i] = stats.ps.xdp_tx_err; + break; + case ETHTOOL_XDP_XMIT: + pp->ethtool_stats[i] = stats.ps.xdp_xmit; + break; + case ETHTOOL_XDP_XMIT_ERR: + pp->ethtool_stats[i] = stats.ps.xdp_xmit_err; + break; + } + break; + } + } +} + +static void mvneta_ethtool_get_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct mvneta_port *pp = netdev_priv(dev); + int i; + + mvneta_ethtool_update_stats(pp); + + for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++) + *data++ = pp->ethtool_stats[i]; +} + +static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset) +{ + if (sset == ETH_SS_STATS) + return ARRAY_SIZE(mvneta_statistics); + return -EOPNOTSUPP; +} + +static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev) +{ + return MVNETA_RSS_LU_TABLE_SIZE; +} + +static int mvneta_ethtool_get_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *info, + u32 *rules __always_unused) +{ + switch (info->cmd) { + case ETHTOOL_GRXRINGS: + info->data = rxq_number; + return 0; + case ETHTOOL_GRXFH: + return -EOPNOTSUPP; + default: + return -EOPNOTSUPP; + } +} + +static int mvneta_config_rss(struct mvneta_port *pp) +{ + int cpu; + u32 val; + + netif_tx_stop_all_queues(pp->dev); + + on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); + + if (!pp->neta_armada3700) { + /* We have to synchronise on the napi of each CPU */ + for_each_online_cpu(cpu) { + struct mvneta_pcpu_port *pcpu_port = + per_cpu_ptr(pp->ports, cpu); + + napi_synchronize(&pcpu_port->napi); + napi_disable(&pcpu_port->napi); + } + } else { + napi_synchronize(&pp->napi); + napi_disable(&pp->napi); + } + + pp->rxq_def = pp->indir[0]; + + /* Update unicast mapping */ + mvneta_set_rx_mode(pp->dev); + + /* Update val of portCfg register accordingly with all RxQueue types */ + val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def); + mvreg_write(pp, MVNETA_PORT_CONFIG, val); + + /* Update the elected CPU matching the new rxq_def */ + spin_lock(&pp->lock); + mvneta_percpu_elect(pp); + spin_unlock(&pp->lock); + + if (!pp->neta_armada3700) { + /* We have to synchronise on the napi of each CPU */ + for_each_online_cpu(cpu) { + struct mvneta_pcpu_port *pcpu_port = + per_cpu_ptr(pp->ports, cpu); + + napi_enable(&pcpu_port->napi); + } + } else { + napi_enable(&pp->napi); + } + + netif_tx_start_all_queues(pp->dev); + + return 0; +} + +static int 
mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct mvneta_port *pp = netdev_priv(dev); + + /* Current code for Armada 3700 doesn't support RSS features yet */ + if (pp->neta_armada3700) + return -EOPNOTSUPP; + + /* We require at least one supported parameter to be changed + * and no change in any of the unsupported parameters + */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + + if (!indir) + return 0; + + memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE); + + return mvneta_config_rss(pp); +} + +static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct mvneta_port *pp = netdev_priv(dev); + + /* Current code for Armada 3700 doesn't support RSS features yet */ + if (pp->neta_armada3700) + return -EOPNOTSUPP; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + if (!indir) + return 0; + + memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE); + + return 0; +} + +static void mvneta_ethtool_get_wol(struct net_device *dev, + struct ethtool_wolinfo *wol) +{ + struct mvneta_port *pp = netdev_priv(dev); + + phylink_ethtool_get_wol(pp->phylink, wol); +} + +static int mvneta_ethtool_set_wol(struct net_device *dev, + struct ethtool_wolinfo *wol) +{ + struct mvneta_port *pp = netdev_priv(dev); + int ret; + + ret = phylink_ethtool_set_wol(pp->phylink, wol); + if (!ret) + device_set_wakeup_enable(&dev->dev, !!wol->wolopts); + + return ret; +} + +static int mvneta_ethtool_get_eee(struct net_device *dev, + struct ethtool_eee *eee) +{ + struct mvneta_port *pp = netdev_priv(dev); + u32 lpi_ctl0; + + lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0); + + eee->eee_enabled = pp->eee_enabled; + eee->eee_active = pp->eee_active; + eee->tx_lpi_enabled = pp->tx_lpi_enabled; + eee->tx_lpi_timer = (lpi_ctl0) >> 8; // * scale; + + return phylink_ethtool_get_eee(pp->phylink, eee); +} + +static int mvneta_ethtool_set_eee(struct net_device *dev, + struct ethtool_eee *eee) +{ + struct mvneta_port *pp = netdev_priv(dev); + u32 lpi_ctl0; + + /* The Armada 37x documents do not give limits for this other than + * it being an 8-bit register. 
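+	 * (The timer value written below occupies bits 15:8 of MVNETA_LPI_CTRL_0, +	 * hence the 255 limit enforced here.)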
*/ + if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255) + return -EINVAL; + + lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0); + lpi_ctl0 &= ~(0xff << 8); + lpi_ctl0 |= eee->tx_lpi_timer << 8; + mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0); + + pp->eee_enabled = eee->eee_enabled; + pp->tx_lpi_enabled = eee->tx_lpi_enabled; + + mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled); + + return phylink_ethtool_set_eee(pp->phylink, eee); +} + +static const struct net_device_ops mvneta_netdev_ops = { + .ndo_open = mvneta_open, + .ndo_stop = mvneta_stop, + .ndo_start_xmit = mvneta_tx, + .ndo_set_rx_mode = mvneta_set_rx_mode, + .ndo_set_mac_address = mvneta_set_mac_addr, + .ndo_change_mtu = mvneta_change_mtu, + .ndo_fix_features = mvneta_fix_features, + .ndo_get_stats64 = mvneta_get_stats64, + .ndo_do_ioctl = mvneta_ioctl, + .ndo_bpf = mvneta_xdp, + .ndo_xdp_xmit = mvneta_xdp_xmit, +}; + +static const struct ethtool_ops mvneta_eth_tool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | + ETHTOOL_COALESCE_MAX_FRAMES, + .nway_reset = mvneta_ethtool_nway_reset, + .get_link = ethtool_op_get_link, + .set_coalesce = mvneta_ethtool_set_coalesce, + .get_coalesce = mvneta_ethtool_get_coalesce, + .get_drvinfo = mvneta_ethtool_get_drvinfo, + .get_ringparam = mvneta_ethtool_get_ringparam, + .set_ringparam = mvneta_ethtool_set_ringparam, + .get_pauseparam = mvneta_ethtool_get_pauseparam, + .set_pauseparam = mvneta_ethtool_set_pauseparam, + .get_strings = mvneta_ethtool_get_strings, + .get_ethtool_stats = mvneta_ethtool_get_stats, + .get_sset_count = mvneta_ethtool_get_sset_count, + .get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size, + .get_rxnfc = mvneta_ethtool_get_rxnfc, + .get_rxfh = mvneta_ethtool_get_rxfh, + .set_rxfh = mvneta_ethtool_set_rxfh, + .get_link_ksettings = mvneta_ethtool_get_link_ksettings, + .set_link_ksettings = mvneta_ethtool_set_link_ksettings, + .get_wol = mvneta_ethtool_get_wol, + .set_wol = mvneta_ethtool_set_wol, + .get_eee = mvneta_ethtool_get_eee, + .set_eee = mvneta_ethtool_set_eee, +}; + +/* Initialize hw */ +static int mvneta_init(struct device *dev, struct mvneta_port *pp) +{ + int queue; + + /* Disable port */ + mvneta_port_disable(pp); + + /* Set port default values */ + mvneta_defaults_set(pp); + + pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL); + if (!pp->txqs) + return -ENOMEM; + + /* Initialize TX descriptor rings */ + for (queue = 0; queue < txq_number; queue++) { + struct mvneta_tx_queue *txq = &pp->txqs[queue]; + txq->id = queue; + txq->size = pp->tx_ring_size; + txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS; + } + + pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL); + if (!pp->rxqs) + return -ENOMEM; + + /* Create Rx descriptor rings */ + for (queue = 0; queue < rxq_number; queue++) { + struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; + rxq->id = queue; + rxq->size = pp->rx_ring_size; + rxq->pkts_coal = MVNETA_RX_COAL_PKTS; + rxq->time_coal = MVNETA_RX_COAL_USEC; + rxq->buf_virt_addr + = devm_kmalloc_array(pp->dev->dev.parent, + rxq->size, + sizeof(*rxq->buf_virt_addr), + GFP_KERNEL); + if (!rxq->buf_virt_addr) + return -ENOMEM; + } + + return 0; +} + +/* platform glue : initialize decoding windows */ +static void mvneta_conf_mbus_windows(struct mvneta_port *pp, + const struct mbus_dram_target_info *dram) +{ + u32 win_enable; + u32 win_protect; + int i; + + for (i = 0; i < 6; i++) { + mvreg_write(pp, MVNETA_WIN_BASE(i), 0); + mvreg_write(pp, MVNETA_WIN_SIZE(i), 0); + + if (i < 4) + mvreg_write(pp, 
MVNETA_WIN_REMAP(i), 0); + } + + win_enable = 0x3f; + win_protect = 0; + + if (dram) { + for (i = 0; i < dram->num_cs; i++) { + const struct mbus_dram_window *cs = dram->cs + i; + + mvreg_write(pp, MVNETA_WIN_BASE(i), + (cs->base & 0xffff0000) | + (cs->mbus_attr << 8) | + dram->mbus_dram_target_id); + + mvreg_write(pp, MVNETA_WIN_SIZE(i), + (cs->size - 1) & 0xffff0000); + + win_enable &= ~(1 << i); + win_protect |= 3 << (2 * i); + } + } else { + /* For Armada3700 open default 4GB Mbus window, leaving + * arbitration of target/attribute to a different layer + * of configuration. + */ + mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000); + win_enable &= ~BIT(0); + win_protect = 3; + } + + mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable); + mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect); +} + +/* Power up the port */ +static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) +{ + /* MAC Cause register should be cleared */ + mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0); + + if (phy_mode != PHY_INTERFACE_MODE_QSGMII && + phy_mode != PHY_INTERFACE_MODE_SGMII && + !phy_interface_mode_is_8023z(phy_mode) && + !phy_interface_mode_is_rgmii(phy_mode)) + return -EINVAL; + + return 0; +} + +/* Device initialization routine */ +static int mvneta_probe(struct platform_device *pdev) +{ + struct device_node *dn = pdev->dev.of_node; + struct device_node *bm_node; + struct mvneta_port *pp; + struct net_device *dev; + struct phylink *phylink; + struct phy *comphy; + const char *dt_mac_addr; + char hw_mac_addr[ETH_ALEN]; + phy_interface_t phy_mode; + const char *mac_from; + int tx_csum_limit; + int err; + int cpu; + + dev = devm_alloc_etherdev_mqs(&pdev->dev, sizeof(struct mvneta_port), + txq_number, rxq_number); + if (!dev) + return -ENOMEM; + + dev->irq = irq_of_parse_and_map(dn, 0); + if (dev->irq == 0) + return -EINVAL; + + err = of_get_phy_mode(dn, &phy_mode); + if (err) { + dev_err(&pdev->dev, "incorrect phy-mode\n"); + goto err_free_irq; + } + + comphy = devm_of_phy_get(&pdev->dev, dn, NULL); + if (comphy == ERR_PTR(-EPROBE_DEFER)) { + err = -EPROBE_DEFER; + goto err_free_irq; + } else if (IS_ERR(comphy)) { + comphy = NULL; + } + + pp = netdev_priv(dev); + spin_lock_init(&pp->lock); + + pp->phylink_config.dev = &dev->dev; + pp->phylink_config.type = PHYLINK_NETDEV; + + phylink = phylink_create(&pp->phylink_config, pdev->dev.fwnode, + phy_mode, &mvneta_phylink_ops); + if (IS_ERR(phylink)) { + err = PTR_ERR(phylink); + goto err_free_irq; + } + + dev->tx_queue_len = MVNETA_MAX_TXD; + dev->watchdog_timeo = 5 * HZ; + dev->netdev_ops = &mvneta_netdev_ops; + + dev->ethtool_ops = &mvneta_eth_tool_ops; + + pp->phylink = phylink; + pp->comphy = comphy; + pp->phy_interface = phy_mode; + pp->dn = dn; + + pp->rxq_def = rxq_def; + pp->indir[0] = rxq_def; + + /* Get special SoC configurations */ + if (of_device_is_compatible(dn, "marvell,armada-3700-neta")) + pp->neta_armada3700 = true; + + pp->clk = devm_clk_get(&pdev->dev, "core"); + if (IS_ERR(pp->clk)) + pp->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(pp->clk)) { + err = PTR_ERR(pp->clk); + goto err_free_phylink; + } + + clk_prepare_enable(pp->clk); + + pp->clk_bus = devm_clk_get(&pdev->dev, "bus"); + if (!IS_ERR(pp->clk_bus)) + clk_prepare_enable(pp->clk_bus); + + pp->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(pp->base)) { + err = PTR_ERR(pp->base); + goto err_clk; + } + + /* Alloc per-cpu port structure */ + pp->ports = alloc_percpu(struct mvneta_pcpu_port); + if (!pp->ports) { + err = -ENOMEM; + goto err_clk; + } + 
+ /* Alloc per-cpu stats */ + pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats); + if (!pp->stats) { + err = -ENOMEM; + goto err_free_ports; + } + + dt_mac_addr = of_get_mac_address(dn); + if (!IS_ERR(dt_mac_addr)) { + mac_from = "device tree"; + ether_addr_copy(dev->dev_addr, dt_mac_addr); + } else { + mvneta_get_mac_addr(pp, hw_mac_addr); + if (is_valid_ether_addr(hw_mac_addr)) { + mac_from = "hardware"; + memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN); + } else { + mac_from = "random"; + eth_hw_addr_random(dev); + } + } + + if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) { + if (tx_csum_limit < 0 || + tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) { + tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE; + dev_info(&pdev->dev, + "Wrong TX csum limit in DT, set to %dB\n", + MVNETA_TX_CSUM_DEF_SIZE); + } + } else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) { + tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE; + } else { + tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE; + } + + pp->tx_csum_limit = tx_csum_limit; + + pp->dram_target_info = mv_mbus_dram_info(); + /* Armada3700 requires setting default configuration of Mbus + * windows, however without using filled mbus_dram_target_info + * structure. + */ + if (pp->dram_target_info || pp->neta_armada3700) + mvneta_conf_mbus_windows(pp, pp->dram_target_info); + + pp->tx_ring_size = MVNETA_MAX_TXD; + pp->rx_ring_size = MVNETA_MAX_RXD; + + pp->dev = dev; + SET_NETDEV_DEV(dev, &pdev->dev); + + pp->id = global_port_id++; + + /* Obtain access to BM resources if enabled and already initialized */ + bm_node = of_parse_phandle(dn, "buffer-manager", 0); + if (bm_node) { + pp->bm_priv = mvneta_bm_get(bm_node); + if (pp->bm_priv) { + err = mvneta_bm_port_init(pdev, pp); + if (err < 0) { + dev_info(&pdev->dev, + "use SW buffer management\n"); + mvneta_bm_put(pp->bm_priv); + pp->bm_priv = NULL; + } + } + /* Set RX packet offset correction for platforms, whose + * NET_SKB_PAD, exceeds 64B. It should be 64B for 64-bit + * platforms and 0B for 32-bit ones. + */ + pp->rx_offset_correction = max(0, + NET_SKB_PAD - + MVNETA_RX_PKT_OFFSET_CORRECTION); + } + of_node_put(bm_node); + + /* sw buffer management */ + if (!pp->bm_priv) + pp->rx_offset_correction = MVNETA_SKB_HEADROOM; + + err = mvneta_init(&pdev->dev, pp); + if (err < 0) + goto err_netdev; + + err = mvneta_port_power_up(pp, pp->phy_interface); + if (err < 0) { + dev_err(&pdev->dev, "can't power up port\n"); + goto err_netdev; + } + + /* Armada3700 network controller does not support per-cpu + * operation, so only single NAPI should be initialized. 
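+	 * That single pp->napi instance then serves all queues; the other SoCs +	 * get one NAPI context per present CPU below.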
+ */ + if (pp->neta_armada3700) { + netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT); + } else { + for_each_present_cpu(cpu) { + struct mvneta_pcpu_port *port = + per_cpu_ptr(pp->ports, cpu); + + netif_napi_add(dev, &port->napi, mvneta_poll, + NAPI_POLL_WEIGHT); + port->pp = pp; + } + } + + dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_TSO | NETIF_F_RXCSUM; + dev->hw_features |= dev->features; + dev->vlan_features |= dev->features; + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; + dev->gso_max_segs = MVNETA_MAX_TSO_SEGS; + + /* MTU range: 68 - 9676 */ + dev->min_mtu = ETH_MIN_MTU; + /* 9676 == 9700 - 20 and rounding to 8 */ + dev->max_mtu = 9676; + + err = register_netdev(dev); + if (err < 0) { + dev_err(&pdev->dev, "failed to register\n"); + goto err_netdev; + } + + netdev_info(dev, "Using %s mac address %pM\n", mac_from, + dev->dev_addr); + + platform_set_drvdata(pdev, pp->dev); + + return 0; + +err_netdev: + if (pp->bm_priv) { + mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); + mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, + 1 << pp->id); + mvneta_bm_put(pp->bm_priv); + } + free_percpu(pp->stats); +err_free_ports: + free_percpu(pp->ports); +err_clk: + clk_disable_unprepare(pp->clk_bus); + clk_disable_unprepare(pp->clk); +err_free_phylink: + if (pp->phylink) + phylink_destroy(pp->phylink); +err_free_irq: + irq_dispose_mapping(dev->irq); + return err; +} + +/* Device removal routine */ +static int mvneta_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct mvneta_port *pp = netdev_priv(dev); + + unregister_netdev(dev); + clk_disable_unprepare(pp->clk_bus); + clk_disable_unprepare(pp->clk); + free_percpu(pp->ports); + free_percpu(pp->stats); + irq_dispose_mapping(dev->irq); + phylink_destroy(pp->phylink); + + if (pp->bm_priv) { + mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); + mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, + 1 << pp->id); + mvneta_bm_put(pp->bm_priv); + } + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int mvneta_suspend(struct device *device) +{ + int queue; + struct net_device *dev = dev_get_drvdata(device); + struct mvneta_port *pp = netdev_priv(dev); + + if (!netif_running(dev)) + goto clean_exit; + + if (!pp->neta_armada3700) { + spin_lock(&pp->lock); + pp->is_stopped = true; + spin_unlock(&pp->lock); + + cpuhp_state_remove_instance_nocalls(online_hpstate, + &pp->node_online); + cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD, + &pp->node_dead); + } + + rtnl_lock(); + mvneta_stop_dev(pp); + rtnl_unlock(); + + for (queue = 0; queue < rxq_number; queue++) { + struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; + + mvneta_rxq_drop_pkts(pp, rxq); + } + + for (queue = 0; queue < txq_number; queue++) { + struct mvneta_tx_queue *txq = &pp->txqs[queue]; + + mvneta_txq_hw_deinit(pp, txq); + } + +clean_exit: + netif_device_detach(dev); + clk_disable_unprepare(pp->clk_bus); + clk_disable_unprepare(pp->clk); + + return 0; +} + +static int mvneta_resume(struct device *device) +{ + struct platform_device *pdev = to_platform_device(device); + struct net_device *dev = dev_get_drvdata(device); + struct mvneta_port *pp = netdev_priv(dev); + int err, queue; + + clk_prepare_enable(pp->clk); + if (!IS_ERR(pp->clk_bus)) + clk_prepare_enable(pp->clk_bus); + if (pp->dram_target_info || pp->neta_armada3700) + mvneta_conf_mbus_windows(pp, pp->dram_target_info); + if (pp->bm_priv) { + err = mvneta_bm_port_init(pdev, pp); + if (err < 0) { + 
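/* BM re-init failed on resume: fall back to software buffer + * management for this port. + */ +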
dev_info(&pdev->dev, "use SW buffer management\n"); + pp->rx_offset_correction = MVNETA_SKB_HEADROOM; + pp->bm_priv = NULL; + } + } + mvneta_defaults_set(pp); + err = mvneta_port_power_up(pp, pp->phy_interface); + if (err < 0) { + dev_err(device, "can't power up port\n"); + return err; + } + + netif_device_attach(dev); + + if (!netif_running(dev)) + return 0; + + for (queue = 0; queue < rxq_number; queue++) { + struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; + + rxq->next_desc_to_proc = 0; + mvneta_rxq_hw_init(pp, rxq); + } + + for (queue = 0; queue < txq_number; queue++) { + struct mvneta_tx_queue *txq = &pp->txqs[queue]; + + txq->next_desc_to_proc = 0; + mvneta_txq_hw_init(pp, txq); + } + + if (!pp->neta_armada3700) { + spin_lock(&pp->lock); + pp->is_stopped = false; + spin_unlock(&pp->lock); + cpuhp_state_add_instance_nocalls(online_hpstate, + &pp->node_online); + cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD, + &pp->node_dead); + } + + rtnl_lock(); + mvneta_start_dev(pp); + rtnl_unlock(); + mvneta_set_rx_mode(dev); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(mvneta_pm_ops, mvneta_suspend, mvneta_resume); + +static const struct of_device_id mvneta_match[] = { + { .compatible = "marvell,armada-370-neta" }, + { .compatible = "marvell,armada-xp-neta" }, + { .compatible = "marvell,armada-3700-neta" }, + { } +}; +MODULE_DEVICE_TABLE(of, mvneta_match); + +static struct platform_driver mvneta_driver = { + .probe = mvneta_probe, + .remove = mvneta_remove, + .driver = { + .name = MVNETA_DRIVER_NAME, + .of_match_table = mvneta_match, + .pm = &mvneta_pm_ops, + }, +}; + +static int __init mvneta_driver_init(void) +{ + int ret; + + ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvneta:online", + mvneta_cpu_online, + mvneta_cpu_down_prepare); + if (ret < 0) + goto out; + online_hpstate = ret; + ret = cpuhp_setup_state_multi(CPUHP_NET_MVNETA_DEAD, "net/mvneta:dead", + NULL, mvneta_cpu_dead); + if (ret) + goto err_dead; + + ret = platform_driver_register(&mvneta_driver); + if (ret) + goto err; + return 0; + +err: + cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD); +err_dead: + cpuhp_remove_multi_state(online_hpstate); +out: + return ret; +} +module_init(mvneta_driver_init); + +static void __exit mvneta_driver_exit(void) +{ + platform_driver_unregister(&mvneta_driver); + cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD); + cpuhp_remove_multi_state(online_hpstate); +} +module_exit(mvneta_driver_exit); + +MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com"); +MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>"); +MODULE_LICENSE("GPL"); + +module_param(rxq_number, int, 0444); +module_param(txq_number, int, 0444); + +module_param(rxq_def, int, 0444); +module_param(rx_copybreak, int, 0644); diff --git a/drivers/net/ethernet/marvell/mvneta_bm.c b/drivers/net/ethernet/marvell/mvneta_bm.c new file mode 100644 index 000000000..46c942ef2 --- /dev/null +++ b/drivers/net/ethernet/marvell/mvneta_bm.c @@ -0,0 +1,501 @@ +/* + * Driver for Marvell NETA network controller Buffer Manager. + * + * Copyright (C) 2015 Marvell + * + * Marcin Wojtas <mw@semihalf.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include <linux/clk.h> +#include <linux/genalloc.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/mbus.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/skbuff.h> +#include <net/hwbm.h> +#include "mvneta_bm.h" + +#define MVNETA_BM_DRIVER_NAME "mvneta_bm" +#define MVNETA_BM_DRIVER_VERSION "1.0" + +static void mvneta_bm_write(struct mvneta_bm *priv, u32 offset, u32 data) +{ + writel(data, priv->reg_base + offset); +} + +static u32 mvneta_bm_read(struct mvneta_bm *priv, u32 offset) +{ + return readl(priv->reg_base + offset); +} + +static void mvneta_bm_pool_enable(struct mvneta_bm *priv, int pool_id) +{ + u32 val; + + val = mvneta_bm_read(priv, MVNETA_BM_POOL_BASE_REG(pool_id)); + val |= MVNETA_BM_POOL_ENABLE_MASK; + mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(pool_id), val); + + /* Clear BM cause register */ + mvneta_bm_write(priv, MVNETA_BM_INTR_CAUSE_REG, 0); +} + +static void mvneta_bm_pool_disable(struct mvneta_bm *priv, int pool_id) +{ + u32 val; + + val = mvneta_bm_read(priv, MVNETA_BM_POOL_BASE_REG(pool_id)); + val &= ~MVNETA_BM_POOL_ENABLE_MASK; + mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(pool_id), val); +} + +static inline void mvneta_bm_config_set(struct mvneta_bm *priv, u32 mask) +{ + u32 val; + + val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG); + val |= mask; + mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val); +} + +static inline void mvneta_bm_config_clear(struct mvneta_bm *priv, u32 mask) +{ + u32 val; + + val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG); + val &= ~mask; + mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val); +} + +static void mvneta_bm_pool_target_set(struct mvneta_bm *priv, int pool_id, + u8 target_id, u8 attr) +{ + u32 val; + + val = mvneta_bm_read(priv, MVNETA_BM_XBAR_POOL_REG(pool_id)); + val &= ~MVNETA_BM_TARGET_ID_MASK(pool_id); + val &= ~MVNETA_BM_XBAR_ATTR_MASK(pool_id); + val |= MVNETA_BM_TARGET_ID_VAL(pool_id, target_id); + val |= MVNETA_BM_XBAR_ATTR_VAL(pool_id, attr); + + mvneta_bm_write(priv, MVNETA_BM_XBAR_POOL_REG(pool_id), val); +} + +int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf) +{ + struct mvneta_bm_pool *bm_pool = + (struct mvneta_bm_pool *)hwbm_pool->priv; + struct mvneta_bm *priv = bm_pool->priv; + dma_addr_t phys_addr; + + /* In order to update buf_cookie field of RX descriptor properly, + * BM hardware expects buf virtual address to be placed in the + * first four bytes of mapped buffer. 
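+ * Note that the assignment below keeps only the low 32 bits of the + * buffer's kernel virtual address, so the stored cookie is only + * meaningful on 32-bit platforms.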
+ */ + *(u32 *)buf = (u32)buf; + phys_addr = dma_map_single(&priv->pdev->dev, buf, bm_pool->buf_size, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&priv->pdev->dev, phys_addr))) + return -ENOMEM; + + mvneta_bm_pool_put_bp(priv, bm_pool, phys_addr); + return 0; +} +EXPORT_SYMBOL_GPL(mvneta_bm_construct); + +/* Create pool */ +static int mvneta_bm_pool_create(struct mvneta_bm *priv, + struct mvneta_bm_pool *bm_pool) +{ + struct platform_device *pdev = priv->pdev; + u8 target_id, attr; + int size_bytes, err; + size_bytes = sizeof(u32) * bm_pool->hwbm_pool.size; + bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes, + &bm_pool->phys_addr, + GFP_KERNEL); + if (!bm_pool->virt_addr) + return -ENOMEM; + + if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVNETA_BM_POOL_PTR_ALIGN)) { + dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr, + bm_pool->phys_addr); + dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n", + bm_pool->id, MVNETA_BM_POOL_PTR_ALIGN); + return -ENOMEM; + } + + err = mvebu_mbus_get_dram_win_info(bm_pool->phys_addr, &target_id, + &attr); + if (err < 0) { + dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr, + bm_pool->phys_addr); + return err; + } + + /* Set pool address */ + mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(bm_pool->id), + bm_pool->phys_addr); + + mvneta_bm_pool_target_set(priv, bm_pool->id, target_id, attr); + mvneta_bm_pool_enable(priv, bm_pool->id); + + return 0; +} + +/* Notify the driver that BM pool is being used as specific type and return the + * pool pointer on success + */ +struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id, + enum mvneta_bm_type type, u8 port_id, + int pkt_size) +{ + struct mvneta_bm_pool *new_pool = &priv->bm_pools[pool_id]; + int num, err; + + if (new_pool->type == MVNETA_BM_LONG && + new_pool->port_map != 1 << port_id) { + dev_err(&priv->pdev->dev, + "long pool cannot be shared by the ports\n"); + return NULL; + } + + if (new_pool->type == MVNETA_BM_SHORT && new_pool->type != type) { + dev_err(&priv->pdev->dev, + "mixing pools' types between the ports is forbidden\n"); + return NULL; + } + + if (new_pool->pkt_size == 0 || type != MVNETA_BM_SHORT) + new_pool->pkt_size = pkt_size; + + /* Allocate buffers in case BM pool hasn't been used yet */ + if (new_pool->type == MVNETA_BM_FREE) { + struct hwbm_pool *hwbm_pool = &new_pool->hwbm_pool; + + new_pool->priv = priv; + new_pool->type = type; + new_pool->buf_size = MVNETA_RX_BUF_SIZE(new_pool->pkt_size); + hwbm_pool->frag_size = + SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(new_pool->pkt_size)) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + hwbm_pool->construct = mvneta_bm_construct; + hwbm_pool->priv = new_pool; + mutex_init(&hwbm_pool->buf_lock); + + /* Create new pool */ + err = mvneta_bm_pool_create(priv, new_pool); + if (err) { + dev_err(&priv->pdev->dev, "fail to create pool %d\n", + new_pool->id); + return NULL; + } + + /* Allocate buffers for this pool */ + num = hwbm_pool_add(hwbm_pool, hwbm_pool->size); + if (num != hwbm_pool->size) { + WARN(1, "pool %d: %d of %d allocated\n", + new_pool->id, num, hwbm_pool->size); + return NULL; + } + } + + return new_pool; +} +EXPORT_SYMBOL_GPL(mvneta_bm_pool_use); + +/* Free all buffers from the pool */ +void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, + u8 port_map) +{ + int i; + + bm_pool->port_map &= ~port_map; + if (bm_pool->port_map) + return; + + mvneta_bm_config_set(priv, MVNETA_BM_EMPTY_LIMIT_MASK); + + for (i = 0; i < bm_pool->hwbm_pool.buf_num; 
i++) { + dma_addr_t buf_phys_addr; + u32 *vaddr; + + /* Get buffer physical address (indirect access) */ + buf_phys_addr = mvneta_bm_pool_get_bp(priv, bm_pool); + + /* Work-around to the problems when destroying the pool, + * when it occurs that a read access to BPPI returns 0. + */ + if (buf_phys_addr == 0) + continue; + + vaddr = phys_to_virt(buf_phys_addr); + if (!vaddr) + break; + + dma_unmap_single(&priv->pdev->dev, buf_phys_addr, + bm_pool->buf_size, DMA_FROM_DEVICE); + hwbm_buf_free(&bm_pool->hwbm_pool, vaddr); + } + + mvneta_bm_config_clear(priv, MVNETA_BM_EMPTY_LIMIT_MASK); + + /* Update BM driver with number of buffers removed from pool */ + bm_pool->hwbm_pool.buf_num -= i; +} +EXPORT_SYMBOL_GPL(mvneta_bm_bufs_free); + +/* Cleanup pool */ +void mvneta_bm_pool_destroy(struct mvneta_bm *priv, + struct mvneta_bm_pool *bm_pool, u8 port_map) +{ + struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool; + bm_pool->port_map &= ~port_map; + if (bm_pool->port_map) + return; + + bm_pool->type = MVNETA_BM_FREE; + + mvneta_bm_bufs_free(priv, bm_pool, port_map); + if (hwbm_pool->buf_num) + WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id); + + if (bm_pool->virt_addr) { + dma_free_coherent(&priv->pdev->dev, + sizeof(u32) * hwbm_pool->size, + bm_pool->virt_addr, bm_pool->phys_addr); + bm_pool->virt_addr = NULL; + } + + mvneta_bm_pool_disable(priv, bm_pool->id); +} +EXPORT_SYMBOL_GPL(mvneta_bm_pool_destroy); + +static void mvneta_bm_pools_init(struct mvneta_bm *priv) +{ + struct device_node *dn = priv->pdev->dev.of_node; + struct mvneta_bm_pool *bm_pool; + char prop[15]; + u32 size; + int i; + + /* Activate BM unit */ + mvneta_bm_write(priv, MVNETA_BM_COMMAND_REG, MVNETA_BM_START_MASK); + + /* Create all pools with maximum size */ + for (i = 0; i < MVNETA_BM_POOLS_NUM; i++) { + bm_pool = &priv->bm_pools[i]; + bm_pool->id = i; + bm_pool->type = MVNETA_BM_FREE; + + /* Reset read pointer */ + mvneta_bm_write(priv, MVNETA_BM_POOL_READ_PTR_REG(i), 0); + + /* Reset write pointer */ + mvneta_bm_write(priv, MVNETA_BM_POOL_WRITE_PTR_REG(i), 0); + + /* Configure pool size according to DT or use default value */ + sprintf(prop, "pool%d,capacity", i); + if (of_property_read_u32(dn, prop, &size)) { + size = MVNETA_BM_POOL_CAP_DEF; + } else if (size > MVNETA_BM_POOL_CAP_MAX) { + dev_warn(&priv->pdev->dev, + "Illegal pool %d capacity %d, set to %d\n", + i, size, MVNETA_BM_POOL_CAP_MAX); + size = MVNETA_BM_POOL_CAP_MAX; + } else if (size < MVNETA_BM_POOL_CAP_MIN) { + dev_warn(&priv->pdev->dev, + "Illegal pool %d capacity %d, set to %d\n", + i, size, MVNETA_BM_POOL_CAP_MIN); + size = MVNETA_BM_POOL_CAP_MIN; + } else if (!IS_ALIGNED(size, MVNETA_BM_POOL_CAP_ALIGN)) { + dev_warn(&priv->pdev->dev, + "Illegal pool %d capacity %d, round to %d\n", + i, size, ALIGN(size, + MVNETA_BM_POOL_CAP_ALIGN)); + size = ALIGN(size, MVNETA_BM_POOL_CAP_ALIGN); + } + bm_pool->hwbm_pool.size = size; + + mvneta_bm_write(priv, MVNETA_BM_POOL_SIZE_REG(i), + bm_pool->hwbm_pool.size); + + /* Obtain custom pkt_size from DT */ + sprintf(prop, "pool%d,pkt-size", i); + if (of_property_read_u32(dn, prop, &bm_pool->pkt_size)) + bm_pool->pkt_size = 0; + } +} + +static void mvneta_bm_default_set(struct mvneta_bm *priv) +{ + u32 val; + + /* Mask BM all interrupts */ + mvneta_bm_write(priv, MVNETA_BM_INTR_MASK_REG, 0); + + /* Clear BM cause register */ + mvneta_bm_write(priv, MVNETA_BM_INTR_CAUSE_REG, 0); + + /* Set BM configuration register */ + val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG); + + /* Reduce MaxInBurstSize from 32 BPs to 16 
BPs */ + val &= ~MVNETA_BM_MAX_IN_BURST_SIZE_MASK; + val |= MVNETA_BM_MAX_IN_BURST_SIZE_16BP; + mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val); +} + +static int mvneta_bm_init(struct mvneta_bm *priv) +{ + mvneta_bm_default_set(priv); + + /* Allocate and initialize BM pools structures */ + priv->bm_pools = devm_kcalloc(&priv->pdev->dev, MVNETA_BM_POOLS_NUM, + sizeof(struct mvneta_bm_pool), + GFP_KERNEL); + if (!priv->bm_pools) + return -ENOMEM; + + mvneta_bm_pools_init(priv); + + return 0; +} + +static int mvneta_bm_get_sram(struct device_node *dn, + struct mvneta_bm *priv) +{ + priv->bppi_pool = of_gen_pool_get(dn, "internal-mem", 0); + if (!priv->bppi_pool) + return -ENOMEM; + + priv->bppi_virt_addr = gen_pool_dma_alloc(priv->bppi_pool, + MVNETA_BM_BPPI_SIZE, + &priv->bppi_phys_addr); + if (!priv->bppi_virt_addr) + return -ENOMEM; + + return 0; +} + +static void mvneta_bm_put_sram(struct mvneta_bm *priv) +{ + gen_pool_free(priv->bppi_pool, priv->bppi_phys_addr, + MVNETA_BM_BPPI_SIZE); +} + +struct mvneta_bm *mvneta_bm_get(struct device_node *node) +{ + struct platform_device *pdev = of_find_device_by_node(node); + + return pdev ? platform_get_drvdata(pdev) : NULL; +} +EXPORT_SYMBOL_GPL(mvneta_bm_get); + +void mvneta_bm_put(struct mvneta_bm *priv) +{ + platform_device_put(priv->pdev); +} +EXPORT_SYMBOL_GPL(mvneta_bm_put); + +static int mvneta_bm_probe(struct platform_device *pdev) +{ + struct device_node *dn = pdev->dev.of_node; + struct mvneta_bm *priv; + int err; + + priv = devm_kzalloc(&pdev->dev, sizeof(struct mvneta_bm), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->reg_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->reg_base)) + return PTR_ERR(priv->reg_base); + + priv->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(priv->clk)) + return PTR_ERR(priv->clk); + err = clk_prepare_enable(priv->clk); + if (err < 0) + return err; + + err = mvneta_bm_get_sram(dn, priv); + if (err < 0) { + dev_err(&pdev->dev, "failed to allocate internal memory\n"); + goto err_clk; + } + + priv->pdev = pdev; + + /* Initialize buffer manager internals */ + err = mvneta_bm_init(priv); + if (err < 0) { + dev_err(&pdev->dev, "failed to initialize controller\n"); + goto err_sram; + } + + dn->data = priv; + platform_set_drvdata(pdev, priv); + + dev_info(&pdev->dev, "Buffer Manager for network controller enabled\n"); + + return 0; + +err_sram: + mvneta_bm_put_sram(priv); +err_clk: + clk_disable_unprepare(priv->clk); + return err; +} + +static int mvneta_bm_remove(struct platform_device *pdev) +{ + struct mvneta_bm *priv = platform_get_drvdata(pdev); + u8 all_ports_map = 0xff; + int i = 0; + + for (i = 0; i < MVNETA_BM_POOLS_NUM; i++) { + struct mvneta_bm_pool *bm_pool = &priv->bm_pools[i]; + + mvneta_bm_pool_destroy(priv, bm_pool, all_ports_map); + } + + mvneta_bm_put_sram(priv); + + /* Dectivate BM unit */ + mvneta_bm_write(priv, MVNETA_BM_COMMAND_REG, MVNETA_BM_STOP_MASK); + + clk_disable_unprepare(priv->clk); + + return 0; +} + +static const struct of_device_id mvneta_bm_match[] = { + { .compatible = "marvell,armada-380-neta-bm" }, + { } +}; +MODULE_DEVICE_TABLE(of, mvneta_bm_match); + +static struct platform_driver mvneta_bm_driver = { + .probe = mvneta_bm_probe, + .remove = mvneta_bm_remove, + .driver = { + .name = MVNETA_BM_DRIVER_NAME, + .of_match_table = mvneta_bm_match, + }, +}; + +module_platform_driver(mvneta_bm_driver); + +MODULE_DESCRIPTION("Marvell NETA Buffer Manager Driver - www.marvell.com"); +MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>"); +MODULE_LICENSE("GPL 
v2"); diff --git a/drivers/net/ethernet/marvell/mvneta_bm.h b/drivers/net/ethernet/marvell/mvneta_bm.h new file mode 100644 index 000000000..e47783ce7 --- /dev/null +++ b/drivers/net/ethernet/marvell/mvneta_bm.h @@ -0,0 +1,192 @@ +/* + * Driver for Marvell NETA network controller Buffer Manager. + * + * Copyright (C) 2015 Marvell + * + * Marcin Wojtas <mw@semihalf.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef _MVNETA_BM_H_ +#define _MVNETA_BM_H_ + +/* BM Configuration Register */ +#define MVNETA_BM_CONFIG_REG 0x0 +#define MVNETA_BM_STATUS_MASK 0x30 +#define MVNETA_BM_ACTIVE_MASK BIT(4) +#define MVNETA_BM_MAX_IN_BURST_SIZE_MASK 0x60000 +#define MVNETA_BM_MAX_IN_BURST_SIZE_16BP BIT(18) +#define MVNETA_BM_EMPTY_LIMIT_MASK BIT(19) + +/* BM Activation Register */ +#define MVNETA_BM_COMMAND_REG 0x4 +#define MVNETA_BM_START_MASK BIT(0) +#define MVNETA_BM_STOP_MASK BIT(1) +#define MVNETA_BM_PAUSE_MASK BIT(2) + +/* BM Xbar interface Register */ +#define MVNETA_BM_XBAR_01_REG 0x8 +#define MVNETA_BM_XBAR_23_REG 0xc +#define MVNETA_BM_XBAR_POOL_REG(pool) \ + (((pool) < 2) ? MVNETA_BM_XBAR_01_REG : MVNETA_BM_XBAR_23_REG) +#define MVNETA_BM_TARGET_ID_OFFS(pool) (((pool) & 1) ? 16 : 0) +#define MVNETA_BM_TARGET_ID_MASK(pool) \ + (0xf << MVNETA_BM_TARGET_ID_OFFS(pool)) +#define MVNETA_BM_TARGET_ID_VAL(pool, id) \ + ((id) << MVNETA_BM_TARGET_ID_OFFS(pool)) +#define MVNETA_BM_XBAR_ATTR_OFFS(pool) (((pool) & 1) ? 20 : 4) +#define MVNETA_BM_XBAR_ATTR_MASK(pool) \ + (0xff << MVNETA_BM_XBAR_ATTR_OFFS(pool)) +#define MVNETA_BM_XBAR_ATTR_VAL(pool, attr) \ + ((attr) << MVNETA_BM_XBAR_ATTR_OFFS(pool)) + +/* Address of External Buffer Pointers Pool Register */ +#define MVNETA_BM_POOL_BASE_REG(pool) (0x10 + ((pool) << 4)) +#define MVNETA_BM_POOL_ENABLE_MASK BIT(0) + +/* External Buffer Pointers Pool RD pointer Register */ +#define MVNETA_BM_POOL_READ_PTR_REG(pool) (0x14 + ((pool) << 4)) +#define MVNETA_BM_POOL_SET_READ_PTR_MASK 0xfffc +#define MVNETA_BM_POOL_GET_READ_PTR_OFFS 16 +#define MVNETA_BM_POOL_GET_READ_PTR_MASK 0xfffc0000 + +/* External Buffer Pointers Pool WR pointer */ +#define MVNETA_BM_POOL_WRITE_PTR_REG(pool) (0x18 + ((pool) << 4)) +#define MVNETA_BM_POOL_SET_WRITE_PTR_OFFS 0 +#define MVNETA_BM_POOL_SET_WRITE_PTR_MASK 0xfffc +#define MVNETA_BM_POOL_GET_WRITE_PTR_OFFS 16 +#define MVNETA_BM_POOL_GET_WRITE_PTR_MASK 0xfffc0000 + +/* External Buffer Pointers Pool Size Register */ +#define MVNETA_BM_POOL_SIZE_REG(pool) (0x1c + ((pool) << 4)) +#define MVNETA_BM_POOL_SIZE_MASK 0x3fff + +/* BM Interrupt Cause Register */ +#define MVNETA_BM_INTR_CAUSE_REG (0x50) + +/* BM interrupt Mask Register */ +#define MVNETA_BM_INTR_MASK_REG (0x54) + +/* Other definitions */ +#define MVNETA_BM_SHORT_PKT_SIZE 256 +#define MVNETA_BM_POOLS_NUM 4 +#define MVNETA_BM_POOL_CAP_MIN 128 +#define MVNETA_BM_POOL_CAP_DEF 2048 +#define MVNETA_BM_POOL_CAP_MAX \ + (16 * 1024 - MVNETA_BM_POOL_CAP_ALIGN) +#define MVNETA_BM_POOL_CAP_ALIGN 32 +#define MVNETA_BM_POOL_PTR_ALIGN 32 + +#define MVNETA_BM_POOL_ACCESS_OFFS 8 + +#define MVNETA_BM_BPPI_SIZE 0x100000 + +#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD) + +enum mvneta_bm_type { + MVNETA_BM_FREE, + MVNETA_BM_LONG, + MVNETA_BM_SHORT +}; + +struct mvneta_bm { + void __iomem *reg_base; + struct clk *clk; + struct platform_device *pdev; + + struct gen_pool *bppi_pool; + /* BPPI virtual base address */ + void 
__iomem *bppi_virt_addr; + /* BPPI physical base address */ + dma_addr_t bppi_phys_addr; + + /* BM pools */ + struct mvneta_bm_pool *bm_pools; +}; + +struct mvneta_bm_pool { + struct hwbm_pool hwbm_pool; + /* Pool number in the range 0-3 */ + u8 id; + enum mvneta_bm_type type; + + /* Packet size */ + int pkt_size; + /* Size of the buffer acces through DMA*/ + u32 buf_size; + + /* BPPE virtual base address */ + u32 *virt_addr; + /* BPPE physical base address */ + dma_addr_t phys_addr; + + /* Ports using BM pool */ + u8 port_map; + + struct mvneta_bm *priv; +}; + +/* Declarations and definitions */ +#if IS_ENABLED(CONFIG_MVNETA_BM) +struct mvneta_bm *mvneta_bm_get(struct device_node *node); +void mvneta_bm_put(struct mvneta_bm *priv); + +void mvneta_bm_pool_destroy(struct mvneta_bm *priv, + struct mvneta_bm_pool *bm_pool, u8 port_map); +void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, + u8 port_map); +int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf); +int mvneta_bm_pool_refill(struct mvneta_bm *priv, + struct mvneta_bm_pool *bm_pool); +struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id, + enum mvneta_bm_type type, u8 port_id, + int pkt_size); + +static inline void mvneta_bm_pool_put_bp(struct mvneta_bm *priv, + struct mvneta_bm_pool *bm_pool, + dma_addr_t buf_phys_addr) +{ + writel_relaxed(buf_phys_addr, priv->bppi_virt_addr + + (bm_pool->id << MVNETA_BM_POOL_ACCESS_OFFS)); +} + +static inline u32 mvneta_bm_pool_get_bp(struct mvneta_bm *priv, + struct mvneta_bm_pool *bm_pool) +{ + return readl_relaxed(priv->bppi_virt_addr + + (bm_pool->id << MVNETA_BM_POOL_ACCESS_OFFS)); +} +#else +static inline void mvneta_bm_pool_destroy(struct mvneta_bm *priv, + struct mvneta_bm_pool *bm_pool, + u8 port_map) {} +static inline void mvneta_bm_bufs_free(struct mvneta_bm *priv, + struct mvneta_bm_pool *bm_pool, + u8 port_map) {} +static inline int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf) +{ return 0; } +static inline int mvneta_bm_pool_refill(struct mvneta_bm *priv, + struct mvneta_bm_pool *bm_pool) +{ return 0; } +static inline struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, + u8 pool_id, + enum mvneta_bm_type type, + u8 port_id, + int pkt_size) +{ return NULL; } + +static inline void mvneta_bm_pool_put_bp(struct mvneta_bm *priv, + struct mvneta_bm_pool *bm_pool, + dma_addr_t buf_phys_addr) {} + +static inline u32 mvneta_bm_pool_get_bp(struct mvneta_bm *priv, + struct mvneta_bm_pool *bm_pool) +{ return 0; } +static inline struct mvneta_bm *mvneta_bm_get(struct device_node *node) +{ return NULL; } +static inline void mvneta_bm_put(struct mvneta_bm *priv) {} +#endif /* CONFIG_MVNETA_BM */ +#endif diff --git a/drivers/net/ethernet/marvell/mvpp2/Makefile b/drivers/net/ethernet/marvell/mvpp2/Makefile new file mode 100644 index 000000000..9bd8e7964 --- /dev/null +++ b/drivers/net/ethernet/marvell/mvpp2/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Marvell PPv2 driver. +# +obj-$(CONFIG_MVPP2) := mvpp2.o + +mvpp2-y := mvpp2_main.o mvpp2_prs.o mvpp2_cls.o mvpp2_debugfs.o +mvpp2-$(CONFIG_MVPP2_PTP) += mvpp2_tai.o diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h new file mode 100644 index 000000000..e999ac2de --- /dev/null +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -0,0 +1,1471 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Definitions for Marvell PPv2 network controller for Armada 375 SoC. 
+ * + * Copyright (C) 2014 Marvell + * + * Marcin Wojtas <mw@semihalf.com> + */ +#ifndef _MVPP2_H_ +#define _MVPP2_H_ + +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/net_tstamp.h> +#include <linux/phy.h> +#include <linux/phylink.h> +#include <net/flow_offload.h> +#include <net/page_pool.h> +#include <linux/bpf.h> +#include <net/xdp.h> + +/* The PacketOffset field is measured in units of 32 bytes and is 3 bits wide, + * so the maximum offset is 7 * 32 = 224 + */ +#define MVPP2_SKB_HEADROOM min(max(XDP_PACKET_HEADROOM, NET_SKB_PAD), 224) + +#define MVPP2_XDP_PASS 0 +#define MVPP2_XDP_DROPPED BIT(0) +#define MVPP2_XDP_TX BIT(1) +#define MVPP2_XDP_REDIR BIT(2) + +/* Fifo Registers */ +#define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port)) +#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port)) +#define MVPP2_RX_MIN_PKT_SIZE_REG 0x60 +#define MVPP2_RX_FIFO_INIT_REG 0x64 +#define MVPP22_TX_FIFO_THRESH_REG(port) (0x8840 + 4 * (port)) +#define MVPP22_TX_FIFO_SIZE_REG(port) (0x8860 + 4 * (port)) + +/* RX DMA Top Registers */ +#define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port)) +#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16) +#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31) +#define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool)) +#define MVPP2_POOL_BUF_SIZE_OFFSET 5 +#define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq)) +#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff +#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9) +#define MVPP2_RXQ_POOL_SHORT_OFFS 20 +#define MVPP21_RXQ_POOL_SHORT_MASK 0x700000 +#define MVPP22_RXQ_POOL_SHORT_MASK 0xf00000 +#define MVPP2_RXQ_POOL_LONG_OFFS 24 +#define MVPP21_RXQ_POOL_LONG_MASK 0x7000000 +#define MVPP22_RXQ_POOL_LONG_MASK 0xf000000 +#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28 +#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000 +#define MVPP2_RXQ_DISABLE_MASK BIT(31) + +/* Top Registers */ +#define MVPP2_MH_REG(port) (0x5040 + 4 * (port)) +#define MVPP2_DSA_EXTENDED BIT(5) + +/* Parser Registers */ +#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000 +#define MVPP2_PRS_PORT_LU_MAX 0xf +#define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4)) +#define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4)) +#define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4)) +#define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8)) +#define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8)) +#define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4)) +#define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8)) +#define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8)) +#define MVPP2_PRS_TCAM_IDX_REG 0x1100 +#define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4) +#define MVPP2_PRS_TCAM_INV_MASK BIT(31) +#define MVPP2_PRS_SRAM_IDX_REG 0x1200 +#define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4) +#define MVPP2_PRS_TCAM_CTRL_REG 0x1230 +#define MVPP2_PRS_TCAM_EN_MASK BIT(0) +#define MVPP2_PRS_TCAM_HIT_IDX_REG 0x1240 +#define MVPP2_PRS_TCAM_HIT_CNT_REG 0x1244 +#define MVPP2_PRS_TCAM_HIT_CNT_MASK GENMASK(15, 0) + +/* RSS Registers */ +#define MVPP22_RSS_INDEX 0x1500 +#define MVPP22_RSS_INDEX_TABLE_ENTRY(idx) (idx) +#define MVPP22_RSS_INDEX_TABLE(idx) ((idx) << 8) +#define MVPP22_RSS_INDEX_QUEUE(idx) ((idx) << 16) +#define MVPP22_RXQ2RSS_TABLE 0x1504 +#define MVPP22_RSS_TABLE_POINTER(p) (p) +#define MVPP22_RSS_TABLE_ENTRY 0x1508 +#define MVPP22_RSS_WIDTH 0x150c + +/* Classifier Registers */ +#define MVPP2_CLS_MODE_REG 0x1800 +#define MVPP2_CLS_MODE_ACTIVE_MASK 
BIT(0) +#define MVPP2_CLS_PORT_WAY_REG 0x1810 +#define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port)) +#define MVPP2_CLS_LKP_INDEX_REG 0x1814 +#define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6 +#define MVPP2_CLS_LKP_TBL_REG 0x1818 +#define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff +#define MVPP2_CLS_LKP_FLOW_PTR(flow) ((flow) << 16) +#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25) +#define MVPP2_CLS_FLOW_INDEX_REG 0x1820 +#define MVPP2_CLS_FLOW_TBL0_REG 0x1824 +#define MVPP2_CLS_FLOW_TBL0_LAST BIT(0) +#define MVPP2_CLS_FLOW_TBL0_ENG_MASK 0x7 +#define MVPP2_CLS_FLOW_TBL0_OFFS 1 +#define MVPP2_CLS_FLOW_TBL0_ENG(x) ((x) << 1) +#define MVPP2_CLS_FLOW_TBL0_PORT_ID_MASK 0xff +#define MVPP2_CLS_FLOW_TBL0_PORT_ID(port) ((port) << 4) +#define MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL BIT(23) +#define MVPP2_CLS_FLOW_TBL1_REG 0x1828 +#define MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK 0x7 +#define MVPP2_CLS_FLOW_TBL1_N_FIELDS(x) (x) +#define MVPP2_CLS_FLOW_TBL1_LU_TYPE(lu) (((lu) & 0x3f) << 3) +#define MVPP2_CLS_FLOW_TBL1_PRIO_MASK 0x3f +#define MVPP2_CLS_FLOW_TBL1_PRIO(x) ((x) << 9) +#define MVPP2_CLS_FLOW_TBL1_SEQ_MASK 0x7 +#define MVPP2_CLS_FLOW_TBL1_SEQ(x) ((x) << 15) +#define MVPP2_CLS_FLOW_TBL2_REG 0x182c +#define MVPP2_CLS_FLOW_TBL2_FLD_MASK 0x3f +#define MVPP2_CLS_FLOW_TBL2_FLD_OFFS(n) ((n) * 6) +#define MVPP2_CLS_FLOW_TBL2_FLD(n, x) ((x) << ((n) * 6)) +#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4)) +#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3 +#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7 +#define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4)) +#define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0 +#define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port)) + +/* Classifier C2 engine Registers */ +#define MVPP22_CLS_C2_TCAM_IDX 0x1b00 +#define MVPP22_CLS_C2_TCAM_DATA0 0x1b10 +#define MVPP22_CLS_C2_TCAM_DATA1 0x1b14 +#define MVPP22_CLS_C2_TCAM_DATA2 0x1b18 +#define MVPP22_CLS_C2_TCAM_DATA3 0x1b1c +#define MVPP22_CLS_C2_TCAM_DATA4 0x1b20 +#define MVPP22_CLS_C2_LU_TYPE(lu) ((lu) & 0x3f) +#define MVPP22_CLS_C2_PORT_ID(port) ((port) << 8) +#define MVPP22_CLS_C2_PORT_MASK (0xff << 8) +#define MVPP22_CLS_C2_TCAM_INV 0x1b24 +#define MVPP22_CLS_C2_TCAM_INV_BIT BIT(31) +#define MVPP22_CLS_C2_HIT_CTR 0x1b50 +#define MVPP22_CLS_C2_ACT 0x1b60 +#define MVPP22_CLS_C2_ACT_RSS_EN(act) (((act) & 0x3) << 19) +#define MVPP22_CLS_C2_ACT_FWD(act) (((act) & 0x7) << 13) +#define MVPP22_CLS_C2_ACT_QHIGH(act) (((act) & 0x3) << 11) +#define MVPP22_CLS_C2_ACT_QLOW(act) (((act) & 0x3) << 9) +#define MVPP22_CLS_C2_ACT_COLOR(act) ((act) & 0x7) +#define MVPP22_CLS_C2_ATTR0 0x1b64 +#define MVPP22_CLS_C2_ATTR0_QHIGH(qh) (((qh) & 0x1f) << 24) +#define MVPP22_CLS_C2_ATTR0_QHIGH_MASK 0x1f +#define MVPP22_CLS_C2_ATTR0_QHIGH_OFFS 24 +#define MVPP22_CLS_C2_ATTR0_QLOW(ql) (((ql) & 0x7) << 21) +#define MVPP22_CLS_C2_ATTR0_QLOW_MASK 0x7 +#define MVPP22_CLS_C2_ATTR0_QLOW_OFFS 21 +#define MVPP22_CLS_C2_ATTR1 0x1b68 +#define MVPP22_CLS_C2_ATTR2 0x1b6c +#define MVPP22_CLS_C2_ATTR2_RSS_EN BIT(30) +#define MVPP22_CLS_C2_ATTR3 0x1b70 +#define MVPP22_CLS_C2_TCAM_CTRL 0x1b90 +#define MVPP22_CLS_C2_TCAM_BYPASS_FIFO BIT(0) + +/* Descriptor Manager Top Registers */ +#define MVPP2_RXQ_NUM_REG 0x2040 +#define MVPP2_RXQ_DESC_ADDR_REG 0x2044 +#define MVPP22_DESC_ADDR_OFFS 8 +#define MVPP2_RXQ_DESC_SIZE_REG 0x2048 +#define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0 +#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq)) +#define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0 +#define MVPP2_RXQ_NUM_NEW_OFFSET 16 +#define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq)) +#define MVPP2_RXQ_OCCUPIED_MASK 
0x3fff +#define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16 +#define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000 +#define MVPP2_RXQ_THRESH_REG 0x204c +#define MVPP2_OCCUPIED_THRESH_OFFSET 0 +#define MVPP2_OCCUPIED_THRESH_MASK 0x3fff +#define MVPP2_RXQ_INDEX_REG 0x2050 +#define MVPP2_TXQ_NUM_REG 0x2080 +#define MVPP2_TXQ_DESC_ADDR_REG 0x2084 +#define MVPP2_TXQ_DESC_SIZE_REG 0x2088 +#define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0 +#define MVPP2_TXQ_THRESH_REG 0x2094 +#define MVPP2_TXQ_THRESH_OFFSET 16 +#define MVPP2_TXQ_THRESH_MASK 0x3fff +#define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090 +#define MVPP2_TXQ_INDEX_REG 0x2098 +#define MVPP2_TXQ_PREF_BUF_REG 0x209c +#define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff) +#define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13)) +#define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14)) +#define MVPP2_PREF_BUF_THRESH(val) ((val) << 17) +#define MVPP2_TXQ_DRAIN_EN_MASK BIT(31) +#define MVPP2_TXQ_PENDING_REG 0x20a0 +#define MVPP2_TXQ_PENDING_MASK 0x3fff +#define MVPP2_TXQ_INT_STATUS_REG 0x20a4 +#define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq)) +#define MVPP2_TRANSMITTED_COUNT_OFFSET 16 +#define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000 +#define MVPP2_TXQ_RSVD_REQ_REG 0x20b0 +#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16 +#define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4 +#define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff +#define MVPP2_TXQ_RSVD_CLR_REG 0x20b8 +#define MVPP2_TXQ_RSVD_CLR_OFFSET 16 +#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu)) +#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS 8 +#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu)) +#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0 +#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu)) +#define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff +#define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu)) + +/* MBUS bridge registers */ +#define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2)) +#define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2)) +#define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2)) +#define MVPP2_BASE_ADDR_ENABLE 0x4060 + +/* AXI Bridge Registers */ +#define MVPP22_AXI_BM_WR_ATTR_REG 0x4100 +#define MVPP22_AXI_BM_RD_ATTR_REG 0x4104 +#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG 0x4110 +#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG 0x4114 +#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG 0x4118 +#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG 0x411c +#define MVPP22_AXI_RX_DATA_WR_ATTR_REG 0x4120 +#define MVPP22_AXI_TX_DATA_RD_ATTR_REG 0x4130 +#define MVPP22_AXI_RD_NORMAL_CODE_REG 0x4150 +#define MVPP22_AXI_RD_SNOOP_CODE_REG 0x4154 +#define MVPP22_AXI_WR_NORMAL_CODE_REG 0x4160 +#define MVPP22_AXI_WR_SNOOP_CODE_REG 0x4164 + +/* Values for AXI Bridge registers */ +#define MVPP22_AXI_ATTR_CACHE_OFFS 0 +#define MVPP22_AXI_ATTR_DOMAIN_OFFS 12 + +#define MVPP22_AXI_CODE_CACHE_OFFS 0 +#define MVPP22_AXI_CODE_DOMAIN_OFFS 4 + +#define MVPP22_AXI_CODE_CACHE_NON_CACHE 0x3 +#define MVPP22_AXI_CODE_CACHE_WR_CACHE 0x7 +#define MVPP22_AXI_CODE_CACHE_RD_CACHE 0xb + +#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 2 +#define MVPP22_AXI_CODE_DOMAIN_SYSTEM 3 + +/* Interrupt Cause and Mask registers */ +#define MVPP2_ISR_TX_THRESHOLD_REG(port) (0x5140 + 4 * (port)) +#define MVPP2_MAX_ISR_TX_THRESHOLD 0xfffff0 + +#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq)) +#define MVPP2_MAX_ISR_RX_THRESHOLD 0xfffff0 +#define MVPP21_ISR_RXQ_GROUP_REG(port) (0x5400 + 4 * (port)) + +#define MVPP22_ISR_RXQ_GROUP_INDEX_REG 0x5400 +#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf +#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380 +#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET 7 + +#define 
MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf +#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380 + +#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG 0x5404 +#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK 0x1f +#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK 0xf00 +#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET 8 + +#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port)) +#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff) +#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000) +#define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port)) +#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(version) \ + ((version) == MVPP21 ? 0xffff : 0xff) +#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000 +#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET 16 +#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24) +#define MVPP2_CAUSE_FCS_ERR_MASK BIT(25) +#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26) +#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29) +#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30) +#define MVPP2_CAUSE_MISC_SUM_MASK BIT(31) +#define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port)) +#define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc +#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff +#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000 +#define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31) +#define MVPP2_ISR_MISC_CAUSE_REG 0x55b0 + +/* Buffer Manager registers */ +#define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4)) +#define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80 +#define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4)) +#define MVPP2_BM_POOL_SIZE_MASK 0xfff0 +#define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4)) +#define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0 +#define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4)) +#define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0 +#define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4)) +#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4)) +#define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff +#define MVPP22_BM_POOL_PTRS_NUM_MASK 0xfff8 +#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16) +#define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4)) +#define MVPP2_BM_START_MASK BIT(0) +#define MVPP2_BM_STOP_MASK BIT(1) +#define MVPP2_BM_STATE_MASK BIT(4) +#define MVPP2_BM_LOW_THRESH_OFFS 8 +#define MVPP2_BM_LOW_THRESH_MASK 0x7f00 +#define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \ + MVPP2_BM_LOW_THRESH_OFFS) +#define MVPP2_BM_HIGH_THRESH_OFFS 16 +#define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000 +#define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \ + MVPP2_BM_HIGH_THRESH_OFFS) +#define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4)) +#define MVPP2_BM_RELEASED_DELAY_MASK BIT(0) +#define MVPP2_BM_ALLOC_FAILED_MASK BIT(1) +#define MVPP2_BM_BPPE_EMPTY_MASK BIT(2) +#define MVPP2_BM_BPPE_FULL_MASK BIT(3) +#define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4) +#define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4)) +#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4)) +#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0) +#define MVPP2_BM_VIRT_ALLOC_REG 0x6440 +#define MVPP22_BM_ADDR_HIGH_ALLOC 0x6444 +#define MVPP22_BM_ADDR_HIGH_PHYS_MASK 0xff +#define MVPP22_BM_ADDR_HIGH_VIRT_MASK 0xff00 +#define MVPP22_BM_ADDR_HIGH_VIRT_SHIFT 8 +#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4)) +#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0) +#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1) +#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2) +#define MVPP2_BM_VIRT_RLS_REG 0x64c0 +#define MVPP22_BM_ADDR_HIGH_RLS_REG 0x64c4 +#define 
MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK 0xff +#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00 +#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8 + +/* Packet Processor per-port counters */ +#define MVPP2_OVERRUN_ETH_DROP 0x7000 +#define MVPP2_CLS_ETH_DROP 0x7020 + +/* Hit counters registers */ +#define MVPP2_CTRS_IDX 0x7040 +#define MVPP22_CTRS_TX_CTR(port, txq) ((txq) | ((port) << 3) | BIT(7)) +#define MVPP2_TX_DESC_ENQ_CTR 0x7100 +#define MVPP2_TX_DESC_ENQ_TO_DDR_CTR 0x7104 +#define MVPP2_TX_BUFF_ENQ_TO_DDR_CTR 0x7108 +#define MVPP2_TX_DESC_ENQ_HW_FWD_CTR 0x710c +#define MVPP2_RX_DESC_ENQ_CTR 0x7120 +#define MVPP2_TX_PKTS_DEQ_CTR 0x7130 +#define MVPP2_TX_PKTS_FULL_QUEUE_DROP_CTR 0x7200 +#define MVPP2_TX_PKTS_EARLY_DROP_CTR 0x7204 +#define MVPP2_TX_PKTS_BM_DROP_CTR 0x7208 +#define MVPP2_TX_PKTS_BM_MC_DROP_CTR 0x720c +#define MVPP2_RX_PKTS_FULL_QUEUE_DROP_CTR 0x7220 +#define MVPP2_RX_PKTS_EARLY_DROP_CTR 0x7224 +#define MVPP2_RX_PKTS_BM_DROP_CTR 0x7228 +#define MVPP2_CLS_DEC_TBL_HIT_CTR 0x7700 +#define MVPP2_CLS_FLOW_TBL_HIT_CTR 0x7704 + +/* TX Scheduler registers */ +#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000 +#define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004 +#define MVPP2_TXP_SCHED_ENQ_MASK 0xff +#define MVPP2_TXP_SCHED_DISQ_OFFSET 8 +#define MVPP2_TXP_SCHED_CMD_1_REG 0x8010 +#define MVPP2_TXP_SCHED_FIXED_PRIO_REG 0x8014 +#define MVPP2_TXP_SCHED_PERIOD_REG 0x8018 +#define MVPP2_TXP_SCHED_MTU_REG 0x801c +#define MVPP2_TXP_MTU_MAX 0x7FFFF +#define MVPP2_TXP_SCHED_REFILL_REG 0x8020 +#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff +#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000 +#define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20) +#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024 +#define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff +#define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2)) +#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff +#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000 +#define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20) +#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2)) +#define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff +#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2)) +#define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff + +/* TX general registers */ +#define MVPP2_TX_SNOOP_REG 0x8800 +#define MVPP2_TX_PORT_FLUSH_REG 0x8810 +#define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port)) + +/* LMS registers */ +#define MVPP2_SRC_ADDR_MIDDLE 0x24 +#define MVPP2_SRC_ADDR_HIGH 0x28 +#define MVPP2_PHY_AN_CFG0_REG 0x34 +#define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7) +#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c +#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27 + +/* Per-port registers */ +#define MVPP2_GMAC_CTRL_0_REG 0x0 +#define MVPP2_GMAC_PORT_EN_MASK BIT(0) +#define MVPP2_GMAC_PORT_TYPE_MASK BIT(1) +#define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2 +#define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc +#define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15) +#define MVPP2_GMAC_CTRL_1_REG 0x4 +#define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1) +#define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5) +#define MVPP2_GMAC_PCS_LB_EN_BIT 6 +#define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6) +#define MVPP2_GMAC_SA_LOW_OFFS 7 +#define MVPP2_GMAC_CTRL_2_REG 0x8 +#define MVPP2_GMAC_INBAND_AN_MASK BIT(0) +#define MVPP2_GMAC_FLOW_CTRL_MASK GENMASK(2, 1) +#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3) +#define MVPP2_GMAC_INTERNAL_CLK_MASK BIT(4) +#define MVPP2_GMAC_DISABLE_PADDING BIT(5) +#define MVPP2_GMAC_PORT_RESET_MASK BIT(6) +#define MVPP2_GMAC_AUTONEG_CONFIG 0xc +#define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0) +#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1) +#define 
MVPP2_GMAC_IN_BAND_AUTONEG BIT(2) +#define MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS BIT(3) +#define MVPP2_GMAC_IN_BAND_RESTART_AN BIT(4) +#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5) +#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6) +#define MVPP2_GMAC_AN_SPEED_EN BIT(7) +#define MVPP2_GMAC_FC_ADV_EN BIT(9) +#define MVPP2_GMAC_FC_ADV_ASM_EN BIT(10) +#define MVPP2_GMAC_FLOW_CTRL_AUTONEG BIT(11) +#define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12) +#define MVPP2_GMAC_AN_DUPLEX_EN BIT(13) +#define MVPP2_GMAC_STATUS0 0x10 +#define MVPP2_GMAC_STATUS0_LINK_UP BIT(0) +#define MVPP2_GMAC_STATUS0_GMII_SPEED BIT(1) +#define MVPP2_GMAC_STATUS0_MII_SPEED BIT(2) +#define MVPP2_GMAC_STATUS0_FULL_DUPLEX BIT(3) +#define MVPP2_GMAC_STATUS0_RX_PAUSE BIT(4) +#define MVPP2_GMAC_STATUS0_TX_PAUSE BIT(5) +#define MVPP2_GMAC_STATUS0_AN_COMPLETE BIT(11) +#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c +#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6 +#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0 +#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \ + MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK) +#define MVPP22_GMAC_INT_STAT 0x20 +#define MVPP22_GMAC_INT_STAT_LINK BIT(1) +#define MVPP22_GMAC_INT_MASK 0x24 +#define MVPP22_GMAC_INT_MASK_LINK_STAT BIT(1) +#define MVPP22_GMAC_CTRL_4_REG 0x90 +#define MVPP22_CTRL4_EXT_PIN_GMII_SEL BIT(0) +#define MVPP22_CTRL4_RX_FC_EN BIT(3) +#define MVPP22_CTRL4_TX_FC_EN BIT(4) +#define MVPP22_CTRL4_DP_CLK_SEL BIT(5) +#define MVPP22_CTRL4_SYNC_BYPASS_DIS BIT(6) +#define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE BIT(7) +#define MVPP22_GMAC_INT_SUM_STAT 0xa0 +#define MVPP22_GMAC_INT_SUM_STAT_INTERNAL BIT(1) +#define MVPP22_GMAC_INT_SUM_STAT_PTP BIT(2) +#define MVPP22_GMAC_INT_SUM_MASK 0xa4 +#define MVPP22_GMAC_INT_SUM_MASK_LINK_STAT BIT(1) +#define MVPP22_GMAC_INT_SUM_MASK_PTP BIT(2) + +/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0, + * relative to port->base. + */ +#define MVPP22_XLG_CTRL0_REG 0x100 +#define MVPP22_XLG_CTRL0_PORT_EN BIT(0) +#define MVPP22_XLG_CTRL0_MAC_RESET_DIS BIT(1) +#define MVPP22_XLG_CTRL0_FORCE_LINK_DOWN BIT(2) +#define MVPP22_XLG_CTRL0_FORCE_LINK_PASS BIT(3) +#define MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN BIT(7) +#define MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN BIT(8) +#define MVPP22_XLG_CTRL0_MIB_CNT_DIS BIT(14) +#define MVPP22_XLG_CTRL1_REG 0x104 +#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS 0 +#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK 0x1fff +#define MVPP22_XLG_STATUS 0x10c +#define MVPP22_XLG_STATUS_LINK_UP BIT(0) +#define MVPP22_XLG_INT_STAT 0x114 +#define MVPP22_XLG_INT_STAT_LINK BIT(1) +#define MVPP22_XLG_INT_MASK 0x118 +#define MVPP22_XLG_INT_MASK_LINK BIT(1) +#define MVPP22_XLG_CTRL3_REG 0x11c +#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK (7 << 13) +#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC (0 << 13) +#define MVPP22_XLG_CTRL3_MACMODESELECT_10G (1 << 13) +#define MVPP22_XLG_EXT_INT_STAT 0x158 +#define MVPP22_XLG_EXT_INT_STAT_XLG BIT(1) +#define MVPP22_XLG_EXT_INT_STAT_PTP BIT(7) +#define MVPP22_XLG_EXT_INT_MASK 0x15c +#define MVPP22_XLG_EXT_INT_MASK_XLG BIT(1) +#define MVPP22_XLG_EXT_INT_MASK_GIG BIT(2) +#define MVPP22_XLG_EXT_INT_MASK_PTP BIT(7) +#define MVPP22_XLG_CTRL4_REG 0x184 +#define MVPP22_XLG_CTRL4_FWD_FC BIT(5) +#define MVPP22_XLG_CTRL4_FWD_PFC BIT(6) +#define MVPP22_XLG_CTRL4_MACMODSELECT_GMAC BIT(12) +#define MVPP22_XLG_CTRL4_EN_IDLE_CHECK BIT(14) + +/* SMI registers. PPv2.2 only, relative to priv->iface_base. 
*/ +#define MVPP22_SMI_MISC_CFG_REG 0x1204 +#define MVPP22_SMI_POLLING_EN BIT(10) + +/* TAI registers, PPv2.2 only, relative to priv->iface_base */ +#define MVPP22_TAI_INT_CAUSE 0x1400 +#define MVPP22_TAI_INT_MASK 0x1404 +#define MVPP22_TAI_CR0 0x1408 +#define MVPP22_TAI_CR1 0x140c +#define MVPP22_TAI_TCFCR0 0x1410 +#define MVPP22_TAI_TCFCR1 0x1414 +#define MVPP22_TAI_TCFCR2 0x1418 +#define MVPP22_TAI_FATWR 0x141c +#define MVPP22_TAI_TOD_STEP_NANO_CR 0x1420 +#define MVPP22_TAI_TOD_STEP_FRAC_HIGH 0x1424 +#define MVPP22_TAI_TOD_STEP_FRAC_LOW 0x1428 +#define MVPP22_TAI_TAPDC_HIGH 0x142c +#define MVPP22_TAI_TAPDC_LOW 0x1430 +#define MVPP22_TAI_TGTOD_SEC_HIGH 0x1434 +#define MVPP22_TAI_TGTOD_SEC_MED 0x1438 +#define MVPP22_TAI_TGTOD_SEC_LOW 0x143c +#define MVPP22_TAI_TGTOD_NANO_HIGH 0x1440 +#define MVPP22_TAI_TGTOD_NANO_LOW 0x1444 +#define MVPP22_TAI_TGTOD_FRAC_HIGH 0x1448 +#define MVPP22_TAI_TGTOD_FRAC_LOW 0x144c +#define MVPP22_TAI_TLV_SEC_HIGH 0x1450 +#define MVPP22_TAI_TLV_SEC_MED 0x1454 +#define MVPP22_TAI_TLV_SEC_LOW 0x1458 +#define MVPP22_TAI_TLV_NANO_HIGH 0x145c +#define MVPP22_TAI_TLV_NANO_LOW 0x1460 +#define MVPP22_TAI_TLV_FRAC_HIGH 0x1464 +#define MVPP22_TAI_TLV_FRAC_LOW 0x1468 +#define MVPP22_TAI_TCV0_SEC_HIGH 0x146c +#define MVPP22_TAI_TCV0_SEC_MED 0x1470 +#define MVPP22_TAI_TCV0_SEC_LOW 0x1474 +#define MVPP22_TAI_TCV0_NANO_HIGH 0x1478 +#define MVPP22_TAI_TCV0_NANO_LOW 0x147c +#define MVPP22_TAI_TCV0_FRAC_HIGH 0x1480 +#define MVPP22_TAI_TCV0_FRAC_LOW 0x1484 +#define MVPP22_TAI_TCV1_SEC_HIGH 0x1488 +#define MVPP22_TAI_TCV1_SEC_MED 0x148c +#define MVPP22_TAI_TCV1_SEC_LOW 0x1490 +#define MVPP22_TAI_TCV1_NANO_HIGH 0x1494 +#define MVPP22_TAI_TCV1_NANO_LOW 0x1498 +#define MVPP22_TAI_TCV1_FRAC_HIGH 0x149c +#define MVPP22_TAI_TCV1_FRAC_LOW 0x14a0 +#define MVPP22_TAI_TCSR 0x14a4 +#define MVPP22_TAI_TUC_LSB 0x14a8 +#define MVPP22_TAI_GFM_SEC_HIGH 0x14ac +#define MVPP22_TAI_GFM_SEC_MED 0x14b0 +#define MVPP22_TAI_GFM_SEC_LOW 0x14b4 +#define MVPP22_TAI_GFM_NANO_HIGH 0x14b8 +#define MVPP22_TAI_GFM_NANO_LOW 0x14bc +#define MVPP22_TAI_GFM_FRAC_HIGH 0x14c0 +#define MVPP22_TAI_GFM_FRAC_LOW 0x14c4 +#define MVPP22_TAI_PCLK_DA_HIGH 0x14c8 +#define MVPP22_TAI_PCLK_DA_LOW 0x14cc +#define MVPP22_TAI_CTCR 0x14d0 +#define MVPP22_TAI_PCLK_CCC_HIGH 0x14d4 +#define MVPP22_TAI_PCLK_CCC_LOW 0x14d8 +#define MVPP22_TAI_DTC_HIGH 0x14dc +#define MVPP22_TAI_DTC_LOW 0x14e0 +#define MVPP22_TAI_CCC_HIGH 0x14e4 +#define MVPP22_TAI_CCC_LOW 0x14e8 +#define MVPP22_TAI_ICICE 0x14f4 +#define MVPP22_TAI_ICICC_LOW 0x14f8 +#define MVPP22_TAI_TUC_MSB 0x14fc + +#define MVPP22_GMAC_BASE(port) (0x7000 + (port) * 0x1000 + 0xe00) + +#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff + +/* Descriptor ring Macros */ +#define MVPP2_QUEUE_NEXT_DESC(q, index) \ + (((index) < (q)->last_desc) ? ((index) + 1) : 0) + +/* XPCS registers. PPv2.2 only */ +#define MVPP22_MPCS_BASE(port) (0x7000 + (port) * 0x1000) +#define MVPP22_MPCS_CTRL 0x14 +#define MVPP22_MPCS_CTRL_FWD_ERR_CONN BIT(10) +#define MVPP22_MPCS_CLK_RESET 0x14c +#define MAC_CLK_RESET_SD_TX BIT(0) +#define MAC_CLK_RESET_SD_RX BIT(1) +#define MAC_CLK_RESET_MAC BIT(2) +#define MVPP22_MPCS_CLK_RESET_DIV_RATIO(n) ((n) << 4) +#define MVPP22_MPCS_CLK_RESET_DIV_SET BIT(11) + +/* XPCS registers. PPv2.2 only */ +#define MVPP22_XPCS_BASE(port) (0x7400 + (port) * 0x1000) +#define MVPP22_XPCS_CFG0 0x0 +#define MVPP22_XPCS_CFG0_RESET_DIS BIT(0) +#define MVPP22_XPCS_CFG0_PCS_MODE(n) ((n) << 3) +#define MVPP22_XPCS_CFG0_ACTIVE_LANE(n) ((n) << 5) + +/* PTP registers. 
PPv2.2 only */
+#define MVPP22_PTP_BASE(port) (0x7800 + (port * 0x1000))
+#define MVPP22_PTP_INT_CAUSE 0x00
+#define MVPP22_PTP_INT_CAUSE_QUEUE1 BIT(6)
+#define MVPP22_PTP_INT_CAUSE_QUEUE0 BIT(5)
+#define MVPP22_PTP_INT_MASK 0x04
+#define MVPP22_PTP_INT_MASK_QUEUE1 BIT(6)
+#define MVPP22_PTP_INT_MASK_QUEUE0 BIT(5)
+#define MVPP22_PTP_GCR 0x08
+#define MVPP22_PTP_GCR_RX_RESET BIT(13)
+#define MVPP22_PTP_GCR_TX_RESET BIT(1)
+#define MVPP22_PTP_GCR_TSU_ENABLE BIT(0)
+#define MVPP22_PTP_TX_Q0_R0 0x0c
+#define MVPP22_PTP_TX_Q0_R1 0x10
+#define MVPP22_PTP_TX_Q0_R2 0x14
+#define MVPP22_PTP_TX_Q1_R0 0x18
+#define MVPP22_PTP_TX_Q1_R1 0x1c
+#define MVPP22_PTP_TX_Q1_R2 0x20
+#define MVPP22_PTP_TPCR 0x24
+#define MVPP22_PTP_V1PCR 0x28
+#define MVPP22_PTP_V2PCR 0x2c
+#define MVPP22_PTP_Y1731PCR 0x30
+#define MVPP22_PTP_NTPTSPCR 0x34
+#define MVPP22_PTP_NTPRXPCR 0x38
+#define MVPP22_PTP_NTPTXPCR 0x3c
+#define MVPP22_PTP_WAMPPCR 0x40
+#define MVPP22_PTP_NAPCR 0x44
+#define MVPP22_PTP_FAPCR 0x48
+#define MVPP22_PTP_CAPCR 0x50
+#define MVPP22_PTP_ATAPCR 0x54
+#define MVPP22_PTP_ACTAPCR 0x58
+#define MVPP22_PTP_CATAPCR 0x5c
+#define MVPP22_PTP_CACTAPCR 0x60
+#define MVPP22_PTP_AITAPCR 0x64
+#define MVPP22_PTP_CAITAPCR 0x68
+#define MVPP22_PTP_CITAPCR 0x6c
+#define MVPP22_PTP_NTP_OFF_HIGH 0x70
+#define MVPP22_PTP_NTP_OFF_LOW 0x74
+#define MVPP22_PTP_TX_PIPE_STATUS_DELAY 0x78
+
+/* System controller registers. Accessed through a regmap. */
+#define GENCONF_SOFT_RESET1 0x1108
+#define GENCONF_SOFT_RESET1_GOP BIT(6)
+#define GENCONF_PORT_CTRL0 0x1110
+#define GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT BIT(1)
+#define GENCONF_PORT_CTRL0_RX_DATA_SAMPLE BIT(29)
+#define GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR BIT(31)
+#define GENCONF_PORT_CTRL1 0x1114
+#define GENCONF_PORT_CTRL1_EN(p) BIT(p)
+#define GENCONF_PORT_CTRL1_RESET(p) (BIT(p) << 28)
+#define GENCONF_CTRL0 0x1120
+#define GENCONF_CTRL0_PORT0_RGMII BIT(0)
+#define GENCONF_CTRL0_PORT1_RGMII_MII BIT(1)
+#define GENCONF_CTRL0_PORT1_RGMII BIT(2)
+
+/* Various constants */
+
+/* Coalescing */
+#define MVPP2_TXDONE_COAL_PKTS_THRESH 64
+#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
+#define MVPP2_TXDONE_COAL_USEC 1000
+#define MVPP2_RX_COAL_PKTS 32
+#define MVPP2_RX_COAL_USEC 64
+
+/* The two-byte Marvell header. It either contains a special value used
+ * by Marvell switches when a specific hardware mode is enabled (not
+ * supported by this driver) or is filled automatically with zeroes on
+ * the RX side. Since those two bytes sit at the front of the Ethernet
+ * header, they automatically leave the IP header aligned on a 4-byte
+ * boundary: the hardware skips those two bytes on its own.
+ */
+#define MVPP2_MH_SIZE 2
+#define MVPP2_ETH_TYPE_LEN 2
+#define MVPP2_PPPOE_HDR_SIZE 8
+#define MVPP2_VLAN_TAG_LEN 4
+#define MVPP2_VLAN_TAG_EDSA_LEN 8
+
+/* Lbtd 802.3 type */
+#define MVPP2_IP_LBDT_TYPE 0xfffa
+
+#define MVPP2_TX_CSUM_MAX_SIZE 9800
+
+/* Timeout constants */
+#define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000
+#define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000
+
+#define MVPP2_TX_MTU_MAX 0x7ffff
+
+/* Maximum number of T-CONTs of PON port */
+#define MVPP2_MAX_TCONT 16
+
+/* Maximum number of supported ports */
+#define MVPP2_MAX_PORTS 4
+
+/* Maximum number of TXQs used by a single port */
+#define MVPP2_MAX_TXQ 8
+
+/* MVPP2_MAX_TSO_SEGS is the maximum number of fragments to allow in the GSO
+ * skb. As we need a maximum of two descriptors per fragment (1 header, 1 data),
+ * multiply this value by two to count the maximum number of skb descs needed.
+ */ +#define MVPP2_MAX_TSO_SEGS 300 +#define MVPP2_MAX_SKB_DESCS (MVPP2_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS) + +/* Max number of RXQs per port */ +#define MVPP2_PORT_MAX_RXQ 32 + +/* Max number of Rx descriptors */ +#define MVPP2_MAX_RXD_MAX 1024 +#define MVPP2_MAX_RXD_DFLT 128 + +/* Max number of Tx descriptors */ +#define MVPP2_MAX_TXD_MAX 2048 +#define MVPP2_MAX_TXD_DFLT 1024 + +/* Amount of Tx descriptors that can be reserved at once by CPU */ +#define MVPP2_CPU_DESC_CHUNK 64 + +/* Max number of Tx descriptors in each aggregated queue */ +#define MVPP2_AGGR_TXQ_SIZE 256 + +/* Descriptor aligned size */ +#define MVPP2_DESC_ALIGNED_SIZE 32 + +/* Descriptor alignment mask */ +#define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1) + +/* RX FIFO constants */ +#define MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB 0x8000 +#define MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB 0x2000 +#define MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB 0x1000 +#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB 0x200 +#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB 0x80 +#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB 0x40 +#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80 + +/* TX FIFO constants */ +#define MVPP22_TX_FIFO_DATA_SIZE_10KB 0xa +#define MVPP22_TX_FIFO_DATA_SIZE_3KB 0x3 +#define MVPP2_TX_FIFO_THRESHOLD_MIN 256 +#define MVPP2_TX_FIFO_THRESHOLD_10KB \ + (MVPP22_TX_FIFO_DATA_SIZE_10KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN) +#define MVPP2_TX_FIFO_THRESHOLD_3KB \ + (MVPP22_TX_FIFO_DATA_SIZE_3KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN) + +/* RX buffer constants */ +#define MVPP2_SKB_SHINFO_SIZE \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + +#define MVPP2_RX_PKT_SIZE(mtu) \ + ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \ + ETH_HLEN + ETH_FCS_LEN, cache_line_size()) + +#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + MVPP2_SKB_HEADROOM) +#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE) +#define MVPP2_RX_MAX_PKT_SIZE(total_size) \ + ((total_size) - MVPP2_SKB_HEADROOM - MVPP2_SKB_SHINFO_SIZE) + +#define MVPP2_MAX_RX_BUF_SIZE (PAGE_SIZE - MVPP2_SKB_SHINFO_SIZE - MVPP2_SKB_HEADROOM) + +#define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8) +#define MVPP2_BIT_TO_WORD(bit) ((bit) / 32) +#define MVPP2_BIT_IN_WORD(bit) ((bit) % 32) + +#define MVPP2_N_PRS_FLOWS 52 +#define MVPP2_N_RFS_ENTRIES_PER_FLOW 4 + +/* There are 7 supported high-level flows */ +#define MVPP2_N_RFS_RULES (MVPP2_N_RFS_ENTRIES_PER_FLOW * 7) + +/* RSS constants */ +#define MVPP22_N_RSS_TABLES 8 +#define MVPP22_RSS_TABLE_ENTRIES 32 + +/* IPv6 max L3 address size */ +#define MVPP2_MAX_L3_ADDR_SIZE 16 + +/* Port flags */ +#define MVPP2_F_LOOPBACK BIT(0) +#define MVPP2_F_DT_COMPAT BIT(1) + +/* Marvell tag types */ +enum mvpp2_tag_type { + MVPP2_TAG_TYPE_NONE = 0, + MVPP2_TAG_TYPE_MH = 1, + MVPP2_TAG_TYPE_DSA = 2, + MVPP2_TAG_TYPE_EDSA = 3, + MVPP2_TAG_TYPE_VLAN = 4, + MVPP2_TAG_TYPE_LAST = 5 +}; + +/* L2 cast enum */ +enum mvpp2_prs_l2_cast { + MVPP2_PRS_L2_UNI_CAST, + MVPP2_PRS_L2_MULTI_CAST, +}; + +/* L3 cast enum */ +enum mvpp2_prs_l3_cast { + MVPP2_PRS_L3_UNI_CAST, + MVPP2_PRS_L3_MULTI_CAST, + MVPP2_PRS_L3_BROAD_CAST +}; + +/* PTP descriptor constants. The low bits of the descriptor are stored + * separately from the high bits. 
+ */ +#define MVPP22_PTP_DESC_MASK_LOW 0xfff + +/* PTPAction */ +enum mvpp22_ptp_action { + MVPP22_PTP_ACTION_NONE = 0, + MVPP22_PTP_ACTION_FORWARD = 1, + MVPP22_PTP_ACTION_CAPTURE = 3, + /* The following have not been verified */ + MVPP22_PTP_ACTION_ADDTIME = 4, + MVPP22_PTP_ACTION_ADDCORRECTEDTIME = 5, + MVPP22_PTP_ACTION_CAPTUREADDTIME = 6, + MVPP22_PTP_ACTION_CAPTUREADDCORRECTEDTIME = 7, + MVPP22_PTP_ACTION_ADDINGRESSTIME = 8, + MVPP22_PTP_ACTION_CAPTUREADDINGRESSTIME = 9, + MVPP22_PTP_ACTION_CAPTUREINGRESSTIME = 10, +}; + +/* PTPPacketFormat */ +enum mvpp22_ptp_packet_format { + MVPP22_PTP_PKT_FMT_PTPV2 = 0, + MVPP22_PTP_PKT_FMT_PTPV1 = 1, + MVPP22_PTP_PKT_FMT_Y1731 = 2, + MVPP22_PTP_PKT_FMT_NTPTS = 3, + MVPP22_PTP_PKT_FMT_NTPRX = 4, + MVPP22_PTP_PKT_FMT_NTPTX = 5, + MVPP22_PTP_PKT_FMT_TWAMP = 6, +}; + +#define MVPP22_PTP_ACTION(x) (((x) & 15) << 0) +#define MVPP22_PTP_PACKETFORMAT(x) (((x) & 7) << 4) +#define MVPP22_PTP_MACTIMESTAMPINGEN BIT(11) +#define MVPP22_PTP_TIMESTAMPENTRYID(x) (((x) & 31) << 12) +#define MVPP22_PTP_TIMESTAMPQUEUESELECT BIT(18) + +/* BM constants */ +#define MVPP2_BM_JUMBO_BUF_NUM 512 +#define MVPP2_BM_LONG_BUF_NUM 1024 +#define MVPP2_BM_SHORT_BUF_NUM 2048 +#define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4) +#define MVPP2_BM_POOL_PTR_ALIGN 128 +#define MVPP2_BM_MAX_POOLS 8 + +/* BM cookie (32 bits) definition */ +#define MVPP2_BM_COOKIE_POOL_OFFS 8 +#define MVPP2_BM_COOKIE_CPU_OFFS 24 + +#define MVPP2_BM_SHORT_FRAME_SIZE 736 /* frame size 128 */ +#define MVPP2_BM_LONG_FRAME_SIZE 2240 /* frame size 1664 */ +#define MVPP2_BM_JUMBO_FRAME_SIZE 10432 /* frame size 9856 */ +/* BM short pool packet size + * These value assure that for SWF the total number + * of bytes allocated for each buffer will be 512 + */ +#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_SHORT_FRAME_SIZE) +#define MVPP2_BM_LONG_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_LONG_FRAME_SIZE) +#define MVPP2_BM_JUMBO_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_JUMBO_FRAME_SIZE) + +#define MVPP21_ADDR_SPACE_SZ 0 +#define MVPP22_ADDR_SPACE_SZ SZ_64K + +#define MVPP2_MAX_THREADS 9 +#define MVPP2_MAX_QVECS MVPP2_MAX_THREADS + +/* GMAC MIB Counters register definitions */ +#define MVPP21_MIB_COUNTERS_OFFSET 0x1000 +#define MVPP21_MIB_COUNTERS_PORT_SZ 0x400 +#define MVPP22_MIB_COUNTERS_OFFSET 0x0 +#define MVPP22_MIB_COUNTERS_PORT_SZ 0x100 + +#define MVPP2_MIB_GOOD_OCTETS_RCVD 0x0 +#define MVPP2_MIB_BAD_OCTETS_RCVD 0x8 +#define MVPP2_MIB_CRC_ERRORS_SENT 0xc +#define MVPP2_MIB_UNICAST_FRAMES_RCVD 0x10 +#define MVPP2_MIB_BROADCAST_FRAMES_RCVD 0x18 +#define MVPP2_MIB_MULTICAST_FRAMES_RCVD 0x1c +#define MVPP2_MIB_FRAMES_64_OCTETS 0x20 +#define MVPP2_MIB_FRAMES_65_TO_127_OCTETS 0x24 +#define MVPP2_MIB_FRAMES_128_TO_255_OCTETS 0x28 +#define MVPP2_MIB_FRAMES_256_TO_511_OCTETS 0x2c +#define MVPP2_MIB_FRAMES_512_TO_1023_OCTETS 0x30 +#define MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS 0x34 +#define MVPP2_MIB_GOOD_OCTETS_SENT 0x38 +#define MVPP2_MIB_UNICAST_FRAMES_SENT 0x40 +#define MVPP2_MIB_MULTICAST_FRAMES_SENT 0x48 +#define MVPP2_MIB_BROADCAST_FRAMES_SENT 0x4c +#define MVPP2_MIB_FC_SENT 0x54 +#define MVPP2_MIB_FC_RCVD 0x58 +#define MVPP2_MIB_RX_FIFO_OVERRUN 0x5c +#define MVPP2_MIB_UNDERSIZE_RCVD 0x60 +#define MVPP2_MIB_FRAGMENTS_RCVD 0x64 +#define MVPP2_MIB_OVERSIZE_RCVD 0x68 +#define MVPP2_MIB_JABBER_RCVD 0x6c +#define MVPP2_MIB_MAC_RCV_ERROR 0x70 +#define MVPP2_MIB_BAD_CRC_EVENT 0x74 +#define MVPP2_MIB_COLLISION 0x78 +#define MVPP2_MIB_LATE_COLLISION 0x7c + +#define MVPP2_MIB_COUNTERS_STATS_DELAY 
(1 * HZ) + +#define MVPP2_DESC_DMA_MASK DMA_BIT_MASK(40) + +/* Buffer header info bits */ +#define MVPP2_B_HDR_INFO_MC_ID_MASK 0xfff +#define MVPP2_B_HDR_INFO_MC_ID(info) ((info) & MVPP2_B_HDR_INFO_MC_ID_MASK) +#define MVPP2_B_HDR_INFO_LAST_OFFS 12 +#define MVPP2_B_HDR_INFO_LAST_MASK BIT(12) +#define MVPP2_B_HDR_INFO_IS_LAST(info) \ + (((info) & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS) + +struct mvpp2_tai; + +/* Definitions */ +struct mvpp2_dbgfs_entries; + +struct mvpp2_rss_table { + u32 indir[MVPP22_RSS_TABLE_ENTRIES]; +}; + +struct mvpp2_buff_hdr { + __le32 next_phys_addr; + __le32 next_dma_addr; + __le16 byte_count; + __le16 info; + __le16 reserved1; /* bm_qset (for future use, BM) */ + u8 next_phys_addr_high; + u8 next_dma_addr_high; + __le16 reserved2; + __le16 reserved3; + __le16 reserved4; + __le16 reserved5; +}; + +/* Shared Packet Processor resources */ +struct mvpp2 { + /* Shared registers' base addresses */ + void __iomem *lms_base; + void __iomem *iface_base; + + /* On PPv2.2, each "software thread" can access the base + * register through a separate address space, each 64 KB apart + * from each other. Typically, such address spaces will be + * used per CPU. + */ + void __iomem *swth_base[MVPP2_MAX_THREADS]; + + /* On PPv2.2, some port control registers are located into the system + * controller space. These registers are accessible through a regmap. + */ + struct regmap *sysctrl_base; + + /* Common clocks */ + struct clk *pp_clk; + struct clk *gop_clk; + struct clk *mg_clk; + struct clk *mg_core_clk; + struct clk *axi_clk; + + /* List of pointers to port structures */ + int port_count; + struct mvpp2_port *port_list[MVPP2_MAX_PORTS]; + struct mvpp2_tai *tai; + + /* Number of Tx threads used */ + unsigned int nthreads; + /* Map of threads needing locking */ + unsigned long lock_map; + + /* Aggregated TXQs */ + struct mvpp2_tx_queue *aggr_txqs; + + /* Are we using page_pool with per-cpu pools? 
*/
+ int percpu_pools;
+
+ /* BM pools */
+ struct mvpp2_bm_pool *bm_pools;
+
+ /* PRS shadow table */
+ struct mvpp2_prs_shadow *prs_shadow;
+ /* PRS auxiliary table for double vlan entries control */
+ bool *prs_double_vlans;
+
+ /* Tclk value */
+ u32 tclk;
+
+ /* HW version */
+ enum { MVPP21, MVPP22 } hw_version;
+
+ /* Maximum number of RXQs per port */
+ unsigned int max_port_rxqs;
+
+ /* Workqueue to gather hardware statistics */
+ char queue_name[30];
+ struct workqueue_struct *stats_queue;
+
+ /* Debugfs root entry */
+ struct dentry *dbgfs_dir;
+
+ /* Debugfs entries private data */
+ struct mvpp2_dbgfs_entries *dbgfs_entries;
+
+ /* RSS Indirection tables */
+ struct mvpp2_rss_table *rss_tables[MVPP22_N_RSS_TABLES];
+
+ /* page_pool allocator */
+ struct page_pool *page_pool[MVPP2_PORT_MAX_RXQ];
+};
+
+struct mvpp2_pcpu_stats {
+ struct u64_stats_sync syncp;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ /* XDP */
+ u64 xdp_redirect;
+ u64 xdp_pass;
+ u64 xdp_drop;
+ u64 xdp_xmit;
+ u64 xdp_xmit_err;
+ u64 xdp_tx;
+ u64 xdp_tx_err;
+};
+
+/* Per-CPU port control */
+struct mvpp2_port_pcpu {
+ struct hrtimer tx_done_timer;
+ struct net_device *dev;
+ bool timer_scheduled;
+};
+
+struct mvpp2_queue_vector {
+ int irq;
+ struct napi_struct napi;
+ enum { MVPP2_QUEUE_VECTOR_SHARED, MVPP2_QUEUE_VECTOR_PRIVATE } type;
+ int sw_thread_id;
+ u16 sw_thread_mask;
+ int first_rxq;
+ int nrxqs;
+ u32 pending_cause_rx;
+ struct mvpp2_port *port;
+ struct cpumask *mask;
+};
+
+/* Internal representation of a Flow Steering rule */
+struct mvpp2_rfs_rule {
+ /* Rule location inside the flow */
+ int loc;
+
+ /* Flow type, such as TCP_V4_FLOW, IP6_FLOW, etc. */
+ int flow_type;
+
+ /* Index of the C2 TCAM entry handling this rule */
+ int c2_index;
+
+ /* Header fields that need to be extracted to match this flow */
+ u16 hek_fields;
+
+ /* CLS engine: only c2 is supported for now. */
+ u8 engine;
+
+ /* TCAM key and mask for C2-based steering. These fields should be
+ * encapsulated in a union should we add more engines.
+ */
+ u64 c2_tcam;
+ u64 c2_tcam_mask;
+
+ struct flow_rule *flow;
+};
+
+struct mvpp2_ethtool_fs {
+ struct mvpp2_rfs_rule rule;
+ struct ethtool_rxnfc rxnfc;
+};
+
+struct mvpp2_hwtstamp_queue {
+ struct sk_buff *skb[32];
+ u8 next;
+};
+
+struct mvpp2_port {
+ u8 id;
+
+ /* Index of the port from the "group of ports" complex point
+ * of view. This is specific to PPv2.2.
+ */
+ int gop_id;
+
+ int port_irq;
+
+ struct mvpp2 *priv;
+
+ /* Firmware node associated to the port */
+ struct fwnode_handle *fwnode;
+
+ /* Is a PHY always connected to the port */
+ bool has_phy;
+
+ /* Per-port registers' base address */
+ void __iomem *base;
+ void __iomem *stats_base;
+
+ struct mvpp2_rx_queue **rxqs;
+ unsigned int nrxqs;
+ struct mvpp2_tx_queue **txqs;
+ unsigned int ntxqs;
+ struct net_device *dev;
+
+ struct bpf_prog *xdp_prog;
+
+ int pkt_size;
+
+ /* Per-CPU port control */
+ struct mvpp2_port_pcpu __percpu *pcpu;
+
+ /* Protect the BM refills and the Tx paths when a thread is used on more
+ * than a single CPU.
+ */ + spinlock_t bm_lock[MVPP2_MAX_THREADS]; + spinlock_t tx_lock[MVPP2_MAX_THREADS]; + + /* Flags */ + unsigned long flags; + + u16 tx_ring_size; + u16 rx_ring_size; + struct mvpp2_pcpu_stats __percpu *stats; + u64 *ethtool_stats; + + unsigned long state; + + /* Per-port work and its lock to gather hardware statistics */ + struct mutex gather_stats_lock; + struct delayed_work stats_work; + + struct device_node *of_node; + + phy_interface_t phy_interface; + struct phylink *phylink; + struct phylink_config phylink_config; + struct phylink_pcs phylink_pcs; + struct phy *comphy; + + struct mvpp2_bm_pool *pool_long; + struct mvpp2_bm_pool *pool_short; + + /* Index of first port's physical RXQ */ + u8 first_rxq; + + struct mvpp2_queue_vector qvecs[MVPP2_MAX_QVECS]; + unsigned int nqvecs; + bool has_tx_irqs; + + u32 tx_time_coal; + + /* List of steering rules active on that port */ + struct mvpp2_ethtool_fs *rfs_rules[MVPP2_N_RFS_ENTRIES_PER_FLOW]; + int n_rfs_rules; + + /* Each port has its own view of the rss contexts, so that it can number + * them from 0 + */ + int rss_ctx[MVPP22_N_RSS_TABLES]; + + bool hwtstamp; + bool rx_hwtstamp; + enum hwtstamp_tx_types tx_hwtstamp_type; + struct mvpp2_hwtstamp_queue tx_hwtstamp_queue[2]; +}; + +/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the + * layout of the transmit and reception DMA descriptors, and their + * layout is therefore defined by the hardware design + */ + +#define MVPP2_TXD_L3_OFF_SHIFT 0 +#define MVPP2_TXD_IP_HLEN_SHIFT 8 +#define MVPP2_TXD_L4_CSUM_FRAG BIT(13) +#define MVPP2_TXD_L4_CSUM_NOT BIT(14) +#define MVPP2_TXD_IP_CSUM_DISABLE BIT(15) +#define MVPP2_TXD_PADDING_DISABLE BIT(23) +#define MVPP2_TXD_L4_UDP BIT(24) +#define MVPP2_TXD_L3_IP6 BIT(26) +#define MVPP2_TXD_L_DESC BIT(28) +#define MVPP2_TXD_F_DESC BIT(29) + +#define MVPP2_RXD_ERR_SUMMARY BIT(15) +#define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14)) +#define MVPP2_RXD_ERR_CRC 0x0 +#define MVPP2_RXD_ERR_OVERRUN BIT(13) +#define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14)) +#define MVPP2_RXD_BM_POOL_ID_OFFS 16 +#define MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18)) +#define MVPP2_RXD_HWF_SYNC BIT(21) +#define MVPP2_RXD_L4_CSUM_OK BIT(22) +#define MVPP2_RXD_IP4_HEADER_ERR BIT(24) +#define MVPP2_RXD_L4_TCP BIT(25) +#define MVPP2_RXD_L4_UDP BIT(26) +#define MVPP2_RXD_L3_IP4 BIT(28) +#define MVPP2_RXD_L3_IP6 BIT(30) +#define MVPP2_RXD_BUF_HDR BIT(31) + +/* HW TX descriptor for PPv2.1 */ +struct mvpp21_tx_desc { + __le32 command; /* Options used by HW for packet transmitting.*/ + u8 packet_offset; /* the offset from the buffer beginning */ + u8 phys_txq; /* destination queue ID */ + __le16 data_size; /* data size of transmitted packet in bytes */ + __le32 buf_dma_addr; /* physical addr of transmitted buffer */ + __le32 buf_cookie; /* cookie for access to TX buffer in tx path */ + __le32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */ + __le32 reserved2; /* reserved (for future use) */ +}; + +/* HW RX descriptor for PPv2.1 */ +struct mvpp21_rx_desc { + __le32 status; /* info about received packet */ + __le16 reserved1; /* parser_info (for future use, PnC) */ + __le16 data_size; /* size of received packet in bytes */ + __le32 buf_dma_addr; /* physical address of the buffer */ + __le32 buf_cookie; /* cookie for access to RX buffer in rx path */ + __le16 reserved2; /* gem_port_id (for future use, PON) */ + __le16 reserved3; /* csum_l4 (for future use, PnC) */ + u8 reserved4; /* bm_qset (for future use, BM) */ + u8 reserved5; + __le16 reserved6; /* 
classify_info (for future use, PnC) */ + __le32 reserved7; /* flow_id (for future use, PnC) */ + __le32 reserved8; +}; + +/* HW TX descriptor for PPv2.2 */ +struct mvpp22_tx_desc { + __le32 command; + u8 packet_offset; + u8 phys_txq; + __le16 data_size; + __le32 ptp_descriptor; + __le32 reserved2; + __le64 buf_dma_addr_ptp; + __le64 buf_cookie_misc; +}; + +/* HW RX descriptor for PPv2.2 */ +struct mvpp22_rx_desc { + __le32 status; + __le16 reserved1; + __le16 data_size; + __le32 reserved2; + __le32 timestamp; + __le64 buf_dma_addr_key_hash; + __le64 buf_cookie_misc; +}; + +/* Opaque type used by the driver to manipulate the HW TX and RX + * descriptors + */ +struct mvpp2_tx_desc { + union { + struct mvpp21_tx_desc pp21; + struct mvpp22_tx_desc pp22; + }; +}; + +struct mvpp2_rx_desc { + union { + struct mvpp21_rx_desc pp21; + struct mvpp22_rx_desc pp22; + }; +}; + +enum mvpp2_tx_buf_type { + MVPP2_TYPE_SKB, + MVPP2_TYPE_XDP_TX, + MVPP2_TYPE_XDP_NDO, +}; + +struct mvpp2_txq_pcpu_buf { + enum mvpp2_tx_buf_type type; + + /* Transmitted SKB */ + union { + struct xdp_frame *xdpf; + struct sk_buff *skb; + }; + + /* Physical address of transmitted buffer */ + dma_addr_t dma; + + /* Size transmitted */ + size_t size; +}; + +/* Per-CPU Tx queue control */ +struct mvpp2_txq_pcpu { + unsigned int thread; + + /* Number of Tx DMA descriptors in the descriptor ring */ + int size; + + /* Number of currently used Tx DMA descriptor in the + * descriptor ring + */ + int count; + + int wake_threshold; + int stop_threshold; + + /* Number of Tx DMA descriptors reserved for each CPU */ + int reserved_num; + + /* Infos about transmitted buffers */ + struct mvpp2_txq_pcpu_buf *buffs; + + /* Index of last TX DMA descriptor that was inserted */ + int txq_put_index; + + /* Index of the TX DMA descriptor to be cleaned up */ + int txq_get_index; + + /* DMA buffer for TSO headers */ + char *tso_headers; + dma_addr_t tso_headers_dma; +}; + +struct mvpp2_tx_queue { + /* Physical number of this Tx queue */ + u8 id; + + /* Logical number of this Tx queue */ + u8 log_id; + + /* Number of Tx DMA descriptors in the descriptor ring */ + int size; + + /* Number of currently used Tx DMA descriptor in the descriptor ring */ + int count; + + /* Per-CPU control of physical Tx queues */ + struct mvpp2_txq_pcpu __percpu *pcpu; + + u32 done_pkts_coal; + + /* Virtual address of thex Tx DMA descriptors array */ + struct mvpp2_tx_desc *descs; + + /* DMA address of the Tx DMA descriptors array */ + dma_addr_t descs_dma; + + /* Index of the last Tx DMA descriptor */ + int last_desc; + + /* Index of the next Tx DMA descriptor to process */ + int next_desc_to_proc; +}; + +struct mvpp2_rx_queue { + /* RX queue number, in the range 0-31 for physical RXQs */ + u8 id; + + /* Num of rx descriptors in the rx descriptor ring */ + int size; + + u32 pkts_coal; + u32 time_coal; + + /* Virtual address of the RX DMA descriptors array */ + struct mvpp2_rx_desc *descs; + + /* DMA address of the RX DMA descriptors array */ + dma_addr_t descs_dma; + + /* Index of the last RX DMA descriptor */ + int last_desc; + + /* Index of the next RX DMA descriptor to process */ + int next_desc_to_proc; + + /* ID of port to which physical RXQ is mapped */ + int port; + + /* Port's logic RXQ number to which physical RXQ is mapped */ + int logic_rxq; + + /* XDP memory accounting */ + struct xdp_rxq_info xdp_rxq_short; + struct xdp_rxq_info xdp_rxq_long; +}; + +struct mvpp2_bm_pool { + /* Pool number in the range 0-7 */ + int id; + + /* Buffer Pointers Pool External (BPPE) 
size */ + int size; + /* BPPE size in bytes */ + int size_bytes; + /* Number of buffers for this pool */ + int buf_num; + /* Pool buffer size */ + int buf_size; + /* Packet size */ + int pkt_size; + int frag_size; + + /* BPPE virtual base address */ + u32 *virt_addr; + /* BPPE DMA base address */ + dma_addr_t dma_addr; + + /* Ports using BM pool */ + u32 port_map; +}; + +#define IS_TSO_HEADER(txq_pcpu, addr) \ + ((addr) >= (txq_pcpu)->tso_headers_dma && \ + (addr) < (txq_pcpu)->tso_headers_dma + \ + (txq_pcpu)->size * TSO_HEADER_SIZE) + +#define MVPP2_DRIVER_NAME "mvpp2" +#define MVPP2_DRIVER_VERSION "1.0" + +void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data); +u32 mvpp2_read(struct mvpp2 *priv, u32 offset); + +void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name); + +void mvpp2_dbgfs_cleanup(struct mvpp2 *priv); +void mvpp2_dbgfs_exit(void); + +#ifdef CONFIG_MVPP2_PTP +int mvpp22_tai_probe(struct device *dev, struct mvpp2 *priv); +void mvpp22_tai_tstamp(struct mvpp2_tai *tai, u32 tstamp, + struct skb_shared_hwtstamps *hwtstamp); +void mvpp22_tai_start(struct mvpp2_tai *tai); +void mvpp22_tai_stop(struct mvpp2_tai *tai); +int mvpp22_tai_ptp_clock_index(struct mvpp2_tai *tai); +#else +static inline int mvpp22_tai_probe(struct device *dev, struct mvpp2 *priv) +{ + return 0; +} +static inline void mvpp22_tai_tstamp(struct mvpp2_tai *tai, u32 tstamp, + struct skb_shared_hwtstamps *hwtstamp) +{ +} +static inline void mvpp22_tai_start(struct mvpp2_tai *tai) +{ +} +static inline void mvpp22_tai_stop(struct mvpp2_tai *tai) +{ +} +static inline int mvpp22_tai_ptp_clock_index(struct mvpp2_tai *tai) +{ + return -1; +} +#endif + +static inline bool mvpp22_rx_hwtstamping(struct mvpp2_port *port) +{ + return IS_ENABLED(CONFIG_MVPP2_PTP) && port->rx_hwtstamp; +} +#endif diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c new file mode 100644 index 000000000..41d935d1a --- /dev/null +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c @@ -0,0 +1,1742 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * RSS and Classifier helpers for Marvell PPv2 Network Controller + * + * Copyright (C) 2014 Marvell + * + * Marcin Wojtas <mw@semihalf.com> + */ + +#include "mvpp2.h" +#include "mvpp2_cls.h" +#include "mvpp2_prs.h" + +#define MVPP2_DEF_FLOW(_type, _id, _opts, _ri, _ri_mask) \ +{ \ + .flow_type = _type, \ + .flow_id = _id, \ + .supported_hash_opts = _opts, \ + .prs_ri = { \ + .ri = _ri, \ + .ri_mask = _ri_mask \ + } \ +} + +static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = { + /* TCP over IPv4 flows, Not fragmented, no vlan tag */ + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG, + MVPP22_CLS_HEK_IP4_5T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 | + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG, + MVPP22_CLS_HEK_IP4_5T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT | + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG, + MVPP22_CLS_HEK_IP4_5T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER | + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + /* TCP over IPv4 flows, Not fragmented, with vlan tag */ + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG, + MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED, + MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, 
MVPP2_FL_IP4_TCP_NF_TAG, + MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED, + MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG, + MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED, + MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK), + + /* TCP over IPv4 flows, fragmented, no vlan tag */ + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG, + MVPP22_CLS_HEK_IP4_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 | + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG, + MVPP22_CLS_HEK_IP4_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT | + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG, + MVPP22_CLS_HEK_IP4_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER | + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + /* TCP over IPv4 flows, fragmented, with vlan tag */ + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG, + MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED, + MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG, + MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED, + MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG, + MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED, + MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK), + + /* UDP over IPv4 flows, Not fragmented, no vlan tag */ + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG, + MVPP22_CLS_HEK_IP4_5T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 | + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG, + MVPP22_CLS_HEK_IP4_5T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT | + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG, + MVPP22_CLS_HEK_IP4_5T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER | + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + /* UDP over IPv4 flows, Not fragmented, with vlan tag */ + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG, + MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED, + MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG, + MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED, + MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG, + MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED, + MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK), + + /* UDP over IPv4 flows, fragmented, no vlan tag */ + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG, + MVPP22_CLS_HEK_IP4_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 | + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG, + MVPP22_CLS_HEK_IP4_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT | + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG, + MVPP22_CLS_HEK_IP4_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER | + 
MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + /* UDP over IPv4 flows, fragmented, with vlan tag */ + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG, + MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED, + MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG, + MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED, + MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG, + MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED, + MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK), + + /* TCP over IPv6 flows, not fragmented, no vlan tag */ + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_UNTAG, + MVPP22_CLS_HEK_IP6_5T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 | + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_UNTAG, + MVPP22_CLS_HEK_IP6_5T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT | + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + /* TCP over IPv6 flows, not fragmented, with vlan tag */ + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_TAG, + MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED, + MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_TAG, + MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED, + MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK), + + /* TCP over IPv6 flows, fragmented, no vlan tag */ + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_UNTAG, + MVPP22_CLS_HEK_IP6_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 | + MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_UNTAG, + MVPP22_CLS_HEK_IP6_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT | + MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + /* TCP over IPv6 flows, fragmented, with vlan tag */ + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_TAG, + MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED, + MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE | + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_TAG, + MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED, + MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE | + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK), + + /* UDP over IPv6 flows, not fragmented, no vlan tag */ + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_UNTAG, + MVPP22_CLS_HEK_IP6_5T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 | + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_UNTAG, + MVPP22_CLS_HEK_IP6_5T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT | + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + /* UDP over IPv6 flows, not fragmented, with vlan tag */ + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_TAG, + MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED, + MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_TAG, + MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED, + MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK), + + /* UDP over IPv6 flows, fragmented, no vlan tag */ + 
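Each MVPP2_DEF_FLOW() initializer in this table expands according to the macro defined at the top of this file. As a worked illustration, the first UDP-over-IPv6 fragmented/untagged entry that follows is equivalent to the struct below (the name example_entry is only for illustration, and struct mvpp2_cls_flow itself is declared in mvpp2_cls.h, which is outside this excerpt):

        /* Expansion of one MVPP2_DEF_FLOW() entry, shown for reference. */
        static const struct mvpp2_cls_flow example_entry = {
                .flow_type           = MVPP22_FLOW_UDP6,
                .flow_id             = MVPP2_FL_IP6_UDP_FRAG_UNTAG,
                .supported_hash_opts = MVPP22_CLS_HEK_IP6_2T,
                .prs_ri = {
                        .ri      = MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
                                   MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
                        .ri_mask = MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK,
                },
        };

In other words, one ethtool-level flow type (here MVPP22_FLOW_UDP6) is backed by several such entries that differ only in the parser result bits they match.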
MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_UNTAG, + MVPP22_CLS_HEK_IP6_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 | + MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_UNTAG, + MVPP22_CLS_HEK_IP6_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT | + MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + /* UDP over IPv6 flows, fragmented, with vlan tag */ + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_TAG, + MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED, + MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE | + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_TAG, + MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED, + MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE | + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK), + + /* IPv4 flows, no vlan tag */ + MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG, + MVPP22_CLS_HEK_IP4_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4, + MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK), + MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG, + MVPP22_CLS_HEK_IP4_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT, + MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK), + MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG, + MVPP22_CLS_HEK_IP4_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER, + MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK), + + /* IPv4 flows, with vlan tag */ + MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG, + MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED, + MVPP2_PRS_RI_L3_IP4, + MVPP2_PRS_RI_L3_PROTO_MASK), + MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG, + MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED, + MVPP2_PRS_RI_L3_IP4_OPT, + MVPP2_PRS_RI_L3_PROTO_MASK), + MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG, + MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED, + MVPP2_PRS_RI_L3_IP4_OTHER, + MVPP2_PRS_RI_L3_PROTO_MASK), + + /* IPv6 flows, no vlan tag */ + MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_UNTAG, + MVPP22_CLS_HEK_IP6_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6, + MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK), + MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_UNTAG, + MVPP22_CLS_HEK_IP6_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6, + MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK), + + /* IPv6 flows, with vlan tag */ + MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_TAG, + MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED, + MVPP2_PRS_RI_L3_IP6, + MVPP2_PRS_RI_L3_PROTO_MASK), + MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_TAG, + MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED, + MVPP2_PRS_RI_L3_IP6, + MVPP2_PRS_RI_L3_PROTO_MASK), + + /* Non IP flow, no vlan tag */ + MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_UNTAG, + 0, + MVPP2_PRS_RI_VLAN_NONE, + MVPP2_PRS_RI_VLAN_MASK), + /* Non IP flow, with vlan tag */ + MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_TAG, + MVPP22_CLS_HEK_OPT_VLAN, + 0, 0), +}; + +u32 mvpp2_cls_flow_hits(struct mvpp2 *priv, int index) +{ + mvpp2_write(priv, MVPP2_CTRS_IDX, index); + + return mvpp2_read(priv, MVPP2_CLS_FLOW_TBL_HIT_CTR); +} + +void mvpp2_cls_flow_read(struct mvpp2 *priv, int index, + struct mvpp2_cls_flow_entry *fe) +{ + fe->index = index; + mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, index); + fe->data[0] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL0_REG); + fe->data[1] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL1_REG); + 
fe->data[2] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL2_REG); +} + +/* Update classification flow table registers */ +static void mvpp2_cls_flow_write(struct mvpp2 *priv, + struct mvpp2_cls_flow_entry *fe) +{ + mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index); + mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]); + mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]); + mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]); +} + +u32 mvpp2_cls_lookup_hits(struct mvpp2 *priv, int index) +{ + mvpp2_write(priv, MVPP2_CTRS_IDX, index); + + return mvpp2_read(priv, MVPP2_CLS_DEC_TBL_HIT_CTR); +} + +void mvpp2_cls_lookup_read(struct mvpp2 *priv, int lkpid, int way, + struct mvpp2_cls_lookup_entry *le) +{ + u32 val; + + val = (way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | lkpid; + mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val); + le->way = way; + le->lkpid = lkpid; + le->data = mvpp2_read(priv, MVPP2_CLS_LKP_TBL_REG); +} + +/* Update classification lookup table register */ +static void mvpp2_cls_lookup_write(struct mvpp2 *priv, + struct mvpp2_cls_lookup_entry *le) +{ + u32 val; + + val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid; + mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val); + mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data); +} + +/* Operations on flow entry */ +static int mvpp2_cls_flow_hek_num_get(struct mvpp2_cls_flow_entry *fe) +{ + return fe->data[1] & MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK; +} + +static void mvpp2_cls_flow_hek_num_set(struct mvpp2_cls_flow_entry *fe, + int num_of_fields) +{ + fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK; + fe->data[1] |= MVPP2_CLS_FLOW_TBL1_N_FIELDS(num_of_fields); +} + +static int mvpp2_cls_flow_hek_get(struct mvpp2_cls_flow_entry *fe, + int field_index) +{ + return (fe->data[2] >> MVPP2_CLS_FLOW_TBL2_FLD_OFFS(field_index)) & + MVPP2_CLS_FLOW_TBL2_FLD_MASK; +} + +static void mvpp2_cls_flow_hek_set(struct mvpp2_cls_flow_entry *fe, + int field_index, int field_id) +{ + fe->data[2] &= ~MVPP2_CLS_FLOW_TBL2_FLD(field_index, + MVPP2_CLS_FLOW_TBL2_FLD_MASK); + fe->data[2] |= MVPP2_CLS_FLOW_TBL2_FLD(field_index, field_id); +} + +static void mvpp2_cls_flow_eng_set(struct mvpp2_cls_flow_entry *fe, + int engine) +{ + fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_ENG(MVPP2_CLS_FLOW_TBL0_ENG_MASK); + fe->data[0] |= MVPP2_CLS_FLOW_TBL0_ENG(engine); +} + +int mvpp2_cls_flow_eng_get(struct mvpp2_cls_flow_entry *fe) +{ + return (fe->data[0] >> MVPP2_CLS_FLOW_TBL0_OFFS) & + MVPP2_CLS_FLOW_TBL0_ENG_MASK; +} + +static void mvpp2_cls_flow_port_id_sel(struct mvpp2_cls_flow_entry *fe, + bool from_packet) +{ + if (from_packet) + fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL; + else + fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL; +} + +static void mvpp2_cls_flow_last_set(struct mvpp2_cls_flow_entry *fe, + bool is_last) +{ + fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_LAST; + fe->data[0] |= !!is_last; +} + +static void mvpp2_cls_flow_pri_set(struct mvpp2_cls_flow_entry *fe, int prio) +{ + fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_PRIO(MVPP2_CLS_FLOW_TBL1_PRIO_MASK); + fe->data[1] |= MVPP2_CLS_FLOW_TBL1_PRIO(prio); +} + +static void mvpp2_cls_flow_port_add(struct mvpp2_cls_flow_entry *fe, + u32 port) +{ + fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID(port); +} + +static void mvpp2_cls_flow_port_remove(struct mvpp2_cls_flow_entry *fe, + u32 port) +{ + fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_PORT_ID(port); +} + +static void mvpp2_cls_flow_lu_type_set(struct mvpp2_cls_flow_entry *fe, + u8 lu_type) +{ + fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK); + fe->data[1] |= 
MVPP2_CLS_FLOW_TBL1_LU_TYPE(lu_type); +} + +/* Initialize the parser entry for the given flow */ +static void mvpp2_cls_flow_prs_init(struct mvpp2 *priv, + const struct mvpp2_cls_flow *flow) +{ + mvpp2_prs_add_flow(priv, flow->flow_id, flow->prs_ri.ri, + flow->prs_ri.ri_mask); +} + +/* Initialize the Lookup Id table entry for the given flow */ +static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv, + const struct mvpp2_cls_flow *flow) +{ + struct mvpp2_cls_lookup_entry le; + + le.way = 0; + le.lkpid = flow->flow_id; + + /* The default RxQ for this port is set in the C2 lookup */ + le.data = 0; + + /* We point on the first lookup in the sequence for the flow, that is + * the C2 lookup. + */ + le.data |= MVPP2_CLS_LKP_FLOW_PTR(MVPP2_CLS_FLT_FIRST(flow->flow_id)); + + /* CLS is always enabled, RSS is enabled/disabled in C2 lookup */ + le.data |= MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK; + + mvpp2_cls_lookup_write(priv, &le); +} + +static void mvpp2_cls_c2_write(struct mvpp2 *priv, + struct mvpp2_cls_c2_entry *c2) +{ + u32 val; + mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2->index); + + val = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_INV); + if (c2->valid) + val &= ~MVPP22_CLS_C2_TCAM_INV_BIT; + else + val |= MVPP22_CLS_C2_TCAM_INV_BIT; + mvpp2_write(priv, MVPP22_CLS_C2_TCAM_INV, val); + + mvpp2_write(priv, MVPP22_CLS_C2_ACT, c2->act); + + mvpp2_write(priv, MVPP22_CLS_C2_ATTR0, c2->attr[0]); + mvpp2_write(priv, MVPP22_CLS_C2_ATTR1, c2->attr[1]); + mvpp2_write(priv, MVPP22_CLS_C2_ATTR2, c2->attr[2]); + mvpp2_write(priv, MVPP22_CLS_C2_ATTR3, c2->attr[3]); + + mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA0, c2->tcam[0]); + mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA1, c2->tcam[1]); + mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA2, c2->tcam[2]); + mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA3, c2->tcam[3]); + /* Writing TCAM_DATA4 flushes writes to TCAM_DATA0-4 and INV to HW */ + mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA4, c2->tcam[4]); +} + +void mvpp2_cls_c2_read(struct mvpp2 *priv, int index, + struct mvpp2_cls_c2_entry *c2) +{ + u32 val; + mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, index); + + c2->index = index; + + c2->tcam[0] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA0); + c2->tcam[1] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA1); + c2->tcam[2] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA2); + c2->tcam[3] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA3); + c2->tcam[4] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA4); + + c2->act = mvpp2_read(priv, MVPP22_CLS_C2_ACT); + + c2->attr[0] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR0); + c2->attr[1] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR1); + c2->attr[2] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR2); + c2->attr[3] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR3); + + val = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_INV); + c2->valid = !(val & MVPP22_CLS_C2_TCAM_INV_BIT); +} + +static int mvpp2_cls_ethtool_flow_to_type(int flow_type) +{ + switch (flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) { + case ETHER_FLOW: + return MVPP22_FLOW_ETHERNET; + case TCP_V4_FLOW: + return MVPP22_FLOW_TCP4; + case TCP_V6_FLOW: + return MVPP22_FLOW_TCP6; + case UDP_V4_FLOW: + return MVPP22_FLOW_UDP4; + case UDP_V6_FLOW: + return MVPP22_FLOW_UDP6; + case IPV4_FLOW: + return MVPP22_FLOW_IP4; + case IPV6_FLOW: + return MVPP22_FLOW_IP6; + default: + return -EOPNOTSUPP; + } +} + +static int mvpp2_cls_c2_port_flow_index(struct mvpp2_port *port, int loc) +{ + return MVPP22_CLS_C2_RFS_LOC(port->id, loc); +} + +/* Initialize the flow table entries for the given flow */ +static void mvpp2_cls_flow_init(struct mvpp2 *priv, + const struct 
mvpp2_cls_flow *flow) +{ + struct mvpp2_cls_flow_entry fe; + int i, pri = 0; + + /* Assign default values to all entries in the flow */ + for (i = MVPP2_CLS_FLT_FIRST(flow->flow_id); + i <= MVPP2_CLS_FLT_LAST(flow->flow_id); i++) { + memset(&fe, 0, sizeof(fe)); + fe.index = i; + mvpp2_cls_flow_pri_set(&fe, pri++); + + if (i == MVPP2_CLS_FLT_LAST(flow->flow_id)) + mvpp2_cls_flow_last_set(&fe, 1); + + mvpp2_cls_flow_write(priv, &fe); + } + + /* RSS config C2 lookup */ + mvpp2_cls_flow_read(priv, MVPP2_CLS_FLT_C2_RSS_ENTRY(flow->flow_id), + &fe); + + mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C2); + mvpp2_cls_flow_port_id_sel(&fe, true); + mvpp2_cls_flow_lu_type_set(&fe, MVPP22_CLS_LU_TYPE_ALL); + + /* Add all ports */ + for (i = 0; i < MVPP2_MAX_PORTS; i++) + mvpp2_cls_flow_port_add(&fe, BIT(i)); + + mvpp2_cls_flow_write(priv, &fe); + + /* C3Hx lookups */ + for (i = 0; i < MVPP2_MAX_PORTS; i++) { + mvpp2_cls_flow_read(priv, + MVPP2_CLS_FLT_HASH_ENTRY(i, flow->flow_id), + &fe); + + /* Set a default engine. Will be overwritten when setting the + * real HEK parameters + */ + mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C3HA); + mvpp2_cls_flow_port_id_sel(&fe, true); + mvpp2_cls_flow_port_add(&fe, BIT(i)); + + mvpp2_cls_flow_write(priv, &fe); + } +} + +/* Adds a field to the Header Extracted Key generation parameters*/ +static int mvpp2_flow_add_hek_field(struct mvpp2_cls_flow_entry *fe, + u32 field_id) +{ + int nb_fields = mvpp2_cls_flow_hek_num_get(fe); + + if (nb_fields == MVPP2_FLOW_N_FIELDS) + return -EINVAL; + + mvpp2_cls_flow_hek_set(fe, nb_fields, field_id); + + mvpp2_cls_flow_hek_num_set(fe, nb_fields + 1); + + return 0; +} + +static int mvpp2_flow_set_hek_fields(struct mvpp2_cls_flow_entry *fe, + unsigned long hash_opts) +{ + u32 field_id; + int i; + + /* Clear old fields */ + mvpp2_cls_flow_hek_num_set(fe, 0); + fe->data[2] = 0; + + for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) { + switch (BIT(i)) { + case MVPP22_CLS_HEK_OPT_MAC_DA: + field_id = MVPP22_CLS_FIELD_MAC_DA; + break; + case MVPP22_CLS_HEK_OPT_VLAN: + field_id = MVPP22_CLS_FIELD_VLAN; + break; + case MVPP22_CLS_HEK_OPT_VLAN_PRI: + field_id = MVPP22_CLS_FIELD_VLAN_PRI; + break; + case MVPP22_CLS_HEK_OPT_IP4SA: + field_id = MVPP22_CLS_FIELD_IP4SA; + break; + case MVPP22_CLS_HEK_OPT_IP4DA: + field_id = MVPP22_CLS_FIELD_IP4DA; + break; + case MVPP22_CLS_HEK_OPT_IP6SA: + field_id = MVPP22_CLS_FIELD_IP6SA; + break; + case MVPP22_CLS_HEK_OPT_IP6DA: + field_id = MVPP22_CLS_FIELD_IP6DA; + break; + case MVPP22_CLS_HEK_OPT_L4SIP: + field_id = MVPP22_CLS_FIELD_L4SIP; + break; + case MVPP22_CLS_HEK_OPT_L4DIP: + field_id = MVPP22_CLS_FIELD_L4DIP; + break; + default: + return -EINVAL; + } + if (mvpp2_flow_add_hek_field(fe, field_id)) + return -EINVAL; + } + + return 0; +} + +/* Returns the size, in bits, of the corresponding HEK field */ +static int mvpp2_cls_hek_field_size(u32 field) +{ + switch (field) { + case MVPP22_CLS_HEK_OPT_MAC_DA: + return 48; + case MVPP22_CLS_HEK_OPT_VLAN: + return 12; + case MVPP22_CLS_HEK_OPT_VLAN_PRI: + return 3; + case MVPP22_CLS_HEK_OPT_IP4SA: + case MVPP22_CLS_HEK_OPT_IP4DA: + return 32; + case MVPP22_CLS_HEK_OPT_IP6SA: + case MVPP22_CLS_HEK_OPT_IP6DA: + return 128; + case MVPP22_CLS_HEK_OPT_L4SIP: + case MVPP22_CLS_HEK_OPT_L4DIP: + return 16; + default: + return -1; + } +} + +const struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow) +{ + if (flow >= MVPP2_N_PRS_FLOWS) + return NULL; + + return &cls_flows[flow]; +} + +/* Set the hash generation options for the given traffic flow. 
+ * One traffic flow (in the ethtool sense) has multiple classification flows,
+ * to handle specific cases such as fragmentation, or the presence of a
+ * VLAN / DSA Tag.
+ *
+ * Each of these individual flows has different constraints, for example we
+ * can't hash fragmented packets on L4 data (else we would risk having packet
+ * re-ordering), so each classification flow masks the options with its
+ * supported ones.
+ *
+ */
+static int mvpp2_port_rss_hash_opts_set(struct mvpp2_port *port, int flow_type,
+ u16 requested_opts)
+{
+ const struct mvpp2_cls_flow *flow;
+ struct mvpp2_cls_flow_entry fe;
+ int i, engine, flow_index;
+ u16 hash_opts;
+
+ for_each_cls_flow_id_with_type(i, flow_type) {
+ flow = mvpp2_cls_flow_get(i);
+ if (!flow)
+ return -EINVAL;
+
+ flow_index = MVPP2_CLS_FLT_HASH_ENTRY(port->id, flow->flow_id);
+
+ mvpp2_cls_flow_read(port->priv, flow_index, &fe);
+
+ hash_opts = flow->supported_hash_opts & requested_opts;
+
+ /* Use the C3HB engine to access L4 info. This adds L4 info to the
+ * hash parameters
+ */
+ if (hash_opts & MVPP22_CLS_HEK_L4_OPTS)
+ engine = MVPP22_CLS_ENGINE_C3HB;
+ else
+ engine = MVPP22_CLS_ENGINE_C3HA;
+
+ if (mvpp2_flow_set_hek_fields(&fe, hash_opts))
+ return -EINVAL;
+
+ mvpp2_cls_flow_eng_set(&fe, engine);
+
+ mvpp2_cls_flow_write(port->priv, &fe);
+ }
+
+ return 0;
+}
+
+u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe)
+{
+ u16 hash_opts = 0;
+ int n_fields, i, field;
+
+ n_fields = mvpp2_cls_flow_hek_num_get(fe);
+
+ for (i = 0; i < n_fields; i++) {
+ field = mvpp2_cls_flow_hek_get(fe, i);
+
+ switch (field) {
+ case MVPP22_CLS_FIELD_MAC_DA:
+ hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA;
+ break;
+ case MVPP22_CLS_FIELD_VLAN:
+ hash_opts |= MVPP22_CLS_HEK_OPT_VLAN;
+ break;
+ case MVPP22_CLS_FIELD_VLAN_PRI:
+ hash_opts |= MVPP22_CLS_HEK_OPT_VLAN_PRI;
+ break;
+ case MVPP22_CLS_FIELD_L3_PROTO:
+ hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO;
+ break;
+ case MVPP22_CLS_FIELD_IP4SA:
+ hash_opts |= MVPP22_CLS_HEK_OPT_IP4SA;
+ break;
+ case MVPP22_CLS_FIELD_IP4DA:
+ hash_opts |= MVPP22_CLS_HEK_OPT_IP4DA;
+ break;
+ case MVPP22_CLS_FIELD_IP6SA:
+ hash_opts |= MVPP22_CLS_HEK_OPT_IP6SA;
+ break;
+ case MVPP22_CLS_FIELD_IP6DA:
+ hash_opts |= MVPP22_CLS_HEK_OPT_IP6DA;
+ break;
+ case MVPP22_CLS_FIELD_L4SIP:
+ hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP;
+ break;
+ case MVPP22_CLS_FIELD_L4DIP:
+ hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP;
+ break;
+ default:
+ break;
+ }
+ }
+ return hash_opts;
+}
+
+/* Returns the hash opts for this flow. There are several classifier flows
+ * for one traffic flow, so this returns an aggregation of all configurations.
+ */
+static u16 mvpp2_port_rss_hash_opts_get(struct mvpp2_port *port, int flow_type)
+{
+ const struct mvpp2_cls_flow *flow;
+ struct mvpp2_cls_flow_entry fe;
+ int i, flow_index;
+ u16 hash_opts = 0;
+
+ for_each_cls_flow_id_with_type(i, flow_type) {
+ flow = mvpp2_cls_flow_get(i);
+ if (!flow)
+ return 0;
+
+ flow_index = MVPP2_CLS_FLT_HASH_ENTRY(port->id, flow->flow_id);
+
+ mvpp2_cls_flow_read(port->priv, flow_index, &fe);
+
+ hash_opts |= mvpp2_flow_get_hek_fields(&fe);
+ }
+
+ return hash_opts;
+}
+
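The requested_opts bitmap consumed by mvpp2_port_rss_hash_opts_set() above is expressed in MVPP22_CLS_HEK_OPT_* terms rather than in ethtool's RXH_* terms; the ethtool glue that performs that translation lives elsewhere in the driver and is not shown in this excerpt. A minimal sketch of such a mapping for IPv4 flows, assuming only the RXH_* flags from <linux/ethtool.h> and the HEK option bits used above (the helper name is hypothetical, for illustration only):

        /* Illustrative sketch: translate ethtool RXH_* hash bits into the HEK
         * option bits expected by mvpp2_port_rss_hash_opts_set() for an IPv4
         * flow type. Not part of this commit.
         */
        static u16 example_rxh_to_hek_ipv4(u64 rxh_flags)
        {
                u16 hek = 0;

                if (rxh_flags & RXH_IP_SRC)
                        hek |= MVPP22_CLS_HEK_OPT_IP4SA;
                if (rxh_flags & RXH_IP_DST)
                        hek |= MVPP22_CLS_HEK_OPT_IP4DA;
                if (rxh_flags & RXH_L4_B_0_1)
                        hek |= MVPP22_CLS_HEK_OPT_L4SIP;
                if (rxh_flags & RXH_L4_B_2_3)
                        hek |= MVPP22_CLS_HEK_OPT_L4DIP;

                return hek;
        }

Passing the result to mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_UDP4, ...) would then let each classifier flow mask the request down to the fields it supports, as described in the comment above.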
+static void mvpp2_cls_port_init_flows(struct mvpp2 *priv)
+{
+ const struct mvpp2_cls_flow *flow;
+ int i;
+
+ for (i = 0; i < MVPP2_N_PRS_FLOWS; i++) {
+ flow = mvpp2_cls_flow_get(i);
+ if (!flow)
+ break;
+
+ mvpp2_cls_flow_prs_init(priv, flow);
+ mvpp2_cls_flow_lkp_init(priv, flow);
+ mvpp2_cls_flow_init(priv, flow);
+ }
+}
+
+static void mvpp2_port_c2_cls_init(struct mvpp2_port *port)
+{
+ struct mvpp2_cls_c2_entry c2;
+ u8 qh, ql, pmap;
+
+ memset(&c2, 0, sizeof(c2));
+
+ c2.index = MVPP22_CLS_C2_RSS_ENTRY(port->id);
+
+ pmap = BIT(port->id);
+ c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap);
+ c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap));
+
+ /* Match on Lookup Type */
+ c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK));
+ c2.tcam[4] |= MVPP22_CLS_C2_LU_TYPE(MVPP22_CLS_LU_TYPE_ALL);
+
+ /* Update RSS status after matching this entry */
+ c2.act = MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK);
+
+ /* Mark packet as "forwarded to software", needed for RSS */
+ c2.act |= MVPP22_CLS_C2_ACT_FWD(MVPP22_C2_FWD_SW_LOCK);
+
+ /* Configure the default rx queue: Update Queue Low and Queue High, but
+ * don't lock, since the rx queue selection might be overridden by RSS
+ */
+ c2.act |= MVPP22_CLS_C2_ACT_QHIGH(MVPP22_C2_UPD) |
+ MVPP22_CLS_C2_ACT_QLOW(MVPP22_C2_UPD);
+
+ qh = (port->first_rxq >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
+ ql = port->first_rxq & MVPP22_CLS_C2_ATTR0_QLOW_MASK;
+
+ c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
+ MVPP22_CLS_C2_ATTR0_QLOW(ql);
+
+ c2.valid = true;
+
+ mvpp2_cls_c2_write(port->priv, &c2);
+}
+
+/* Classifier default initialization */
+void mvpp2_cls_init(struct mvpp2 *priv)
+{
+ struct mvpp2_cls_lookup_entry le;
+ struct mvpp2_cls_flow_entry fe;
+ struct mvpp2_cls_c2_entry c2;
+ int index;
+
+ /* Enable classifier */
+ mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
+
+ /* Clear classifier flow table */
+ memset(&fe.data, 0, sizeof(fe.data));
+ for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
+ fe.index = index;
+ mvpp2_cls_flow_write(priv, &fe);
+ }
+
+ /* Clear classifier lookup table */
+ le.data = 0;
+ for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
+ le.lkpid = index;
+ le.way = 0;
+ mvpp2_cls_lookup_write(priv, &le);
+
+ le.way = 1;
+ mvpp2_cls_lookup_write(priv, &le);
+ }
+
+ /* Clear C2 TCAM engine table */
+ memset(&c2, 0, sizeof(c2));
+ c2.valid = false;
+ for (index = 0; index < MVPP22_CLS_C2_N_ENTRIES; index++) {
+ c2.index = index;
+ mvpp2_cls_c2_write(priv, &c2);
+ }
+
+ /* Disable the FIFO stages in C2 engine, which are only used in BIST
+ * mode
+ */
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_CTRL,
+ MVPP22_CLS_C2_TCAM_BYPASS_FIFO);
+
+ mvpp2_cls_port_init_flows(priv);
+}
+
+void mvpp2_cls_port_config(struct mvpp2_port *port)
+{
+ struct mvpp2_cls_lookup_entry le;
+ u32 val;
+
+ /* Set way for the port */
+ val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
+ val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
+ mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG,
val); + + /* Pick the entry to be accessed in lookup ID decoding table + * according to the way and lkpid. + */ + le.lkpid = port->id; + le.way = 0; + le.data = 0; + + /* Set initial CPU queue for receiving packets */ + le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK; + le.data |= port->first_rxq; + + /* Disable classification engines */ + le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK; + + /* Update lookup ID table entry */ + mvpp2_cls_lookup_write(port->priv, &le); + + mvpp2_port_c2_cls_init(port); +} + +u32 mvpp2_cls_c2_hit_count(struct mvpp2 *priv, int c2_index) +{ + mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2_index); + + return mvpp2_read(priv, MVPP22_CLS_C2_HIT_CTR); +} + +static void mvpp2_rss_port_c2_enable(struct mvpp2_port *port, u32 ctx) +{ + struct mvpp2_cls_c2_entry c2; + u8 qh, ql; + + mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2); + + /* The RxQ number is used to select the RSS table. It that case, we set + * it to be the ctx number. + */ + qh = (ctx >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK; + ql = ctx & MVPP22_CLS_C2_ATTR0_QLOW_MASK; + + c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) | + MVPP22_CLS_C2_ATTR0_QLOW(ql); + + c2.attr[2] |= MVPP22_CLS_C2_ATTR2_RSS_EN; + + mvpp2_cls_c2_write(port->priv, &c2); +} + +static void mvpp2_rss_port_c2_disable(struct mvpp2_port *port) +{ + struct mvpp2_cls_c2_entry c2; + u8 qh, ql; + + mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2); + + /* Reset the default destination RxQ to the port's first rx queue. */ + qh = (port->first_rxq >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK; + ql = port->first_rxq & MVPP22_CLS_C2_ATTR0_QLOW_MASK; + + c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) | + MVPP22_CLS_C2_ATTR0_QLOW(ql); + + c2.attr[2] &= ~MVPP22_CLS_C2_ATTR2_RSS_EN; + + mvpp2_cls_c2_write(port->priv, &c2); +} + +static inline int mvpp22_rss_ctx(struct mvpp2_port *port, int port_rss_ctx) +{ + return port->rss_ctx[port_rss_ctx]; +} + +int mvpp22_port_rss_enable(struct mvpp2_port *port) +{ + if (mvpp22_rss_ctx(port, 0) < 0) + return -EINVAL; + + mvpp2_rss_port_c2_enable(port, mvpp22_rss_ctx(port, 0)); + + return 0; +} + +int mvpp22_port_rss_disable(struct mvpp2_port *port) +{ + if (mvpp22_rss_ctx(port, 0) < 0) + return -EINVAL; + + mvpp2_rss_port_c2_disable(port); + + return 0; +} + +static void mvpp22_port_c2_lookup_disable(struct mvpp2_port *port, int entry) +{ + struct mvpp2_cls_c2_entry c2; + + mvpp2_cls_c2_read(port->priv, entry, &c2); + + /* Clear the port map so that the entry doesn't match anymore */ + c2.tcam[4] &= ~(MVPP22_CLS_C2_PORT_ID(BIT(port->id))); + + mvpp2_cls_c2_write(port->priv, &c2); +} + +/* Set CPU queue number for oversize packets */ +void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port) +{ + u32 val; + + mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id), + port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK); + + mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id), + (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS)); + + val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG); + val &= ~MVPP2_CLS_SWFWD_PCTRL_MASK(port->id); + mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val); +} + +static int mvpp2_port_c2_tcam_rule_add(struct mvpp2_port *port, + struct mvpp2_rfs_rule *rule) +{ + struct flow_action_entry *act; + struct mvpp2_cls_c2_entry c2; + u8 qh, ql, pmap; + int index, ctx; + + if (!flow_action_basic_hw_stats_check(&rule->flow->action, NULL)) + return -EOPNOTSUPP; + + memset(&c2, 0, sizeof(c2)); + + index = mvpp2_cls_c2_port_flow_index(port, rule->loc); + if (index < 
0) + return -EINVAL; + c2.index = index; + + act = &rule->flow->action.entries[0]; + + rule->c2_index = c2.index; + + c2.tcam[3] = (rule->c2_tcam & 0xffff) | + ((rule->c2_tcam_mask & 0xffff) << 16); + c2.tcam[2] = ((rule->c2_tcam >> 16) & 0xffff) | + (((rule->c2_tcam_mask >> 16) & 0xffff) << 16); + c2.tcam[1] = ((rule->c2_tcam >> 32) & 0xffff) | + (((rule->c2_tcam_mask >> 32) & 0xffff) << 16); + c2.tcam[0] = ((rule->c2_tcam >> 48) & 0xffff) | + (((rule->c2_tcam_mask >> 48) & 0xffff) << 16); + + pmap = BIT(port->id); + c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap); + c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap)); + + /* Match on Lookup Type */ + c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK)); + c2.tcam[4] |= MVPP22_CLS_C2_LU_TYPE(rule->loc); + + if (act->id == FLOW_ACTION_DROP) { + c2.act = MVPP22_CLS_C2_ACT_COLOR(MVPP22_C2_COL_RED_LOCK); + } else { + /* We want to keep the default color derived from the Header + * Parser drop entries, for VLAN and MAC filtering. This will + * assign a default color of Green or Red, and we want matches + * with a non-drop action to keep that color. + */ + c2.act = MVPP22_CLS_C2_ACT_COLOR(MVPP22_C2_COL_NO_UPD_LOCK); + + /* Update RSS status after matching this entry */ + if (act->queue.ctx) + c2.attr[2] |= MVPP22_CLS_C2_ATTR2_RSS_EN; + + /* Always lock the RSS_EN decision. We might have high prio + * rules steering to an RXQ, and a lower one steering to RSS, + * we don't want the low prio RSS rule overwriting this flag. + */ + c2.act = MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK); + + /* Mark packet as "forwarded to software", needed for RSS */ + c2.act |= MVPP22_CLS_C2_ACT_FWD(MVPP22_C2_FWD_SW_LOCK); + + c2.act |= MVPP22_CLS_C2_ACT_QHIGH(MVPP22_C2_UPD_LOCK) | + MVPP22_CLS_C2_ACT_QLOW(MVPP22_C2_UPD_LOCK); + + if (act->queue.ctx) { + /* Get the global ctx number */ + ctx = mvpp22_rss_ctx(port, act->queue.ctx); + if (ctx < 0) + return -EINVAL; + + qh = (ctx >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK; + ql = ctx & MVPP22_CLS_C2_ATTR0_QLOW_MASK; + } else { + qh = ((act->queue.index + port->first_rxq) >> 3) & + MVPP22_CLS_C2_ATTR0_QHIGH_MASK; + ql = (act->queue.index + port->first_rxq) & + MVPP22_CLS_C2_ATTR0_QLOW_MASK; + } + + c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) | + MVPP22_CLS_C2_ATTR0_QLOW(ql); + } + + c2.valid = true; + + mvpp2_cls_c2_write(port->priv, &c2); + + return 0; +} + +static int mvpp2_port_c2_rfs_rule_insert(struct mvpp2_port *port, + struct mvpp2_rfs_rule *rule) +{ + return mvpp2_port_c2_tcam_rule_add(port, rule); +} + +static int mvpp2_port_cls_rfs_rule_remove(struct mvpp2_port *port, + struct mvpp2_rfs_rule *rule) +{ + const struct mvpp2_cls_flow *flow; + struct mvpp2_cls_flow_entry fe; + int index, i; + + for_each_cls_flow_id_containing_type(i, rule->flow_type) { + flow = mvpp2_cls_flow_get(i); + if (!flow) + return 0; + + index = MVPP2_CLS_FLT_C2_RFS(port->id, flow->flow_id, rule->loc); + + mvpp2_cls_flow_read(port->priv, index, &fe); + mvpp2_cls_flow_port_remove(&fe, BIT(port->id)); + mvpp2_cls_flow_write(port->priv, &fe); + } + + if (rule->c2_index >= 0) + mvpp22_port_c2_lookup_disable(port, rule->c2_index); + + return 0; +} + +static int mvpp2_port_flt_rfs_rule_insert(struct mvpp2_port *port, + struct mvpp2_rfs_rule *rule) +{ + const struct mvpp2_cls_flow *flow; + struct mvpp2 *priv = port->priv; + struct mvpp2_cls_flow_entry fe; + int index, ret, i; + + if (rule->engine != MVPP22_CLS_ENGINE_C2) + return -EOPNOTSUPP; + + ret = mvpp2_port_c2_rfs_rule_insert(port, rule); + if (ret) + return 
ret; + + for_each_cls_flow_id_containing_type(i, rule->flow_type) { + flow = mvpp2_cls_flow_get(i); + if (!flow) + return 0; + + if ((rule->hek_fields & flow->supported_hash_opts) != rule->hek_fields) + continue; + + index = MVPP2_CLS_FLT_C2_RFS(port->id, flow->flow_id, rule->loc); + + mvpp2_cls_flow_read(priv, index, &fe); + mvpp2_cls_flow_eng_set(&fe, rule->engine); + mvpp2_cls_flow_port_id_sel(&fe, true); + mvpp2_flow_set_hek_fields(&fe, rule->hek_fields); + mvpp2_cls_flow_lu_type_set(&fe, rule->loc); + mvpp2_cls_flow_port_add(&fe, 0xf); + + mvpp2_cls_flow_write(priv, &fe); + } + + return 0; +} + +static int mvpp2_cls_c2_build_match(struct mvpp2_rfs_rule *rule) +{ + struct flow_rule *flow = rule->flow; + int offs = 0; + + /* The order of insertion in C2 tcam must match the order in which + * the fields are found in the header + */ + if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; + + flow_rule_match_vlan(flow, &match); + if (match.mask->vlan_id) { + rule->hek_fields |= MVPP22_CLS_HEK_OPT_VLAN; + + rule->c2_tcam |= ((u64)match.key->vlan_id) << offs; + rule->c2_tcam_mask |= ((u64)match.mask->vlan_id) << offs; + + /* Don't update the offset yet */ + } + + if (match.mask->vlan_priority) { + rule->hek_fields |= MVPP22_CLS_HEK_OPT_VLAN_PRI; + + /* VLAN pri is always at offset 13 relative to the + * current offset + */ + rule->c2_tcam |= ((u64)match.key->vlan_priority) << + (offs + 13); + rule->c2_tcam_mask |= ((u64)match.mask->vlan_priority) << + (offs + 13); + } + + if (match.mask->vlan_dei) + return -EOPNOTSUPP; + + /* vlan id and prio always seem to take a full 16-bit slot in + * the Header Extracted Key. + */ + offs += 16; + } + + if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports match; + + flow_rule_match_ports(flow, &match); + if (match.mask->src) { + rule->hek_fields |= MVPP22_CLS_HEK_OPT_L4SIP; + + rule->c2_tcam |= ((u64)ntohs(match.key->src)) << offs; + rule->c2_tcam_mask |= ((u64)ntohs(match.mask->src)) << offs; + offs += mvpp2_cls_hek_field_size(MVPP22_CLS_HEK_OPT_L4SIP); + } + + if (match.mask->dst) { + rule->hek_fields |= MVPP22_CLS_HEK_OPT_L4DIP; + + rule->c2_tcam |= ((u64)ntohs(match.key->dst)) << offs; + rule->c2_tcam_mask |= ((u64)ntohs(match.mask->dst)) << offs; + offs += mvpp2_cls_hek_field_size(MVPP22_CLS_HEK_OPT_L4DIP); + } + } + + if (hweight16(rule->hek_fields) > MVPP2_FLOW_N_FIELDS) + return -EOPNOTSUPP; + + return 0; +} + +static int mvpp2_cls_rfs_parse_rule(struct mvpp2_rfs_rule *rule) +{ + struct flow_rule *flow = rule->flow; + struct flow_action_entry *act; + + if (!flow_action_basic_hw_stats_check(&rule->flow->action, NULL)) + return -EOPNOTSUPP; + + act = &flow->action.entries[0]; + if (act->id != FLOW_ACTION_QUEUE && act->id != FLOW_ACTION_DROP) + return -EOPNOTSUPP; + + /* When both an RSS context and an queue index are set, the index + * is considered as an offset to be added to the indirection table + * entries. We don't support this, so reject this rule. + */ + if (act->queue.ctx && act->queue.index) + return -EOPNOTSUPP; + + /* For now, only use the C2 engine which has a HEK size limited to 64 + * bits for TCAM matching. 
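+ * In practice the only fields packed into that key by
+ * mvpp2_cls_c2_build_match() are the VLAN id/priority (one 16-bit slot) and
+ * the 16-bit L4 source and destination ports, which fits comfortably.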
+ */ + rule->engine = MVPP22_CLS_ENGINE_C2; + + if (mvpp2_cls_c2_build_match(rule)) + return -EINVAL; + + return 0; +} + +int mvpp2_ethtool_cls_rule_get(struct mvpp2_port *port, + struct ethtool_rxnfc *rxnfc) +{ + struct mvpp2_ethtool_fs *efs; + + if (rxnfc->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW) + return -EINVAL; + + efs = port->rfs_rules[rxnfc->fs.location]; + if (!efs) + return -ENOENT; + + memcpy(rxnfc, &efs->rxnfc, sizeof(efs->rxnfc)); + + return 0; +} + +int mvpp2_ethtool_cls_rule_ins(struct mvpp2_port *port, + struct ethtool_rxnfc *info) +{ + struct ethtool_rx_flow_spec_input input = {}; + struct ethtool_rx_flow_rule *ethtool_rule; + struct mvpp2_ethtool_fs *efs, *old_efs; + int ret = 0; + + if (info->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW) + return -EINVAL; + + efs = kzalloc(sizeof(*efs), GFP_KERNEL); + if (!efs) + return -ENOMEM; + + input.fs = &info->fs; + + /* We need to manually set the rss_ctx, since this info isn't present + * in info->fs + */ + if (info->fs.flow_type & FLOW_RSS) + input.rss_ctx = info->rss_context; + + ethtool_rule = ethtool_rx_flow_rule_create(&input); + if (IS_ERR(ethtool_rule)) { + ret = PTR_ERR(ethtool_rule); + goto clean_rule; + } + + efs->rule.flow = ethtool_rule->rule; + efs->rule.flow_type = mvpp2_cls_ethtool_flow_to_type(info->fs.flow_type); + if (efs->rule.flow_type < 0) { + ret = efs->rule.flow_type; + goto clean_rule; + } + + ret = mvpp2_cls_rfs_parse_rule(&efs->rule); + if (ret) + goto clean_eth_rule; + + efs->rule.loc = info->fs.location; + + /* Replace an already existing rule */ + if (port->rfs_rules[efs->rule.loc]) { + old_efs = port->rfs_rules[efs->rule.loc]; + ret = mvpp2_port_cls_rfs_rule_remove(port, &old_efs->rule); + if (ret) + goto clean_eth_rule; + kfree(old_efs); + port->n_rfs_rules--; + } + + ret = mvpp2_port_flt_rfs_rule_insert(port, &efs->rule); + if (ret) + goto clean_eth_rule; + + ethtool_rx_flow_rule_destroy(ethtool_rule); + efs->rule.flow = NULL; + + memcpy(&efs->rxnfc, info, sizeof(*info)); + port->rfs_rules[efs->rule.loc] = efs; + port->n_rfs_rules++; + + return ret; + +clean_eth_rule: + ethtool_rx_flow_rule_destroy(ethtool_rule); +clean_rule: + kfree(efs); + return ret; +} + +int mvpp2_ethtool_cls_rule_del(struct mvpp2_port *port, + struct ethtool_rxnfc *info) +{ + struct mvpp2_ethtool_fs *efs; + int ret; + + if (info->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW) + return -EINVAL; + + efs = port->rfs_rules[info->fs.location]; + if (!efs) + return -EINVAL; + + /* Remove the rule from the engines. */ + ret = mvpp2_port_cls_rfs_rule_remove(port, &efs->rule); + if (ret) + return ret; + + port->n_rfs_rules--; + port->rfs_rules[info->fs.location] = NULL; + kfree(efs); + + return 0; +} + +static inline u32 mvpp22_rxfh_indir(struct mvpp2_port *port, u32 rxq) +{ + int nrxqs, cpu, cpus = num_possible_cpus(); + + /* Number of RXQs per CPU */ + nrxqs = port->nrxqs / cpus; + + /* CPU that will handle this rx queue */ + cpu = rxq / nrxqs; + + if (!cpu_online(cpu)) + return port->first_rxq; + + /* Indirection to better distribute the paquets on the CPUs when + * configuring the RSS queues. 
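+ * For example, with 16 port RXQs served by 4 CPUs (nrxqs = 4 per CPU),
+ * indirection values 0, 1, 2, 3, 4, ... are remapped to RXQs 0, 4, 8, 12,
+ * 1, ... (relative to first_rxq), so consecutive hash buckets land on
+ * queues owned by different CPUs.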
+ */ + return port->first_rxq + ((rxq * nrxqs + rxq / cpus) % port->nrxqs); +} + +static void mvpp22_rss_fill_table(struct mvpp2_port *port, + struct mvpp2_rss_table *table, + u32 rss_ctx) +{ + struct mvpp2 *priv = port->priv; + int i; + + for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) { + u32 sel = MVPP22_RSS_INDEX_TABLE(rss_ctx) | + MVPP22_RSS_INDEX_TABLE_ENTRY(i); + mvpp2_write(priv, MVPP22_RSS_INDEX, sel); + + mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY, + mvpp22_rxfh_indir(port, table->indir[i])); + } +} + +static int mvpp22_rss_context_create(struct mvpp2_port *port, u32 *rss_ctx) +{ + struct mvpp2 *priv = port->priv; + u32 ctx; + + /* Find the first free RSS table */ + for (ctx = 0; ctx < MVPP22_N_RSS_TABLES; ctx++) { + if (!priv->rss_tables[ctx]) + break; + } + + if (ctx == MVPP22_N_RSS_TABLES) + return -EINVAL; + + priv->rss_tables[ctx] = kzalloc(sizeof(*priv->rss_tables[ctx]), + GFP_KERNEL); + if (!priv->rss_tables[ctx]) + return -ENOMEM; + + *rss_ctx = ctx; + + /* Set the table width: replace the whole classifier Rx queue number + * with the ones configured in RSS table entries. + */ + mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(ctx)); + mvpp2_write(priv, MVPP22_RSS_WIDTH, 8); + + mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_QUEUE(ctx)); + mvpp2_write(priv, MVPP22_RXQ2RSS_TABLE, MVPP22_RSS_TABLE_POINTER(ctx)); + + return 0; +} + +int mvpp22_port_rss_ctx_create(struct mvpp2_port *port, u32 *port_ctx) +{ + u32 rss_ctx; + int ret, i; + + ret = mvpp22_rss_context_create(port, &rss_ctx); + if (ret) + return ret; + + /* Find the first available context number in the port, starting from 1. + * Context 0 on each port is reserved for the default context. + */ + for (i = 1; i < MVPP22_N_RSS_TABLES; i++) { + if (port->rss_ctx[i] < 0) + break; + } + + if (i == MVPP22_N_RSS_TABLES) + return -EINVAL; + + port->rss_ctx[i] = rss_ctx; + *port_ctx = i; + + return 0; +} + +static struct mvpp2_rss_table *mvpp22_rss_table_get(struct mvpp2 *priv, + int rss_ctx) +{ + if (rss_ctx < 0 || rss_ctx >= MVPP22_N_RSS_TABLES) + return NULL; + + return priv->rss_tables[rss_ctx]; +} + +int mvpp22_port_rss_ctx_delete(struct mvpp2_port *port, u32 port_ctx) +{ + struct mvpp2 *priv = port->priv; + struct ethtool_rxnfc *rxnfc; + int i, rss_ctx, ret; + + rss_ctx = mvpp22_rss_ctx(port, port_ctx); + + if (rss_ctx < 0 || rss_ctx >= MVPP22_N_RSS_TABLES) + return -EINVAL; + + /* Invalidate any active classification rule that use this context */ + for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) { + if (!port->rfs_rules[i]) + continue; + + rxnfc = &port->rfs_rules[i]->rxnfc; + if (!(rxnfc->fs.flow_type & FLOW_RSS) || + rxnfc->rss_context != port_ctx) + continue; + + ret = mvpp2_ethtool_cls_rule_del(port, rxnfc); + if (ret) { + netdev_warn(port->dev, + "couldn't remove classification rule %d associated to this context", + rxnfc->fs.location); + } + } + + kfree(priv->rss_tables[rss_ctx]); + + priv->rss_tables[rss_ctx] = NULL; + port->rss_ctx[port_ctx] = -1; + + return 0; +} + +int mvpp22_port_rss_ctx_indir_set(struct mvpp2_port *port, u32 port_ctx, + const u32 *indir) +{ + int rss_ctx = mvpp22_rss_ctx(port, port_ctx); + struct mvpp2_rss_table *rss_table = mvpp22_rss_table_get(port->priv, + rss_ctx); + + if (!rss_table) + return -EINVAL; + + memcpy(rss_table->indir, indir, + MVPP22_RSS_TABLE_ENTRIES * sizeof(rss_table->indir[0])); + + mvpp22_rss_fill_table(port, rss_table, rss_ctx); + + return 0; +} + +int mvpp22_port_rss_ctx_indir_get(struct mvpp2_port *port, u32 port_ctx, + u32 *indir) +{ + int rss_ctx 
= mvpp22_rss_ctx(port, port_ctx); + struct mvpp2_rss_table *rss_table = mvpp22_rss_table_get(port->priv, + rss_ctx); + + if (!rss_table) + return -EINVAL; + + memcpy(indir, rss_table->indir, + MVPP22_RSS_TABLE_ENTRIES * sizeof(rss_table->indir[0])); + + return 0; +} + +int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info) +{ + u16 hash_opts = 0; + u32 flow_type; + + flow_type = mvpp2_cls_ethtool_flow_to_type(info->flow_type); + + switch (flow_type) { + case MVPP22_FLOW_TCP4: + case MVPP22_FLOW_UDP4: + case MVPP22_FLOW_TCP6: + case MVPP22_FLOW_UDP6: + if (info->data & RXH_L4_B_0_1) + hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP; + if (info->data & RXH_L4_B_2_3) + hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP; + fallthrough; + case MVPP22_FLOW_IP4: + case MVPP22_FLOW_IP6: + if (info->data & RXH_L2DA) + hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA; + if (info->data & RXH_VLAN) + hash_opts |= MVPP22_CLS_HEK_OPT_VLAN; + if (info->data & RXH_L3_PROTO) + hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO; + if (info->data & RXH_IP_SRC) + hash_opts |= (MVPP22_CLS_HEK_OPT_IP4SA | + MVPP22_CLS_HEK_OPT_IP6SA); + if (info->data & RXH_IP_DST) + hash_opts |= (MVPP22_CLS_HEK_OPT_IP4DA | + MVPP22_CLS_HEK_OPT_IP6DA); + break; + default: return -EOPNOTSUPP; + } + + return mvpp2_port_rss_hash_opts_set(port, flow_type, hash_opts); +} + +int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info) +{ + unsigned long hash_opts; + u32 flow_type; + int i; + + flow_type = mvpp2_cls_ethtool_flow_to_type(info->flow_type); + + hash_opts = mvpp2_port_rss_hash_opts_get(port, flow_type); + info->data = 0; + + for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) { + switch (BIT(i)) { + case MVPP22_CLS_HEK_OPT_MAC_DA: + info->data |= RXH_L2DA; + break; + case MVPP22_CLS_HEK_OPT_VLAN: + info->data |= RXH_VLAN; + break; + case MVPP22_CLS_HEK_OPT_L3_PROTO: + info->data |= RXH_L3_PROTO; + break; + case MVPP22_CLS_HEK_OPT_IP4SA: + case MVPP22_CLS_HEK_OPT_IP6SA: + info->data |= RXH_IP_SRC; + break; + case MVPP22_CLS_HEK_OPT_IP4DA: + case MVPP22_CLS_HEK_OPT_IP6DA: + info->data |= RXH_IP_DST; + break; + case MVPP22_CLS_HEK_OPT_L4SIP: + info->data |= RXH_L4_B_0_1; + break; + case MVPP22_CLS_HEK_OPT_L4DIP: + info->data |= RXH_L4_B_2_3; + break; + default: + return -EINVAL; + } + } + return 0; +} + +int mvpp22_port_rss_init(struct mvpp2_port *port) +{ + struct mvpp2_rss_table *table; + u32 context = 0; + int i, ret; + + for (i = 0; i < MVPP22_N_RSS_TABLES; i++) + port->rss_ctx[i] = -1; + + ret = mvpp22_rss_context_create(port, &context); + if (ret) + return ret; + + table = mvpp22_rss_table_get(port->priv, context); + if (!table) + return -EINVAL; + + port->rss_ctx[0] = context; + + /* Configure the first table to evenly distribute the packets across + * real Rx Queues. The table entries map a hash to a port Rx Queue. 
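+ * ethtool_rxfh_indir_default(i, n) is simply i % n, so the default table
+ * round-robins hash buckets over the port's RXQs; mvpp22_rss_fill_table()
+ * then passes each value through mvpp22_rxfh_indir() when programming the
+ * hardware.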
+ */ + for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) + table->indir[i] = ethtool_rxfh_indir_default(i, port->nrxqs); + + mvpp22_rss_fill_table(port, table, mvpp22_rss_ctx(port, 0)); + + /* Configure default flows */ + mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_IP4, MVPP22_CLS_HEK_IP4_2T); + mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_IP6, MVPP22_CLS_HEK_IP6_2T); + mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_TCP4, MVPP22_CLS_HEK_IP4_5T); + mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_TCP6, MVPP22_CLS_HEK_IP6_5T); + mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_UDP4, MVPP22_CLS_HEK_IP4_5T); + mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_UDP6, MVPP22_CLS_HEK_IP6_5T); + + return 0; +} diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h new file mode 100644 index 000000000..8867f25af --- /dev/null +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h @@ -0,0 +1,314 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * RSS and Classifier definitions for Marvell PPv2 Network Controller + * + * Copyright (C) 2014 Marvell + * + * Marcin Wojtas <mw@semihalf.com> + */ + +#ifndef _MVPP2_CLS_H_ +#define _MVPP2_CLS_H_ + +#include "mvpp2.h" +#include "mvpp2_prs.h" + +/* Classifier constants */ +#define MVPP2_CLS_FLOWS_TBL_SIZE 512 +#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3 +#define MVPP2_CLS_LKP_TBL_SIZE 64 +#define MVPP2_CLS_RX_QUEUES 256 + +/* Classifier flow constants */ + +#define MVPP2_FLOW_N_FIELDS 4 + +enum mvpp2_cls_engine { + MVPP22_CLS_ENGINE_C2 = 1, + MVPP22_CLS_ENGINE_C3A, + MVPP22_CLS_ENGINE_C3B, + MVPP22_CLS_ENGINE_C4, + MVPP22_CLS_ENGINE_C3HA = 6, + MVPP22_CLS_ENGINE_C3HB = 7, +}; + +#define MVPP22_CLS_HEK_OPT_MAC_DA BIT(0) +#define MVPP22_CLS_HEK_OPT_VLAN_PRI BIT(1) +#define MVPP22_CLS_HEK_OPT_VLAN BIT(2) +#define MVPP22_CLS_HEK_OPT_L3_PROTO BIT(3) +#define MVPP22_CLS_HEK_OPT_IP4SA BIT(4) +#define MVPP22_CLS_HEK_OPT_IP4DA BIT(5) +#define MVPP22_CLS_HEK_OPT_IP6SA BIT(6) +#define MVPP22_CLS_HEK_OPT_IP6DA BIT(7) +#define MVPP22_CLS_HEK_OPT_L4SIP BIT(8) +#define MVPP22_CLS_HEK_OPT_L4DIP BIT(9) +#define MVPP22_CLS_HEK_N_FIELDS 10 + +#define MVPP22_CLS_HEK_L4_OPTS (MVPP22_CLS_HEK_OPT_L4SIP | \ + MVPP22_CLS_HEK_OPT_L4DIP) + +#define MVPP22_CLS_HEK_IP4_2T (MVPP22_CLS_HEK_OPT_IP4SA | \ + MVPP22_CLS_HEK_OPT_IP4DA) + +#define MVPP22_CLS_HEK_IP6_2T (MVPP22_CLS_HEK_OPT_IP6SA | \ + MVPP22_CLS_HEK_OPT_IP6DA) + +/* The fifth tuple in "5T" is the L4_Info field */ +#define MVPP22_CLS_HEK_IP4_5T (MVPP22_CLS_HEK_IP4_2T | \ + MVPP22_CLS_HEK_L4_OPTS) + +#define MVPP22_CLS_HEK_IP6_5T (MVPP22_CLS_HEK_IP6_2T | \ + MVPP22_CLS_HEK_L4_OPTS) + +#define MVPP22_CLS_HEK_TAGGED (MVPP22_CLS_HEK_OPT_VLAN | \ + MVPP22_CLS_HEK_OPT_VLAN_PRI) + +enum mvpp2_cls_field_id { + MVPP22_CLS_FIELD_MAC_DA = 0x03, + MVPP22_CLS_FIELD_VLAN_PRI = 0x05, + MVPP22_CLS_FIELD_VLAN = 0x06, + MVPP22_CLS_FIELD_L3_PROTO = 0x0f, + MVPP22_CLS_FIELD_IP4SA = 0x10, + MVPP22_CLS_FIELD_IP4DA = 0x11, + MVPP22_CLS_FIELD_IP6SA = 0x17, + MVPP22_CLS_FIELD_IP6DA = 0x1a, + MVPP22_CLS_FIELD_L4SIP = 0x1d, + MVPP22_CLS_FIELD_L4DIP = 0x1e, +}; + +/* Classifier C2 engine constants */ +#define MVPP22_CLS_C2_TCAM_EN(data) ((data) << 16) + +enum mvpp22_cls_c2_action { + MVPP22_C2_NO_UPD = 0, + MVPP22_C2_NO_UPD_LOCK, + MVPP22_C2_UPD, + MVPP22_C2_UPD_LOCK, +}; + +enum mvpp22_cls_c2_fwd_action { + MVPP22_C2_FWD_NO_UPD = 0, + MVPP22_C2_FWD_NO_UPD_LOCK, + MVPP22_C2_FWD_SW, + MVPP22_C2_FWD_SW_LOCK, + MVPP22_C2_FWD_HW, + MVPP22_C2_FWD_HW_LOCK, + MVPP22_C2_FWD_HW_LOW_LAT, + MVPP22_C2_FWD_HW_LOW_LAT_LOCK, +}; + 
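To make the hash-key composition above concrete, here is a minimal user-space sketch (not driver code; the HEK_* macros are local re-declarations mirroring the MVPP22_CLS_HEK_* values above) of how mvpp2_ethtool_rxfh_set() turns ethtool RXH_* flags into an option mask and how mvpp2_port_rss_hash_opts_set() then intersects it with a flow's supported_hash_opts:

#include <stdio.h>
#include <stdint.h>

/* Local re-declarations for illustration; values mirror mvpp2_cls.h above */
#define HEK_OPT_IP4SA	(1u << 4)
#define HEK_OPT_IP4DA	(1u << 5)
#define HEK_OPT_L4SIP	(1u << 8)
#define HEK_OPT_L4DIP	(1u << 9)
#define HEK_IP4_2T	(HEK_OPT_IP4SA | HEK_OPT_IP4DA)
#define HEK_IP4_5T	(HEK_IP4_2T | HEK_OPT_L4SIP | HEK_OPT_L4DIP)

int main(void)
{
	/* User requests a full 5-tuple hash, e.g. "ethtool -N ethX rx-flow-hash tcp4 sdfn" */
	uint16_t requested = HEK_IP4_5T;

	/* A fragmented-IPv4 classification flow only supports the 2-tuple,
	 * since hashing fragments on L4 ports could reorder packets.
	 */
	uint16_t supported = HEK_IP4_2T;

	/* Same masking as mvpp2_port_rss_hash_opts_set() */
	uint16_t programmed = requested & supported;

	printf("requested  0x%03x\n", requested);	/* 0x330 */
	printf("supported  0x%03x\n", supported);	/* 0x030 */
	printf("programmed 0x%03x\n", programmed);	/* 0x030 */
	return 0;
}

The same intersection is what makes fragmented traffic silently fall back to 2-tuple hashing even when the user asked for the full 5-tuple.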
+enum mvpp22_cls_c2_color_action { + MVPP22_C2_COL_NO_UPD = 0, + MVPP22_C2_COL_NO_UPD_LOCK, + MVPP22_C2_COL_GREEN, + MVPP22_C2_COL_GREEN_LOCK, + MVPP22_C2_COL_YELLOW, + MVPP22_C2_COL_YELLOW_LOCK, + MVPP22_C2_COL_RED, /* Drop */ + MVPP22_C2_COL_RED_LOCK, /* Drop */ +}; + +#define MVPP2_CLS_C2_TCAM_WORDS 5 +#define MVPP2_CLS_C2_ATTR_WORDS 5 + +struct mvpp2_cls_c2_entry { + u32 index; + /* TCAM lookup key */ + u32 tcam[MVPP2_CLS_C2_TCAM_WORDS]; + /* Actions to perform upon TCAM match */ + u32 act; + /* Attributes relative to the actions to perform */ + u32 attr[MVPP2_CLS_C2_ATTR_WORDS]; + /* Entry validity */ + u8 valid; +}; + +#define MVPP22_FLOW_ETHER_BIT BIT(0) +#define MVPP22_FLOW_IP4_BIT BIT(1) +#define MVPP22_FLOW_IP6_BIT BIT(2) +#define MVPP22_FLOW_TCP_BIT BIT(3) +#define MVPP22_FLOW_UDP_BIT BIT(4) + +#define MVPP22_FLOW_TCP4 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP4_BIT | MVPP22_FLOW_TCP_BIT) +#define MVPP22_FLOW_TCP6 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP6_BIT | MVPP22_FLOW_TCP_BIT) +#define MVPP22_FLOW_UDP4 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP4_BIT | MVPP22_FLOW_UDP_BIT) +#define MVPP22_FLOW_UDP6 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP6_BIT | MVPP22_FLOW_UDP_BIT) +#define MVPP22_FLOW_IP4 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP4_BIT) +#define MVPP22_FLOW_IP6 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP6_BIT) +#define MVPP22_FLOW_ETHERNET (MVPP22_FLOW_ETHER_BIT) + +/* Classifier C2 engine entries */ +#define MVPP22_CLS_C2_N_ENTRIES 256 + +/* Number of per-port dedicated entries in the C2 TCAM */ +#define MVPP22_CLS_C2_PORT_N_FLOWS MVPP2_N_RFS_ENTRIES_PER_FLOW + +/* Each port has oen range per flow type + one entry controling the global RSS + * setting and the default rx queue + */ +#define MVPP22_CLS_C2_PORT_RANGE (MVPP22_CLS_C2_PORT_N_FLOWS + 1) +#define MVPP22_CLS_C2_PORT_FIRST(p) ((p) * MVPP22_CLS_C2_PORT_RANGE) +#define MVPP22_CLS_C2_RSS_ENTRY(p) (MVPP22_CLS_C2_PORT_FIRST((p) + 1) - 1) + +#define MVPP22_CLS_C2_PORT_FLOW_FIRST(p) (MVPP22_CLS_C2_PORT_FIRST(p)) + +#define MVPP22_CLS_C2_RFS_LOC(p, loc) (MVPP22_CLS_C2_PORT_FLOW_FIRST(p) + (loc)) + +/* Packet flow ID */ +enum mvpp2_prs_flow { + MVPP2_FL_START = 8, + MVPP2_FL_IP4_TCP_NF_UNTAG = MVPP2_FL_START, + MVPP2_FL_IP4_UDP_NF_UNTAG, + MVPP2_FL_IP4_TCP_NF_TAG, + MVPP2_FL_IP4_UDP_NF_TAG, + MVPP2_FL_IP6_TCP_NF_UNTAG, + MVPP2_FL_IP6_UDP_NF_UNTAG, + MVPP2_FL_IP6_TCP_NF_TAG, + MVPP2_FL_IP6_UDP_NF_TAG, + MVPP2_FL_IP4_TCP_FRAG_UNTAG, + MVPP2_FL_IP4_UDP_FRAG_UNTAG, + MVPP2_FL_IP4_TCP_FRAG_TAG, + MVPP2_FL_IP4_UDP_FRAG_TAG, + MVPP2_FL_IP6_TCP_FRAG_UNTAG, + MVPP2_FL_IP6_UDP_FRAG_UNTAG, + MVPP2_FL_IP6_TCP_FRAG_TAG, + MVPP2_FL_IP6_UDP_FRAG_TAG, + MVPP2_FL_IP4_UNTAG, /* non-TCP, non-UDP, same for below */ + MVPP2_FL_IP4_TAG, + MVPP2_FL_IP6_UNTAG, + MVPP2_FL_IP6_TAG, + MVPP2_FL_NON_IP_UNTAG, + MVPP2_FL_NON_IP_TAG, + MVPP2_FL_LAST, +}; + +/* LU Type defined for all engines, and specified in the flow table */ +#define MVPP2_CLS_LU_TYPE_MASK 0x3f + +enum mvpp2_cls_lu_type { + /* rule->loc is used as a lu-type for the entries 0 - 62. 
*/ + MVPP22_CLS_LU_TYPE_ALL = 63, +}; + +#define MVPP2_N_FLOWS (MVPP2_FL_LAST - MVPP2_FL_START) + +struct mvpp2_cls_flow { + /* The L2-L4 traffic flow type */ + int flow_type; + + /* The first id in the flow table for this flow */ + u16 flow_id; + + /* The supported HEK fields for this flow */ + u16 supported_hash_opts; + + /* The Header Parser result_info that matches this flow */ + struct mvpp2_prs_result_info prs_ri; +}; + +#define MVPP2_CLS_FLT_ENTRIES_PER_FLOW (MVPP2_MAX_PORTS + 1 + 16) +#define MVPP2_CLS_FLT_FIRST(id) (((id) - MVPP2_FL_START) * \ + MVPP2_CLS_FLT_ENTRIES_PER_FLOW) + +#define MVPP2_CLS_FLT_C2_RFS(port, id, rfs_n) (MVPP2_CLS_FLT_FIRST(id) + \ + ((port) * MVPP2_MAX_PORTS) + \ + (rfs_n)) + +#define MVPP2_CLS_FLT_C2_RSS_ENTRY(id) (MVPP2_CLS_FLT_C2_RFS(MVPP2_MAX_PORTS, id, 0)) +#define MVPP2_CLS_FLT_HASH_ENTRY(port, id) (MVPP2_CLS_FLT_C2_RSS_ENTRY(id) + 1 + (port)) +#define MVPP2_CLS_FLT_LAST(id) (MVPP2_CLS_FLT_FIRST(id) + \ + MVPP2_CLS_FLT_ENTRIES_PER_FLOW - 1) + +/* Iterate on each classifier flow id. Sets 'i' to be the index of the first + * entry in the cls_flows table for each different flow_id. + * This relies on entries having the same flow_id in the cls_flows table being + * contiguous. + */ +#define for_each_cls_flow_id(i) \ + for ((i) = 0; (i) < MVPP2_N_PRS_FLOWS; (i)++) \ + if ((i) > 0 && \ + cls_flows[(i)].flow_id == cls_flows[(i) - 1].flow_id) \ + continue; \ + else + +/* Iterate on each classifier flow that has a given flow_type. Sets 'i' to be + * the index of the first entry in the cls_flow table for each different flow_id + * that has the given flow_type. This allows to operate on all flows that + * matches a given ethtool flow type. + */ +#define for_each_cls_flow_id_with_type(i, type) \ + for_each_cls_flow_id((i)) \ + if (cls_flows[(i)].flow_type != (type)) \ + continue; \ + else + +#define for_each_cls_flow_id_containing_type(i, type) \ + for_each_cls_flow_id((i)) \ + if ((cls_flows[(i)].flow_type & (type)) != (type)) \ + continue; \ + else + +struct mvpp2_cls_flow_entry { + u32 index; + u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS]; +}; + +struct mvpp2_cls_lookup_entry { + u32 lkpid; + u32 way; + u32 data; +}; + +int mvpp22_port_rss_init(struct mvpp2_port *port); + +int mvpp22_port_rss_enable(struct mvpp2_port *port); +int mvpp22_port_rss_disable(struct mvpp2_port *port); + +int mvpp22_port_rss_ctx_create(struct mvpp2_port *port, u32 *rss_ctx); +int mvpp22_port_rss_ctx_delete(struct mvpp2_port *port, u32 rss_ctx); + +int mvpp22_port_rss_ctx_indir_set(struct mvpp2_port *port, u32 rss_ctx, + const u32 *indir); +int mvpp22_port_rss_ctx_indir_get(struct mvpp2_port *port, u32 rss_ctx, + u32 *indir); + +int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info); +int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info); + +void mvpp2_cls_init(struct mvpp2 *priv); + +void mvpp2_cls_port_config(struct mvpp2_port *port); + +void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port); + +int mvpp2_cls_flow_eng_get(struct mvpp2_cls_flow_entry *fe); + +u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe); + +const struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow); + +u32 mvpp2_cls_flow_hits(struct mvpp2 *priv, int index); + +void mvpp2_cls_flow_read(struct mvpp2 *priv, int index, + struct mvpp2_cls_flow_entry *fe); + +u32 mvpp2_cls_lookup_hits(struct mvpp2 *priv, int index); + +void mvpp2_cls_lookup_read(struct mvpp2 *priv, int lkpid, int way, + struct mvpp2_cls_lookup_entry *le); + +u32 mvpp2_cls_c2_hit_count(struct 
mvpp2 *priv, int c2_index); + +void mvpp2_cls_c2_read(struct mvpp2 *priv, int index, + struct mvpp2_cls_c2_entry *c2); + +int mvpp2_ethtool_cls_rule_get(struct mvpp2_port *port, + struct ethtool_rxnfc *rxnfc); + +int mvpp2_ethtool_cls_rule_ins(struct mvpp2_port *port, + struct ethtool_rxnfc *info); + +int mvpp2_ethtool_cls_rule_del(struct mvpp2_port *port, + struct ethtool_rxnfc *info); + +#endif diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c new file mode 100644 index 000000000..75e83ea2a --- /dev/null +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c @@ -0,0 +1,744 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for Marvell PPv2 network controller for Armada 375 SoC. + * + * Copyright (C) 2018 Marvell + */ + +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/debugfs.h> + +#include "mvpp2.h" +#include "mvpp2_prs.h" +#include "mvpp2_cls.h" + +struct mvpp2_dbgfs_prs_entry { + int tid; + struct mvpp2 *priv; +}; + +struct mvpp2_dbgfs_c2_entry { + int id; + struct mvpp2 *priv; +}; + +struct mvpp2_dbgfs_flow_entry { + int flow; + struct mvpp2 *priv; +}; + +struct mvpp2_dbgfs_flow_tbl_entry { + int id; + struct mvpp2 *priv; +}; + +struct mvpp2_dbgfs_port_flow_entry { + struct mvpp2_port *port; + struct mvpp2_dbgfs_flow_entry *dbg_fe; +}; + +struct mvpp2_dbgfs_entries { + /* Entries for Header Parser debug info */ + struct mvpp2_dbgfs_prs_entry prs_entries[MVPP2_PRS_TCAM_SRAM_SIZE]; + + /* Entries for Classifier C2 engine debug info */ + struct mvpp2_dbgfs_c2_entry c2_entries[MVPP22_CLS_C2_N_ENTRIES]; + + /* Entries for Classifier Flow Table debug info */ + struct mvpp2_dbgfs_flow_tbl_entry flt_entries[MVPP2_CLS_FLOWS_TBL_SIZE]; + + /* Entries for Classifier flows debug info */ + struct mvpp2_dbgfs_flow_entry flow_entries[MVPP2_N_PRS_FLOWS]; + + /* Entries for per-port flows debug info */ + struct mvpp2_dbgfs_port_flow_entry port_flow_entries[MVPP2_MAX_PORTS]; +}; + +static int mvpp2_dbgfs_flow_flt_hits_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_flow_tbl_entry *entry = s->private; + + u32 hits = mvpp2_cls_flow_hits(entry->priv, entry->id); + + seq_printf(s, "%u\n", hits); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_flt_hits); + +static int mvpp2_dbgfs_flow_dec_hits_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_flow_entry *entry = s->private; + + u32 hits = mvpp2_cls_lookup_hits(entry->priv, entry->flow); + + seq_printf(s, "%u\n", hits); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_dec_hits); + +static int mvpp2_dbgfs_flow_type_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_flow_entry *entry = s->private; + const struct mvpp2_cls_flow *f; + const char *flow_name; + + f = mvpp2_cls_flow_get(entry->flow); + if (!f) + return -EINVAL; + + switch (f->flow_type) { + case IPV4_FLOW: + flow_name = "ipv4"; + break; + case IPV6_FLOW: + flow_name = "ipv6"; + break; + case TCP_V4_FLOW: + flow_name = "tcp4"; + break; + case TCP_V6_FLOW: + flow_name = "tcp6"; + break; + case UDP_V4_FLOW: + flow_name = "udp4"; + break; + case UDP_V6_FLOW: + flow_name = "udp6"; + break; + default: + flow_name = "other"; + } + + seq_printf(s, "%s\n", flow_name); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_type); + +static int mvpp2_dbgfs_flow_id_show(struct seq_file *s, void *unused) +{ + const struct mvpp2_dbgfs_flow_entry *entry = s->private; + const struct mvpp2_cls_flow *f; + + f = mvpp2_cls_flow_get(entry->flow); + if (!f) + 
return -EINVAL; + + seq_printf(s, "%d\n", f->flow_id); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_id); + +static int mvpp2_dbgfs_port_flow_hash_opt_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_port_flow_entry *entry = s->private; + struct mvpp2_port *port = entry->port; + struct mvpp2_cls_flow_entry fe; + const struct mvpp2_cls_flow *f; + int flow_index; + u16 hash_opts; + + f = mvpp2_cls_flow_get(entry->dbg_fe->flow); + if (!f) + return -EINVAL; + + flow_index = MVPP2_CLS_FLT_HASH_ENTRY(entry->port->id, f->flow_id); + + mvpp2_cls_flow_read(port->priv, flow_index, &fe); + + hash_opts = mvpp2_flow_get_hek_fields(&fe); + + seq_printf(s, "0x%04x\n", hash_opts); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_flow_hash_opt); + +static int mvpp2_dbgfs_port_flow_engine_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_port_flow_entry *entry = s->private; + struct mvpp2_port *port = entry->port; + struct mvpp2_cls_flow_entry fe; + const struct mvpp2_cls_flow *f; + int flow_index, engine; + + f = mvpp2_cls_flow_get(entry->dbg_fe->flow); + if (!f) + return -EINVAL; + + flow_index = MVPP2_CLS_FLT_HASH_ENTRY(entry->port->id, f->flow_id); + + mvpp2_cls_flow_read(port->priv, flow_index, &fe); + + engine = mvpp2_cls_flow_eng_get(&fe); + + seq_printf(s, "%d\n", engine); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_flow_engine); + +static int mvpp2_dbgfs_flow_c2_hits_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_c2_entry *entry = s->private; + u32 hits; + + hits = mvpp2_cls_c2_hit_count(entry->priv, entry->id); + + seq_printf(s, "%u\n", hits); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_hits); + +static int mvpp2_dbgfs_flow_c2_rxq_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_c2_entry *entry = s->private; + struct mvpp2_cls_c2_entry c2; + u8 qh, ql; + + mvpp2_cls_c2_read(entry->priv, entry->id, &c2); + + qh = (c2.attr[0] >> MVPP22_CLS_C2_ATTR0_QHIGH_OFFS) & + MVPP22_CLS_C2_ATTR0_QHIGH_MASK; + + ql = (c2.attr[0] >> MVPP22_CLS_C2_ATTR0_QLOW_OFFS) & + MVPP22_CLS_C2_ATTR0_QLOW_MASK; + + seq_printf(s, "%d\n", (qh << 3 | ql)); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_rxq); + +static int mvpp2_dbgfs_flow_c2_enable_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_c2_entry *entry = s->private; + struct mvpp2_cls_c2_entry c2; + int enabled; + + mvpp2_cls_c2_read(entry->priv, entry->id, &c2); + + enabled = !!(c2.attr[2] & MVPP22_CLS_C2_ATTR2_RSS_EN); + + seq_printf(s, "%d\n", enabled); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_enable); + +static int mvpp2_dbgfs_port_vid_show(struct seq_file *s, void *unused) +{ + struct mvpp2_port *port = s->private; + unsigned char byte[2], enable[2]; + struct mvpp2 *priv = port->priv; + struct mvpp2_prs_entry pe; + unsigned long pmap; + u16 rvid; + int tid; + + for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id); + tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) { + mvpp2_prs_init_from_hw(priv, &pe, tid); + + pmap = mvpp2_prs_tcam_port_map_get(&pe); + + if (!priv->prs_shadow[tid].valid) + continue; + + if (!test_bit(port->id, &pmap)) + continue; + + mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]); + mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]); + + rvid = ((byte[0] & 0xf) << 8) + byte[1]; + + seq_printf(s, "%u\n", rvid); + } + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_vid); + +static int mvpp2_dbgfs_port_parser_show(struct seq_file *s, void *unused) +{ + struct 
mvpp2_port *port = s->private; + struct mvpp2 *priv = port->priv; + struct mvpp2_prs_entry pe; + unsigned long pmap; + int i; + + for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++) { + mvpp2_prs_init_from_hw(port->priv, &pe, i); + + pmap = mvpp2_prs_tcam_port_map_get(&pe); + if (priv->prs_shadow[i].valid && test_bit(port->id, &pmap)) + seq_printf(s, "%03d\n", i); + } + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_parser); + +static int mvpp2_dbgfs_filter_show(struct seq_file *s, void *unused) +{ + struct mvpp2_port *port = s->private; + struct mvpp2 *priv = port->priv; + struct mvpp2_prs_entry pe; + unsigned long pmap; + int index, tid; + + for (tid = MVPP2_PE_MAC_RANGE_START; + tid <= MVPP2_PE_MAC_RANGE_END; tid++) { + unsigned char da[ETH_ALEN], da_mask[ETH_ALEN]; + + if (!priv->prs_shadow[tid].valid || + priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC || + priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF) + continue; + + mvpp2_prs_init_from_hw(priv, &pe, tid); + + pmap = mvpp2_prs_tcam_port_map_get(&pe); + + /* We only want entries active on this port */ + if (!test_bit(port->id, &pmap)) + continue; + + /* Read mac addr from entry */ + for (index = 0; index < ETH_ALEN; index++) + mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index], + &da_mask[index]); + + seq_printf(s, "%pM\n", da); + } + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_filter); + +static int mvpp2_dbgfs_prs_lu_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_prs_entry *entry = s->private; + struct mvpp2 *priv = entry->priv; + + seq_printf(s, "%x\n", priv->prs_shadow[entry->tid].lu); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_lu); + +static int mvpp2_dbgfs_prs_pmap_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_prs_entry *entry = s->private; + struct mvpp2_prs_entry pe; + unsigned int pmap; + + mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid); + + pmap = mvpp2_prs_tcam_port_map_get(&pe); + pmap &= MVPP2_PRS_PORT_MASK; + + seq_printf(s, "%02x\n", pmap); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_pmap); + +static int mvpp2_dbgfs_prs_ai_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_prs_entry *entry = s->private; + struct mvpp2_prs_entry pe; + unsigned char ai, ai_mask; + + mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid); + + ai = pe.tcam[MVPP2_PRS_TCAM_AI_WORD] & MVPP2_PRS_AI_MASK; + ai_mask = (pe.tcam[MVPP2_PRS_TCAM_AI_WORD] >> 16) & MVPP2_PRS_AI_MASK; + + seq_printf(s, "%02x %02x\n", ai, ai_mask); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_ai); + +static int mvpp2_dbgfs_prs_hdata_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_prs_entry *entry = s->private; + struct mvpp2_prs_entry pe; + unsigned char data[8], mask[8]; + int i; + + mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid); + + for (i = 0; i < 8; i++) + mvpp2_prs_tcam_data_byte_get(&pe, i, &data[i], &mask[i]); + + seq_printf(s, "%*phN %*phN\n", 8, data, 8, mask); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_hdata); + +static int mvpp2_dbgfs_prs_sram_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_prs_entry *entry = s->private; + struct mvpp2_prs_entry pe; + + mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid); + + seq_printf(s, "%*phN\n", 14, pe.sram); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_sram); + +static int mvpp2_dbgfs_prs_hits_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_prs_entry *entry = s->private; + int val; + + val = mvpp2_prs_hits(entry->priv, entry->tid); + 
if (val < 0) + return val; + + seq_printf(s, "%d\n", val); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_hits); + +static int mvpp2_dbgfs_prs_valid_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_prs_entry *entry = s->private; + struct mvpp2 *priv = entry->priv; + int tid = entry->tid; + + seq_printf(s, "%d\n", priv->prs_shadow[tid].valid ? 1 : 0); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_valid); + +static int mvpp2_dbgfs_flow_port_init(struct dentry *parent, + struct mvpp2_port *port, + struct mvpp2_dbgfs_flow_entry *entry) +{ + struct mvpp2_dbgfs_port_flow_entry *port_entry; + struct dentry *port_dir; + + port_dir = debugfs_create_dir(port->dev->name, parent); + + port_entry = &port->priv->dbgfs_entries->port_flow_entries[port->id]; + + port_entry->port = port; + port_entry->dbg_fe = entry; + + debugfs_create_file("hash_opts", 0444, port_dir, port_entry, + &mvpp2_dbgfs_port_flow_hash_opt_fops); + + debugfs_create_file("engine", 0444, port_dir, port_entry, + &mvpp2_dbgfs_port_flow_engine_fops); + + return 0; +} + +static int mvpp2_dbgfs_flow_entry_init(struct dentry *parent, + struct mvpp2 *priv, int flow) +{ + struct mvpp2_dbgfs_flow_entry *entry; + struct dentry *flow_entry_dir; + char flow_entry_name[10]; + int i, ret; + + sprintf(flow_entry_name, "%02d", flow); + + flow_entry_dir = debugfs_create_dir(flow_entry_name, parent); + + entry = &priv->dbgfs_entries->flow_entries[flow]; + + entry->flow = flow; + entry->priv = priv; + + debugfs_create_file("dec_hits", 0444, flow_entry_dir, entry, + &mvpp2_dbgfs_flow_dec_hits_fops); + + debugfs_create_file("type", 0444, flow_entry_dir, entry, + &mvpp2_dbgfs_flow_type_fops); + + debugfs_create_file("id", 0444, flow_entry_dir, entry, + &mvpp2_dbgfs_flow_id_fops); + + /* Create entry for each port */ + for (i = 0; i < priv->port_count; i++) { + ret = mvpp2_dbgfs_flow_port_init(flow_entry_dir, + priv->port_list[i], entry); + if (ret) + return ret; + } + + return 0; +} + +static int mvpp2_dbgfs_flow_init(struct dentry *parent, struct mvpp2 *priv) +{ + struct dentry *flow_dir; + int i, ret; + + flow_dir = debugfs_create_dir("flows", parent); + + for (i = 0; i < MVPP2_N_PRS_FLOWS; i++) { + ret = mvpp2_dbgfs_flow_entry_init(flow_dir, priv, i); + if (ret) + return ret; + } + + return 0; +} + +static int mvpp2_dbgfs_prs_entry_init(struct dentry *parent, + struct mvpp2 *priv, int tid) +{ + struct mvpp2_dbgfs_prs_entry *entry; + struct dentry *prs_entry_dir; + char prs_entry_name[10]; + + if (tid >= MVPP2_PRS_TCAM_SRAM_SIZE) + return -EINVAL; + + sprintf(prs_entry_name, "%03d", tid); + + prs_entry_dir = debugfs_create_dir(prs_entry_name, parent); + + entry = &priv->dbgfs_entries->prs_entries[tid]; + + entry->tid = tid; + entry->priv = priv; + + /* Create each attr */ + debugfs_create_file("sram", 0444, prs_entry_dir, entry, + &mvpp2_dbgfs_prs_sram_fops); + + debugfs_create_file("valid", 0644, prs_entry_dir, entry, + &mvpp2_dbgfs_prs_valid_fops); + + debugfs_create_file("lookup_id", 0644, prs_entry_dir, entry, + &mvpp2_dbgfs_prs_lu_fops); + + debugfs_create_file("ai", 0644, prs_entry_dir, entry, + &mvpp2_dbgfs_prs_ai_fops); + + debugfs_create_file("header_data", 0644, prs_entry_dir, entry, + &mvpp2_dbgfs_prs_hdata_fops); + + debugfs_create_file("hits", 0444, prs_entry_dir, entry, + &mvpp2_dbgfs_prs_hits_fops); + + debugfs_create_file("pmap", 0444, prs_entry_dir, entry, + &mvpp2_dbgfs_prs_pmap_fops); + + return 0; +} + +static int mvpp2_dbgfs_prs_init(struct dentry *parent, struct mvpp2 *priv) +{ + struct dentry 
*prs_dir; + int i, ret; + + prs_dir = debugfs_create_dir("parser", parent); + + for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++) { + ret = mvpp2_dbgfs_prs_entry_init(prs_dir, priv, i); + if (ret) + return ret; + } + + return 0; +} + +static int mvpp2_dbgfs_c2_entry_init(struct dentry *parent, + struct mvpp2 *priv, int id) +{ + struct mvpp2_dbgfs_c2_entry *entry; + struct dentry *c2_entry_dir; + char c2_entry_name[10]; + + if (id >= MVPP22_CLS_C2_N_ENTRIES) + return -EINVAL; + + sprintf(c2_entry_name, "%03d", id); + + c2_entry_dir = debugfs_create_dir(c2_entry_name, parent); + if (!c2_entry_dir) + return -ENOMEM; + + entry = &priv->dbgfs_entries->c2_entries[id]; + + entry->id = id; + entry->priv = priv; + + debugfs_create_file("hits", 0444, c2_entry_dir, entry, + &mvpp2_dbgfs_flow_c2_hits_fops); + + debugfs_create_file("default_rxq", 0444, c2_entry_dir, entry, + &mvpp2_dbgfs_flow_c2_rxq_fops); + + debugfs_create_file("rss_enable", 0444, c2_entry_dir, entry, + &mvpp2_dbgfs_flow_c2_enable_fops); + + return 0; +} + +static int mvpp2_dbgfs_flow_tbl_entry_init(struct dentry *parent, + struct mvpp2 *priv, int id) +{ + struct mvpp2_dbgfs_flow_tbl_entry *entry; + struct dentry *flow_tbl_entry_dir; + char flow_tbl_entry_name[10]; + + if (id >= MVPP2_CLS_FLOWS_TBL_SIZE) + return -EINVAL; + + sprintf(flow_tbl_entry_name, "%03d", id); + + flow_tbl_entry_dir = debugfs_create_dir(flow_tbl_entry_name, parent); + if (!flow_tbl_entry_dir) + return -ENOMEM; + + entry = &priv->dbgfs_entries->flt_entries[id]; + + entry->id = id; + entry->priv = priv; + + debugfs_create_file("hits", 0444, flow_tbl_entry_dir, entry, + &mvpp2_dbgfs_flow_flt_hits_fops); + + return 0; +} + +static int mvpp2_dbgfs_cls_init(struct dentry *parent, struct mvpp2 *priv) +{ + struct dentry *cls_dir, *c2_dir, *flow_tbl_dir; + int i, ret; + + cls_dir = debugfs_create_dir("classifier", parent); + if (!cls_dir) + return -ENOMEM; + + c2_dir = debugfs_create_dir("c2", cls_dir); + if (!c2_dir) + return -ENOMEM; + + for (i = 0; i < MVPP22_CLS_C2_N_ENTRIES; i++) { + ret = mvpp2_dbgfs_c2_entry_init(c2_dir, priv, i); + if (ret) + return ret; + } + + flow_tbl_dir = debugfs_create_dir("flow_table", cls_dir); + if (!flow_tbl_dir) + return -ENOMEM; + + for (i = 0; i < MVPP2_CLS_FLOWS_TBL_SIZE; i++) { + ret = mvpp2_dbgfs_flow_tbl_entry_init(flow_tbl_dir, priv, i); + if (ret) + return ret; + } + + return 0; +} + +static int mvpp2_dbgfs_port_init(struct dentry *parent, + struct mvpp2_port *port) +{ + struct dentry *port_dir; + + port_dir = debugfs_create_dir(port->dev->name, parent); + + debugfs_create_file("parser_entries", 0444, port_dir, port, + &mvpp2_dbgfs_port_parser_fops); + + debugfs_create_file("mac_filter", 0444, port_dir, port, + &mvpp2_dbgfs_filter_fops); + + debugfs_create_file("vid_filter", 0444, port_dir, port, + &mvpp2_dbgfs_port_vid_fops); + + return 0; +} + +static struct dentry *mvpp2_root; + +void mvpp2_dbgfs_exit(void) +{ + debugfs_remove(mvpp2_root); +} + +void mvpp2_dbgfs_cleanup(struct mvpp2 *priv) +{ + debugfs_remove_recursive(priv->dbgfs_dir); + + kfree(priv->dbgfs_entries); +} + +void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name) +{ + struct dentry *mvpp2_dir; + int ret, i; + + if (!mvpp2_root) + mvpp2_root = debugfs_create_dir(MVPP2_DRIVER_NAME, NULL); + + mvpp2_dir = debugfs_create_dir(name, mvpp2_root); + + priv->dbgfs_dir = mvpp2_dir; + priv->dbgfs_entries = kzalloc(sizeof(*priv->dbgfs_entries), GFP_KERNEL); + if (!priv->dbgfs_entries) + goto err; + + ret = mvpp2_dbgfs_prs_init(mvpp2_dir, priv); + if (ret) + goto err; 
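+ /* The classifier, per-port and flow directories are created next; if any
+ * of them fails, the common error path tears the whole tree down again
+ * through mvpp2_dbgfs_cleanup().
+ */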
+ + ret = mvpp2_dbgfs_cls_init(mvpp2_dir, priv); + if (ret) + goto err; + + for (i = 0; i < priv->port_count; i++) { + ret = mvpp2_dbgfs_port_init(mvpp2_dir, priv->port_list[i]); + if (ret) + goto err; + } + + ret = mvpp2_dbgfs_flow_init(mvpp2_dir, priv); + if (ret) + goto err; + + return; +err: + mvpp2_dbgfs_cleanup(priv); +} diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c new file mode 100644 index 000000000..e0e6275b3 --- /dev/null +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -0,0 +1,7178 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for Marvell PPv2 network controller for Armada 375 SoC. + * + * Copyright (C) 2014 Marvell + * + * Marcin Wojtas <mw@semihalf.com> + */ + +#include <linux/acpi.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/platform_device.h> +#include <linux/skbuff.h> +#include <linux/inetdevice.h> +#include <linux/mbus.h> +#include <linux/module.h> +#include <linux/mfd/syscon.h> +#include <linux/interrupt.h> +#include <linux/cpumask.h> +#include <linux/of.h> +#include <linux/of_irq.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/phy.h> +#include <linux/phylink.h> +#include <linux/phy/phy.h> +#include <linux/ptp_classify.h> +#include <linux/clk.h> +#include <linux/hrtimer.h> +#include <linux/ktime.h> +#include <linux/regmap.h> +#include <uapi/linux/ppp_defs.h> +#include <net/ip.h> +#include <net/ipv6.h> +#include <net/tso.h> +#include <linux/bpf_trace.h> + +#include "mvpp2.h" +#include "mvpp2_prs.h" +#include "mvpp2_cls.h" + +enum mvpp2_bm_pool_log_num { + MVPP2_BM_SHORT, + MVPP2_BM_LONG, + MVPP2_BM_JUMBO, + MVPP2_BM_POOLS_NUM +}; + +static struct { + int pkt_size; + int buf_num; +} mvpp2_pools[MVPP2_BM_POOLS_NUM]; + +/* The prototype is added here to be used in start_dev when using ACPI. This + * will be removed once phylink is used for all modes (dt+ACPI). + */ +static void mvpp2_acpi_start(struct mvpp2_port *port); + +/* Queue modes */ +#define MVPP2_QDIST_SINGLE_MODE 0 +#define MVPP2_QDIST_MULTI_MODE 1 + +static int queue_mode = MVPP2_QDIST_MULTI_MODE; + +module_param(queue_mode, int, 0444); +MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)"); + +/* Utility/helper methods */ + +void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data) +{ + writel(data, priv->swth_base[0] + offset); +} + +u32 mvpp2_read(struct mvpp2 *priv, u32 offset) +{ + return readl(priv->swth_base[0] + offset); +} + +static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset) +{ + return readl_relaxed(priv->swth_base[0] + offset); +} + +static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu) +{ + return cpu % priv->nthreads; +} + +static struct page_pool * +mvpp2_create_page_pool(struct device *dev, int num, int len, + enum dma_data_direction dma_dir) +{ + struct page_pool_params pp_params = { + /* internal DMA mapping in page_pool */ + .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, + .pool_size = num, + .nid = NUMA_NO_NODE, + .dev = dev, + .dma_dir = dma_dir, + .offset = MVPP2_SKB_HEADROOM, + .max_len = len, + }; + + return page_pool_create(&pp_params); +} + +/* These accessors should be used to access: + * + * - per-thread registers, where each thread has its own copy of the + * register. 
+ * + * MVPP2_BM_VIRT_ALLOC_REG + * MVPP2_BM_ADDR_HIGH_ALLOC + * MVPP22_BM_ADDR_HIGH_RLS_REG + * MVPP2_BM_VIRT_RLS_REG + * MVPP2_ISR_RX_TX_CAUSE_REG + * MVPP2_ISR_RX_TX_MASK_REG + * MVPP2_TXQ_NUM_REG + * MVPP2_AGGR_TXQ_UPDATE_REG + * MVPP2_TXQ_RSVD_REQ_REG + * MVPP2_TXQ_RSVD_RSLT_REG + * MVPP2_TXQ_SENT_REG + * MVPP2_RXQ_NUM_REG + * + * - global registers that must be accessed through a specific thread + * window, because they are related to an access to a per-thread + * register + * + * MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG) + * MVPP2_BM_PHY_RLS_REG (related to MVPP2_BM_VIRT_RLS_REG) + * MVPP2_RXQ_THRESH_REG (related to MVPP2_RXQ_NUM_REG) + * MVPP2_RXQ_DESC_ADDR_REG (related to MVPP2_RXQ_NUM_REG) + * MVPP2_RXQ_DESC_SIZE_REG (related to MVPP2_RXQ_NUM_REG) + * MVPP2_RXQ_INDEX_REG (related to MVPP2_RXQ_NUM_REG) + * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG) + * MVPP2_TXQ_DESC_ADDR_REG (related to MVPP2_TXQ_NUM_REG) + * MVPP2_TXQ_DESC_SIZE_REG (related to MVPP2_TXQ_NUM_REG) + * MVPP2_TXQ_INDEX_REG (related to MVPP2_TXQ_NUM_REG) + * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG) + * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG) + * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG) + */ +static void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread, + u32 offset, u32 data) +{ + writel(data, priv->swth_base[thread] + offset); +} + +static u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread, + u32 offset) +{ + return readl(priv->swth_base[thread] + offset); +} + +static void mvpp2_thread_write_relaxed(struct mvpp2 *priv, unsigned int thread, + u32 offset, u32 data) +{ + writel_relaxed(data, priv->swth_base[thread] + offset); +} + +static u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread, + u32 offset) +{ + return readl_relaxed(priv->swth_base[thread] + offset); +} + +static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port, + struct mvpp2_tx_desc *tx_desc) +{ + if (port->priv->hw_version == MVPP21) + return le32_to_cpu(tx_desc->pp21.buf_dma_addr); + else + return le64_to_cpu(tx_desc->pp22.buf_dma_addr_ptp) & + MVPP2_DESC_DMA_MASK; +} + +static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port, + struct mvpp2_tx_desc *tx_desc, + dma_addr_t dma_addr) +{ + dma_addr_t addr, offset; + + addr = dma_addr & ~MVPP2_TX_DESC_ALIGN; + offset = dma_addr & MVPP2_TX_DESC_ALIGN; + + if (port->priv->hw_version == MVPP21) { + tx_desc->pp21.buf_dma_addr = cpu_to_le32(addr); + tx_desc->pp21.packet_offset = offset; + } else { + __le64 val = cpu_to_le64(addr); + + tx_desc->pp22.buf_dma_addr_ptp &= ~cpu_to_le64(MVPP2_DESC_DMA_MASK); + tx_desc->pp22.buf_dma_addr_ptp |= val; + tx_desc->pp22.packet_offset = offset; + } +} + +static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port, + struct mvpp2_tx_desc *tx_desc) +{ + if (port->priv->hw_version == MVPP21) + return le16_to_cpu(tx_desc->pp21.data_size); + else + return le16_to_cpu(tx_desc->pp22.data_size); +} + +static void mvpp2_txdesc_size_set(struct mvpp2_port *port, + struct mvpp2_tx_desc *tx_desc, + size_t size) +{ + if (port->priv->hw_version == MVPP21) + tx_desc->pp21.data_size = cpu_to_le16(size); + else + tx_desc->pp22.data_size = cpu_to_le16(size); +} + +static void mvpp2_txdesc_txq_set(struct mvpp2_port *port, + struct mvpp2_tx_desc *tx_desc, + unsigned int txq) +{ + if (port->priv->hw_version == MVPP21) + tx_desc->pp21.phys_txq = txq; + else + tx_desc->pp22.phys_txq = txq; +} + +static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port, + struct 
mvpp2_tx_desc *tx_desc, + unsigned int command) +{ + if (port->priv->hw_version == MVPP21) + tx_desc->pp21.command = cpu_to_le32(command); + else + tx_desc->pp22.command = cpu_to_le32(command); +} + +static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port, + struct mvpp2_tx_desc *tx_desc) +{ + if (port->priv->hw_version == MVPP21) + return tx_desc->pp21.packet_offset; + else + return tx_desc->pp22.packet_offset; +} + +static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port, + struct mvpp2_rx_desc *rx_desc) +{ + if (port->priv->hw_version == MVPP21) + return le32_to_cpu(rx_desc->pp21.buf_dma_addr); + else + return le64_to_cpu(rx_desc->pp22.buf_dma_addr_key_hash) & + MVPP2_DESC_DMA_MASK; +} + +static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port, + struct mvpp2_rx_desc *rx_desc) +{ + if (port->priv->hw_version == MVPP21) + return le32_to_cpu(rx_desc->pp21.buf_cookie); + else + return le64_to_cpu(rx_desc->pp22.buf_cookie_misc) & + MVPP2_DESC_DMA_MASK; +} + +static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port, + struct mvpp2_rx_desc *rx_desc) +{ + if (port->priv->hw_version == MVPP21) + return le16_to_cpu(rx_desc->pp21.data_size); + else + return le16_to_cpu(rx_desc->pp22.data_size); +} + +static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port, + struct mvpp2_rx_desc *rx_desc) +{ + if (port->priv->hw_version == MVPP21) + return le32_to_cpu(rx_desc->pp21.status); + else + return le32_to_cpu(rx_desc->pp22.status); +} + +static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu) +{ + txq_pcpu->txq_get_index++; + if (txq_pcpu->txq_get_index == txq_pcpu->size) + txq_pcpu->txq_get_index = 0; +} + +static void mvpp2_txq_inc_put(struct mvpp2_port *port, + struct mvpp2_txq_pcpu *txq_pcpu, + void *data, + struct mvpp2_tx_desc *tx_desc, + enum mvpp2_tx_buf_type buf_type) +{ + struct mvpp2_txq_pcpu_buf *tx_buf = + txq_pcpu->buffs + txq_pcpu->txq_put_index; + tx_buf->type = buf_type; + if (buf_type == MVPP2_TYPE_SKB) + tx_buf->skb = data; + else + tx_buf->xdpf = data; + tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc); + tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) + + mvpp2_txdesc_offset_get(port, tx_desc); + txq_pcpu->txq_put_index++; + if (txq_pcpu->txq_put_index == txq_pcpu->size) + txq_pcpu->txq_put_index = 0; +} + +/* Get number of maximum RXQ */ +static int mvpp2_get_nrxqs(struct mvpp2 *priv) +{ + unsigned int nrxqs; + + if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE) + return 1; + + /* According to the PPv2.2 datasheet and our experiments on + * PPv2.1, RX queues have an allocation granularity of 4 (when + * more than a single one on PPv2.2). + * Round up to nearest multiple of 4. 
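+ * (For example, 6 possible CPUs gives (6 + 3) & ~0x3 = 8 RXQs, which is
+ * then capped at MVPP2_PORT_MAX_RXQ below.)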
+ */ + nrxqs = (num_possible_cpus() + 3) & ~0x3; + if (nrxqs > MVPP2_PORT_MAX_RXQ) + nrxqs = MVPP2_PORT_MAX_RXQ; + + return nrxqs; +} + +/* Get number of physical egress port */ +static inline int mvpp2_egress_port(struct mvpp2_port *port) +{ + return MVPP2_MAX_TCONT + port->id; +} + +/* Get number of physical TXQ */ +static inline int mvpp2_txq_phys(int port, int txq) +{ + return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq; +} + +/* Returns a struct page if page_pool is set, otherwise a buffer */ +static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool, + struct page_pool *page_pool) +{ + if (page_pool) + return page_pool_dev_alloc_pages(page_pool); + + if (likely(pool->frag_size <= PAGE_SIZE)) + return netdev_alloc_frag(pool->frag_size); + + return kmalloc(pool->frag_size, GFP_ATOMIC); +} + +static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, + struct page_pool *page_pool, void *data) +{ + if (page_pool) + page_pool_put_full_page(page_pool, virt_to_head_page(data), false); + else if (likely(pool->frag_size <= PAGE_SIZE)) + skb_free_frag(data); + else + kfree(data); +} + +/* Buffer Manager configuration routines */ + +/* Create pool */ +static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv, + struct mvpp2_bm_pool *bm_pool, int size) +{ + u32 val; + + /* Number of buffer pointers must be a multiple of 16, as per + * hardware constraints + */ + if (!IS_ALIGNED(size, 16)) + return -EINVAL; + + /* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16 + * bytes per buffer pointer + */ + if (priv->hw_version == MVPP21) + bm_pool->size_bytes = 2 * sizeof(u32) * size; + else + bm_pool->size_bytes = 2 * sizeof(u64) * size; + + bm_pool->virt_addr = dma_alloc_coherent(dev, bm_pool->size_bytes, + &bm_pool->dma_addr, + GFP_KERNEL); + if (!bm_pool->virt_addr) + return -ENOMEM; + + if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr, + MVPP2_BM_POOL_PTR_ALIGN)) { + dma_free_coherent(dev, bm_pool->size_bytes, + bm_pool->virt_addr, bm_pool->dma_addr); + dev_err(dev, "BM pool %d is not %d bytes aligned\n", + bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN); + return -ENOMEM; + } + + mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id), + lower_32_bits(bm_pool->dma_addr)); + mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size); + + val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id)); + val |= MVPP2_BM_START_MASK; + mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val); + + bm_pool->size = size; + bm_pool->pkt_size = 0; + bm_pool->buf_num = 0; + + return 0; +} + +/* Set pool buffer size */ +static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv, + struct mvpp2_bm_pool *bm_pool, + int buf_size) +{ + u32 val; + + bm_pool->buf_size = buf_size; + + val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET); + mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val); +} + +static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv, + struct mvpp2_bm_pool *bm_pool, + dma_addr_t *dma_addr, + phys_addr_t *phys_addr) +{ + unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu()); + + *dma_addr = mvpp2_thread_read(priv, thread, + MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); + *phys_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG); + + if (priv->hw_version == MVPP22) { + u32 val; + u32 dma_addr_highbits, phys_addr_highbits; + + val = mvpp2_thread_read(priv, thread, MVPP22_BM_ADDR_HIGH_ALLOC); + dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK); + phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >> + 
MVPP22_BM_ADDR_HIGH_VIRT_SHIFT; + + if (sizeof(dma_addr_t) == 8) + *dma_addr |= (u64)dma_addr_highbits << 32; + + if (sizeof(phys_addr_t) == 8) + *phys_addr |= (u64)phys_addr_highbits << 32; + } + + put_cpu(); +} + +/* Free all buffers from the pool */ +static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv, + struct mvpp2_bm_pool *bm_pool, int buf_num) +{ + struct page_pool *pp = NULL; + int i; + + if (buf_num > bm_pool->buf_num) { + WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n", + bm_pool->id, buf_num); + buf_num = bm_pool->buf_num; + } + + if (priv->percpu_pools) + pp = priv->page_pool[bm_pool->id]; + + for (i = 0; i < buf_num; i++) { + dma_addr_t buf_dma_addr; + phys_addr_t buf_phys_addr; + void *data; + + mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool, + &buf_dma_addr, &buf_phys_addr); + + if (!pp) + dma_unmap_single(dev, buf_dma_addr, + bm_pool->buf_size, DMA_FROM_DEVICE); + + data = (void *)phys_to_virt(buf_phys_addr); + if (!data) + break; + + mvpp2_frag_free(bm_pool, pp, data); + } + + /* Update BM driver with number of buffers removed from pool */ + bm_pool->buf_num -= i; +} + +/* Check number of buffers in BM pool */ +static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool) +{ + int buf_num = 0; + + buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) & + MVPP22_BM_POOL_PTRS_NUM_MASK; + buf_num += mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) & + MVPP2_BM_BPPI_PTR_NUM_MASK; + + /* HW has one buffer ready which is not reflected in the counters */ + if (buf_num) + buf_num += 1; + + return buf_num; +} + +/* Cleanup pool */ +static int mvpp2_bm_pool_destroy(struct device *dev, struct mvpp2 *priv, + struct mvpp2_bm_pool *bm_pool) +{ + int buf_num; + u32 val; + + buf_num = mvpp2_check_hw_buf_num(priv, bm_pool); + mvpp2_bm_bufs_free(dev, priv, bm_pool, buf_num); + + /* Check buffer counters after free */ + buf_num = mvpp2_check_hw_buf_num(priv, bm_pool); + if (buf_num) { + WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n", + bm_pool->id, bm_pool->buf_num); + return 0; + } + + val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id)); + val |= MVPP2_BM_STOP_MASK; + mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val); + + if (priv->percpu_pools) { + page_pool_destroy(priv->page_pool[bm_pool->id]); + priv->page_pool[bm_pool->id] = NULL; + } + + dma_free_coherent(dev, bm_pool->size_bytes, + bm_pool->virt_addr, + bm_pool->dma_addr); + return 0; +} + +static int mvpp2_bm_pools_init(struct device *dev, struct mvpp2 *priv) +{ + int i, err, size, poolnum = MVPP2_BM_POOLS_NUM; + struct mvpp2_bm_pool *bm_pool; + + if (priv->percpu_pools) + poolnum = mvpp2_get_nrxqs(priv) * 2; + + /* Create all pools with maximum size */ + size = MVPP2_BM_POOL_SIZE_MAX; + for (i = 0; i < poolnum; i++) { + bm_pool = &priv->bm_pools[i]; + bm_pool->id = i; + err = mvpp2_bm_pool_create(dev, priv, bm_pool, size); + if (err) + goto err_unroll_pools; + mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0); + } + return 0; + +err_unroll_pools: + dev_err(dev, "failed to create BM pool %d, size %d\n", i, size); + for (i = i - 1; i >= 0; i--) + mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]); + return err; +} + +static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv) +{ + enum dma_data_direction dma_dir = DMA_FROM_DEVICE; + int i, err, poolnum = MVPP2_BM_POOLS_NUM; + struct mvpp2_port *port; + + if (priv->percpu_pools) { + for (i = 0; i < priv->port_count; i++) { + port = priv->port_list[i]; + if (port->xdp_prog) { 
+ dma_dir = DMA_BIDIRECTIONAL; + break; + } + } + + poolnum = mvpp2_get_nrxqs(priv) * 2; + for (i = 0; i < poolnum; i++) { + /* the pool in use */ + int pn = i / (poolnum / 2); + + priv->page_pool[i] = + mvpp2_create_page_pool(dev, + mvpp2_pools[pn].buf_num, + mvpp2_pools[pn].pkt_size, + dma_dir); + if (IS_ERR(priv->page_pool[i])) { + int j; + + for (j = 0; j < i; j++) { + page_pool_destroy(priv->page_pool[j]); + priv->page_pool[j] = NULL; + } + return PTR_ERR(priv->page_pool[i]); + } + } + } + + dev_info(dev, "using %d %s buffers\n", poolnum, + priv->percpu_pools ? "per-cpu" : "shared"); + + for (i = 0; i < poolnum; i++) { + /* Mask BM all interrupts */ + mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0); + /* Clear BM cause register */ + mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0); + } + + /* Allocate and initialize BM pools */ + priv->bm_pools = devm_kcalloc(dev, poolnum, + sizeof(*priv->bm_pools), GFP_KERNEL); + if (!priv->bm_pools) + return -ENOMEM; + + err = mvpp2_bm_pools_init(dev, priv); + if (err < 0) + return err; + return 0; +} + +static void mvpp2_setup_bm_pool(void) +{ + /* Short pool */ + mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM; + mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE; + + /* Long pool */ + mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM; + mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE; + + /* Jumbo pool */ + mvpp2_pools[MVPP2_BM_JUMBO].buf_num = MVPP2_BM_JUMBO_BUF_NUM; + mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE; +} + +/* Attach long pool to rxq */ +static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port, + int lrxq, int long_pool) +{ + u32 val, mask; + int prxq; + + /* Get queue physical ID */ + prxq = port->rxqs[lrxq]->id; + + if (port->priv->hw_version == MVPP21) + mask = MVPP21_RXQ_POOL_LONG_MASK; + else + mask = MVPP22_RXQ_POOL_LONG_MASK; + + val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); + val &= ~mask; + val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask; + mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); +} + +/* Attach short pool to rxq */ +static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port, + int lrxq, int short_pool) +{ + u32 val, mask; + int prxq; + + /* Get queue physical ID */ + prxq = port->rxqs[lrxq]->id; + + if (port->priv->hw_version == MVPP21) + mask = MVPP21_RXQ_POOL_SHORT_MASK; + else + mask = MVPP22_RXQ_POOL_SHORT_MASK; + + val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); + val &= ~mask; + val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask; + mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); +} + +static void *mvpp2_buf_alloc(struct mvpp2_port *port, + struct mvpp2_bm_pool *bm_pool, + struct page_pool *page_pool, + dma_addr_t *buf_dma_addr, + phys_addr_t *buf_phys_addr, + gfp_t gfp_mask) +{ + dma_addr_t dma_addr; + struct page *page; + void *data; + + data = mvpp2_frag_alloc(bm_pool, page_pool); + if (!data) + return NULL; + + if (page_pool) { + page = (struct page *)data; + dma_addr = page_pool_get_dma_addr(page); + data = page_to_virt(page); + } else { + dma_addr = dma_map_single(port->dev->dev.parent, data, + MVPP2_RX_BUF_SIZE(bm_pool->pkt_size), + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) { + mvpp2_frag_free(bm_pool, NULL, data); + return NULL; + } + } + *buf_dma_addr = dma_addr; + *buf_phys_addr = virt_to_phys(data); + + return data; +} + +/* Release buffer to BM */ +static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, + dma_addr_t 
buf_dma_addr, + phys_addr_t buf_phys_addr) +{ + unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); + unsigned long flags = 0; + + if (test_bit(thread, &port->priv->lock_map)) + spin_lock_irqsave(&port->bm_lock[thread], flags); + + if (port->priv->hw_version == MVPP22) { + u32 val = 0; + + if (sizeof(dma_addr_t) == 8) + val |= upper_32_bits(buf_dma_addr) & + MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK; + + if (sizeof(phys_addr_t) == 8) + val |= (upper_32_bits(buf_phys_addr) + << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) & + MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK; + + mvpp2_thread_write_relaxed(port->priv, thread, + MVPP22_BM_ADDR_HIGH_RLS_REG, val); + } + + /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply + * returned in the "cookie" field of the RX + * descriptor. Instead of storing the virtual address, we + * store the physical address + */ + mvpp2_thread_write_relaxed(port->priv, thread, + MVPP2_BM_VIRT_RLS_REG, buf_phys_addr); + mvpp2_thread_write_relaxed(port->priv, thread, + MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr); + + if (test_bit(thread, &port->priv->lock_map)) + spin_unlock_irqrestore(&port->bm_lock[thread], flags); + + put_cpu(); +} + +/* Allocate buffers for the pool */ +static int mvpp2_bm_bufs_add(struct mvpp2_port *port, + struct mvpp2_bm_pool *bm_pool, int buf_num) +{ + int i, buf_size, total_size; + dma_addr_t dma_addr; + phys_addr_t phys_addr; + struct page_pool *pp = NULL; + void *buf; + + if (port->priv->percpu_pools && + bm_pool->pkt_size > MVPP2_BM_LONG_PKT_SIZE) { + netdev_err(port->dev, + "attempted to use jumbo frames with per-cpu pools"); + return 0; + } + + buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size); + total_size = MVPP2_RX_TOTAL_SIZE(buf_size); + + if (buf_num < 0 || + (buf_num + bm_pool->buf_num > bm_pool->size)) { + netdev_err(port->dev, + "cannot allocate %d buffers for pool %d\n", + buf_num, bm_pool->id); + return 0; + } + + if (port->priv->percpu_pools) + pp = port->priv->page_pool[bm_pool->id]; + for (i = 0; i < buf_num; i++) { + buf = mvpp2_buf_alloc(port, bm_pool, pp, &dma_addr, + &phys_addr, GFP_KERNEL); + if (!buf) + break; + + mvpp2_bm_pool_put(port, bm_pool->id, dma_addr, + phys_addr); + } + + /* Update BM driver with number of buffers added to pool */ + bm_pool->buf_num += i; + + netdev_dbg(port->dev, + "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n", + bm_pool->id, bm_pool->pkt_size, buf_size, total_size); + + netdev_dbg(port->dev, + "pool %d: %d of %d buffers added\n", + bm_pool->id, i, buf_num); + return i; +} + +/* Notify the driver that BM pool is being used as specific type and return the + * pool pointer on success + */ +static struct mvpp2_bm_pool * +mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size) +{ + struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool]; + int num; + + if ((port->priv->percpu_pools && pool > mvpp2_get_nrxqs(port->priv) * 2) || + (!port->priv->percpu_pools && pool >= MVPP2_BM_POOLS_NUM)) { + netdev_err(port->dev, "Invalid pool %d\n", pool); + return NULL; + } + + /* Allocate buffers in case BM pool is used as long pool, but packet + * size doesn't match MTU or BM pool hasn't being used yet + */ + if (new_pool->pkt_size == 0) { + int pkts_num; + + /* Set default buffer number or free all the buffers in case + * the pool is not empty + */ + pkts_num = new_pool->buf_num; + if (pkts_num == 0) { + if (port->priv->percpu_pools) { + if (pool < port->nrxqs) + pkts_num = mvpp2_pools[MVPP2_BM_SHORT].buf_num; + else + pkts_num = mvpp2_pools[MVPP2_BM_LONG].buf_num; + } else { + 
pkts_num = mvpp2_pools[pool].buf_num; + } + } else { + mvpp2_bm_bufs_free(port->dev->dev.parent, + port->priv, new_pool, pkts_num); + } + + new_pool->pkt_size = pkt_size; + new_pool->frag_size = + SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) + + MVPP2_SKB_SHINFO_SIZE; + + /* Allocate buffers for this pool */ + num = mvpp2_bm_bufs_add(port, new_pool, pkts_num); + if (num != pkts_num) { + WARN(1, "pool %d: %d of %d allocated\n", + new_pool->id, num, pkts_num); + return NULL; + } + } + + mvpp2_bm_pool_bufsize_set(port->priv, new_pool, + MVPP2_RX_BUF_SIZE(new_pool->pkt_size)); + + return new_pool; +} + +static struct mvpp2_bm_pool * +mvpp2_bm_pool_use_percpu(struct mvpp2_port *port, int type, + unsigned int pool, int pkt_size) +{ + struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool]; + int num; + + if (pool > port->nrxqs * 2) { + netdev_err(port->dev, "Invalid pool %d\n", pool); + return NULL; + } + + /* Allocate buffers in case BM pool is used as long pool, but packet + * size doesn't match MTU or BM pool hasn't being used yet + */ + if (new_pool->pkt_size == 0) { + int pkts_num; + + /* Set default buffer number or free all the buffers in case + * the pool is not empty + */ + pkts_num = new_pool->buf_num; + if (pkts_num == 0) + pkts_num = mvpp2_pools[type].buf_num; + else + mvpp2_bm_bufs_free(port->dev->dev.parent, + port->priv, new_pool, pkts_num); + + new_pool->pkt_size = pkt_size; + new_pool->frag_size = + SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) + + MVPP2_SKB_SHINFO_SIZE; + + /* Allocate buffers for this pool */ + num = mvpp2_bm_bufs_add(port, new_pool, pkts_num); + if (num != pkts_num) { + WARN(1, "pool %d: %d of %d allocated\n", + new_pool->id, num, pkts_num); + return NULL; + } + } + + mvpp2_bm_pool_bufsize_set(port->priv, new_pool, + MVPP2_RX_BUF_SIZE(new_pool->pkt_size)); + + return new_pool; +} + +/* Initialize pools for swf, shared buffers variant */ +static int mvpp2_swf_bm_pool_init_shared(struct mvpp2_port *port) +{ + enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool; + int rxq; + + /* If port pkt_size is higher than 1518B: + * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool + * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool + */ + if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) { + long_log_pool = MVPP2_BM_JUMBO; + short_log_pool = MVPP2_BM_LONG; + } else { + long_log_pool = MVPP2_BM_LONG; + short_log_pool = MVPP2_BM_SHORT; + } + + if (!port->pool_long) { + port->pool_long = + mvpp2_bm_pool_use(port, long_log_pool, + mvpp2_pools[long_log_pool].pkt_size); + if (!port->pool_long) + return -ENOMEM; + + port->pool_long->port_map |= BIT(port->id); + + for (rxq = 0; rxq < port->nrxqs; rxq++) + mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id); + } + + if (!port->pool_short) { + port->pool_short = + mvpp2_bm_pool_use(port, short_log_pool, + mvpp2_pools[short_log_pool].pkt_size); + if (!port->pool_short) + return -ENOMEM; + + port->pool_short->port_map |= BIT(port->id); + + for (rxq = 0; rxq < port->nrxqs; rxq++) + mvpp2_rxq_short_pool_set(port, rxq, + port->pool_short->id); + } + + return 0; +} + +/* Initialize pools for swf, percpu buffers variant */ +static int mvpp2_swf_bm_pool_init_percpu(struct mvpp2_port *port) +{ + struct mvpp2_bm_pool *bm_pool; + int i; + + for (i = 0; i < port->nrxqs; i++) { + bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_SHORT, i, + mvpp2_pools[MVPP2_BM_SHORT].pkt_size); + if (!bm_pool) + return -ENOMEM; + + bm_pool->port_map |= BIT(port->id); + mvpp2_rxq_short_pool_set(port, i, bm_pool->id); + } + + for 
(i = 0; i < port->nrxqs; i++) { + bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_LONG, i + port->nrxqs, + mvpp2_pools[MVPP2_BM_LONG].pkt_size); + if (!bm_pool) + return -ENOMEM; + + bm_pool->port_map |= BIT(port->id); + mvpp2_rxq_long_pool_set(port, i, bm_pool->id); + } + + port->pool_long = NULL; + port->pool_short = NULL; + + return 0; +} + +static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) +{ + if (port->priv->percpu_pools) + return mvpp2_swf_bm_pool_init_percpu(port); + else + return mvpp2_swf_bm_pool_init_shared(port); +} + +static void mvpp2_set_hw_csum(struct mvpp2_port *port, + enum mvpp2_bm_pool_log_num new_long_pool) +{ + const netdev_features_t csums = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + + /* Update L4 checksum when jumbo enable/disable on port. + * Only port 0 supports hardware checksum offload due to + * the Tx FIFO size limitation. + * Also, don't set NETIF_F_HW_CSUM because L3_offset in TX descriptor + * has 7 bits, so the maximum L3 offset is 128. + */ + if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) { + port->dev->features &= ~csums; + port->dev->hw_features &= ~csums; + } else { + port->dev->features |= csums; + port->dev->hw_features |= csums; + } +} + +static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu) +{ + struct mvpp2_port *port = netdev_priv(dev); + enum mvpp2_bm_pool_log_num new_long_pool; + int pkt_size = MVPP2_RX_PKT_SIZE(mtu); + + if (port->priv->percpu_pools) + goto out_set; + + /* If port MTU is higher than 1518B: + * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool + * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool + */ + if (pkt_size > MVPP2_BM_LONG_PKT_SIZE) + new_long_pool = MVPP2_BM_JUMBO; + else + new_long_pool = MVPP2_BM_LONG; + + if (new_long_pool != port->pool_long->id) { + /* Remove port from old short & long pool */ + port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id, + port->pool_long->pkt_size); + port->pool_long->port_map &= ~BIT(port->id); + port->pool_long = NULL; + + port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id, + port->pool_short->pkt_size); + port->pool_short->port_map &= ~BIT(port->id); + port->pool_short = NULL; + + port->pkt_size = pkt_size; + + /* Add port to new short & long pool */ + mvpp2_swf_bm_pool_init(port); + + mvpp2_set_hw_csum(port, new_long_pool); + } + +out_set: + dev->mtu = mtu; + dev->wanted_features = dev->features; + + netdev_update_features(dev); + return 0; +} + +static inline void mvpp2_interrupts_enable(struct mvpp2_port *port) +{ + int i, sw_thread_mask = 0; + + for (i = 0; i < port->nqvecs; i++) + sw_thread_mask |= port->qvecs[i].sw_thread_mask; + + mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), + MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask)); +} + +static inline void mvpp2_interrupts_disable(struct mvpp2_port *port) +{ + int i, sw_thread_mask = 0; + + for (i = 0; i < port->nqvecs; i++) + sw_thread_mask |= port->qvecs[i].sw_thread_mask; + + mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), + MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask)); +} + +static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec) +{ + struct mvpp2_port *port = qvec->port; + + mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), + MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask)); +} + +static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec) +{ + struct mvpp2_port *port = qvec->port; + + mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), + 
MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask)); +} + +/* Mask the current thread's Rx/Tx interrupts + * Called by on_each_cpu(), guaranteed to run with migration disabled, + * using smp_processor_id() is OK. + */ +static void mvpp2_interrupts_mask(void *arg) +{ + struct mvpp2_port *port = arg; + + /* If the thread isn't used, don't do anything */ + if (smp_processor_id() > port->priv->nthreads) + return; + + mvpp2_thread_write(port->priv, + mvpp2_cpu_to_thread(port->priv, smp_processor_id()), + MVPP2_ISR_RX_TX_MASK_REG(port->id), 0); +} + +/* Unmask the current thread's Rx/Tx interrupts. + * Called by on_each_cpu(), guaranteed to run with migration disabled, + * using smp_processor_id() is OK. + */ +static void mvpp2_interrupts_unmask(void *arg) +{ + struct mvpp2_port *port = arg; + u32 val; + + /* If the thread isn't used, don't do anything */ + if (smp_processor_id() >= port->priv->nthreads) + return; + + val = MVPP2_CAUSE_MISC_SUM_MASK | + MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version); + if (port->has_tx_irqs) + val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; + + mvpp2_thread_write(port->priv, + mvpp2_cpu_to_thread(port->priv, smp_processor_id()), + MVPP2_ISR_RX_TX_MASK_REG(port->id), val); +} + +static void +mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask) +{ + u32 val; + int i; + + if (port->priv->hw_version != MVPP22) + return; + + if (mask) + val = 0; + else + val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(MVPP22); + + for (i = 0; i < port->nqvecs; i++) { + struct mvpp2_queue_vector *v = port->qvecs + i; + + if (v->type != MVPP2_QUEUE_VECTOR_SHARED) + continue; + + mvpp2_thread_write(port->priv, v->sw_thread_id, + MVPP2_ISR_RX_TX_MASK_REG(port->id), val); + } +} + +/* Only GOP port 0 has an XLG MAC */ +static bool mvpp2_port_supports_xlg(struct mvpp2_port *port) +{ + return port->gop_id == 0; +} + +static bool mvpp2_port_supports_rgmii(struct mvpp2_port *port) +{ + return !(port->priv->hw_version == MVPP22 && port->gop_id == 0); +} + +/* Port configuration routines */ +static bool mvpp2_is_xlg(phy_interface_t interface) +{ + return interface == PHY_INTERFACE_MODE_10GBASER || + interface == PHY_INTERFACE_MODE_XAUI; +} + +static void mvpp2_modify(void __iomem *ptr, u32 mask, u32 set) +{ + u32 old, val; + + old = val = readl(ptr); + val &= ~mask; + val |= set; + if (old != val) + writel(val, ptr); +} + +static void mvpp22_gop_init_rgmii(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + u32 val; + + regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val); + val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT; + regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val); + + regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val); + if (port->gop_id == 2) + val |= GENCONF_CTRL0_PORT0_RGMII; + else if (port->gop_id == 3) + val |= GENCONF_CTRL0_PORT1_RGMII_MII; + regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val); +} + +static void mvpp22_gop_init_sgmii(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + u32 val; + + regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val); + val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT | + GENCONF_PORT_CTRL0_RX_DATA_SAMPLE; + regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val); + + if (port->gop_id > 1) { + regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val); + if (port->gop_id == 2) + val &= ~GENCONF_CTRL0_PORT0_RGMII; + else if (port->gop_id == 3) + val &= ~GENCONF_CTRL0_PORT1_RGMII_MII; + regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val); + } +} + +static void mvpp22_gop_init_10gkr(struct 
mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id); + void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id); + u32 val; + + val = readl(xpcs + MVPP22_XPCS_CFG0); + val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) | + MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3)); + val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2); + writel(val, xpcs + MVPP22_XPCS_CFG0); + + val = readl(mpcs + MVPP22_MPCS_CTRL); + val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN; + writel(val, mpcs + MVPP22_MPCS_CTRL); + + val = readl(mpcs + MVPP22_MPCS_CLK_RESET); + val &= ~MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7); + val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1); + writel(val, mpcs + MVPP22_MPCS_CLK_RESET); +} + +static int mvpp22_gop_init(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + u32 val; + + if (!priv->sysctrl_base) + return 0; + + switch (port->phy_interface) { + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + if (!mvpp2_port_supports_rgmii(port)) + goto invalid_conf; + mvpp22_gop_init_rgmii(port); + break; + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_2500BASEX: + mvpp22_gop_init_sgmii(port); + break; + case PHY_INTERFACE_MODE_10GBASER: + if (!mvpp2_port_supports_xlg(port)) + goto invalid_conf; + mvpp22_gop_init_10gkr(port); + break; + default: + goto unsupported_conf; + } + + regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val); + val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) | + GENCONF_PORT_CTRL1_EN(port->gop_id); + regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val); + + regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val); + val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR; + regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val); + + regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val); + val |= GENCONF_SOFT_RESET1_GOP; + regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val); + +unsupported_conf: + return 0; + +invalid_conf: + netdev_err(port->dev, "Invalid port configuration\n"); + return -EINVAL; +} + +static void mvpp22_gop_unmask_irq(struct mvpp2_port *port) +{ + u32 val; + + if (phy_interface_mode_is_rgmii(port->phy_interface) || + phy_interface_mode_is_8023z(port->phy_interface) || + port->phy_interface == PHY_INTERFACE_MODE_SGMII) { + /* Enable the GMAC link status irq for this port */ + val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK); + val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT; + writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK); + } + + if (mvpp2_port_supports_xlg(port)) { + /* Enable the XLG/GIG irqs for this port */ + val = readl(port->base + MVPP22_XLG_EXT_INT_MASK); + if (mvpp2_is_xlg(port->phy_interface)) + val |= MVPP22_XLG_EXT_INT_MASK_XLG; + else + val |= MVPP22_XLG_EXT_INT_MASK_GIG; + writel(val, port->base + MVPP22_XLG_EXT_INT_MASK); + } +} + +static void mvpp22_gop_mask_irq(struct mvpp2_port *port) +{ + u32 val; + + if (mvpp2_port_supports_xlg(port)) { + val = readl(port->base + MVPP22_XLG_EXT_INT_MASK); + val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG | + MVPP22_XLG_EXT_INT_MASK_GIG); + writel(val, port->base + MVPP22_XLG_EXT_INT_MASK); + } + + if (phy_interface_mode_is_rgmii(port->phy_interface) || + phy_interface_mode_is_8023z(port->phy_interface) || + port->phy_interface == PHY_INTERFACE_MODE_SGMII) { + val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK); + val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT; + writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK); + } 
+} + +static void mvpp22_gop_setup_irq(struct mvpp2_port *port) +{ + u32 val; + + mvpp2_modify(port->base + MVPP22_GMAC_INT_SUM_MASK, + MVPP22_GMAC_INT_SUM_MASK_PTP, + MVPP22_GMAC_INT_SUM_MASK_PTP); + + if (port->phylink || + phy_interface_mode_is_rgmii(port->phy_interface) || + phy_interface_mode_is_8023z(port->phy_interface) || + port->phy_interface == PHY_INTERFACE_MODE_SGMII) { + val = readl(port->base + MVPP22_GMAC_INT_MASK); + val |= MVPP22_GMAC_INT_MASK_LINK_STAT; + writel(val, port->base + MVPP22_GMAC_INT_MASK); + } + + if (mvpp2_port_supports_xlg(port)) { + val = readl(port->base + MVPP22_XLG_INT_MASK); + val |= MVPP22_XLG_INT_MASK_LINK; + writel(val, port->base + MVPP22_XLG_INT_MASK); + + mvpp2_modify(port->base + MVPP22_XLG_EXT_INT_MASK, + MVPP22_XLG_EXT_INT_MASK_PTP, + MVPP22_XLG_EXT_INT_MASK_PTP); + } + + mvpp22_gop_unmask_irq(port); +} + +/* Sets the PHY mode of the COMPHY (which configures the serdes lanes). + * + * The PHY mode used by the PPv2 driver comes from the network subsystem, while + * the one given to the COMPHY comes from the generic PHY subsystem. Hence they + * differ. + * + * The COMPHY configures the serdes lanes regardless of the actual use of the + * lanes by the physical layer. This is why configurations like + * "PPv2 (2500BaseX) - COMPHY (2500SGMII)" are valid. + */ +static int mvpp22_comphy_init(struct mvpp2_port *port) +{ + int ret; + + if (!port->comphy) + return 0; + + ret = phy_set_mode_ext(port->comphy, PHY_MODE_ETHERNET, + port->phy_interface); + if (ret) + return ret; + + return phy_power_on(port->comphy); +} + +static void mvpp2_port_enable(struct mvpp2_port *port) +{ + u32 val; + + if (mvpp2_port_supports_xlg(port) && + mvpp2_is_xlg(port->phy_interface)) { + val = readl(port->base + MVPP22_XLG_CTRL0_REG); + val |= MVPP22_XLG_CTRL0_PORT_EN; + val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS; + writel(val, port->base + MVPP22_XLG_CTRL0_REG); + } else { + val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); + val |= MVPP2_GMAC_PORT_EN_MASK; + val |= MVPP2_GMAC_MIB_CNTR_EN_MASK; + writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); + } +} + +static void mvpp2_port_disable(struct mvpp2_port *port) +{ + u32 val; + + if (mvpp2_port_supports_xlg(port) && + mvpp2_is_xlg(port->phy_interface)) { + val = readl(port->base + MVPP22_XLG_CTRL0_REG); + val &= ~MVPP22_XLG_CTRL0_PORT_EN; + writel(val, port->base + MVPP22_XLG_CTRL0_REG); + } + + val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); + val &= ~(MVPP2_GMAC_PORT_EN_MASK); + writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); +} + +/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */ +static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port) +{ + u32 val; + + val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) & + ~MVPP2_GMAC_PERIODIC_XON_EN_MASK; + writel(val, port->base + MVPP2_GMAC_CTRL_1_REG); +} + +/* Configure loopback port */ +static void mvpp2_port_loopback_set(struct mvpp2_port *port, + const struct phylink_link_state *state) +{ + u32 val; + + val = readl(port->base + MVPP2_GMAC_CTRL_1_REG); + + if (state->speed == 1000) + val |= MVPP2_GMAC_GMII_LB_EN_MASK; + else + val &= ~MVPP2_GMAC_GMII_LB_EN_MASK; + + if (phy_interface_mode_is_8023z(state->interface) || + state->interface == PHY_INTERFACE_MODE_SGMII) + val |= MVPP2_GMAC_PCS_LB_EN_MASK; + else + val &= ~MVPP2_GMAC_PCS_LB_EN_MASK; + + writel(val, port->base + MVPP2_GMAC_CTRL_1_REG); +} + +enum { + ETHTOOL_XDP_REDIRECT, + ETHTOOL_XDP_PASS, + ETHTOOL_XDP_DROP, + ETHTOOL_XDP_TX, + ETHTOOL_XDP_TX_ERR, + ETHTOOL_XDP_XMIT, + ETHTOOL_XDP_XMIT_ERR, +}; + 
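[Editor's note] The counter tables that follow pair a register offset (or, for the XDP entries, one of the ETHTOOL_XDP_* indices above) with an ethtool string; counters flagged reg_is_64b expose their low 32 bits at the given offset and their high 32 bits at offset + 4, which mvpp2_read_count() folds into a single u64. Below is a minimal, self-contained user-space sketch of that composition only; read_reg32() and fake_regs are hypothetical stand-ins for readl() on the port's stats window and are not part of the driver.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the port's stats register window
 * (the driver uses readl(port->stats_base + offset)).
 */
static uint32_t fake_regs[2] = { 0x89abcdefu, 0x01234567u };

static uint32_t read_reg32(uint32_t offset)
{
	return fake_regs[offset / 4];
}

/* Mirrors mvpp2_read_count() for a reg_is_64b counter:
 * low word at `offset`, high word at `offset + 4`.
 */
static uint64_t read_counter64(uint32_t offset)
{
	uint64_t val = read_reg32(offset);

	val += (uint64_t)read_reg32(offset + 4) << 32;
	return val;
}

int main(void)
{
	/* Prints 0x0123456789abcdef for the fake register contents above. */
	printf("0x%016llx\n", (unsigned long long)read_counter64(0));
	return 0;
}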
+struct mvpp2_ethtool_counter { + unsigned int offset; + const char string[ETH_GSTRING_LEN]; + bool reg_is_64b; +}; + +static u64 mvpp2_read_count(struct mvpp2_port *port, + const struct mvpp2_ethtool_counter *counter) +{ + u64 val; + + val = readl(port->stats_base + counter->offset); + if (counter->reg_is_64b) + val += (u64)readl(port->stats_base + counter->offset + 4) << 32; + + return val; +} + +/* Some counters are accessed indirectly by first writing an index to + * MVPP2_CTRS_IDX. The index can represent various resources depending on the + * register we access, it can be a hit counter for some classification tables, + * a counter specific to a rxq, a txq or a buffer pool. + */ +static u32 mvpp2_read_index(struct mvpp2 *priv, u32 index, u32 reg) +{ + mvpp2_write(priv, MVPP2_CTRS_IDX, index); + return mvpp2_read(priv, reg); +} + +/* Due to the fact that software statistics and hardware statistics are, by + * design, incremented at different moments in the chain of packet processing, + * it is very likely that incoming packets could have been dropped after being + * counted by hardware but before reaching software statistics (most probably + * multicast packets), and in the oppposite way, during transmission, FCS bytes + * are added in between as well as TSO skb will be split and header bytes added. + * Hence, statistics gathered from userspace with ifconfig (software) and + * ethtool (hardware) cannot be compared. + */ +static const struct mvpp2_ethtool_counter mvpp2_ethtool_mib_regs[] = { + { MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true }, + { MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" }, + { MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" }, + { MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" }, + { MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" }, + { MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" }, + { MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" }, + { MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" }, + { MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" }, + { MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" }, + { MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" }, + { MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" }, + { MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true }, + { MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" }, + { MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" }, + { MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" }, + { MVPP2_MIB_FC_SENT, "fc_sent" }, + { MVPP2_MIB_FC_RCVD, "fc_received" }, + { MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" }, + { MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" }, + { MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" }, + { MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" }, + { MVPP2_MIB_JABBER_RCVD, "jabber_received" }, + { MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" }, + { MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" }, + { MVPP2_MIB_COLLISION, "collision" }, + { MVPP2_MIB_LATE_COLLISION, "late_collision" }, +}; + +static const struct mvpp2_ethtool_counter mvpp2_ethtool_port_regs[] = { + { MVPP2_OVERRUN_ETH_DROP, "rx_fifo_or_parser_overrun_drops" }, + { MVPP2_CLS_ETH_DROP, "rx_classifier_drops" }, +}; + +static const struct mvpp2_ethtool_counter mvpp2_ethtool_txq_regs[] = { + { MVPP2_TX_DESC_ENQ_CTR, "txq_%d_desc_enqueue" }, + { MVPP2_TX_DESC_ENQ_TO_DDR_CTR, "txq_%d_desc_enqueue_to_ddr" }, + { MVPP2_TX_BUFF_ENQ_TO_DDR_CTR, "txq_%d_buff_euqueue_to_ddr" }, + { 
MVPP2_TX_DESC_ENQ_HW_FWD_CTR, "txq_%d_desc_hardware_forwarded" }, + { MVPP2_TX_PKTS_DEQ_CTR, "txq_%d_packets_dequeued" }, + { MVPP2_TX_PKTS_FULL_QUEUE_DROP_CTR, "txq_%d_queue_full_drops" }, + { MVPP2_TX_PKTS_EARLY_DROP_CTR, "txq_%d_packets_early_drops" }, + { MVPP2_TX_PKTS_BM_DROP_CTR, "txq_%d_packets_bm_drops" }, + { MVPP2_TX_PKTS_BM_MC_DROP_CTR, "txq_%d_packets_rep_bm_drops" }, +}; + +static const struct mvpp2_ethtool_counter mvpp2_ethtool_rxq_regs[] = { + { MVPP2_RX_DESC_ENQ_CTR, "rxq_%d_desc_enqueue" }, + { MVPP2_RX_PKTS_FULL_QUEUE_DROP_CTR, "rxq_%d_queue_full_drops" }, + { MVPP2_RX_PKTS_EARLY_DROP_CTR, "rxq_%d_packets_early_drops" }, + { MVPP2_RX_PKTS_BM_DROP_CTR, "rxq_%d_packets_bm_drops" }, +}; + +static const struct mvpp2_ethtool_counter mvpp2_ethtool_xdp[] = { + { ETHTOOL_XDP_REDIRECT, "rx_xdp_redirect", }, + { ETHTOOL_XDP_PASS, "rx_xdp_pass", }, + { ETHTOOL_XDP_DROP, "rx_xdp_drop", }, + { ETHTOOL_XDP_TX, "rx_xdp_tx", }, + { ETHTOOL_XDP_TX_ERR, "rx_xdp_tx_errors", }, + { ETHTOOL_XDP_XMIT, "tx_xdp_xmit", }, + { ETHTOOL_XDP_XMIT_ERR, "tx_xdp_xmit_errors", }, +}; + +#define MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs) (ARRAY_SIZE(mvpp2_ethtool_mib_regs) + \ + ARRAY_SIZE(mvpp2_ethtool_port_regs) + \ + (ARRAY_SIZE(mvpp2_ethtool_txq_regs) * (ntxqs)) + \ + (ARRAY_SIZE(mvpp2_ethtool_rxq_regs) * (nrxqs)) + \ + ARRAY_SIZE(mvpp2_ethtool_xdp)) + +static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset, + u8 *data) +{ + struct mvpp2_port *port = netdev_priv(netdev); + int i, q; + + if (sset != ETH_SS_STATS) + return; + + for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++) { + strscpy(data, mvpp2_ethtool_mib_regs[i].string, + ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + + for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++) { + strscpy(data, mvpp2_ethtool_port_regs[i].string, + ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + + for (q = 0; q < port->ntxqs; q++) { + for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) { + snprintf(data, ETH_GSTRING_LEN, + mvpp2_ethtool_txq_regs[i].string, q); + data += ETH_GSTRING_LEN; + } + } + + for (q = 0; q < port->nrxqs; q++) { + for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) { + snprintf(data, ETH_GSTRING_LEN, + mvpp2_ethtool_rxq_regs[i].string, + q); + data += ETH_GSTRING_LEN; + } + } + + for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_xdp); i++) { + strscpy(data, mvpp2_ethtool_xdp[i].string, + ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } +} + +static void +mvpp2_get_xdp_stats(struct mvpp2_port *port, struct mvpp2_pcpu_stats *xdp_stats) +{ + unsigned int start; + unsigned int cpu; + + /* Gather XDP Statistics */ + for_each_possible_cpu(cpu) { + struct mvpp2_pcpu_stats *cpu_stats; + u64 xdp_redirect; + u64 xdp_pass; + u64 xdp_drop; + u64 xdp_xmit; + u64 xdp_xmit_err; + u64 xdp_tx; + u64 xdp_tx_err; + + cpu_stats = per_cpu_ptr(port->stats, cpu); + do { + start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); + xdp_redirect = cpu_stats->xdp_redirect; + xdp_pass = cpu_stats->xdp_pass; + xdp_drop = cpu_stats->xdp_drop; + xdp_xmit = cpu_stats->xdp_xmit; + xdp_xmit_err = cpu_stats->xdp_xmit_err; + xdp_tx = cpu_stats->xdp_tx; + xdp_tx_err = cpu_stats->xdp_tx_err; + } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); + + xdp_stats->xdp_redirect += xdp_redirect; + xdp_stats->xdp_pass += xdp_pass; + xdp_stats->xdp_drop += xdp_drop; + xdp_stats->xdp_xmit += xdp_xmit; + xdp_stats->xdp_xmit_err += xdp_xmit_err; + xdp_stats->xdp_tx += xdp_tx; + xdp_stats->xdp_tx_err += xdp_tx_err; + } +} + +static void 
mvpp2_read_stats(struct mvpp2_port *port) +{ + struct mvpp2_pcpu_stats xdp_stats = {}; + const struct mvpp2_ethtool_counter *s; + u64 *pstats; + int i, q; + + pstats = port->ethtool_stats; + + for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++) + *pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_mib_regs[i]); + + for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++) + *pstats++ += mvpp2_read(port->priv, + mvpp2_ethtool_port_regs[i].offset + + 4 * port->id); + + for (q = 0; q < port->ntxqs; q++) + for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) + *pstats++ += mvpp2_read_index(port->priv, + MVPP22_CTRS_TX_CTR(port->id, q), + mvpp2_ethtool_txq_regs[i].offset); + + /* Rxqs are numbered from 0 from the user standpoint, but not from the + * driver's. We need to add the port->first_rxq offset. + */ + for (q = 0; q < port->nrxqs; q++) + for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) + *pstats++ += mvpp2_read_index(port->priv, + port->first_rxq + q, + mvpp2_ethtool_rxq_regs[i].offset); + + /* Gather XDP Statistics */ + mvpp2_get_xdp_stats(port, &xdp_stats); + + for (i = 0, s = mvpp2_ethtool_xdp; + s < mvpp2_ethtool_xdp + ARRAY_SIZE(mvpp2_ethtool_xdp); + s++, i++) { + switch (s->offset) { + case ETHTOOL_XDP_REDIRECT: + *pstats++ = xdp_stats.xdp_redirect; + break; + case ETHTOOL_XDP_PASS: + *pstats++ = xdp_stats.xdp_pass; + break; + case ETHTOOL_XDP_DROP: + *pstats++ = xdp_stats.xdp_drop; + break; + case ETHTOOL_XDP_TX: + *pstats++ = xdp_stats.xdp_tx; + break; + case ETHTOOL_XDP_TX_ERR: + *pstats++ = xdp_stats.xdp_tx_err; + break; + case ETHTOOL_XDP_XMIT: + *pstats++ = xdp_stats.xdp_xmit; + break; + case ETHTOOL_XDP_XMIT_ERR: + *pstats++ = xdp_stats.xdp_xmit_err; + break; + } + } +} + +static void mvpp2_gather_hw_statistics(struct work_struct *work) +{ + struct delayed_work *del_work = to_delayed_work(work); + struct mvpp2_port *port = container_of(del_work, struct mvpp2_port, + stats_work); + + mutex_lock(&port->gather_stats_lock); + + mvpp2_read_stats(port); + + /* No need to read again the counters right after this function if it + * was called asynchronously by the user (ie. use of ethtool). + */ + cancel_delayed_work(&port->stats_work); + queue_delayed_work(port->priv->stats_queue, &port->stats_work, + MVPP2_MIB_COUNTERS_STATS_DELAY); + + mutex_unlock(&port->gather_stats_lock); +} + +static void mvpp2_ethtool_get_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct mvpp2_port *port = netdev_priv(dev); + + /* Update statistics for the given port, then take the lock to avoid + * concurrent accesses on the ethtool_stats structure during its copy. 
+ */ + mvpp2_gather_hw_statistics(&port->stats_work.work); + + mutex_lock(&port->gather_stats_lock); + memcpy(data, port->ethtool_stats, + sizeof(u64) * MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs)); + mutex_unlock(&port->gather_stats_lock); +} + +static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (sset == ETH_SS_STATS) + return MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs); + + return -EOPNOTSUPP; +} + +static void mvpp2_mac_reset_assert(struct mvpp2_port *port) +{ + u32 val; + + val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) | + MVPP2_GMAC_PORT_RESET_MASK; + writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); + + if (port->priv->hw_version == MVPP22 && port->gop_id == 0) { + val = readl(port->base + MVPP22_XLG_CTRL0_REG) & + ~MVPP22_XLG_CTRL0_MAC_RESET_DIS; + writel(val, port->base + MVPP22_XLG_CTRL0_REG); + } +} + +static void mvpp22_pcs_reset_assert(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + void __iomem *mpcs, *xpcs; + u32 val; + + if (port->priv->hw_version != MVPP22 || port->gop_id != 0) + return; + + mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id); + xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id); + + val = readl(mpcs + MVPP22_MPCS_CLK_RESET); + val &= ~(MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX); + val |= MVPP22_MPCS_CLK_RESET_DIV_SET; + writel(val, mpcs + MVPP22_MPCS_CLK_RESET); + + val = readl(xpcs + MVPP22_XPCS_CFG0); + writel(val & ~MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0); +} + +static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + void __iomem *mpcs, *xpcs; + u32 val; + + if (port->priv->hw_version != MVPP22 || port->gop_id != 0) + return; + + mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id); + xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id); + + switch (port->phy_interface) { + case PHY_INTERFACE_MODE_10GBASER: + val = readl(mpcs + MVPP22_MPCS_CLK_RESET); + val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | + MAC_CLK_RESET_SD_TX; + val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET; + writel(val, mpcs + MVPP22_MPCS_CLK_RESET); + break; + case PHY_INTERFACE_MODE_XAUI: + case PHY_INTERFACE_MODE_RXAUI: + val = readl(xpcs + MVPP22_XPCS_CFG0); + writel(val | MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0); + break; + default: + break; + } +} + +/* Change maximum receive size of the port */ +static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port) +{ + u32 val; + + val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); + val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK; + val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) << + MVPP2_GMAC_MAX_RX_SIZE_OFFS); + writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); +} + +/* Change maximum receive size of the port */ +static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port) +{ + u32 val; + + val = readl(port->base + MVPP22_XLG_CTRL1_REG); + val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK; + val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) << + MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS; + writel(val, port->base + MVPP22_XLG_CTRL1_REG); +} + +/* Set defaults to the MVPP2 port */ +static void mvpp2_defaults_set(struct mvpp2_port *port) +{ + int tx_port_num, val, queue, lrxq; + + if (port->priv->hw_version == MVPP21) { + /* Update TX FIFO MIN Threshold */ + val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); + val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK; + /* Min. 
TX threshold must be less than minimal packet length */ + val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2); + writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); + } + + /* Disable Legacy WRR, Disable EJP, Release from reset */ + tx_port_num = mvpp2_egress_port(port); + mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, + tx_port_num); + mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0); + + /* Set TXQ scheduling to Round-Robin */ + mvpp2_write(port->priv, MVPP2_TXP_SCHED_FIXED_PRIO_REG, 0); + + /* Close bandwidth for all queues */ + for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) + mvpp2_write(port->priv, + MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0); + + /* Set refill period to 1 usec, refill tokens + * and bucket size to maximum + */ + mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG, + port->priv->tclk / USEC_PER_SEC); + val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG); + val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK; + val |= MVPP2_TXP_REFILL_PERIOD_MASK(1); + val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK; + mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val); + val = MVPP2_TXP_TOKEN_SIZE_MAX; + mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); + + /* Set MaximumLowLatencyPacketSize value to 256 */ + mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id), + MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK | + MVPP2_RX_LOW_LATENCY_PKT_SIZE(256)); + + /* Enable Rx cache snoop */ + for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { + queue = port->rxqs[lrxq]->id; + val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); + val |= MVPP2_SNOOP_PKT_SIZE_MASK | + MVPP2_SNOOP_BUF_HDR_MASK; + mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); + } + + /* At default, mask all interrupts to all present cpus */ + mvpp2_interrupts_disable(port); +} + +/* Enable/disable receiving packets */ +static void mvpp2_ingress_enable(struct mvpp2_port *port) +{ + u32 val; + int lrxq, queue; + + for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { + queue = port->rxqs[lrxq]->id; + val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); + val &= ~MVPP2_RXQ_DISABLE_MASK; + mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); + } +} + +static void mvpp2_ingress_disable(struct mvpp2_port *port) +{ + u32 val; + int lrxq, queue; + + for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { + queue = port->rxqs[lrxq]->id; + val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); + val |= MVPP2_RXQ_DISABLE_MASK; + mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); + } +} + +/* Enable transmit via physical egress queue + * - HW starts take descriptors from DRAM + */ +static void mvpp2_egress_enable(struct mvpp2_port *port) +{ + u32 qmap; + int queue; + int tx_port_num = mvpp2_egress_port(port); + + /* Enable all initialized TXs. 
*/ + qmap = 0; + for (queue = 0; queue < port->ntxqs; queue++) { + struct mvpp2_tx_queue *txq = port->txqs[queue]; + + if (txq->descs) + qmap |= (1 << queue); + } + + mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); + mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap); +} + +/* Disable transmit via physical egress queue + * - HW doesn't take descriptors from DRAM + */ +static void mvpp2_egress_disable(struct mvpp2_port *port) +{ + u32 reg_data; + int delay; + int tx_port_num = mvpp2_egress_port(port); + + /* Issue stop command for active channels only */ + mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); + reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) & + MVPP2_TXP_SCHED_ENQ_MASK; + if (reg_data != 0) + mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, + (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET)); + + /* Wait for all Tx activity to terminate. */ + delay = 0; + do { + if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) { + netdev_warn(port->dev, + "Tx stop timed out, status=0x%08x\n", + reg_data); + break; + } + mdelay(1); + delay++; + + /* Check port TX Command register that all + * Tx queues are stopped + */ + reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG); + } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK); +} + +/* Rx descriptors helper methods */ + +/* Get number of Rx descriptors occupied by received packets */ +static inline int +mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id) +{ + u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id)); + + return val & MVPP2_RXQ_OCCUPIED_MASK; +} + +/* Update Rx queue status with the number of occupied and available + * Rx descriptor slots. + */ +static inline void +mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id, + int used_count, int free_count) +{ + /* Decrement the number of used descriptors and increment count + * increment the number of free descriptors. + */ + u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET); + + mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val); +} + +/* Get pointer to next RX descriptor to be processed by SW */ +static inline struct mvpp2_rx_desc * +mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq) +{ + int rx_desc = rxq->next_desc_to_proc; + + rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc); + prefetch(rxq->descs + rxq->next_desc_to_proc); + return rxq->descs + rx_desc; +} + +/* Set rx queue offset */ +static void mvpp2_rxq_offset_set(struct mvpp2_port *port, + int prxq, int offset) +{ + u32 val; + + /* Convert offset from bytes to units of 32 bytes */ + offset = offset >> 5; + + val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); + val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK; + + /* Offset is in */ + val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) & + MVPP2_RXQ_PACKET_OFFSET_MASK); + + mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); +} + +/* Tx descriptors helper methods */ + +/* Get pointer to next Tx descriptor to be processed (send) by HW */ +static struct mvpp2_tx_desc * +mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq) +{ + int tx_desc = txq->next_desc_to_proc; + + txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc); + return txq->descs + tx_desc; +} + +/* Update HW with number of aggregated Tx descriptors to be sent + * + * Called only from mvpp2_tx(), so migration is disabled, using + * smp_processor_id() is OK. 
+ */ +static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending) +{ + /* aggregated access - relevant TXQ number is written in TX desc */ + mvpp2_thread_write(port->priv, + mvpp2_cpu_to_thread(port->priv, smp_processor_id()), + MVPP2_AGGR_TXQ_UPDATE_REG, pending); +} + +/* Check if there are enough free descriptors in aggregated txq. + * If not, update the number of occupied descriptors and repeat the check. + * + * Called only from mvpp2_tx(), so migration is disabled, using + * smp_processor_id() is OK. + */ +static int mvpp2_aggr_desc_num_check(struct mvpp2_port *port, + struct mvpp2_tx_queue *aggr_txq, int num) +{ + if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) { + /* Update number of occupied aggregated Tx descriptors */ + unsigned int thread = + mvpp2_cpu_to_thread(port->priv, smp_processor_id()); + u32 val = mvpp2_read_relaxed(port->priv, + MVPP2_AGGR_TXQ_STATUS_REG(thread)); + + aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK; + + if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) + return -ENOMEM; + } + return 0; +} + +/* Reserved Tx descriptors allocation request + * + * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called + * only by mvpp2_tx(), so migration is disabled, using + * smp_processor_id() is OK. + */ +static int mvpp2_txq_alloc_reserved_desc(struct mvpp2_port *port, + struct mvpp2_tx_queue *txq, int num) +{ + unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); + struct mvpp2 *priv = port->priv; + u32 val; + + val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num; + mvpp2_thread_write_relaxed(priv, thread, MVPP2_TXQ_RSVD_REQ_REG, val); + + val = mvpp2_thread_read_relaxed(priv, thread, MVPP2_TXQ_RSVD_RSLT_REG); + + return val & MVPP2_TXQ_RSVD_RSLT_MASK; +} + +/* Check if there are enough reserved descriptors for transmission. + * If not, request chunk of reserved descriptors and check again. + */ +static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2_port *port, + struct mvpp2_tx_queue *txq, + struct mvpp2_txq_pcpu *txq_pcpu, + int num) +{ + int req, desc_count; + unsigned int thread; + + if (txq_pcpu->reserved_num >= num) + return 0; + + /* Not enough descriptors reserved! Update the reserved descriptor + * count and check again. + */ + + desc_count = 0; + /* Compute total of used descriptors */ + for (thread = 0; thread < port->priv->nthreads; thread++) { + struct mvpp2_txq_pcpu *txq_pcpu_aux; + + txq_pcpu_aux = per_cpu_ptr(txq->pcpu, thread); + desc_count += txq_pcpu_aux->count; + desc_count += txq_pcpu_aux->reserved_num; + } + + req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num); + desc_count += req; + + if (desc_count > + (txq->size - (MVPP2_MAX_THREADS * MVPP2_CPU_DESC_CHUNK))) + return -ENOMEM; + + txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(port, txq, req); + + /* OK, the descriptor could have been updated: check again. */ + if (txq_pcpu->reserved_num < num) + return -ENOMEM; + return 0; +} + +/* Release the last allocated Tx descriptor. Useful to handle DMA + * mapping failures in the Tx path. 
+ */ +static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq) +{ + if (txq->next_desc_to_proc == 0) + txq->next_desc_to_proc = txq->last_desc - 1; + else + txq->next_desc_to_proc--; +} + +/* Set Tx descriptors fields relevant for CSUM calculation */ +static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto, + int ip_hdr_len, int l4_proto) +{ + u32 command; + + /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk, + * G_L4_chk, L4_type required only for checksum calculation + */ + command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT); + command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT); + command |= MVPP2_TXD_IP_CSUM_DISABLE; + + if (l3_proto == htons(ETH_P_IP)) { + command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */ + command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */ + } else { + command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */ + } + + if (l4_proto == IPPROTO_TCP) { + command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */ + command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */ + } else if (l4_proto == IPPROTO_UDP) { + command |= MVPP2_TXD_L4_UDP; /* enable UDP */ + command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */ + } else { + command |= MVPP2_TXD_L4_CSUM_NOT; + } + + return command; +} + +/* Get number of sent descriptors and decrement counter. + * The number of sent descriptors is returned. + * Per-thread access + * + * Called only from mvpp2_txq_done(), called from mvpp2_tx() + * (migration disabled) and from the TX completion tasklet (migration + * disabled) so using smp_processor_id() is OK. + */ +static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port, + struct mvpp2_tx_queue *txq) +{ + u32 val; + + /* Reading status reg resets transmitted descriptor counter */ + val = mvpp2_thread_read_relaxed(port->priv, + mvpp2_cpu_to_thread(port->priv, smp_processor_id()), + MVPP2_TXQ_SENT_REG(txq->id)); + + return (val & MVPP2_TRANSMITTED_COUNT_MASK) >> + MVPP2_TRANSMITTED_COUNT_OFFSET; +} + +/* Called through on_each_cpu(), so runs on all CPUs, with migration + * disabled, therefore using smp_processor_id() is OK. 
+ */ +static void mvpp2_txq_sent_counter_clear(void *arg) +{ + struct mvpp2_port *port = arg; + int queue; + + /* If the thread isn't used, don't do anything */ + if (smp_processor_id() >= port->priv->nthreads) + return; + + for (queue = 0; queue < port->ntxqs; queue++) { + int id = port->txqs[queue]->id; + + mvpp2_thread_read(port->priv, + mvpp2_cpu_to_thread(port->priv, smp_processor_id()), + MVPP2_TXQ_SENT_REG(id)); + } +} + +/* Set max sizes for Tx queues */ +static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port) +{ + u32 val, size, mtu; + int txq, tx_port_num; + + mtu = port->pkt_size * 8; + if (mtu > MVPP2_TXP_MTU_MAX) + mtu = MVPP2_TXP_MTU_MAX; + + /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */ + mtu = 3 * mtu; + + /* Indirect access to registers */ + tx_port_num = mvpp2_egress_port(port); + mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); + + /* Set MTU */ + val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG); + val &= ~MVPP2_TXP_MTU_MAX; + val |= mtu; + mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val); + + /* TXP token size and all TXQs token size must be larger that MTU */ + val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG); + size = val & MVPP2_TXP_TOKEN_SIZE_MAX; + if (size < mtu) { + size = mtu; + val &= ~MVPP2_TXP_TOKEN_SIZE_MAX; + val |= size; + mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); + } + + for (txq = 0; txq < port->ntxqs; txq++) { + val = mvpp2_read(port->priv, + MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq)); + size = val & MVPP2_TXQ_TOKEN_SIZE_MAX; + + if (size < mtu) { + size = mtu; + val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX; + val |= size; + mvpp2_write(port->priv, + MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq), + val); + } + } +} + +/* Set the number of packets that will be received before Rx interrupt + * will be generated by HW. + */ +static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, + struct mvpp2_rx_queue *rxq) +{ + unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); + + if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK) + rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK; + + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_THRESH_REG, + rxq->pkts_coal); + + put_cpu(); +} + +/* For some reason in the LSP this is done on each CPU. Why ? */ +static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port, + struct mvpp2_tx_queue *txq) +{ + unsigned int thread; + u32 val; + + if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK) + txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK; + + val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET); + /* PKT-coalescing registers are per-queue + per-thread */ + for (thread = 0; thread < MVPP2_MAX_THREADS; thread++) { + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val); + } +} + +static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz) +{ + u64 tmp = (u64)clk_hz * usec; + + do_div(tmp, USEC_PER_SEC); + + return tmp > U32_MAX ? U32_MAX : tmp; +} + +static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz) +{ + u64 tmp = (u64)cycles * USEC_PER_SEC; + + do_div(tmp, clk_hz); + + return tmp > U32_MAX ? 
U32_MAX : tmp; +} + +/* Set the time delay in usec before Rx interrupt */ +static void mvpp2_rx_time_coal_set(struct mvpp2_port *port, + struct mvpp2_rx_queue *rxq) +{ + unsigned long freq = port->priv->tclk; + u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq); + + if (val > MVPP2_MAX_ISR_RX_THRESHOLD) { + rxq->time_coal = + mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq); + + /* re-evaluate to get actual register value */ + val = mvpp2_usec_to_cycles(rxq->time_coal, freq); + } + + mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val); +} + +static void mvpp2_tx_time_coal_set(struct mvpp2_port *port) +{ + unsigned long freq = port->priv->tclk; + u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq); + + if (val > MVPP2_MAX_ISR_TX_THRESHOLD) { + port->tx_time_coal = + mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq); + + /* re-evaluate to get actual register value */ + val = mvpp2_usec_to_cycles(port->tx_time_coal, freq); + } + + mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val); +} + +/* Free Tx queue skbuffs */ +static void mvpp2_txq_bufs_free(struct mvpp2_port *port, + struct mvpp2_tx_queue *txq, + struct mvpp2_txq_pcpu *txq_pcpu, int num) +{ + int i; + + for (i = 0; i < num; i++) { + struct mvpp2_txq_pcpu_buf *tx_buf = + txq_pcpu->buffs + txq_pcpu->txq_get_index; + + if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma) && + tx_buf->type != MVPP2_TYPE_XDP_TX) + dma_unmap_single(port->dev->dev.parent, tx_buf->dma, + tx_buf->size, DMA_TO_DEVICE); + if (tx_buf->type == MVPP2_TYPE_SKB && tx_buf->skb) + dev_kfree_skb_any(tx_buf->skb); + else if (tx_buf->type == MVPP2_TYPE_XDP_TX || + tx_buf->type == MVPP2_TYPE_XDP_NDO) + xdp_return_frame(tx_buf->xdpf); + + mvpp2_txq_inc_get(txq_pcpu); + } +} + +static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port, + u32 cause) +{ + int queue = fls(cause) - 1; + + return port->rxqs[queue]; +} + +static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port, + u32 cause) +{ + int queue = fls(cause) - 1; + + return port->txqs[queue]; +} + +/* Handle end of transmission */ +static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, + struct mvpp2_txq_pcpu *txq_pcpu) +{ + struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id); + int tx_done; + + if (txq_pcpu->thread != mvpp2_cpu_to_thread(port->priv, smp_processor_id())) + netdev_err(port->dev, "wrong cpu on the end of Tx processing\n"); + + tx_done = mvpp2_txq_sent_desc_proc(port, txq); + if (!tx_done) + return; + mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done); + + txq_pcpu->count -= tx_done; + + if (netif_tx_queue_stopped(nq)) + if (txq_pcpu->count <= txq_pcpu->wake_threshold) + netif_tx_wake_queue(nq); +} + +static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause, + unsigned int thread) +{ + struct mvpp2_tx_queue *txq; + struct mvpp2_txq_pcpu *txq_pcpu; + unsigned int tx_todo = 0; + + while (cause) { + txq = mvpp2_get_tx_queue(port, cause); + if (!txq) + break; + + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); + + if (txq_pcpu->count) { + mvpp2_txq_done(port, txq, txq_pcpu); + tx_todo += txq_pcpu->count; + } + + cause &= ~(1 << txq->log_id); + } + return tx_todo; +} + +/* Rx/Tx queue initialization/cleanup methods */ + +/* Allocate and initialize descriptors for aggr TXQ */ +static int mvpp2_aggr_txq_init(struct platform_device *pdev, + struct mvpp2_tx_queue *aggr_txq, + unsigned int thread, struct mvpp2 *priv) +{ + u32 txq_dma; + + /* Allocate memory for TX descriptors */ + 
aggr_txq->descs = dma_alloc_coherent(&pdev->dev, + MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, + &aggr_txq->descs_dma, GFP_KERNEL); + if (!aggr_txq->descs) + return -ENOMEM; + + aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1; + + /* Aggr TXQ no reset WA */ + aggr_txq->next_desc_to_proc = mvpp2_read(priv, + MVPP2_AGGR_TXQ_INDEX_REG(thread)); + + /* Set Tx descriptors queue starting address indirect + * access + */ + if (priv->hw_version == MVPP21) + txq_dma = aggr_txq->descs_dma; + else + txq_dma = aggr_txq->descs_dma >> + MVPP22_AGGR_TXQ_DESC_ADDR_OFFS; + + mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(thread), txq_dma); + mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(thread), + MVPP2_AGGR_TXQ_SIZE); + + return 0; +} + +/* Create a specified Rx queue */ +static int mvpp2_rxq_init(struct mvpp2_port *port, + struct mvpp2_rx_queue *rxq) +{ + struct mvpp2 *priv = port->priv; + unsigned int thread; + u32 rxq_dma; + int err; + + rxq->size = port->rx_ring_size; + + /* Allocate memory for RX descriptors */ + rxq->descs = dma_alloc_coherent(port->dev->dev.parent, + rxq->size * MVPP2_DESC_ALIGNED_SIZE, + &rxq->descs_dma, GFP_KERNEL); + if (!rxq->descs) + return -ENOMEM; + + rxq->last_desc = rxq->size - 1; + + /* Zero occupied and non-occupied counters - direct access */ + mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); + + /* Set Rx descriptors queue starting address - indirect access */ + thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); + if (port->priv->hw_version == MVPP21) + rxq_dma = rxq->descs_dma; + else + rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS; + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_INDEX_REG, 0); + put_cpu(); + + /* Set Offset */ + mvpp2_rxq_offset_set(port, rxq->id, MVPP2_SKB_HEADROOM); + + /* Set coalescing pkts and time */ + mvpp2_rx_pkts_coal_set(port, rxq); + mvpp2_rx_time_coal_set(port, rxq); + + /* Add number of descriptors ready for receiving packets */ + mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size); + + if (priv->percpu_pools) { + err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->logic_rxq); + if (err < 0) + goto err_free_dma; + + err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->logic_rxq); + if (err < 0) + goto err_unregister_rxq_short; + + /* Every RXQ has a pool for short and another for long packets */ + err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_short, + MEM_TYPE_PAGE_POOL, + priv->page_pool[rxq->logic_rxq]); + if (err < 0) + goto err_unregister_rxq_long; + + err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_long, + MEM_TYPE_PAGE_POOL, + priv->page_pool[rxq->logic_rxq + + port->nrxqs]); + if (err < 0) + goto err_unregister_mem_rxq_short; + } + + return 0; + +err_unregister_mem_rxq_short: + xdp_rxq_info_unreg_mem_model(&rxq->xdp_rxq_short); +err_unregister_rxq_long: + xdp_rxq_info_unreg(&rxq->xdp_rxq_long); +err_unregister_rxq_short: + xdp_rxq_info_unreg(&rxq->xdp_rxq_short); +err_free_dma: + dma_free_coherent(port->dev->dev.parent, + rxq->size * MVPP2_DESC_ALIGNED_SIZE, + rxq->descs, rxq->descs_dma); + return err; +} + +/* Push packets received by the RXQ to BM pool */ +static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port, + struct mvpp2_rx_queue *rxq) +{ + int rx_received, i; + + rx_received = mvpp2_rxq_received(port, rxq->id); + if (!rx_received) + return; + + for (i = 0; i < 
rx_received; i++) { + struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); + u32 status = mvpp2_rxdesc_status_get(port, rx_desc); + int pool; + + pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >> + MVPP2_RXD_BM_POOL_ID_OFFS; + + mvpp2_bm_pool_put(port, pool, + mvpp2_rxdesc_dma_addr_get(port, rx_desc), + mvpp2_rxdesc_cookie_get(port, rx_desc)); + } + mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received); +} + +/* Cleanup Rx queue */ +static void mvpp2_rxq_deinit(struct mvpp2_port *port, + struct mvpp2_rx_queue *rxq) +{ + unsigned int thread; + + if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_short)) + xdp_rxq_info_unreg(&rxq->xdp_rxq_short); + + if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_long)) + xdp_rxq_info_unreg(&rxq->xdp_rxq_long); + + mvpp2_rxq_drop_pkts(port, rxq); + + if (rxq->descs) + dma_free_coherent(port->dev->dev.parent, + rxq->size * MVPP2_DESC_ALIGNED_SIZE, + rxq->descs, + rxq->descs_dma); + + rxq->descs = NULL; + rxq->last_desc = 0; + rxq->next_desc_to_proc = 0; + rxq->descs_dma = 0; + + /* Clear Rx descriptors queue starting address and size; + * free descriptor number + */ + mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); + thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, 0); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, 0); + put_cpu(); +} + +/* Create and initialize a Tx queue */ +static int mvpp2_txq_init(struct mvpp2_port *port, + struct mvpp2_tx_queue *txq) +{ + u32 val; + unsigned int thread; + int desc, desc_per_txq, tx_port_num; + struct mvpp2_txq_pcpu *txq_pcpu; + + txq->size = port->tx_ring_size; + + /* Allocate memory for Tx descriptors */ + txq->descs = dma_alloc_coherent(port->dev->dev.parent, + txq->size * MVPP2_DESC_ALIGNED_SIZE, + &txq->descs_dma, GFP_KERNEL); + if (!txq->descs) + return -ENOMEM; + + txq->last_desc = txq->size - 1; + + /* Set Tx descriptors queue starting address - indirect access */ + thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, + txq->descs_dma); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, + txq->size & MVPP2_TXQ_DESC_SIZE_MASK); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_INDEX_REG, 0); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_RSVD_CLR_REG, + txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET); + val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG); + val &= ~MVPP2_TXQ_PENDING_MASK; + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PENDING_REG, val); + + /* Calculate base address in prefetch buffer. We reserve 16 descriptors + * for each existing TXQ. 
+ * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT + * GBE ports assumed to be continuous from 0 to MVPP2_MAX_PORTS + */ + desc_per_txq = 16; + desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) + + (txq->log_id * desc_per_txq); + + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, + MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | + MVPP2_PREF_BUF_THRESH(desc_per_txq / 2)); + put_cpu(); + + /* WRR / EJP configuration - indirect access */ + tx_port_num = mvpp2_egress_port(port); + mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); + + val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id)); + val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK; + val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1); + val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK; + mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val); + + val = MVPP2_TXQ_TOKEN_SIZE_MAX; + mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id), + val); + + for (thread = 0; thread < port->priv->nthreads; thread++) { + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); + txq_pcpu->size = txq->size; + txq_pcpu->buffs = kmalloc_array(txq_pcpu->size, + sizeof(*txq_pcpu->buffs), + GFP_KERNEL); + if (!txq_pcpu->buffs) + return -ENOMEM; + + txq_pcpu->count = 0; + txq_pcpu->reserved_num = 0; + txq_pcpu->txq_put_index = 0; + txq_pcpu->txq_get_index = 0; + txq_pcpu->tso_headers = NULL; + + txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS; + txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2; + + txq_pcpu->tso_headers = + dma_alloc_coherent(port->dev->dev.parent, + txq_pcpu->size * TSO_HEADER_SIZE, + &txq_pcpu->tso_headers_dma, + GFP_KERNEL); + if (!txq_pcpu->tso_headers) + return -ENOMEM; + } + + return 0; +} + +/* Free allocated TXQ resources */ +static void mvpp2_txq_deinit(struct mvpp2_port *port, + struct mvpp2_tx_queue *txq) +{ + struct mvpp2_txq_pcpu *txq_pcpu; + unsigned int thread; + + for (thread = 0; thread < port->priv->nthreads; thread++) { + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); + kfree(txq_pcpu->buffs); + + if (txq_pcpu->tso_headers) + dma_free_coherent(port->dev->dev.parent, + txq_pcpu->size * TSO_HEADER_SIZE, + txq_pcpu->tso_headers, + txq_pcpu->tso_headers_dma); + + txq_pcpu->tso_headers = NULL; + } + + if (txq->descs) + dma_free_coherent(port->dev->dev.parent, + txq->size * MVPP2_DESC_ALIGNED_SIZE, + txq->descs, txq->descs_dma); + + txq->descs = NULL; + txq->last_desc = 0; + txq->next_desc_to_proc = 0; + txq->descs_dma = 0; + + /* Set minimum bandwidth for disabled TXQs */ + mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0); + + /* Set Tx descriptors queue starting address and size */ + thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, 0); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, 0); + put_cpu(); +} + +/* Cleanup Tx ports */ +static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) +{ + struct mvpp2_txq_pcpu *txq_pcpu; + int delay, pending; + unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); + u32 val; + + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); + val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG); + val |= MVPP2_TXQ_DRAIN_EN_MASK; + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val); + + /* The napi queue has been stopped so wait for all packets + * to be transmitted. 
+ */ + delay = 0; + do { + if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) { + netdev_warn(port->dev, + "port %d: cleaning queue %d timed out\n", + port->id, txq->log_id); + break; + } + mdelay(1); + delay++; + + pending = mvpp2_thread_read(port->priv, thread, + MVPP2_TXQ_PENDING_REG); + pending &= MVPP2_TXQ_PENDING_MASK; + } while (pending); + + val &= ~MVPP2_TXQ_DRAIN_EN_MASK; + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val); + put_cpu(); + + for (thread = 0; thread < port->priv->nthreads; thread++) { + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); + + /* Release all packets */ + mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count); + + /* Reset queue */ + txq_pcpu->count = 0; + txq_pcpu->txq_put_index = 0; + txq_pcpu->txq_get_index = 0; + } +} + +/* Cleanup all Tx queues */ +static void mvpp2_cleanup_txqs(struct mvpp2_port *port) +{ + struct mvpp2_tx_queue *txq; + int queue; + u32 val; + + val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG); + + /* Reset Tx ports and delete Tx queues */ + val |= MVPP2_TX_PORT_FLUSH_MASK(port->id); + mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); + + for (queue = 0; queue < port->ntxqs; queue++) { + txq = port->txqs[queue]; + mvpp2_txq_clean(port, txq); + mvpp2_txq_deinit(port, txq); + } + + on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); + + val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id); + mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); +} + +/* Cleanup all Rx queues */ +static void mvpp2_cleanup_rxqs(struct mvpp2_port *port) +{ + int queue; + + for (queue = 0; queue < port->nrxqs; queue++) + mvpp2_rxq_deinit(port, port->rxqs[queue]); +} + +/* Init all Rx queues for port */ +static int mvpp2_setup_rxqs(struct mvpp2_port *port) +{ + int queue, err; + + for (queue = 0; queue < port->nrxqs; queue++) { + err = mvpp2_rxq_init(port, port->rxqs[queue]); + if (err) + goto err_cleanup; + } + return 0; + +err_cleanup: + mvpp2_cleanup_rxqs(port); + return err; +} + +/* Init all tx queues for port */ +static int mvpp2_setup_txqs(struct mvpp2_port *port) +{ + struct mvpp2_tx_queue *txq; + int queue, err; + + for (queue = 0; queue < port->ntxqs; queue++) { + txq = port->txqs[queue]; + err = mvpp2_txq_init(port, txq); + if (err) + goto err_cleanup; + + /* Assign this queue to a CPU */ + if (queue < num_possible_cpus()) + netif_set_xps_queue(port->dev, cpumask_of(queue), queue); + } + + if (port->has_tx_irqs) { + mvpp2_tx_time_coal_set(port); + for (queue = 0; queue < port->ntxqs; queue++) { + txq = port->txqs[queue]; + mvpp2_tx_pkts_coal_set(port, txq); + } + } + + on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); + return 0; + +err_cleanup: + mvpp2_cleanup_txqs(port); + return err; +} + +/* The callback for per-port interrupt */ +static irqreturn_t mvpp2_isr(int irq, void *dev_id) +{ + struct mvpp2_queue_vector *qv = dev_id; + + mvpp2_qvec_interrupt_disable(qv); + + napi_schedule(&qv->napi); + + return IRQ_HANDLED; +} + +static void mvpp2_isr_handle_ptp_queue(struct mvpp2_port *port, int nq) +{ + struct skb_shared_hwtstamps shhwtstamps; + struct mvpp2_hwtstamp_queue *queue; + struct sk_buff *skb; + void __iomem *ptp_q; + unsigned int id; + u32 r0, r1, r2; + + ptp_q = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id); + if (nq) + ptp_q += MVPP22_PTP_TX_Q1_R0 - MVPP22_PTP_TX_Q0_R0; + + queue = &port->tx_hwtstamp_queue[nq]; + + while (1) { + r0 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R0) & 0xffff; + if (!r0) + break; + + r1 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R1) & 0xffff; + r2 = readl_relaxed(ptp_q + 
MVPP22_PTP_TX_Q0_R2) & 0xffff; + + id = (r0 >> 1) & 31; + + skb = queue->skb[id]; + queue->skb[id] = NULL; + if (skb) { + u32 ts = r2 << 19 | r1 << 3 | r0 >> 13; + + mvpp22_tai_tstamp(port->priv->tai, ts, &shhwtstamps); + skb_tstamp_tx(skb, &shhwtstamps); + dev_kfree_skb_any(skb); + } + } +} + +static void mvpp2_isr_handle_ptp(struct mvpp2_port *port) +{ + void __iomem *ptp; + u32 val; + + ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id); + val = readl(ptp + MVPP22_PTP_INT_CAUSE); + if (val & MVPP22_PTP_INT_CAUSE_QUEUE0) + mvpp2_isr_handle_ptp_queue(port, 0); + if (val & MVPP22_PTP_INT_CAUSE_QUEUE1) + mvpp2_isr_handle_ptp_queue(port, 1); +} + +static void mvpp2_isr_handle_link(struct mvpp2_port *port, bool link) +{ + struct net_device *dev = port->dev; + + if (port->phylink) { + phylink_mac_change(port->phylink, link); + return; + } + + if (!netif_running(dev)) + return; + + if (link) { + mvpp2_interrupts_enable(port); + + mvpp2_egress_enable(port); + mvpp2_ingress_enable(port); + netif_carrier_on(dev); + netif_tx_wake_all_queues(dev); + } else { + netif_tx_stop_all_queues(dev); + netif_carrier_off(dev); + mvpp2_ingress_disable(port); + mvpp2_egress_disable(port); + + mvpp2_interrupts_disable(port); + } +} + +static void mvpp2_isr_handle_xlg(struct mvpp2_port *port) +{ + bool link; + u32 val; + + val = readl(port->base + MVPP22_XLG_INT_STAT); + if (val & MVPP22_XLG_INT_STAT_LINK) { + val = readl(port->base + MVPP22_XLG_STATUS); + link = (val & MVPP22_XLG_STATUS_LINK_UP); + mvpp2_isr_handle_link(port, link); + } +} + +static void mvpp2_isr_handle_gmac_internal(struct mvpp2_port *port) +{ + bool link; + u32 val; + + if (phy_interface_mode_is_rgmii(port->phy_interface) || + phy_interface_mode_is_8023z(port->phy_interface) || + port->phy_interface == PHY_INTERFACE_MODE_SGMII) { + val = readl(port->base + MVPP22_GMAC_INT_STAT); + if (val & MVPP22_GMAC_INT_STAT_LINK) { + val = readl(port->base + MVPP2_GMAC_STATUS0); + link = (val & MVPP2_GMAC_STATUS0_LINK_UP); + mvpp2_isr_handle_link(port, link); + } + } +} + +/* Per-port interrupt for link status changes */ +static irqreturn_t mvpp2_port_isr(int irq, void *dev_id) +{ + struct mvpp2_port *port = (struct mvpp2_port *)dev_id; + u32 val; + + mvpp22_gop_mask_irq(port); + + if (mvpp2_port_supports_xlg(port) && + mvpp2_is_xlg(port->phy_interface)) { + /* Check the external status register */ + val = readl(port->base + MVPP22_XLG_EXT_INT_STAT); + if (val & MVPP22_XLG_EXT_INT_STAT_XLG) + mvpp2_isr_handle_xlg(port); + if (val & MVPP22_XLG_EXT_INT_STAT_PTP) + mvpp2_isr_handle_ptp(port); + } else { + /* If it's not the XLG, we must be using the GMAC. + * Check the summary status. 
+ */ + val = readl(port->base + MVPP22_GMAC_INT_SUM_STAT); + if (val & MVPP22_GMAC_INT_SUM_STAT_INTERNAL) + mvpp2_isr_handle_gmac_internal(port); + if (val & MVPP22_GMAC_INT_SUM_STAT_PTP) + mvpp2_isr_handle_ptp(port); + } + + mvpp22_gop_unmask_irq(port); + return IRQ_HANDLED; +} + +static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer) +{ + struct net_device *dev; + struct mvpp2_port *port; + struct mvpp2_port_pcpu *port_pcpu; + unsigned int tx_todo, cause; + + port_pcpu = container_of(timer, struct mvpp2_port_pcpu, tx_done_timer); + dev = port_pcpu->dev; + + if (!netif_running(dev)) + return HRTIMER_NORESTART; + + port_pcpu->timer_scheduled = false; + port = netdev_priv(dev); + + /* Process all the Tx queues */ + cause = (1 << port->ntxqs) - 1; + tx_todo = mvpp2_tx_done(port, cause, + mvpp2_cpu_to_thread(port->priv, smp_processor_id())); + + /* Set the timer in case not all the packets were processed */ + if (tx_todo && !port_pcpu->timer_scheduled) { + port_pcpu->timer_scheduled = true; + hrtimer_forward_now(&port_pcpu->tx_done_timer, + MVPP2_TXDONE_HRTIMER_PERIOD_NS); + + return HRTIMER_RESTART; + } + return HRTIMER_NORESTART; +} + +/* Main RX/TX processing routines */ + +/* Display more error info */ +static void mvpp2_rx_error(struct mvpp2_port *port, + struct mvpp2_rx_desc *rx_desc) +{ + u32 status = mvpp2_rxdesc_status_get(port, rx_desc); + size_t sz = mvpp2_rxdesc_size_get(port, rx_desc); + char *err_str = NULL; + + switch (status & MVPP2_RXD_ERR_CODE_MASK) { + case MVPP2_RXD_ERR_CRC: + err_str = "crc"; + break; + case MVPP2_RXD_ERR_OVERRUN: + err_str = "overrun"; + break; + case MVPP2_RXD_ERR_RESOURCE: + err_str = "resource"; + break; + } + if (err_str && net_ratelimit()) + netdev_err(port->dev, + "bad rx status %08x (%s error), size=%zu\n", + status, err_str, sz); +} + +/* Handle RX checksum offload */ +static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status, + struct sk_buff *skb) +{ + if (((status & MVPP2_RXD_L3_IP4) && + !(status & MVPP2_RXD_IP4_HEADER_ERR)) || + (status & MVPP2_RXD_L3_IP6)) + if (((status & MVPP2_RXD_L4_UDP) || + (status & MVPP2_RXD_L4_TCP)) && + (status & MVPP2_RXD_L4_CSUM_OK)) { + skb->csum = 0; + skb->ip_summed = CHECKSUM_UNNECESSARY; + return; + } + + skb->ip_summed = CHECKSUM_NONE; +} + +/* Allocate a new skb and add it to BM pool */ +static int mvpp2_rx_refill(struct mvpp2_port *port, + struct mvpp2_bm_pool *bm_pool, + struct page_pool *page_pool, int pool) +{ + dma_addr_t dma_addr; + phys_addr_t phys_addr; + void *buf; + + buf = mvpp2_buf_alloc(port, bm_pool, page_pool, + &dma_addr, &phys_addr, GFP_ATOMIC); + if (!buf) + return -ENOMEM; + + mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); + + return 0; +} + +/* Handle tx checksum */ +static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb) +{ + if (skb->ip_summed == CHECKSUM_PARTIAL) { + int ip_hdr_len = 0; + u8 l4_proto; + __be16 l3_proto = vlan_get_protocol(skb); + + if (l3_proto == htons(ETH_P_IP)) { + struct iphdr *ip4h = ip_hdr(skb); + + /* Calculate IPv4 checksum and L4 checksum */ + ip_hdr_len = ip4h->ihl; + l4_proto = ip4h->protocol; + } else if (l3_proto == htons(ETH_P_IPV6)) { + struct ipv6hdr *ip6h = ipv6_hdr(skb); + + /* Read l4_protocol from one of IPv6 extra headers */ + if (skb_network_header_len(skb) > 0) + ip_hdr_len = (skb_network_header_len(skb) >> 2); + l4_proto = ip6h->nexthdr; + } else { + return MVPP2_TXD_L4_CSUM_NOT; + } + + return mvpp2_txq_desc_csum(skb_network_offset(skb), + l3_proto, ip_hdr_len, l4_proto); + } + + return 
MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE; +} + +static void mvpp2_xdp_finish_tx(struct mvpp2_port *port, u16 txq_id, int nxmit, int nxmit_byte) +{ + unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); + struct mvpp2_tx_queue *aggr_txq; + struct mvpp2_txq_pcpu *txq_pcpu; + struct mvpp2_tx_queue *txq; + struct netdev_queue *nq; + + txq = port->txqs[txq_id]; + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); + nq = netdev_get_tx_queue(port->dev, txq_id); + aggr_txq = &port->priv->aggr_txqs[thread]; + + txq_pcpu->reserved_num -= nxmit; + txq_pcpu->count += nxmit; + aggr_txq->count += nxmit; + + /* Enable transmit */ + wmb(); + mvpp2_aggr_txq_pend_desc_add(port, nxmit); + + if (txq_pcpu->count >= txq_pcpu->stop_threshold) + netif_tx_stop_queue(nq); + + /* Finalize TX processing */ + if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal) + mvpp2_txq_done(port, txq, txq_pcpu); +} + +static int +mvpp2_xdp_submit_frame(struct mvpp2_port *port, u16 txq_id, + struct xdp_frame *xdpf, bool dma_map) +{ + unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); + u32 tx_cmd = MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE | + MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC; + enum mvpp2_tx_buf_type buf_type; + struct mvpp2_txq_pcpu *txq_pcpu; + struct mvpp2_tx_queue *aggr_txq; + struct mvpp2_tx_desc *tx_desc; + struct mvpp2_tx_queue *txq; + int ret = MVPP2_XDP_TX; + dma_addr_t dma_addr; + + txq = port->txqs[txq_id]; + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); + aggr_txq = &port->priv->aggr_txqs[thread]; + + /* Check number of available descriptors */ + if (mvpp2_aggr_desc_num_check(port, aggr_txq, 1) || + mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, 1)) { + ret = MVPP2_XDP_DROPPED; + goto out; + } + + /* Get a descriptor for the first part of the packet */ + tx_desc = mvpp2_txq_next_desc_get(aggr_txq); + mvpp2_txdesc_txq_set(port, tx_desc, txq->id); + mvpp2_txdesc_size_set(port, tx_desc, xdpf->len); + + if (dma_map) { + /* XDP_REDIRECT or AF_XDP */ + dma_addr = dma_map_single(port->dev->dev.parent, xdpf->data, + xdpf->len, DMA_TO_DEVICE); + + if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) { + mvpp2_txq_desc_put(txq); + ret = MVPP2_XDP_DROPPED; + goto out; + } + + buf_type = MVPP2_TYPE_XDP_NDO; + } else { + /* XDP_TX */ + struct page *page = virt_to_page(xdpf->data); + + dma_addr = page_pool_get_dma_addr(page) + + sizeof(*xdpf) + xdpf->headroom; + dma_sync_single_for_device(port->dev->dev.parent, dma_addr, + xdpf->len, DMA_BIDIRECTIONAL); + + buf_type = MVPP2_TYPE_XDP_TX; + } + + mvpp2_txdesc_dma_addr_set(port, tx_desc, dma_addr); + + mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); + mvpp2_txq_inc_put(port, txq_pcpu, xdpf, tx_desc, buf_type); + +out: + return ret; +} + +static int +mvpp2_xdp_xmit_back(struct mvpp2_port *port, struct xdp_buff *xdp) +{ + struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); + struct xdp_frame *xdpf; + u16 txq_id; + int ret; + + xdpf = xdp_convert_buff_to_frame(xdp); + if (unlikely(!xdpf)) + return MVPP2_XDP_DROPPED; + + /* The first of the TX queues are used for XPS, + * the second half for XDP_TX + */ + txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2); + + ret = mvpp2_xdp_submit_frame(port, txq_id, xdpf, false); + if (ret == MVPP2_XDP_TX) { + u64_stats_update_begin(&stats->syncp); + stats->tx_bytes += xdpf->len; + stats->tx_packets++; + stats->xdp_tx++; + u64_stats_update_end(&stats->syncp); + + mvpp2_xdp_finish_tx(port, txq_id, 1, xdpf->len); + } else { + 
u64_stats_update_begin(&stats->syncp); + stats->xdp_tx_err++; + u64_stats_update_end(&stats->syncp); + } + + return ret; +} + +static int +mvpp2_xdp_xmit(struct net_device *dev, int num_frame, + struct xdp_frame **frames, u32 flags) +{ + struct mvpp2_port *port = netdev_priv(dev); + int i, nxmit_byte = 0, nxmit = num_frame; + struct mvpp2_pcpu_stats *stats; + u16 txq_id; + u32 ret; + + if (unlikely(test_bit(0, &port->state))) + return -ENETDOWN; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + /* The first of the TX queues are used for XPS, + * the second half for XDP_TX + */ + txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2); + + for (i = 0; i < num_frame; i++) { + ret = mvpp2_xdp_submit_frame(port, txq_id, frames[i], true); + if (ret == MVPP2_XDP_TX) { + nxmit_byte += frames[i]->len; + } else { + xdp_return_frame_rx_napi(frames[i]); + nxmit--; + } + } + + if (likely(nxmit > 0)) + mvpp2_xdp_finish_tx(port, txq_id, nxmit, nxmit_byte); + + stats = this_cpu_ptr(port->stats); + u64_stats_update_begin(&stats->syncp); + stats->tx_bytes += nxmit_byte; + stats->tx_packets += nxmit; + stats->xdp_xmit += nxmit; + stats->xdp_xmit_err += num_frame - nxmit; + u64_stats_update_end(&stats->syncp); + + return nxmit; +} + +static int +mvpp2_run_xdp(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq, + struct bpf_prog *prog, struct xdp_buff *xdp, + struct page_pool *pp, struct mvpp2_pcpu_stats *stats) +{ + unsigned int len, sync, err; + struct page *page; + u32 ret, act; + + len = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM; + act = bpf_prog_run_xdp(prog, xdp); + + /* Due xdp_adjust_tail: DMA sync for_device cover max len CPU touch */ + sync = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM; + sync = max(sync, len); + + switch (act) { + case XDP_PASS: + stats->xdp_pass++; + ret = MVPP2_XDP_PASS; + break; + case XDP_REDIRECT: + err = xdp_do_redirect(port->dev, xdp, prog); + if (unlikely(err)) { + ret = MVPP2_XDP_DROPPED; + page = virt_to_head_page(xdp->data); + page_pool_put_page(pp, page, sync, true); + } else { + ret = MVPP2_XDP_REDIR; + stats->xdp_redirect++; + } + break; + case XDP_TX: + ret = mvpp2_xdp_xmit_back(port, xdp); + if (ret != MVPP2_XDP_TX) { + page = virt_to_head_page(xdp->data); + page_pool_put_page(pp, page, sync, true); + } + break; + default: + bpf_warn_invalid_xdp_action(act); + fallthrough; + case XDP_ABORTED: + trace_xdp_exception(port->dev, prog, act); + fallthrough; + case XDP_DROP: + page = virt_to_head_page(xdp->data); + page_pool_put_page(pp, page, sync, true); + ret = MVPP2_XDP_DROPPED; + stats->xdp_drop++; + break; + } + + return ret; +} + +static void mvpp2_buff_hdr_pool_put(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc, + int pool, u32 rx_status) +{ + phys_addr_t phys_addr, phys_addr_next; + dma_addr_t dma_addr, dma_addr_next; + struct mvpp2_buff_hdr *buff_hdr; + + phys_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc); + dma_addr = mvpp2_rxdesc_cookie_get(port, rx_desc); + + do { + buff_hdr = (struct mvpp2_buff_hdr *)phys_to_virt(phys_addr); + + phys_addr_next = le32_to_cpu(buff_hdr->next_phys_addr); + dma_addr_next = le32_to_cpu(buff_hdr->next_dma_addr); + + if (port->priv->hw_version >= MVPP22) { + phys_addr_next |= ((u64)buff_hdr->next_phys_addr_high << 32); + dma_addr_next |= ((u64)buff_hdr->next_dma_addr_high << 32); + } + + mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); + + phys_addr = phys_addr_next; + dma_addr = dma_addr_next; + + } while 
(!MVPP2_B_HDR_INFO_IS_LAST(le16_to_cpu(buff_hdr->info))); +} + +/* Main rx processing */ +static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, + int rx_todo, struct mvpp2_rx_queue *rxq) +{ + struct net_device *dev = port->dev; + struct mvpp2_pcpu_stats ps = {}; + enum dma_data_direction dma_dir; + struct bpf_prog *xdp_prog; + struct xdp_buff xdp; + int rx_received; + int rx_done = 0; + u32 xdp_ret = 0; + + rcu_read_lock(); + + xdp_prog = READ_ONCE(port->xdp_prog); + + /* Get number of received packets and clamp the to-do */ + rx_received = mvpp2_rxq_received(port, rxq->id); + if (rx_todo > rx_received) + rx_todo = rx_received; + + while (rx_done < rx_todo) { + struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); + struct mvpp2_bm_pool *bm_pool; + struct page_pool *pp = NULL; + struct sk_buff *skb; + unsigned int frag_size; + dma_addr_t dma_addr; + phys_addr_t phys_addr; + u32 rx_status, timestamp; + int pool, rx_bytes, err, ret; + void *data; + + rx_done++; + rx_status = mvpp2_rxdesc_status_get(port, rx_desc); + rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc); + rx_bytes -= MVPP2_MH_SIZE; + dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc); + phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc); + data = (void *)phys_to_virt(phys_addr); + + pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >> + MVPP2_RXD_BM_POOL_ID_OFFS; + bm_pool = &port->priv->bm_pools[pool]; + + if (port->priv->percpu_pools) { + pp = port->priv->page_pool[pool]; + dma_dir = page_pool_get_dma_dir(pp); + } else { + dma_dir = DMA_FROM_DEVICE; + } + + dma_sync_single_for_cpu(dev->dev.parent, dma_addr, + rx_bytes + MVPP2_MH_SIZE, + dma_dir); + + /* Buffer header not supported */ + if (rx_status & MVPP2_RXD_BUF_HDR) + goto err_drop_frame; + + /* In case of an error, release the requested buffer pointer + * to the Buffer Manager. This request process is controlled + * by the hardware, and the information about the buffer is + * comprised by the RX descriptor. + */ + if (rx_status & MVPP2_RXD_ERR_SUMMARY) + goto err_drop_frame; + + /* Prefetch header */ + prefetch(data); + + if (bm_pool->frag_size > PAGE_SIZE) + frag_size = 0; + else + frag_size = bm_pool->frag_size; + + if (xdp_prog) { + xdp.data_hard_start = data; + xdp.data = data + MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM; + xdp.data_end = xdp.data + rx_bytes; + xdp.frame_sz = PAGE_SIZE; + + if (bm_pool->pkt_size == MVPP2_BM_SHORT_PKT_SIZE) + xdp.rxq = &rxq->xdp_rxq_short; + else + xdp.rxq = &rxq->xdp_rxq_long; + + xdp_set_data_meta_invalid(&xdp); + + ret = mvpp2_run_xdp(port, rxq, xdp_prog, &xdp, pp, &ps); + + if (ret) { + xdp_ret |= ret; + err = mvpp2_rx_refill(port, bm_pool, pp, pool); + if (err) { + netdev_err(port->dev, "failed to refill BM pools\n"); + goto err_drop_frame; + } + + ps.rx_packets++; + ps.rx_bytes += rx_bytes; + continue; + } + } + + skb = build_skb(data, frag_size); + if (!skb) { + netdev_warn(port->dev, "skb build failed\n"); + goto err_drop_frame; + } + + /* If we have RX hardware timestamping enabled, grab the + * timestamp from the queue and convert. 
+ */ + if (mvpp22_rx_hwtstamping(port)) { + timestamp = le32_to_cpu(rx_desc->pp22.timestamp); + mvpp22_tai_tstamp(port->priv->tai, timestamp, + skb_hwtstamps(skb)); + } + + err = mvpp2_rx_refill(port, bm_pool, pp, pool); + if (err) { + netdev_err(port->dev, "failed to refill BM pools\n"); + dev_kfree_skb_any(skb); + goto err_drop_frame; + } + + if (pp) + page_pool_release_page(pp, virt_to_page(data)); + else + dma_unmap_single_attrs(dev->dev.parent, dma_addr, + bm_pool->buf_size, DMA_FROM_DEVICE, + DMA_ATTR_SKIP_CPU_SYNC); + + ps.rx_packets++; + ps.rx_bytes += rx_bytes; + + skb_reserve(skb, MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM); + skb_put(skb, rx_bytes); + skb->protocol = eth_type_trans(skb, dev); + mvpp2_rx_csum(port, rx_status, skb); + + napi_gro_receive(napi, skb); + continue; + +err_drop_frame: + dev->stats.rx_errors++; + mvpp2_rx_error(port, rx_desc); + /* Return the buffer to the pool */ + if (rx_status & MVPP2_RXD_BUF_HDR) + mvpp2_buff_hdr_pool_put(port, rx_desc, pool, rx_status); + else + mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); + } + + rcu_read_unlock(); + + if (xdp_ret & MVPP2_XDP_REDIR) + xdp_do_flush_map(); + + if (ps.rx_packets) { + struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); + + u64_stats_update_begin(&stats->syncp); + stats->rx_packets += ps.rx_packets; + stats->rx_bytes += ps.rx_bytes; + /* xdp */ + stats->xdp_redirect += ps.xdp_redirect; + stats->xdp_pass += ps.xdp_pass; + stats->xdp_drop += ps.xdp_drop; + u64_stats_update_end(&stats->syncp); + } + + /* Update Rx queue management counters */ + wmb(); + mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done); + + return rx_todo; +} + +static inline void +tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, + struct mvpp2_tx_desc *desc) +{ + unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); + struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread); + + dma_addr_t buf_dma_addr = + mvpp2_txdesc_dma_addr_get(port, desc); + size_t buf_sz = + mvpp2_txdesc_size_get(port, desc); + if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr)) + dma_unmap_single(port->dev->dev.parent, buf_dma_addr, + buf_sz, DMA_TO_DEVICE); + mvpp2_txq_desc_put(txq); +} + +static void mvpp2_txdesc_clear_ptp(struct mvpp2_port *port, + struct mvpp2_tx_desc *desc) +{ + /* We only need to clear the low bits */ + if (port->priv->hw_version != MVPP21) + desc->pp22.ptp_descriptor &= + cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW); +} + +static bool mvpp2_tx_hw_tstamp(struct mvpp2_port *port, + struct mvpp2_tx_desc *tx_desc, + struct sk_buff *skb) +{ + struct mvpp2_hwtstamp_queue *queue; + unsigned int mtype, type, i; + struct ptp_header *hdr; + u64 ptpdesc; + + if (port->priv->hw_version == MVPP21 || + port->tx_hwtstamp_type == HWTSTAMP_TX_OFF) + return false; + + type = ptp_classify_raw(skb); + if (!type) + return false; + + hdr = ptp_parse_header(skb, type); + if (!hdr) + return false; + + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + + ptpdesc = MVPP22_PTP_MACTIMESTAMPINGEN | + MVPP22_PTP_ACTION_CAPTURE; + queue = &port->tx_hwtstamp_queue[0]; + + switch (type & PTP_CLASS_VMASK) { + case PTP_CLASS_V1: + ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV1); + break; + + case PTP_CLASS_V2: + ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV2); + mtype = hdr->tsmt & 15; + /* Direct PTP Sync messages to queue 1 */ + if (mtype == 0) { + ptpdesc |= MVPP22_PTP_TIMESTAMPQUEUESELECT; + queue = &port->tx_hwtstamp_queue[1]; + } + break; + } + + /* Take a reference on the skb and insert into 
our queue */ + i = queue->next; + queue->next = (i + 1) & 31; + if (queue->skb[i]) + dev_kfree_skb_any(queue->skb[i]); + queue->skb[i] = skb_get(skb); + + ptpdesc |= MVPP22_PTP_TIMESTAMPENTRYID(i); + + /* + * 3:0 - PTPAction + * 6:4 - PTPPacketFormat + * 7 - PTP_CF_WraparoundCheckEn + * 9:8 - IngressTimestampSeconds[1:0] + * 10 - Reserved + * 11 - MACTimestampingEn + * 17:12 - PTP_TimestampQueueEntryID[5:0] + * 18 - PTPTimestampQueueSelect + * 19 - UDPChecksumUpdateEn + * 27:20 - TimestampOffset + * PTP, NTPTransmit, OWAMP/TWAMP - L3 to PTP header + * NTPTs, Y.1731 - L3 to timestamp entry + * 35:28 - UDP Checksum Offset + * + * stored in tx descriptor bits 75:64 (11:0) and 191:168 (35:12) + */ + tx_desc->pp22.ptp_descriptor &= + cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW); + tx_desc->pp22.ptp_descriptor |= + cpu_to_le32(ptpdesc & MVPP22_PTP_DESC_MASK_LOW); + tx_desc->pp22.buf_dma_addr_ptp &= cpu_to_le64(~0xffffff0000000000ULL); + tx_desc->pp22.buf_dma_addr_ptp |= cpu_to_le64((ptpdesc >> 12) << 40); + + return true; +} + +/* Handle tx fragmentation processing */ +static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb, + struct mvpp2_tx_queue *aggr_txq, + struct mvpp2_tx_queue *txq) +{ + unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); + struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread); + struct mvpp2_tx_desc *tx_desc; + int i; + dma_addr_t buf_dma_addr; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + void *addr = skb_frag_address(frag); + + tx_desc = mvpp2_txq_next_desc_get(aggr_txq); + mvpp2_txdesc_clear_ptp(port, tx_desc); + mvpp2_txdesc_txq_set(port, tx_desc, txq->id); + mvpp2_txdesc_size_set(port, tx_desc, skb_frag_size(frag)); + + buf_dma_addr = dma_map_single(port->dev->dev.parent, addr, + skb_frag_size(frag), + DMA_TO_DEVICE); + if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) { + mvpp2_txq_desc_put(txq); + goto cleanup; + } + + mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); + + if (i == (skb_shinfo(skb)->nr_frags - 1)) { + /* Last descriptor */ + mvpp2_txdesc_cmd_set(port, tx_desc, + MVPP2_TXD_L_DESC); + mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB); + } else { + /* Descriptor in the middle: Not First, Not Last */ + mvpp2_txdesc_cmd_set(port, tx_desc, 0); + mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB); + } + } + + return 0; +cleanup: + /* Release all descriptors that were used to map fragments of + * this packet, as well as the corresponding DMA mappings + */ + for (i = i - 1; i >= 0; i--) { + tx_desc = txq->descs + i; + tx_desc_unmap_put(port, txq, tx_desc); + } + + return -ENOMEM; +} + +static inline void mvpp2_tso_put_hdr(struct sk_buff *skb, + struct net_device *dev, + struct mvpp2_tx_queue *txq, + struct mvpp2_tx_queue *aggr_txq, + struct mvpp2_txq_pcpu *txq_pcpu, + int hdr_sz) +{ + struct mvpp2_port *port = netdev_priv(dev); + struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq); + dma_addr_t addr; + + mvpp2_txdesc_clear_ptp(port, tx_desc); + mvpp2_txdesc_txq_set(port, tx_desc, txq->id); + mvpp2_txdesc_size_set(port, tx_desc, hdr_sz); + + addr = txq_pcpu->tso_headers_dma + + txq_pcpu->txq_put_index * TSO_HEADER_SIZE; + mvpp2_txdesc_dma_addr_set(port, tx_desc, addr); + + mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) | + MVPP2_TXD_F_DESC | + MVPP2_TXD_PADDING_DISABLE); + mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB); +} + +static inline int 
mvpp2_tso_put_data(struct sk_buff *skb, + struct net_device *dev, struct tso_t *tso, + struct mvpp2_tx_queue *txq, + struct mvpp2_tx_queue *aggr_txq, + struct mvpp2_txq_pcpu *txq_pcpu, + int sz, bool left, bool last) +{ + struct mvpp2_port *port = netdev_priv(dev); + struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq); + dma_addr_t buf_dma_addr; + + mvpp2_txdesc_clear_ptp(port, tx_desc); + mvpp2_txdesc_txq_set(port, tx_desc, txq->id); + mvpp2_txdesc_size_set(port, tx_desc, sz); + + buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) { + mvpp2_txq_desc_put(txq); + return -ENOMEM; + } + + mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); + + if (!left) { + mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC); + if (last) { + mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB); + return 0; + } + } else { + mvpp2_txdesc_cmd_set(port, tx_desc, 0); + } + + mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB); + return 0; +} + +static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev, + struct mvpp2_tx_queue *txq, + struct mvpp2_tx_queue *aggr_txq, + struct mvpp2_txq_pcpu *txq_pcpu) +{ + struct mvpp2_port *port = netdev_priv(dev); + int hdr_sz, i, len, descs = 0; + struct tso_t tso; + + /* Check number of available descriptors */ + if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) || + mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, + tso_count_descs(skb))) + return 0; + + hdr_sz = tso_start(skb, &tso); + + len = skb->len - hdr_sz; + while (len > 0) { + int left = min_t(int, skb_shinfo(skb)->gso_size, len); + char *hdr = txq_pcpu->tso_headers + + txq_pcpu->txq_put_index * TSO_HEADER_SIZE; + + len -= left; + descs++; + + tso_build_hdr(skb, hdr, &tso, left, len == 0); + mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz); + + while (left > 0) { + int sz = min_t(int, tso.size, left); + left -= sz; + descs++; + + if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq, + txq_pcpu, sz, left, len == 0)) + goto release; + tso_build_data(skb, &tso, sz); + } + } + + return descs; + +release: + for (i = descs - 1; i >= 0; i--) { + struct mvpp2_tx_desc *tx_desc = txq->descs + i; + tx_desc_unmap_put(port, txq, tx_desc); + } + return 0; +} + +/* Main tx processing */ +static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev) +{ + struct mvpp2_port *port = netdev_priv(dev); + struct mvpp2_tx_queue *txq, *aggr_txq; + struct mvpp2_txq_pcpu *txq_pcpu; + struct mvpp2_tx_desc *tx_desc; + dma_addr_t buf_dma_addr; + unsigned long flags = 0; + unsigned int thread; + int frags = 0; + u16 txq_id; + u32 tx_cmd; + + thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); + + txq_id = skb_get_queue_mapping(skb); + txq = port->txqs[txq_id]; + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); + aggr_txq = &port->priv->aggr_txqs[thread]; + + if (test_bit(thread, &port->priv->lock_map)) + spin_lock_irqsave(&port->tx_lock[thread], flags); + + if (skb_is_gso(skb)) { + frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu); + goto out; + } + frags = skb_shinfo(skb)->nr_frags + 1; + + /* Check number of available descriptors */ + if (mvpp2_aggr_desc_num_check(port, aggr_txq, frags) || + mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, frags)) { + frags = 0; + goto out; + } + + /* Get a descriptor for the first part of the packet */ + tx_desc = mvpp2_txq_next_desc_get(aggr_txq); + if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) || + 
!mvpp2_tx_hw_tstamp(port, tx_desc, skb)) + mvpp2_txdesc_clear_ptp(port, tx_desc); + mvpp2_txdesc_txq_set(port, tx_desc, txq->id); + mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb)); + + buf_dma_addr = dma_map_single(dev->dev.parent, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) { + mvpp2_txq_desc_put(txq); + frags = 0; + goto out; + } + + mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); + + tx_cmd = mvpp2_skb_tx_csum(port, skb); + + if (frags == 1) { + /* First and Last descriptor */ + tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC; + mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); + mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB); + } else { + /* First but not Last */ + tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE; + mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); + mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB); + + /* Continue with other skb fragments */ + if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) { + tx_desc_unmap_put(port, txq, tx_desc); + frags = 0; + } + } + +out: + if (frags > 0) { + struct mvpp2_pcpu_stats *stats = per_cpu_ptr(port->stats, thread); + struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id); + + txq_pcpu->reserved_num -= frags; + txq_pcpu->count += frags; + aggr_txq->count += frags; + + /* Enable transmit */ + wmb(); + mvpp2_aggr_txq_pend_desc_add(port, frags); + + if (txq_pcpu->count >= txq_pcpu->stop_threshold) + netif_tx_stop_queue(nq); + + u64_stats_update_begin(&stats->syncp); + stats->tx_packets++; + stats->tx_bytes += skb->len; + u64_stats_update_end(&stats->syncp); + } else { + dev->stats.tx_dropped++; + dev_kfree_skb_any(skb); + } + + /* Finalize TX processing */ + if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal) + mvpp2_txq_done(port, txq, txq_pcpu); + + /* Set the timer in case not all frags were processed */ + if (!port->has_tx_irqs && txq_pcpu->count <= frags && + txq_pcpu->count > 0) { + struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread); + + if (!port_pcpu->timer_scheduled) { + port_pcpu->timer_scheduled = true; + hrtimer_start(&port_pcpu->tx_done_timer, + MVPP2_TXDONE_HRTIMER_PERIOD_NS, + HRTIMER_MODE_REL_PINNED_SOFT); + } + } + + if (test_bit(thread, &port->priv->lock_map)) + spin_unlock_irqrestore(&port->tx_lock[thread], flags); + + return NETDEV_TX_OK; +} + +static inline void mvpp2_cause_error(struct net_device *dev, int cause) +{ + if (cause & MVPP2_CAUSE_FCS_ERR_MASK) + netdev_err(dev, "FCS error\n"); + if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK) + netdev_err(dev, "rx fifo overrun error\n"); + if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK) + netdev_err(dev, "tx fifo underrun error\n"); +} + +static int mvpp2_poll(struct napi_struct *napi, int budget) +{ + u32 cause_rx_tx, cause_rx, cause_tx, cause_misc; + int rx_done = 0; + struct mvpp2_port *port = netdev_priv(napi->dev); + struct mvpp2_queue_vector *qv; + unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); + + qv = container_of(napi, struct mvpp2_queue_vector, napi); + + /* Rx/Tx cause register + * + * Bits 0-15: each bit indicates received packets on the Rx queue + * (bit 0 is for Rx queue 0). + * + * Bits 16-23: each bit indicates transmitted packets on the Tx queue + * (bit 16 is for Tx queue 0). 
+ * + * Each CPU has its own Rx/Tx cause register + */ + cause_rx_tx = mvpp2_thread_read_relaxed(port->priv, qv->sw_thread_id, + MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); + + cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; + if (cause_misc) { + mvpp2_cause_error(port->dev, cause_misc); + + /* Clear the cause register */ + mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0); + mvpp2_thread_write(port->priv, thread, + MVPP2_ISR_RX_TX_CAUSE_REG(port->id), + cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); + } + + if (port->has_tx_irqs) { + cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; + if (cause_tx) { + cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET; + mvpp2_tx_done(port, cause_tx, qv->sw_thread_id); + } + } + + /* Process RX packets */ + cause_rx = cause_rx_tx & + MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version); + cause_rx <<= qv->first_rxq; + cause_rx |= qv->pending_cause_rx; + while (cause_rx && budget > 0) { + int count; + struct mvpp2_rx_queue *rxq; + + rxq = mvpp2_get_rx_queue(port, cause_rx); + if (!rxq) + break; + + count = mvpp2_rx(port, napi, budget, rxq); + rx_done += count; + budget -= count; + if (budget > 0) { + /* Clear the bit associated to this Rx queue + * so that next iteration will continue from + * the next Rx queue. + */ + cause_rx &= ~(1 << rxq->logic_rxq); + } + } + + if (budget > 0) { + cause_rx = 0; + napi_complete_done(napi, rx_done); + + mvpp2_qvec_interrupt_enable(qv); + } + qv->pending_cause_rx = cause_rx; + return rx_done; +} + +static void mvpp22_mode_reconfigure(struct mvpp2_port *port) +{ + u32 ctrl3; + + /* Set the GMAC & XLG MAC in reset */ + mvpp2_mac_reset_assert(port); + + /* Set the MPCS and XPCS in reset */ + mvpp22_pcs_reset_assert(port); + + /* comphy reconfiguration */ + mvpp22_comphy_init(port); + + /* gop reconfiguration */ + mvpp22_gop_init(port); + + mvpp22_pcs_reset_deassert(port); + + if (mvpp2_port_supports_xlg(port)) { + ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG); + ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK; + + if (mvpp2_is_xlg(port->phy_interface)) + ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G; + else + ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC; + + writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG); + } + + if (mvpp2_port_supports_xlg(port) && mvpp2_is_xlg(port->phy_interface)) + mvpp2_xlg_max_rx_size_set(port); + else + mvpp2_gmac_max_rx_size_set(port); +} + +/* Set hw internals when starting port */ +static void mvpp2_start_dev(struct mvpp2_port *port) +{ + int i; + + mvpp2_txp_max_tx_size_set(port); + + for (i = 0; i < port->nqvecs; i++) + napi_enable(&port->qvecs[i].napi); + + /* Enable interrupts on all threads */ + mvpp2_interrupts_enable(port); + + if (port->priv->hw_version == MVPP22) + mvpp22_mode_reconfigure(port); + + if (port->phylink) { + phylink_start(port->phylink); + } else { + mvpp2_acpi_start(port); + } + + netif_tx_start_all_queues(port->dev); + + clear_bit(0, &port->state); +} + +/* Set hw internals when stopping port */ +static void mvpp2_stop_dev(struct mvpp2_port *port) +{ + int i; + + set_bit(0, &port->state); + + /* Disable interrupts on all threads */ + mvpp2_interrupts_disable(port); + + for (i = 0; i < port->nqvecs; i++) + napi_disable(&port->qvecs[i].napi); + + if (port->phylink) + phylink_stop(port->phylink); + phy_power_off(port->comphy); +} + +static int mvpp2_check_ringparam_valid(struct net_device *dev, + struct ethtool_ringparam *ring) +{ + u16 new_rx_pending = ring->rx_pending; + u16 new_tx_pending = ring->tx_pending; + + if (ring->rx_pending == 0 || 
ring->tx_pending == 0) + return -EINVAL; + + if (ring->rx_pending > MVPP2_MAX_RXD_MAX) + new_rx_pending = MVPP2_MAX_RXD_MAX; + else if (!IS_ALIGNED(ring->rx_pending, 16)) + new_rx_pending = ALIGN(ring->rx_pending, 16); + + if (ring->tx_pending > MVPP2_MAX_TXD_MAX) + new_tx_pending = MVPP2_MAX_TXD_MAX; + else if (!IS_ALIGNED(ring->tx_pending, 32)) + new_tx_pending = ALIGN(ring->tx_pending, 32); + + /* The Tx ring size cannot be smaller than the minimum number of + * descriptors needed for TSO. + */ + if (new_tx_pending < MVPP2_MAX_SKB_DESCS) + new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32); + + if (ring->rx_pending != new_rx_pending) { + netdev_info(dev, "illegal Rx ring size value %d, round to %d\n", + ring->rx_pending, new_rx_pending); + ring->rx_pending = new_rx_pending; + } + + if (ring->tx_pending != new_tx_pending) { + netdev_info(dev, "illegal Tx ring size value %d, round to %d\n", + ring->tx_pending, new_tx_pending); + ring->tx_pending = new_tx_pending; + } + + return 0; +} + +static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr) +{ + u32 mac_addr_l, mac_addr_m, mac_addr_h; + + mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG); + mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE); + mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH); + addr[0] = (mac_addr_h >> 24) & 0xFF; + addr[1] = (mac_addr_h >> 16) & 0xFF; + addr[2] = (mac_addr_h >> 8) & 0xFF; + addr[3] = mac_addr_h & 0xFF; + addr[4] = mac_addr_m & 0xFF; + addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF; +} + +static int mvpp2_irqs_init(struct mvpp2_port *port) +{ + int err, i; + + for (i = 0; i < port->nqvecs; i++) { + struct mvpp2_queue_vector *qv = port->qvecs + i; + + if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) { + qv->mask = kzalloc(cpumask_size(), GFP_KERNEL); + if (!qv->mask) { + err = -ENOMEM; + goto err; + } + + irq_set_status_flags(qv->irq, IRQ_NO_BALANCING); + } + + err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv); + if (err) + goto err; + + if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) { + unsigned int cpu; + + for_each_present_cpu(cpu) { + if (mvpp2_cpu_to_thread(port->priv, cpu) == + qv->sw_thread_id) + cpumask_set_cpu(cpu, qv->mask); + } + + irq_set_affinity_hint(qv->irq, qv->mask); + } + } + + return 0; +err: + for (i = 0; i < port->nqvecs; i++) { + struct mvpp2_queue_vector *qv = port->qvecs + i; + + irq_set_affinity_hint(qv->irq, NULL); + kfree(qv->mask); + qv->mask = NULL; + free_irq(qv->irq, qv); + } + + return err; +} + +static void mvpp2_irqs_deinit(struct mvpp2_port *port) +{ + int i; + + for (i = 0; i < port->nqvecs; i++) { + struct mvpp2_queue_vector *qv = port->qvecs + i; + + irq_set_affinity_hint(qv->irq, NULL); + kfree(qv->mask); + qv->mask = NULL; + irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING); + free_irq(qv->irq, qv); + } +} + +static bool mvpp22_rss_is_supported(void) +{ + return queue_mode == MVPP2_QDIST_MULTI_MODE; +} + +static int mvpp2_open(struct net_device *dev) +{ + struct mvpp2_port *port = netdev_priv(dev); + struct mvpp2 *priv = port->priv; + unsigned char mac_bcast[ETH_ALEN] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + bool valid = false; + int err; + + err = mvpp2_prs_mac_da_accept(port, mac_bcast, true); + if (err) { + netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n"); + return err; + } + err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true); + if (err) { + netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n"); + return err; + } + err = mvpp2_prs_tag_mode_set(port->priv, port->id, 
MVPP2_TAG_TYPE_MH); + if (err) { + netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n"); + return err; + } + err = mvpp2_prs_def_flow(port); + if (err) { + netdev_err(dev, "mvpp2_prs_def_flow failed\n"); + return err; + } + + /* Allocate the Rx/Tx queues */ + err = mvpp2_setup_rxqs(port); + if (err) { + netdev_err(port->dev, "cannot allocate Rx queues\n"); + return err; + } + + err = mvpp2_setup_txqs(port); + if (err) { + netdev_err(port->dev, "cannot allocate Tx queues\n"); + goto err_cleanup_rxqs; + } + + err = mvpp2_irqs_init(port); + if (err) { + netdev_err(port->dev, "cannot init IRQs\n"); + goto err_cleanup_txqs; + } + + /* Phylink isn't supported yet in ACPI mode */ + if (port->of_node) { + err = phylink_of_phy_connect(port->phylink, port->of_node, 0); + if (err) { + netdev_err(port->dev, "could not attach PHY (%d)\n", + err); + goto err_free_irq; + } + + valid = true; + } + + if (priv->hw_version == MVPP22 && port->port_irq) { + err = request_irq(port->port_irq, mvpp2_port_isr, 0, + dev->name, port); + if (err) { + netdev_err(port->dev, + "cannot request port link/ptp IRQ %d\n", + port->port_irq); + goto err_free_irq; + } + + mvpp22_gop_setup_irq(port); + + /* In default link is down */ + netif_carrier_off(port->dev); + + valid = true; + } else { + port->port_irq = 0; + } + + if (!valid) { + netdev_err(port->dev, + "invalid configuration: no dt or link IRQ"); + err = -ENOENT; + goto err_free_irq; + } + + /* Unmask interrupts on all CPUs */ + on_each_cpu(mvpp2_interrupts_unmask, port, 1); + mvpp2_shared_interrupt_mask_unmask(port, false); + + mvpp2_start_dev(port); + + /* Start hardware statistics gathering */ + queue_delayed_work(priv->stats_queue, &port->stats_work, + MVPP2_MIB_COUNTERS_STATS_DELAY); + + return 0; + +err_free_irq: + mvpp2_irqs_deinit(port); +err_cleanup_txqs: + mvpp2_cleanup_txqs(port); +err_cleanup_rxqs: + mvpp2_cleanup_rxqs(port); + return err; +} + +static int mvpp2_stop(struct net_device *dev) +{ + struct mvpp2_port *port = netdev_priv(dev); + struct mvpp2_port_pcpu *port_pcpu; + unsigned int thread; + + mvpp2_stop_dev(port); + + /* Mask interrupts on all threads */ + on_each_cpu(mvpp2_interrupts_mask, port, 1); + mvpp2_shared_interrupt_mask_unmask(port, true); + + if (port->phylink) + phylink_disconnect_phy(port->phylink); + if (port->port_irq) + free_irq(port->port_irq, port); + + mvpp2_irqs_deinit(port); + if (!port->has_tx_irqs) { + for (thread = 0; thread < port->priv->nthreads; thread++) { + port_pcpu = per_cpu_ptr(port->pcpu, thread); + + hrtimer_cancel(&port_pcpu->tx_done_timer); + port_pcpu->timer_scheduled = false; + } + } + mvpp2_cleanup_rxqs(port); + mvpp2_cleanup_txqs(port); + + cancel_delayed_work_sync(&port->stats_work); + + mvpp2_mac_reset_assert(port); + mvpp22_pcs_reset_assert(port); + + return 0; +} + +static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port, + struct netdev_hw_addr_list *list) +{ + struct netdev_hw_addr *ha; + int ret; + + netdev_hw_addr_list_for_each(ha, list) { + ret = mvpp2_prs_mac_da_accept(port, ha->addr, true); + if (ret) + return ret; + } + + return 0; +} + +static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable) +{ + if (!enable && (port->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) + mvpp2_prs_vid_enable_filtering(port); + else + mvpp2_prs_vid_disable_filtering(port); + + mvpp2_prs_mac_promisc_set(port->priv, port->id, + MVPP2_PRS_L2_UNI_CAST, enable); + + mvpp2_prs_mac_promisc_set(port->priv, port->id, + MVPP2_PRS_L2_MULTI_CAST, enable); +} + +static void mvpp2_set_rx_mode(struct net_device 
*dev) +{ + struct mvpp2_port *port = netdev_priv(dev); + + /* Clear the whole UC and MC list */ + mvpp2_prs_mac_del_all(port); + + if (dev->flags & IFF_PROMISC) { + mvpp2_set_rx_promisc(port, true); + return; + } + + mvpp2_set_rx_promisc(port, false); + + if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX || + mvpp2_prs_mac_da_accept_list(port, &dev->uc)) + mvpp2_prs_mac_promisc_set(port->priv, port->id, + MVPP2_PRS_L2_UNI_CAST, true); + + if (dev->flags & IFF_ALLMULTI) { + mvpp2_prs_mac_promisc_set(port->priv, port->id, + MVPP2_PRS_L2_MULTI_CAST, true); + return; + } + + if (netdev_mc_count(dev) > MVPP2_PRS_MAC_MC_FILT_MAX || + mvpp2_prs_mac_da_accept_list(port, &dev->mc)) + mvpp2_prs_mac_promisc_set(port->priv, port->id, + MVPP2_PRS_L2_MULTI_CAST, true); +} + +static int mvpp2_set_mac_address(struct net_device *dev, void *p) +{ + const struct sockaddr *addr = p; + int err; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + err = mvpp2_prs_update_mac_da(dev, addr->sa_data); + if (err) { + /* Reconfigure parser accept the original MAC address */ + mvpp2_prs_update_mac_da(dev, dev->dev_addr); + netdev_err(dev, "failed to change MAC address\n"); + } + return err; +} + +/* Shut down all the ports, reconfigure the pools as percpu or shared, + * then bring up again all ports. + */ +static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu) +{ + int numbufs = MVPP2_BM_POOLS_NUM, i; + struct mvpp2_port *port = NULL; + bool status[MVPP2_MAX_PORTS]; + + for (i = 0; i < priv->port_count; i++) { + port = priv->port_list[i]; + status[i] = netif_running(port->dev); + if (status[i]) + mvpp2_stop(port->dev); + } + + /* nrxqs is the same for all ports */ + if (priv->percpu_pools) + numbufs = port->nrxqs * 2; + + for (i = 0; i < numbufs; i++) + mvpp2_bm_pool_destroy(port->dev->dev.parent, priv, &priv->bm_pools[i]); + + devm_kfree(port->dev->dev.parent, priv->bm_pools); + priv->percpu_pools = percpu; + mvpp2_bm_init(port->dev->dev.parent, priv); + + for (i = 0; i < priv->port_count; i++) { + port = priv->port_list[i]; + mvpp2_swf_bm_pool_init(port); + if (status[i]) + mvpp2_open(port->dev); + } + + return 0; +} + +static int mvpp2_change_mtu(struct net_device *dev, int mtu) +{ + struct mvpp2_port *port = netdev_priv(dev); + bool running = netif_running(dev); + struct mvpp2 *priv = port->priv; + int err; + + if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) { + netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu, + ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8)); + mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8); + } + + if (port->xdp_prog && mtu > MVPP2_MAX_RX_BUF_SIZE) { + netdev_err(dev, "Illegal MTU value %d (> %d) for XDP mode\n", + mtu, (int)MVPP2_MAX_RX_BUF_SIZE); + return -EINVAL; + } + + if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE) { + if (priv->percpu_pools) { + netdev_warn(dev, "mtu %d too high, switching to shared buffers", mtu); + mvpp2_bm_switch_buffers(priv, false); + } + } else { + bool jumbo = false; + int i; + + for (i = 0; i < priv->port_count; i++) + if (priv->port_list[i] != port && + MVPP2_RX_PKT_SIZE(priv->port_list[i]->dev->mtu) > + MVPP2_BM_LONG_PKT_SIZE) { + jumbo = true; + break; + } + + /* No port is using jumbo frames */ + if (!jumbo) { + dev_info(port->dev->dev.parent, + "all ports have a low MTU, switching to per-cpu buffers"); + mvpp2_bm_switch_buffers(priv, true); + } + } + + if (running) + mvpp2_stop_dev(port); + + err = mvpp2_bm_update_mtu(dev, mtu); + if (err) { + netdev_err(dev, "failed to change MTU\n"); + /* Reconfigure BM to the original MTU */ + 
mvpp2_bm_update_mtu(dev, dev->mtu); + } else { + port->pkt_size = MVPP2_RX_PKT_SIZE(mtu); + } + + if (running) { + mvpp2_start_dev(port); + mvpp2_egress_enable(port); + mvpp2_ingress_enable(port); + } + + return err; +} + +static int mvpp2_check_pagepool_dma(struct mvpp2_port *port) +{ + enum dma_data_direction dma_dir = DMA_FROM_DEVICE; + struct mvpp2 *priv = port->priv; + int err = -1, i; + + if (!priv->percpu_pools) + return err; + + if (!priv->page_pool[0]) + return -ENOMEM; + + for (i = 0; i < priv->port_count; i++) { + port = priv->port_list[i]; + if (port->xdp_prog) { + dma_dir = DMA_BIDIRECTIONAL; + break; + } + } + + /* All pools are equal in terms of DMA direction */ + if (priv->page_pool[0]->p.dma_dir != dma_dir) + err = mvpp2_bm_switch_buffers(priv, true); + + return err; +} + +static void +mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + struct mvpp2_port *port = netdev_priv(dev); + unsigned int start; + unsigned int cpu; + + for_each_possible_cpu(cpu) { + struct mvpp2_pcpu_stats *cpu_stats; + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + + cpu_stats = per_cpu_ptr(port->stats, cpu); + do { + start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); + rx_packets = cpu_stats->rx_packets; + rx_bytes = cpu_stats->rx_bytes; + tx_packets = cpu_stats->tx_packets; + tx_bytes = cpu_stats->tx_bytes; + } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); + + stats->rx_packets += rx_packets; + stats->rx_bytes += rx_bytes; + stats->tx_packets += tx_packets; + stats->tx_bytes += tx_bytes; + } + + stats->rx_errors = dev->stats.rx_errors; + stats->rx_dropped = dev->stats.rx_dropped; + stats->tx_dropped = dev->stats.tx_dropped; +} + +static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr) +{ + struct hwtstamp_config config; + void __iomem *ptp; + u32 gcr, int_mask; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + if (config.flags) + return -EINVAL; + + if (config.tx_type != HWTSTAMP_TX_OFF && + config.tx_type != HWTSTAMP_TX_ON) + return -ERANGE; + + ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id); + + int_mask = gcr = 0; + if (config.tx_type != HWTSTAMP_TX_OFF) { + gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_TX_RESET; + int_mask |= MVPP22_PTP_INT_MASK_QUEUE1 | + MVPP22_PTP_INT_MASK_QUEUE0; + } + + /* It seems we must also release the TX reset when enabling the TSU */ + if (config.rx_filter != HWTSTAMP_FILTER_NONE) + gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_RX_RESET | + MVPP22_PTP_GCR_TX_RESET; + + if (gcr & MVPP22_PTP_GCR_TSU_ENABLE) + mvpp22_tai_start(port->priv->tai); + + if (config.rx_filter != HWTSTAMP_FILTER_NONE) { + config.rx_filter = HWTSTAMP_FILTER_ALL; + mvpp2_modify(ptp + MVPP22_PTP_GCR, + MVPP22_PTP_GCR_RX_RESET | + MVPP22_PTP_GCR_TX_RESET | + MVPP22_PTP_GCR_TSU_ENABLE, gcr); + port->rx_hwtstamp = true; + } else { + port->rx_hwtstamp = false; + mvpp2_modify(ptp + MVPP22_PTP_GCR, + MVPP22_PTP_GCR_RX_RESET | + MVPP22_PTP_GCR_TX_RESET | + MVPP22_PTP_GCR_TSU_ENABLE, gcr); + } + + mvpp2_modify(ptp + MVPP22_PTP_INT_MASK, + MVPP22_PTP_INT_MASK_QUEUE1 | + MVPP22_PTP_INT_MASK_QUEUE0, int_mask); + + if (!(gcr & MVPP22_PTP_GCR_TSU_ENABLE)) + mvpp22_tai_stop(port->priv->tai); + + port->tx_hwtstamp_type = config.tx_type; + + if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) + return -EFAULT; + + return 0; +} + +static int mvpp2_get_ts_config(struct mvpp2_port *port, struct ifreq *ifr) +{ + struct hwtstamp_config config; + + memset(&config, 0, 
sizeof(config)); + + config.tx_type = port->tx_hwtstamp_type; + config.rx_filter = port->rx_hwtstamp ? + HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE; + + if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) + return -EFAULT; + + return 0; +} + +static int mvpp2_ethtool_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (!port->hwtstamp) + return -EOPNOTSUPP; + + info->phc_index = mvpp22_tai_ptp_clock_index(port->priv->tai); + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + info->tx_types = BIT(HWTSTAMP_TX_OFF) | + BIT(HWTSTAMP_TX_ON); + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_ALL); + + return 0; +} + +static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct mvpp2_port *port = netdev_priv(dev); + + switch (cmd) { + case SIOCSHWTSTAMP: + if (port->hwtstamp) + return mvpp2_set_ts_config(port, ifr); + break; + + case SIOCGHWTSTAMP: + if (port->hwtstamp) + return mvpp2_get_ts_config(port, ifr); + break; + } + + if (!port->phylink) + return -ENOTSUPP; + + return phylink_mii_ioctl(port->phylink, ifr, cmd); +} + +static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct mvpp2_port *port = netdev_priv(dev); + int ret; + + ret = mvpp2_prs_vid_entry_add(port, vid); + if (ret) + netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n", + MVPP2_PRS_VLAN_FILT_MAX - 1); + return ret; +} + +static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct mvpp2_port *port = netdev_priv(dev); + + mvpp2_prs_vid_entry_remove(port, vid); + return 0; +} + +static int mvpp2_set_features(struct net_device *dev, + netdev_features_t features) +{ + netdev_features_t changed = dev->features ^ features; + struct mvpp2_port *port = netdev_priv(dev); + + if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) { + if (features & NETIF_F_HW_VLAN_CTAG_FILTER) { + mvpp2_prs_vid_enable_filtering(port); + } else { + /* Invalidate all registered VID filters for this + * port + */ + mvpp2_prs_vid_remove_all(port); + + mvpp2_prs_vid_disable_filtering(port); + } + } + + if (changed & NETIF_F_RXHASH) { + if (features & NETIF_F_RXHASH) + mvpp22_port_rss_enable(port); + else + mvpp22_port_rss_disable(port); + } + + return 0; +} + +static int mvpp2_xdp_setup(struct mvpp2_port *port, struct netdev_bpf *bpf) +{ + struct bpf_prog *prog = bpf->prog, *old_prog; + bool running = netif_running(port->dev); + bool reset = !prog != !port->xdp_prog; + + if (port->dev->mtu > MVPP2_MAX_RX_BUF_SIZE) { + NL_SET_ERR_MSG_MOD(bpf->extack, "MTU too large for XDP"); + return -EOPNOTSUPP; + } + + if (!port->priv->percpu_pools) { + NL_SET_ERR_MSG_MOD(bpf->extack, "Per CPU Pools required for XDP"); + return -EOPNOTSUPP; + } + + if (port->ntxqs < num_possible_cpus() * 2) { + NL_SET_ERR_MSG_MOD(bpf->extack, "XDP_TX needs two TX queues per CPU"); + return -EOPNOTSUPP; + } + + /* device is up and bpf is added/removed, must setup the RX queues */ + if (running && reset) + mvpp2_stop(port->dev); + + old_prog = xchg(&port->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + + /* bpf is just replaced, RXQ and MTU are already setup */ + if (!reset) + return 0; + + /* device was up, restore the link */ + if (running) + mvpp2_open(port->dev); + + /* Check Page Pool DMA Direction 
*/ + mvpp2_check_pagepool_dma(port); + + return 0; +} + +static int mvpp2_xdp(struct net_device *dev, struct netdev_bpf *xdp) +{ + struct mvpp2_port *port = netdev_priv(dev); + + switch (xdp->command) { + case XDP_SETUP_PROG: + return mvpp2_xdp_setup(port, xdp); + default: + return -EINVAL; + } +} + +/* Ethtool methods */ + +static int mvpp2_ethtool_nway_reset(struct net_device *dev) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (!port->phylink) + return -ENOTSUPP; + + return phylink_ethtool_nway_reset(port->phylink); +} + +/* Set interrupt coalescing for ethtools */ +static int mvpp2_ethtool_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *c) +{ + struct mvpp2_port *port = netdev_priv(dev); + int queue; + + for (queue = 0; queue < port->nrxqs; queue++) { + struct mvpp2_rx_queue *rxq = port->rxqs[queue]; + + rxq->time_coal = c->rx_coalesce_usecs; + rxq->pkts_coal = c->rx_max_coalesced_frames; + mvpp2_rx_pkts_coal_set(port, rxq); + mvpp2_rx_time_coal_set(port, rxq); + } + + if (port->has_tx_irqs) { + port->tx_time_coal = c->tx_coalesce_usecs; + mvpp2_tx_time_coal_set(port); + } + + for (queue = 0; queue < port->ntxqs; queue++) { + struct mvpp2_tx_queue *txq = port->txqs[queue]; + + txq->done_pkts_coal = c->tx_max_coalesced_frames; + + if (port->has_tx_irqs) + mvpp2_tx_pkts_coal_set(port, txq); + } + + return 0; +} + +/* get coalescing for ethtools */ +static int mvpp2_ethtool_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *c) +{ + struct mvpp2_port *port = netdev_priv(dev); + + c->rx_coalesce_usecs = port->rxqs[0]->time_coal; + c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal; + c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal; + c->tx_coalesce_usecs = port->tx_time_coal; + return 0; +} + +static void mvpp2_ethtool_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) +{ + strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME, + sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION, + sizeof(drvinfo->version)); + strlcpy(drvinfo->bus_info, dev_name(&dev->dev), + sizeof(drvinfo->bus_info)); +} + +static void mvpp2_ethtool_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ring) +{ + struct mvpp2_port *port = netdev_priv(dev); + + ring->rx_max_pending = MVPP2_MAX_RXD_MAX; + ring->tx_max_pending = MVPP2_MAX_TXD_MAX; + ring->rx_pending = port->rx_ring_size; + ring->tx_pending = port->tx_ring_size; +} + +static int mvpp2_ethtool_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ring) +{ + struct mvpp2_port *port = netdev_priv(dev); + u16 prev_rx_ring_size = port->rx_ring_size; + u16 prev_tx_ring_size = port->tx_ring_size; + int err; + + err = mvpp2_check_ringparam_valid(dev, ring); + if (err) + return err; + + if (!netif_running(dev)) { + port->rx_ring_size = ring->rx_pending; + port->tx_ring_size = ring->tx_pending; + return 0; + } + + /* The interface is running, so we have to force a + * reallocation of the queues + */ + mvpp2_stop_dev(port); + mvpp2_cleanup_rxqs(port); + mvpp2_cleanup_txqs(port); + + port->rx_ring_size = ring->rx_pending; + port->tx_ring_size = ring->tx_pending; + + err = mvpp2_setup_rxqs(port); + if (err) { + /* Reallocate Rx queues with the original ring size */ + port->rx_ring_size = prev_rx_ring_size; + ring->rx_pending = prev_rx_ring_size; + err = mvpp2_setup_rxqs(port); + if (err) + goto err_out; + } + err = mvpp2_setup_txqs(port); + if (err) { + /* Reallocate Tx queues with the original ring size */ + port->tx_ring_size = prev_tx_ring_size; + 
ring->tx_pending = prev_tx_ring_size; + err = mvpp2_setup_txqs(port); + if (err) + goto err_clean_rxqs; + } + + mvpp2_start_dev(port); + mvpp2_egress_enable(port); + mvpp2_ingress_enable(port); + + return 0; + +err_clean_rxqs: + mvpp2_cleanup_rxqs(port); +err_out: + netdev_err(dev, "failed to change ring parameters"); + return err; +} + +static void mvpp2_ethtool_get_pause_param(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (!port->phylink) + return; + + phylink_ethtool_get_pauseparam(port->phylink, pause); +} + +static int mvpp2_ethtool_set_pause_param(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (!port->phylink) + return -ENOTSUPP; + + return phylink_ethtool_set_pauseparam(port->phylink, pause); +} + +static int mvpp2_ethtool_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (!port->phylink) + return -ENOTSUPP; + + return phylink_ethtool_ksettings_get(port->phylink, cmd); +} + +static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (!port->phylink) + return -ENOTSUPP; + + return phylink_ethtool_ksettings_set(port->phylink, cmd); +} + +static int mvpp2_ethtool_get_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *info, u32 *rules) +{ + struct mvpp2_port *port = netdev_priv(dev); + int ret = 0, i, loc = 0; + + if (!mvpp22_rss_is_supported()) + return -EOPNOTSUPP; + + switch (info->cmd) { + case ETHTOOL_GRXFH: + ret = mvpp2_ethtool_rxfh_get(port, info); + break; + case ETHTOOL_GRXRINGS: + info->data = port->nrxqs; + break; + case ETHTOOL_GRXCLSRLCNT: + info->rule_cnt = port->n_rfs_rules; + break; + case ETHTOOL_GRXCLSRULE: + ret = mvpp2_ethtool_cls_rule_get(port, info); + break; + case ETHTOOL_GRXCLSRLALL: + for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) { + if (loc == info->rule_cnt) { + ret = -EMSGSIZE; + break; + } + + if (port->rfs_rules[i]) + rules[loc++] = i; + } + break; + default: + return -ENOTSUPP; + } + + return ret; +} + +static int mvpp2_ethtool_set_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *info) +{ + struct mvpp2_port *port = netdev_priv(dev); + int ret = 0; + + if (!mvpp22_rss_is_supported()) + return -EOPNOTSUPP; + + switch (info->cmd) { + case ETHTOOL_SRXFH: + ret = mvpp2_ethtool_rxfh_set(port, info); + break; + case ETHTOOL_SRXCLSRLINS: + ret = mvpp2_ethtool_cls_rule_ins(port, info); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = mvpp2_ethtool_cls_rule_del(port, info); + break; + default: + return -EOPNOTSUPP; + } + return ret; +} + +static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev) +{ + return mvpp22_rss_is_supported() ? 
MVPP22_RSS_TABLE_ENTRIES : 0; +} + +static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct mvpp2_port *port = netdev_priv(dev); + int ret = 0; + + if (!mvpp22_rss_is_supported()) + return -EOPNOTSUPP; + + if (indir) + ret = mvpp22_port_rss_ctx_indir_get(port, 0, indir); + + if (hfunc) + *hfunc = ETH_RSS_HASH_CRC32; + + return ret; +} + +static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct mvpp2_port *port = netdev_priv(dev); + int ret = 0; + + if (!mvpp22_rss_is_supported()) + return -EOPNOTSUPP; + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32) + return -EOPNOTSUPP; + + if (key) + return -EOPNOTSUPP; + + if (indir) + ret = mvpp22_port_rss_ctx_indir_set(port, 0, indir); + + return ret; +} + +static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir, + u8 *key, u8 *hfunc, u32 rss_context) +{ + struct mvpp2_port *port = netdev_priv(dev); + int ret = 0; + + if (!mvpp22_rss_is_supported()) + return -EOPNOTSUPP; + if (rss_context >= MVPP22_N_RSS_TABLES) + return -EINVAL; + + if (hfunc) + *hfunc = ETH_RSS_HASH_CRC32; + + if (indir) + ret = mvpp22_port_rss_ctx_indir_get(port, rss_context, indir); + + return ret; +} + +static int mvpp2_ethtool_set_rxfh_context(struct net_device *dev, + const u32 *indir, const u8 *key, + const u8 hfunc, u32 *rss_context, + bool delete) +{ + struct mvpp2_port *port = netdev_priv(dev); + int ret; + + if (!mvpp22_rss_is_supported()) + return -EOPNOTSUPP; + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32) + return -EOPNOTSUPP; + + if (key) + return -EOPNOTSUPP; + + if (delete) + return mvpp22_port_rss_ctx_delete(port, *rss_context); + + if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) { + ret = mvpp22_port_rss_ctx_create(port, rss_context); + if (ret) + return ret; + } + + return mvpp22_port_rss_ctx_indir_set(port, *rss_context, indir); +} +/* Device ops */ + +static const struct net_device_ops mvpp2_netdev_ops = { + .ndo_open = mvpp2_open, + .ndo_stop = mvpp2_stop, + .ndo_start_xmit = mvpp2_tx, + .ndo_set_rx_mode = mvpp2_set_rx_mode, + .ndo_set_mac_address = mvpp2_set_mac_address, + .ndo_change_mtu = mvpp2_change_mtu, + .ndo_get_stats64 = mvpp2_get_stats64, + .ndo_do_ioctl = mvpp2_ioctl, + .ndo_vlan_rx_add_vid = mvpp2_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = mvpp2_vlan_rx_kill_vid, + .ndo_set_features = mvpp2_set_features, + .ndo_bpf = mvpp2_xdp, + .ndo_xdp_xmit = mvpp2_xdp_xmit, +}; + +static const struct ethtool_ops mvpp2_eth_tool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES, + .nway_reset = mvpp2_ethtool_nway_reset, + .get_link = ethtool_op_get_link, + .get_ts_info = mvpp2_ethtool_get_ts_info, + .set_coalesce = mvpp2_ethtool_set_coalesce, + .get_coalesce = mvpp2_ethtool_get_coalesce, + .get_drvinfo = mvpp2_ethtool_get_drvinfo, + .get_ringparam = mvpp2_ethtool_get_ringparam, + .set_ringparam = mvpp2_ethtool_set_ringparam, + .get_strings = mvpp2_ethtool_get_strings, + .get_ethtool_stats = mvpp2_ethtool_get_stats, + .get_sset_count = mvpp2_ethtool_get_sset_count, + .get_pauseparam = mvpp2_ethtool_get_pause_param, + .set_pauseparam = mvpp2_ethtool_set_pause_param, + .get_link_ksettings = mvpp2_ethtool_get_link_ksettings, + .set_link_ksettings = mvpp2_ethtool_set_link_ksettings, + .get_rxnfc = mvpp2_ethtool_get_rxnfc, + .set_rxnfc = mvpp2_ethtool_set_rxnfc, + .get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size, + .get_rxfh = 
mvpp2_ethtool_get_rxfh, + .set_rxfh = mvpp2_ethtool_set_rxfh, + .get_rxfh_context = mvpp2_ethtool_get_rxfh_context, + .set_rxfh_context = mvpp2_ethtool_set_rxfh_context, +}; + +/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that + * had a single IRQ defined per-port. + */ +static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port, + struct device_node *port_node) +{ + struct mvpp2_queue_vector *v = &port->qvecs[0]; + + v->first_rxq = 0; + v->nrxqs = port->nrxqs; + v->type = MVPP2_QUEUE_VECTOR_SHARED; + v->sw_thread_id = 0; + v->sw_thread_mask = *cpumask_bits(cpu_online_mask); + v->port = port; + v->irq = irq_of_parse_and_map(port_node, 0); + if (v->irq <= 0) + return -EINVAL; + netif_napi_add(port->dev, &v->napi, mvpp2_poll, + NAPI_POLL_WEIGHT); + + port->nqvecs = 1; + + return 0; +} + +static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port, + struct device_node *port_node) +{ + struct mvpp2 *priv = port->priv; + struct mvpp2_queue_vector *v; + int i, ret; + + switch (queue_mode) { + case MVPP2_QDIST_SINGLE_MODE: + port->nqvecs = priv->nthreads + 1; + break; + case MVPP2_QDIST_MULTI_MODE: + port->nqvecs = priv->nthreads; + break; + } + + for (i = 0; i < port->nqvecs; i++) { + char irqname[16]; + + v = port->qvecs + i; + + v->port = port; + v->type = MVPP2_QUEUE_VECTOR_PRIVATE; + v->sw_thread_id = i; + v->sw_thread_mask = BIT(i); + + if (port->flags & MVPP2_F_DT_COMPAT) + snprintf(irqname, sizeof(irqname), "tx-cpu%d", i); + else + snprintf(irqname, sizeof(irqname), "hif%d", i); + + if (queue_mode == MVPP2_QDIST_MULTI_MODE) { + v->first_rxq = i; + v->nrxqs = 1; + } else if (queue_mode == MVPP2_QDIST_SINGLE_MODE && + i == (port->nqvecs - 1)) { + v->first_rxq = 0; + v->nrxqs = port->nrxqs; + v->type = MVPP2_QUEUE_VECTOR_SHARED; + + if (port->flags & MVPP2_F_DT_COMPAT) + strncpy(irqname, "rx-shared", sizeof(irqname)); + } + + if (port_node) + v->irq = of_irq_get_byname(port_node, irqname); + else + v->irq = fwnode_irq_get(port->fwnode, i); + if (v->irq <= 0) { + ret = -EINVAL; + goto err; + } + + netif_napi_add(port->dev, &v->napi, mvpp2_poll, + NAPI_POLL_WEIGHT); + } + + return 0; + +err: + for (i = 0; i < port->nqvecs; i++) + irq_dispose_mapping(port->qvecs[i].irq); + return ret; +} + +static int mvpp2_queue_vectors_init(struct mvpp2_port *port, + struct device_node *port_node) +{ + if (port->has_tx_irqs) + return mvpp2_multi_queue_vectors_init(port, port_node); + else + return mvpp2_simple_queue_vectors_init(port, port_node); +} + +static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port) +{ + int i; + + for (i = 0; i < port->nqvecs; i++) + irq_dispose_mapping(port->qvecs[i].irq); +} + +/* Configure Rx queue group interrupt for this port */ +static void mvpp2_rx_irqs_setup(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + u32 val; + int i; + + if (priv->hw_version == MVPP21) { + mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id), + port->nrxqs); + return; + } + + /* Handle the more complicated PPv2.2 case */ + for (i = 0; i < port->nqvecs; i++) { + struct mvpp2_queue_vector *qv = port->qvecs + i; + + if (!qv->nrxqs) + continue; + + val = qv->sw_thread_id; + val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET; + mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val); + + val = qv->first_rxq; + val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET; + mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val); + } +} + +/* Initialize port HW */ +static int mvpp2_port_init(struct mvpp2_port *port) +{ + struct device *dev = 
port->dev->dev.parent; + struct mvpp2 *priv = port->priv; + struct mvpp2_txq_pcpu *txq_pcpu; + unsigned int thread; + int queue, err, val; + + /* Checks for hardware constraints */ + if (port->first_rxq + port->nrxqs > + MVPP2_MAX_PORTS * priv->max_port_rxqs) + return -EINVAL; + + if (port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ) + return -EINVAL; + + /* Disable port */ + mvpp2_egress_disable(port); + mvpp2_port_disable(port); + + if (mvpp2_is_xlg(port->phy_interface)) { + val = readl(port->base + MVPP22_XLG_CTRL0_REG); + val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS; + val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN; + writel(val, port->base + MVPP22_XLG_CTRL0_REG); + } else { + val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); + val &= ~MVPP2_GMAC_FORCE_LINK_PASS; + val |= MVPP2_GMAC_FORCE_LINK_DOWN; + writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); + } + + port->tx_time_coal = MVPP2_TXDONE_COAL_USEC; + + port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs), + GFP_KERNEL); + if (!port->txqs) + return -ENOMEM; + + /* Associate physical Tx queues to this port and initialize. + * The mapping is predefined. + */ + for (queue = 0; queue < port->ntxqs; queue++) { + int queue_phy_id = mvpp2_txq_phys(port->id, queue); + struct mvpp2_tx_queue *txq; + + txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL); + if (!txq) { + err = -ENOMEM; + goto err_free_percpu; + } + + txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu); + if (!txq->pcpu) { + err = -ENOMEM; + goto err_free_percpu; + } + + txq->id = queue_phy_id; + txq->log_id = queue; + txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH; + for (thread = 0; thread < priv->nthreads; thread++) { + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); + txq_pcpu->thread = thread; + } + + port->txqs[queue] = txq; + } + + port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs), + GFP_KERNEL); + if (!port->rxqs) { + err = -ENOMEM; + goto err_free_percpu; + } + + /* Allocate and initialize Rx queue for this port */ + for (queue = 0; queue < port->nrxqs; queue++) { + struct mvpp2_rx_queue *rxq; + + /* Map physical Rx queue to port's logical Rx queue */ + rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL); + if (!rxq) { + err = -ENOMEM; + goto err_free_percpu; + } + /* Map this Rx queue to a physical queue */ + rxq->id = port->first_rxq + queue; + rxq->port = port->id; + rxq->logic_rxq = queue; + + port->rxqs[queue] = rxq; + } + + mvpp2_rx_irqs_setup(port); + + /* Create Rx descriptor rings */ + for (queue = 0; queue < port->nrxqs; queue++) { + struct mvpp2_rx_queue *rxq = port->rxqs[queue]; + + rxq->size = port->rx_ring_size; + rxq->pkts_coal = MVPP2_RX_COAL_PKTS; + rxq->time_coal = MVPP2_RX_COAL_USEC; + } + + mvpp2_ingress_disable(port); + + /* Port default configuration */ + mvpp2_defaults_set(port); + + /* Port's classifier configuration */ + mvpp2_cls_oversize_rxq_set(port); + mvpp2_cls_port_config(port); + + if (mvpp22_rss_is_supported()) + mvpp22_port_rss_init(port); + + /* Provide an initial Rx packet size */ + port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu); + + /* Initialize pools for swf */ + err = mvpp2_swf_bm_pool_init(port); + if (err) + goto err_free_percpu; + + /* Clear all port stats */ + mvpp2_read_stats(port); + memset(port->ethtool_stats, 0, + MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs) * sizeof(u64)); + + return 0; + +err_free_percpu: + for (queue = 0; queue < port->ntxqs; queue++) { + if (!port->txqs[queue]) + continue; + free_percpu(port->txqs[queue]->pcpu); + } + return err; +} + +static bool 
mvpp22_port_has_legacy_tx_irqs(struct device_node *port_node, + unsigned long *flags) +{ + char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", "tx-cpu2", + "tx-cpu3" }; + int i; + + for (i = 0; i < 5; i++) + if (of_property_match_string(port_node, "interrupt-names", + irqs[i]) < 0) + return false; + + *flags |= MVPP2_F_DT_COMPAT; + return true; +} + +/* Checks if the port dt description has the required Tx interrupts: + * - PPv2.1: there are no such interrupts. + * - PPv2.2: + * - The old DTs have: "rx-shared", "tx-cpuX" with X in [0...3] + * - The new ones have: "hifX" with X in [0..8] + * + * All those variants are supported to keep the backward compatibility. + */ +static bool mvpp2_port_has_irqs(struct mvpp2 *priv, + struct device_node *port_node, + unsigned long *flags) +{ + char name[5]; + int i; + + /* ACPI */ + if (!port_node) + return true; + + if (priv->hw_version == MVPP21) + return false; + + if (mvpp22_port_has_legacy_tx_irqs(port_node, flags)) + return true; + + for (i = 0; i < MVPP2_MAX_THREADS; i++) { + snprintf(name, 5, "hif%d", i); + if (of_property_match_string(port_node, "interrupt-names", + name) < 0) + return false; + } + + return true; +} + +static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv, + struct fwnode_handle *fwnode, + char **mac_from) +{ + struct mvpp2_port *port = netdev_priv(dev); + char hw_mac_addr[ETH_ALEN] = {0}; + char fw_mac_addr[ETH_ALEN]; + + if (fwnode_get_mac_address(fwnode, fw_mac_addr, ETH_ALEN)) { + *mac_from = "firmware node"; + ether_addr_copy(dev->dev_addr, fw_mac_addr); + return; + } + + if (priv->hw_version == MVPP21) { + mvpp21_get_mac_address(port, hw_mac_addr); + if (is_valid_ether_addr(hw_mac_addr)) { + *mac_from = "hardware"; + ether_addr_copy(dev->dev_addr, hw_mac_addr); + return; + } + } + + *mac_from = "random"; + eth_hw_addr_random(dev); +} + +static struct mvpp2_port *mvpp2_phylink_to_port(struct phylink_config *config) +{ + return container_of(config, struct mvpp2_port, phylink_config); +} + +static struct mvpp2_port *mvpp2_pcs_to_port(struct phylink_pcs *pcs) +{ + return container_of(pcs, struct mvpp2_port, phylink_pcs); +} + +static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs, + struct phylink_link_state *state) +{ + struct mvpp2_port *port = mvpp2_pcs_to_port(pcs); + u32 val; + + state->speed = SPEED_10000; + state->duplex = 1; + state->an_complete = 1; + + val = readl(port->base + MVPP22_XLG_STATUS); + state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP); + + state->pause = 0; + val = readl(port->base + MVPP22_XLG_CTRL0_REG); + if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN) + state->pause |= MLO_PAUSE_TX; + if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN) + state->pause |= MLO_PAUSE_RX; +} + +static int mvpp2_xlg_pcs_config(struct phylink_pcs *pcs, + unsigned int mode, + phy_interface_t interface, + const unsigned long *advertising, + bool permit_pause_to_mac) +{ + return 0; +} + +static const struct phylink_pcs_ops mvpp2_phylink_xlg_pcs_ops = { + .pcs_get_state = mvpp2_xlg_pcs_get_state, + .pcs_config = mvpp2_xlg_pcs_config, +}; + +static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs, + struct phylink_link_state *state) +{ + struct mvpp2_port *port = mvpp2_pcs_to_port(pcs); + u32 val; + + val = readl(port->base + MVPP2_GMAC_STATUS0); + + state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE); + state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP); + state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX); + + switch (port->phy_interface) { + case PHY_INTERFACE_MODE_1000BASEX: + 
state->speed = SPEED_1000; + break; + case PHY_INTERFACE_MODE_2500BASEX: + state->speed = SPEED_2500; + break; + default: + if (val & MVPP2_GMAC_STATUS0_GMII_SPEED) + state->speed = SPEED_1000; + else if (val & MVPP2_GMAC_STATUS0_MII_SPEED) + state->speed = SPEED_100; + else + state->speed = SPEED_10; + } + + state->pause = 0; + if (val & MVPP2_GMAC_STATUS0_RX_PAUSE) + state->pause |= MLO_PAUSE_RX; + if (val & MVPP2_GMAC_STATUS0_TX_PAUSE) + state->pause |= MLO_PAUSE_TX; +} + +static int mvpp2_gmac_pcs_config(struct phylink_pcs *pcs, unsigned int mode, + phy_interface_t interface, + const unsigned long *advertising, + bool permit_pause_to_mac) +{ + struct mvpp2_port *port = mvpp2_pcs_to_port(pcs); + u32 mask, val, an, old_an, changed; + + mask = MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS | + MVPP2_GMAC_IN_BAND_AUTONEG | + MVPP2_GMAC_AN_SPEED_EN | + MVPP2_GMAC_FLOW_CTRL_AUTONEG | + MVPP2_GMAC_AN_DUPLEX_EN; + + if (phylink_autoneg_inband(mode)) { + mask |= MVPP2_GMAC_CONFIG_MII_SPEED | + MVPP2_GMAC_CONFIG_GMII_SPEED | + MVPP2_GMAC_CONFIG_FULL_DUPLEX; + val = MVPP2_GMAC_IN_BAND_AUTONEG; + + if (interface == PHY_INTERFACE_MODE_SGMII) { + /* SGMII mode receives the speed and duplex from PHY */ + val |= MVPP2_GMAC_AN_SPEED_EN | + MVPP2_GMAC_AN_DUPLEX_EN; + } else { + /* 802.3z mode has fixed speed and duplex */ + val |= MVPP2_GMAC_CONFIG_GMII_SPEED | + MVPP2_GMAC_CONFIG_FULL_DUPLEX; + + /* The FLOW_CTRL_AUTONEG bit selects either the hardware + * automatically or the bits in MVPP22_GMAC_CTRL_4_REG + * manually controls the GMAC pause modes. + */ + if (permit_pause_to_mac) + val |= MVPP2_GMAC_FLOW_CTRL_AUTONEG; + + /* Configure advertisement bits */ + mask |= MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN; + if (phylink_test(advertising, Pause)) + val |= MVPP2_GMAC_FC_ADV_EN; + if (phylink_test(advertising, Asym_Pause)) + val |= MVPP2_GMAC_FC_ADV_ASM_EN; + } + } else { + val = 0; + } + + old_an = an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); + an = (an & ~mask) | val; + changed = an ^ old_an; + if (changed) + writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG); + + /* We are only interested in the advertisement bits changing */ + return changed & (MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN); +} + +static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs) +{ + struct mvpp2_port *port = mvpp2_pcs_to_port(pcs); + u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); + + writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN, + port->base + MVPP2_GMAC_AUTONEG_CONFIG); + writel(val & ~MVPP2_GMAC_IN_BAND_RESTART_AN, + port->base + MVPP2_GMAC_AUTONEG_CONFIG); +} + +static const struct phylink_pcs_ops mvpp2_phylink_gmac_pcs_ops = { + .pcs_get_state = mvpp2_gmac_pcs_get_state, + .pcs_config = mvpp2_gmac_pcs_config, + .pcs_an_restart = mvpp2_gmac_pcs_an_restart, +}; + +static void mvpp2_phylink_validate(struct phylink_config *config, + unsigned long *supported, + struct phylink_link_state *state) +{ + struct mvpp2_port *port = mvpp2_phylink_to_port(config); + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + + /* Invalid combinations */ + switch (state->interface) { + case PHY_INTERFACE_MODE_10GBASER: + case PHY_INTERFACE_MODE_XAUI: + if (!mvpp2_port_supports_xlg(port)) + goto empty_set; + break; + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + if (!mvpp2_port_supports_rgmii(port)) + goto empty_set; + break; + default: + break; + } + + phylink_set(mask, Autoneg); + phylink_set_port_modes(mask); + + switch 
(state->interface) { + case PHY_INTERFACE_MODE_10GBASER: + case PHY_INTERFACE_MODE_XAUI: + case PHY_INTERFACE_MODE_NA: + if (mvpp2_port_supports_xlg(port)) { + phylink_set(mask, 10000baseT_Full); + phylink_set(mask, 10000baseCR_Full); + phylink_set(mask, 10000baseSR_Full); + phylink_set(mask, 10000baseLR_Full); + phylink_set(mask, 10000baseLRM_Full); + phylink_set(mask, 10000baseER_Full); + phylink_set(mask, 10000baseKR_Full); + } + if (state->interface != PHY_INTERFACE_MODE_NA) + break; + fallthrough; + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + case PHY_INTERFACE_MODE_SGMII: + phylink_set(mask, 10baseT_Half); + phylink_set(mask, 10baseT_Full); + phylink_set(mask, 100baseT_Half); + phylink_set(mask, 100baseT_Full); + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseX_Full); + if (state->interface != PHY_INTERFACE_MODE_NA) + break; + fallthrough; + case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_2500BASEX: + if (port->comphy || + state->interface != PHY_INTERFACE_MODE_2500BASEX) { + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseX_Full); + } + if (port->comphy || + state->interface == PHY_INTERFACE_MODE_2500BASEX) { + phylink_set(mask, 2500baseT_Full); + phylink_set(mask, 2500baseX_Full); + } + break; + default: + goto empty_set; + } + + bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_and(state->advertising, state->advertising, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); + + phylink_helper_basex_speed(state); + return; + +empty_set: + bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode, + const struct phylink_link_state *state) +{ + u32 val; + + mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG, + MVPP22_XLG_CTRL0_MAC_RESET_DIS, + MVPP22_XLG_CTRL0_MAC_RESET_DIS); + mvpp2_modify(port->base + MVPP22_XLG_CTRL4_REG, + MVPP22_XLG_CTRL4_MACMODSELECT_GMAC | + MVPP22_XLG_CTRL4_EN_IDLE_CHECK | + MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC, + MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC); + + /* Wait for reset to deassert */ + do { + val = readl(port->base + MVPP22_XLG_CTRL0_REG); + } while (!(val & MVPP22_XLG_CTRL0_MAC_RESET_DIS)); +} + +static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode, + const struct phylink_link_state *state) +{ + u32 old_ctrl0, ctrl0; + u32 old_ctrl2, ctrl2; + u32 old_ctrl4, ctrl4; + + old_ctrl0 = ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG); + old_ctrl2 = ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG); + old_ctrl4 = ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG); + + ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK; + ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK); + + /* Configure port type */ + if (phy_interface_mode_is_8023z(state->interface)) { + ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK; + ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL; + ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS | + MVPP22_CTRL4_DP_CLK_SEL | + MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; + } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { + ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_INBAND_AN_MASK; + ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL; + ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS | + MVPP22_CTRL4_DP_CLK_SEL | + MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; + } else if (phy_interface_mode_is_rgmii(state->interface)) { + ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL; + ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL | + 
MVPP22_CTRL4_SYNC_BYPASS_DIS | + MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; + } + + /* Configure negotiation style */ + if (!phylink_autoneg_inband(mode)) { + /* Phy or fixed speed - no in-band AN, nothing to do, leave the + * configured speed, duplex and flow control as-is. + */ + } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { + /* SGMII in-band mode receives the speed and duplex from + * the PHY. Flow control information is not received. */ + } else if (phy_interface_mode_is_8023z(state->interface)) { + /* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can + * they negotiate duplex: they are always operating with a fixed + * speed of 1000/2500Mbps in full duplex, so force 1000/2500 + * speed and full duplex here. + */ + ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK; + } + + if (old_ctrl0 != ctrl0) + writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG); + if (old_ctrl2 != ctrl2) + writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG); + if (old_ctrl4 != ctrl4) + writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG); +} + +static int mvpp2__mac_prepare(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) +{ + struct mvpp2_port *port = mvpp2_phylink_to_port(config); + + /* Check for invalid configuration */ + if (mvpp2_is_xlg(interface) && port->gop_id != 0) { + netdev_err(port->dev, "Invalid mode on %s\n", port->dev->name); + return -EINVAL; + } + + if (port->phy_interface != interface || + phylink_autoneg_inband(mode)) { + /* Force the link down when changing the interface or if in + * in-band mode to ensure we do not change the configuration + * while the hardware is indicating link is up. We force both + * XLG and GMAC down to ensure that they're both in a known + * state. + */ + mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG, + MVPP2_GMAC_FORCE_LINK_PASS | + MVPP2_GMAC_FORCE_LINK_DOWN, + MVPP2_GMAC_FORCE_LINK_DOWN); + + if (mvpp2_port_supports_xlg(port)) + mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG, + MVPP22_XLG_CTRL0_FORCE_LINK_PASS | + MVPP22_XLG_CTRL0_FORCE_LINK_DOWN, + MVPP22_XLG_CTRL0_FORCE_LINK_DOWN); + } + + /* Make sure the port is disabled when reconfiguring the mode */ + mvpp2_port_disable(port); + + if (port->phy_interface != interface) { + /* Place GMAC into reset */ + mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG, + MVPP2_GMAC_PORT_RESET_MASK, + MVPP2_GMAC_PORT_RESET_MASK); + + if (port->priv->hw_version == MVPP22) { + mvpp22_gop_mask_irq(port); + + phy_power_off(port->comphy); + } + } + + /* Select the appropriate PCS operations depending on the + * configured interface mode. We will only switch to a mode + * that the validate() checks have already passed. 
+ */ + if (mvpp2_is_xlg(interface)) + port->phylink_pcs.ops = &mvpp2_phylink_xlg_pcs_ops; + else + port->phylink_pcs.ops = &mvpp2_phylink_gmac_pcs_ops; + + return 0; +} + +static int mvpp2_mac_prepare(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) +{ + struct mvpp2_port *port = mvpp2_phylink_to_port(config); + int ret; + + ret = mvpp2__mac_prepare(config, mode, interface); + if (ret == 0) + phylink_set_pcs(port->phylink, &port->phylink_pcs); + + return ret; +} + +static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode, + const struct phylink_link_state *state) +{ + struct mvpp2_port *port = mvpp2_phylink_to_port(config); + + /* mac (re)configuration */ + if (mvpp2_is_xlg(state->interface)) + mvpp2_xlg_config(port, mode, state); + else if (phy_interface_mode_is_rgmii(state->interface) || + phy_interface_mode_is_8023z(state->interface) || + state->interface == PHY_INTERFACE_MODE_SGMII) + mvpp2_gmac_config(port, mode, state); + + if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK) + mvpp2_port_loopback_set(port, state); +} + +static int mvpp2_mac_finish(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) +{ + struct mvpp2_port *port = mvpp2_phylink_to_port(config); + + if (port->priv->hw_version == MVPP22 && + port->phy_interface != interface) { + port->phy_interface = interface; + + /* Reconfigure the serdes lanes */ + mvpp22_mode_reconfigure(port); + + /* Unmask interrupts */ + mvpp22_gop_unmask_irq(port); + } + + if (!mvpp2_is_xlg(interface)) { + /* Release GMAC reset and wait */ + mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG, + MVPP2_GMAC_PORT_RESET_MASK, 0); + + while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) & + MVPP2_GMAC_PORT_RESET_MASK) + continue; + } + + mvpp2_port_enable(port); + + /* Allow the link to come up if in in-band mode, otherwise the + * link is forced via mac_link_down()/mac_link_up() + */ + if (phylink_autoneg_inband(mode)) { + if (mvpp2_is_xlg(interface)) + mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG, + MVPP22_XLG_CTRL0_FORCE_LINK_PASS | + MVPP22_XLG_CTRL0_FORCE_LINK_DOWN, 0); + else + mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG, + MVPP2_GMAC_FORCE_LINK_PASS | + MVPP2_GMAC_FORCE_LINK_DOWN, 0); + } + + return 0; +} + +static void mvpp2_mac_link_up(struct phylink_config *config, + struct phy_device *phy, + unsigned int mode, phy_interface_t interface, + int speed, int duplex, + bool tx_pause, bool rx_pause) +{ + struct mvpp2_port *port = mvpp2_phylink_to_port(config); + u32 val; + + if (mvpp2_is_xlg(interface)) { + if (!phylink_autoneg_inband(mode)) { + val = MVPP22_XLG_CTRL0_FORCE_LINK_PASS; + if (tx_pause) + val |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN; + if (rx_pause) + val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN; + + mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG, + MVPP22_XLG_CTRL0_FORCE_LINK_DOWN | + MVPP22_XLG_CTRL0_FORCE_LINK_PASS | + MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN | + MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN, val); + } + } else { + if (!phylink_autoneg_inband(mode)) { + val = MVPP2_GMAC_FORCE_LINK_PASS; + + if (speed == SPEED_1000 || speed == SPEED_2500) + val |= MVPP2_GMAC_CONFIG_GMII_SPEED; + else if (speed == SPEED_100) + val |= MVPP2_GMAC_CONFIG_MII_SPEED; + + if (duplex == DUPLEX_FULL) + val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX; + + mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG, + MVPP2_GMAC_FORCE_LINK_DOWN | + MVPP2_GMAC_FORCE_LINK_PASS | + MVPP2_GMAC_CONFIG_MII_SPEED | + MVPP2_GMAC_CONFIG_GMII_SPEED | + MVPP2_GMAC_CONFIG_FULL_DUPLEX, val); + } + + /* We 
can always update the flow control enable bits; + * these will only be effective if flow control AN + * (MVPP2_GMAC_FLOW_CTRL_AUTONEG) is disabled. + */ + val = 0; + if (tx_pause) + val |= MVPP22_CTRL4_TX_FC_EN; + if (rx_pause) + val |= MVPP22_CTRL4_RX_FC_EN; + + mvpp2_modify(port->base + MVPP22_GMAC_CTRL_4_REG, + MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN, + val); + } + + mvpp2_port_enable(port); + + mvpp2_egress_enable(port); + mvpp2_ingress_enable(port); + netif_tx_wake_all_queues(port->dev); +} + +static void mvpp2_mac_link_down(struct phylink_config *config, + unsigned int mode, phy_interface_t interface) +{ + struct mvpp2_port *port = mvpp2_phylink_to_port(config); + u32 val; + + if (!phylink_autoneg_inband(mode)) { + if (mvpp2_is_xlg(interface)) { + val = readl(port->base + MVPP22_XLG_CTRL0_REG); + val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS; + val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN; + writel(val, port->base + MVPP22_XLG_CTRL0_REG); + } else { + val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); + val &= ~MVPP2_GMAC_FORCE_LINK_PASS; + val |= MVPP2_GMAC_FORCE_LINK_DOWN; + writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); + } + } + + netif_tx_stop_all_queues(port->dev); + mvpp2_egress_disable(port); + mvpp2_ingress_disable(port); + + mvpp2_port_disable(port); +} + +static const struct phylink_mac_ops mvpp2_phylink_ops = { + .validate = mvpp2_phylink_validate, + .mac_prepare = mvpp2_mac_prepare, + .mac_config = mvpp2_mac_config, + .mac_finish = mvpp2_mac_finish, + .mac_link_up = mvpp2_mac_link_up, + .mac_link_down = mvpp2_mac_link_down, +}; + +/* Work-around for ACPI */ +static void mvpp2_acpi_start(struct mvpp2_port *port) +{ + /* Phylink isn't used as of now for ACPI, so the MAC has to be + * configured manually when the interface is started. This will + * be removed as soon as the phylink ACPI support lands in. 
+ */ + struct phylink_link_state state = { + .interface = port->phy_interface, + }; + mvpp2__mac_prepare(&port->phylink_config, MLO_AN_INBAND, + port->phy_interface); + mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state); + port->phylink_pcs.ops->pcs_config(&port->phylink_pcs, MLO_AN_INBAND, + port->phy_interface, + state.advertising, false); + mvpp2_mac_finish(&port->phylink_config, MLO_AN_INBAND, + port->phy_interface); + mvpp2_mac_link_up(&port->phylink_config, NULL, + MLO_AN_INBAND, port->phy_interface, + SPEED_UNKNOWN, DUPLEX_UNKNOWN, false, false); +} + +/* Ports initialization */ +static int mvpp2_port_probe(struct platform_device *pdev, + struct fwnode_handle *port_fwnode, + struct mvpp2 *priv) +{ + struct phy *comphy = NULL; + struct mvpp2_port *port; + struct mvpp2_port_pcpu *port_pcpu; + struct device_node *port_node = to_of_node(port_fwnode); + netdev_features_t features; + struct net_device *dev; + struct phylink *phylink; + char *mac_from = ""; + unsigned int ntxqs, nrxqs, thread; + unsigned long flags = 0; + bool has_tx_irqs; + u32 id; + int phy_mode; + int err, i; + + has_tx_irqs = mvpp2_port_has_irqs(priv, port_node, &flags); + if (!has_tx_irqs && queue_mode == MVPP2_QDIST_MULTI_MODE) { + dev_err(&pdev->dev, + "not enough IRQs to support multi queue mode\n"); + return -EINVAL; + } + + ntxqs = MVPP2_MAX_TXQ; + nrxqs = mvpp2_get_nrxqs(priv); + + dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs); + if (!dev) + return -ENOMEM; + + phy_mode = fwnode_get_phy_mode(port_fwnode); + if (phy_mode < 0) { + dev_err(&pdev->dev, "incorrect phy mode\n"); + err = phy_mode; + goto err_free_netdev; + } + + /* + * Rewrite 10GBASE-KR to 10GBASE-R for compatibility with existing DT. + * Existing usage of 10GBASE-KR is not correct; no backplane + * negotiation is done, and this driver does not actually support + * 10GBASE-KR. 
+ */ + if (phy_mode == PHY_INTERFACE_MODE_10GKR) + phy_mode = PHY_INTERFACE_MODE_10GBASER; + + if (port_node) { + comphy = devm_of_phy_get(&pdev->dev, port_node, NULL); + if (IS_ERR(comphy)) { + if (PTR_ERR(comphy) == -EPROBE_DEFER) { + err = -EPROBE_DEFER; + goto err_free_netdev; + } + comphy = NULL; + } + } + + if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) { + err = -EINVAL; + dev_err(&pdev->dev, "missing port-id value\n"); + goto err_free_netdev; + } + + dev->tx_queue_len = MVPP2_MAX_TXD_MAX; + dev->watchdog_timeo = 5 * HZ; + dev->netdev_ops = &mvpp2_netdev_ops; + dev->ethtool_ops = &mvpp2_eth_tool_ops; + + port = netdev_priv(dev); + port->dev = dev; + port->fwnode = port_fwnode; + port->has_phy = !!of_find_property(port_node, "phy", NULL); + port->ntxqs = ntxqs; + port->nrxqs = nrxqs; + port->priv = priv; + port->has_tx_irqs = has_tx_irqs; + port->flags = flags; + + err = mvpp2_queue_vectors_init(port, port_node); + if (err) + goto err_free_netdev; + + if (port_node) + port->port_irq = of_irq_get_byname(port_node, "link"); + else + port->port_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1); + if (port->port_irq == -EPROBE_DEFER) { + err = -EPROBE_DEFER; + goto err_deinit_qvecs; + } + if (port->port_irq <= 0) + /* the link irq is optional */ + port->port_irq = 0; + + if (fwnode_property_read_bool(port_fwnode, "marvell,loopback")) + port->flags |= MVPP2_F_LOOPBACK; + + port->id = id; + if (priv->hw_version == MVPP21) + port->first_rxq = port->id * port->nrxqs; + else + port->first_rxq = port->id * priv->max_port_rxqs; + + port->of_node = port_node; + port->phy_interface = phy_mode; + port->comphy = comphy; + + if (priv->hw_version == MVPP21) { + port->base = devm_platform_ioremap_resource(pdev, 2 + id); + if (IS_ERR(port->base)) { + err = PTR_ERR(port->base); + goto err_free_irq; + } + + port->stats_base = port->priv->lms_base + + MVPP21_MIB_COUNTERS_OFFSET + + port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ; + } else { + if (fwnode_property_read_u32(port_fwnode, "gop-port-id", + &port->gop_id)) { + err = -EINVAL; + dev_err(&pdev->dev, "missing gop-port-id value\n"); + goto err_deinit_qvecs; + } + + port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id); + port->stats_base = port->priv->iface_base + + MVPP22_MIB_COUNTERS_OFFSET + + port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ; + + /* We may want a property to describe whether we should use + * MAC hardware timestamping. 
+ */ + if (priv->tai) + port->hwtstamp = true; + } + + /* Alloc per-cpu and ethtool stats */ + port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats); + if (!port->stats) { + err = -ENOMEM; + goto err_free_irq; + } + + port->ethtool_stats = devm_kcalloc(&pdev->dev, + MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs), + sizeof(u64), GFP_KERNEL); + if (!port->ethtool_stats) { + err = -ENOMEM; + goto err_free_stats; + } + + mutex_init(&port->gather_stats_lock); + INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics); + + mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from); + + port->tx_ring_size = MVPP2_MAX_TXD_DFLT; + port->rx_ring_size = MVPP2_MAX_RXD_DFLT; + SET_NETDEV_DEV(dev, &pdev->dev); + + err = mvpp2_port_init(port); + if (err < 0) { + dev_err(&pdev->dev, "failed to init port %d\n", id); + goto err_free_stats; + } + + mvpp2_port_periodic_xon_disable(port); + + mvpp2_mac_reset_assert(port); + mvpp22_pcs_reset_assert(port); + + port->pcpu = alloc_percpu(struct mvpp2_port_pcpu); + if (!port->pcpu) { + err = -ENOMEM; + goto err_free_txq_pcpu; + } + + if (!port->has_tx_irqs) { + for (thread = 0; thread < priv->nthreads; thread++) { + port_pcpu = per_cpu_ptr(port->pcpu, thread); + + hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL_PINNED_SOFT); + port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb; + port_pcpu->timer_scheduled = false; + port_pcpu->dev = dev; + } + } + + features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_TSO; + dev->features = features | NETIF_F_RXCSUM; + dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO | + NETIF_F_HW_VLAN_CTAG_FILTER; + + if (mvpp22_rss_is_supported()) { + dev->hw_features |= NETIF_F_RXHASH; + dev->features |= NETIF_F_NTUPLE; + } + + if (!port->priv->percpu_pools) + mvpp2_set_hw_csum(port, port->pool_long->id); + + dev->vlan_features |= features; + dev->gso_max_segs = MVPP2_MAX_TSO_SEGS; + dev->priv_flags |= IFF_UNICAST_FLT; + + /* MTU range: 68 - 9704 */ + dev->min_mtu = ETH_MIN_MTU; + /* 9704 == 9728 - 20 and rounding to 8 */ + dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE; + dev->dev.of_node = port_node; + + /* Phylink isn't used w/ ACPI as of now */ + if (port_node) { + port->phylink_config.dev = &dev->dev; + port->phylink_config.type = PHYLINK_NETDEV; + + phylink = phylink_create(&port->phylink_config, port_fwnode, + phy_mode, &mvpp2_phylink_ops); + if (IS_ERR(phylink)) { + err = PTR_ERR(phylink); + goto err_free_port_pcpu; + } + port->phylink = phylink; + } else { + port->phylink = NULL; + } + + /* Cycle the comphy to power it down, saving 270mW per port - + * don't worry about an error powering it up. When the comphy + * driver does this, we can remove this code. 
+ */ + if (port->comphy) { + err = mvpp22_comphy_init(port); + if (err == 0) + phy_power_off(port->comphy); + } + + err = register_netdev(dev); + if (err < 0) { + dev_err(&pdev->dev, "failed to register netdev\n"); + goto err_phylink; + } + netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr); + + priv->port_list[priv->port_count++] = port; + + return 0; + +err_phylink: + if (port->phylink) + phylink_destroy(port->phylink); +err_free_port_pcpu: + free_percpu(port->pcpu); +err_free_txq_pcpu: + for (i = 0; i < port->ntxqs; i++) + free_percpu(port->txqs[i]->pcpu); +err_free_stats: + free_percpu(port->stats); +err_free_irq: + if (port->port_irq) + irq_dispose_mapping(port->port_irq); +err_deinit_qvecs: + mvpp2_queue_vectors_deinit(port); +err_free_netdev: + free_netdev(dev); + return err; +} + +/* Ports removal routine */ +static void mvpp2_port_remove(struct mvpp2_port *port) +{ + int i; + + unregister_netdev(port->dev); + if (port->phylink) + phylink_destroy(port->phylink); + free_percpu(port->pcpu); + free_percpu(port->stats); + for (i = 0; i < port->ntxqs; i++) + free_percpu(port->txqs[i]->pcpu); + mvpp2_queue_vectors_deinit(port); + if (port->port_irq) + irq_dispose_mapping(port->port_irq); + free_netdev(port->dev); +} + +/* Initialize decoding windows */ +static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram, + struct mvpp2 *priv) +{ + u32 win_enable; + int i; + + for (i = 0; i < 6; i++) { + mvpp2_write(priv, MVPP2_WIN_BASE(i), 0); + mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0); + + if (i < 4) + mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0); + } + + win_enable = 0; + + for (i = 0; i < dram->num_cs; i++) { + const struct mbus_dram_window *cs = dram->cs + i; + + mvpp2_write(priv, MVPP2_WIN_BASE(i), + (cs->base & 0xffff0000) | (cs->mbus_attr << 8) | + dram->mbus_dram_target_id); + + mvpp2_write(priv, MVPP2_WIN_SIZE(i), + (cs->size - 1) & 0xffff0000); + + win_enable |= (1 << i); + } + + mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable); +} + +/* Initialize Rx FIFO's */ +static void mvpp2_rx_fifo_init(struct mvpp2 *priv) +{ + int port; + + for (port = 0; port < MVPP2_MAX_PORTS; port++) { + mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), + MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB); + mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), + MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB); + } + + mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG, + MVPP2_RX_FIFO_PORT_MIN_PKT); + mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1); +} + +static void mvpp22_rx_fifo_init(struct mvpp2 *priv) +{ + int port; + + /* The FIFO size parameters are set depending on the maximum speed a + * given port can handle: + * - Port 0: 10Gbps + * - Port 1: 2.5Gbps + * - Ports 2 and 3: 1Gbps + */ + + mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(0), + MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB); + mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(0), + MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB); + + mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(1), + MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB); + mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(1), + MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB); + + for (port = 2; port < MVPP2_MAX_PORTS; port++) { + mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), + MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB); + mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), + MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB); + } + + mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG, + MVPP2_RX_FIFO_PORT_MIN_PKT); + mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1); +} + +/* Initialize Tx FIFO's: the total FIFO size is 19kB on PPv2.2 and 10G + * interfaces must 
have a Tx FIFO size of 10kB. As only port 0 can do 10G, + * configure its Tx FIFO size to 10kB and the others ports Tx FIFO size to 3kB. + */ +static void mvpp22_tx_fifo_init(struct mvpp2 *priv) +{ + int port, size, thrs; + + for (port = 0; port < MVPP2_MAX_PORTS; port++) { + if (port == 0) { + size = MVPP22_TX_FIFO_DATA_SIZE_10KB; + thrs = MVPP2_TX_FIFO_THRESHOLD_10KB; + } else { + size = MVPP22_TX_FIFO_DATA_SIZE_3KB; + thrs = MVPP2_TX_FIFO_THRESHOLD_3KB; + } + mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size); + mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), thrs); + } +} + +static void mvpp2_axi_init(struct mvpp2 *priv) +{ + u32 val, rdval, wrval; + + mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0); + + /* AXI Bridge Configuration */ + + rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE + << MVPP22_AXI_ATTR_CACHE_OFFS; + rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM + << MVPP22_AXI_ATTR_DOMAIN_OFFS; + + wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE + << MVPP22_AXI_ATTR_CACHE_OFFS; + wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM + << MVPP22_AXI_ATTR_DOMAIN_OFFS; + + /* BM */ + mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval); + mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval); + + /* Descriptors */ + mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval); + mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval); + mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval); + mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval); + + /* Buffer Data */ + mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval); + mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval); + + val = MVPP22_AXI_CODE_CACHE_NON_CACHE + << MVPP22_AXI_CODE_CACHE_OFFS; + val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM + << MVPP22_AXI_CODE_DOMAIN_OFFS; + mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val); + mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val); + + val = MVPP22_AXI_CODE_CACHE_RD_CACHE + << MVPP22_AXI_CODE_CACHE_OFFS; + val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM + << MVPP22_AXI_CODE_DOMAIN_OFFS; + + mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val); + + val = MVPP22_AXI_CODE_CACHE_WR_CACHE + << MVPP22_AXI_CODE_CACHE_OFFS; + val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM + << MVPP22_AXI_CODE_DOMAIN_OFFS; + + mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val); +} + +/* Initialize network controller common part HW */ +static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv) +{ + const struct mbus_dram_target_info *dram_target_info; + int err, i; + u32 val; + + /* MBUS windows configuration */ + dram_target_info = mv_mbus_dram_info(); + if (dram_target_info) + mvpp2_conf_mbus_windows(dram_target_info, priv); + + if (priv->hw_version == MVPP22) + mvpp2_axi_init(priv); + + /* Disable HW PHY polling */ + if (priv->hw_version == MVPP21) { + val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG); + val |= MVPP2_PHY_AN_STOP_SMI0_MASK; + writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG); + } else { + val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG); + val &= ~MVPP22_SMI_POLLING_EN; + writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG); + } + + /* Allocate and initialize aggregated TXQs */ + priv->aggr_txqs = devm_kcalloc(&pdev->dev, MVPP2_MAX_THREADS, + sizeof(*priv->aggr_txqs), + GFP_KERNEL); + if (!priv->aggr_txqs) + return -ENOMEM; + + for (i = 0; i < MVPP2_MAX_THREADS; i++) { + priv->aggr_txqs[i].id = i; + priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE; + err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv); + if (err < 0) + return err; + } + + /* Fifo Init */ + if 
(priv->hw_version == MVPP21) { + mvpp2_rx_fifo_init(priv); + } else { + mvpp22_rx_fifo_init(priv); + mvpp22_tx_fifo_init(priv); + } + + if (priv->hw_version == MVPP21) + writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT, + priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG); + + /* Allow cache snoop when transmiting packets */ + mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1); + + /* Buffer Manager initialization */ + err = mvpp2_bm_init(&pdev->dev, priv); + if (err < 0) + return err; + + /* Parser default initialization */ + err = mvpp2_prs_default_init(pdev, priv); + if (err < 0) + return err; + + /* Classifier default initialization */ + mvpp2_cls_init(priv); + + return 0; +} + +static int mvpp2_probe(struct platform_device *pdev) +{ + const struct acpi_device_id *acpi_id; + struct fwnode_handle *fwnode = pdev->dev.fwnode; + struct fwnode_handle *port_fwnode; + struct mvpp2 *priv; + struct resource *res; + void __iomem *base; + int i, shared; + int err; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + if (has_acpi_companion(&pdev->dev)) { + acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table, + &pdev->dev); + if (!acpi_id) + return -EINVAL; + priv->hw_version = (unsigned long)acpi_id->driver_data; + } else { + priv->hw_version = + (unsigned long)of_device_get_match_data(&pdev->dev); + } + + /* multi queue mode isn't supported on PPV2.1, fallback to single + * mode + */ + if (priv->hw_version == MVPP21) + queue_mode = MVPP2_QDIST_SINGLE_MODE; + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); + + if (priv->hw_version == MVPP21) { + priv->lms_base = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(priv->lms_base)) + return PTR_ERR(priv->lms_base); + } else { + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!res) { + dev_err(&pdev->dev, "Invalid resource\n"); + return -EINVAL; + } + if (has_acpi_companion(&pdev->dev)) { + /* In case the MDIO memory region is declared in + * the ACPI, it can already appear as 'in-use' + * in the OS. Because it is overlapped by second + * region of the network controller, make + * sure it is released, before requesting it again. + * The care is taken by mvpp2 driver to avoid + * concurrent access to this memory region. + */ + release_resource(res); + } + priv->iface_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->iface_base)) + return PTR_ERR(priv->iface_base); + } + + if (priv->hw_version == MVPP22 && dev_of_node(&pdev->dev)) { + priv->sysctrl_base = + syscon_regmap_lookup_by_phandle(pdev->dev.of_node, + "marvell,system-controller"); + if (IS_ERR(priv->sysctrl_base)) + /* The system controller regmap is optional for dt + * compatibility reasons. When not provided, the + * configuration of the GoP relies on the + * firmware/bootloader. + */ + priv->sysctrl_base = NULL; + } + + if (priv->hw_version == MVPP22 && + mvpp2_get_nrxqs(priv) * 2 <= MVPP2_BM_MAX_POOLS) + priv->percpu_pools = 1; + + mvpp2_setup_bm_pool(); + + + priv->nthreads = min_t(unsigned int, num_present_cpus(), + MVPP2_MAX_THREADS); + + shared = num_present_cpus() - priv->nthreads; + if (shared > 0) + bitmap_set(&priv->lock_map, 0, + min_t(int, shared, MVPP2_MAX_THREADS)); + + for (i = 0; i < MVPP2_MAX_THREADS; i++) { + u32 addr_space_sz; + + addr_space_sz = (priv->hw_version == MVPP21 ? 
+ MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ); + priv->swth_base[i] = base + i * addr_space_sz; + } + + if (priv->hw_version == MVPP21) + priv->max_port_rxqs = 8; + else + priv->max_port_rxqs = 32; + + if (dev_of_node(&pdev->dev)) { + priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk"); + if (IS_ERR(priv->pp_clk)) + return PTR_ERR(priv->pp_clk); + err = clk_prepare_enable(priv->pp_clk); + if (err < 0) + return err; + + priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk"); + if (IS_ERR(priv->gop_clk)) { + err = PTR_ERR(priv->gop_clk); + goto err_pp_clk; + } + err = clk_prepare_enable(priv->gop_clk); + if (err < 0) + goto err_pp_clk; + + if (priv->hw_version == MVPP22) { + priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk"); + if (IS_ERR(priv->mg_clk)) { + err = PTR_ERR(priv->mg_clk); + goto err_gop_clk; + } + + err = clk_prepare_enable(priv->mg_clk); + if (err < 0) + goto err_gop_clk; + + priv->mg_core_clk = devm_clk_get(&pdev->dev, "mg_core_clk"); + if (IS_ERR(priv->mg_core_clk)) { + priv->mg_core_clk = NULL; + } else { + err = clk_prepare_enable(priv->mg_core_clk); + if (err < 0) + goto err_mg_clk; + } + } + + priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk"); + if (IS_ERR(priv->axi_clk)) { + err = PTR_ERR(priv->axi_clk); + if (err == -EPROBE_DEFER) + goto err_mg_core_clk; + priv->axi_clk = NULL; + } else { + err = clk_prepare_enable(priv->axi_clk); + if (err < 0) + goto err_mg_core_clk; + } + + /* Get system's tclk rate */ + priv->tclk = clk_get_rate(priv->pp_clk); + } else if (device_property_read_u32(&pdev->dev, "clock-frequency", + &priv->tclk)) { + dev_err(&pdev->dev, "missing clock-frequency value\n"); + return -EINVAL; + } + + if (priv->hw_version == MVPP22) { + err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK); + if (err) + goto err_axi_clk; + /* Sadly, the BM pools all share the same register to + * store the high 32 bits of their address. So they + * must all have the same high 32 bits, which forces + * us to restrict coherent memory to DMA_BIT_MASK(32). + */ + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (err) + goto err_axi_clk; + } + + /* Initialize network controller */ + err = mvpp2_init(pdev, priv); + if (err < 0) { + dev_err(&pdev->dev, "failed to initialize controller\n"); + goto err_axi_clk; + } + + err = mvpp22_tai_probe(&pdev->dev, priv); + if (err < 0) + goto err_axi_clk; + + /* Initialize ports */ + fwnode_for_each_available_child_node(fwnode, port_fwnode) { + err = mvpp2_port_probe(pdev, port_fwnode, priv); + if (err < 0) + goto err_port_probe; + } + + if (priv->port_count == 0) { + dev_err(&pdev->dev, "no ports enabled\n"); + err = -ENODEV; + goto err_axi_clk; + } + + /* Statistics must be gathered regularly because some of them (like + * packets counters) are 32-bit registers and could overflow quite + * quickly. For instance, a 10Gb link used at full bandwidth with the + * smallest packets (64B) will overflow a 32-bit counter in less than + * 30 seconds. Then, use a workqueue to fill 64-bit counters. + */ + snprintf(priv->queue_name, sizeof(priv->queue_name), + "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev), + priv->port_count > 1 ? 
"+" : ""); + priv->stats_queue = create_singlethread_workqueue(priv->queue_name); + if (!priv->stats_queue) { + err = -ENOMEM; + goto err_port_probe; + } + + mvpp2_dbgfs_init(priv, pdev->name); + + platform_set_drvdata(pdev, priv); + return 0; + +err_port_probe: + fwnode_handle_put(port_fwnode); + + i = 0; + fwnode_for_each_available_child_node(fwnode, port_fwnode) { + if (priv->port_list[i]) + mvpp2_port_remove(priv->port_list[i]); + i++; + } +err_axi_clk: + clk_disable_unprepare(priv->axi_clk); + +err_mg_core_clk: + if (priv->hw_version == MVPP22) + clk_disable_unprepare(priv->mg_core_clk); +err_mg_clk: + if (priv->hw_version == MVPP22) + clk_disable_unprepare(priv->mg_clk); +err_gop_clk: + clk_disable_unprepare(priv->gop_clk); +err_pp_clk: + clk_disable_unprepare(priv->pp_clk); + return err; +} + +static int mvpp2_remove(struct platform_device *pdev) +{ + struct mvpp2 *priv = platform_get_drvdata(pdev); + struct fwnode_handle *fwnode = pdev->dev.fwnode; + int i = 0, poolnum = MVPP2_BM_POOLS_NUM; + struct fwnode_handle *port_fwnode; + + mvpp2_dbgfs_cleanup(priv); + + fwnode_for_each_available_child_node(fwnode, port_fwnode) { + if (priv->port_list[i]) { + mutex_destroy(&priv->port_list[i]->gather_stats_lock); + mvpp2_port_remove(priv->port_list[i]); + } + i++; + } + + destroy_workqueue(priv->stats_queue); + + if (priv->percpu_pools) + poolnum = mvpp2_get_nrxqs(priv) * 2; + + for (i = 0; i < poolnum; i++) { + struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i]; + + mvpp2_bm_pool_destroy(&pdev->dev, priv, bm_pool); + } + + for (i = 0; i < MVPP2_MAX_THREADS; i++) { + struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i]; + + dma_free_coherent(&pdev->dev, + MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, + aggr_txq->descs, + aggr_txq->descs_dma); + } + + if (is_acpi_node(port_fwnode)) + return 0; + + clk_disable_unprepare(priv->axi_clk); + clk_disable_unprepare(priv->mg_core_clk); + clk_disable_unprepare(priv->mg_clk); + clk_disable_unprepare(priv->pp_clk); + clk_disable_unprepare(priv->gop_clk); + + return 0; +} + +static const struct of_device_id mvpp2_match[] = { + { + .compatible = "marvell,armada-375-pp2", + .data = (void *)MVPP21, + }, + { + .compatible = "marvell,armada-7k-pp22", + .data = (void *)MVPP22, + }, + { } +}; +MODULE_DEVICE_TABLE(of, mvpp2_match); + +#ifdef CONFIG_ACPI +static const struct acpi_device_id mvpp2_acpi_match[] = { + { "MRVL0110", MVPP22 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match); +#endif + +static struct platform_driver mvpp2_driver = { + .probe = mvpp2_probe, + .remove = mvpp2_remove, + .driver = { + .name = MVPP2_DRIVER_NAME, + .of_match_table = mvpp2_match, + .acpi_match_table = ACPI_PTR(mvpp2_acpi_match), + }, +}; + +static int __init mvpp2_driver_init(void) +{ + return platform_driver_register(&mvpp2_driver); +} +module_init(mvpp2_driver_init); + +static void __exit mvpp2_driver_exit(void) +{ + platform_driver_unregister(&mvpp2_driver); + mvpp2_dbgfs_exit(); +} +module_exit(mvpp2_driver_exit); + +MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com"); +MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c new file mode 100644 index 000000000..dd590086f --- /dev/null +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c @@ -0,0 +1,2532 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Header Parser helpers for Marvell PPv2 Network Controller + * + * Copyright (C) 2014 Marvell + * + * Marcin Wojtas 
<mw@semihalf.com> + */ + +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/platform_device.h> +#include <uapi/linux/ppp_defs.h> +#include <net/ip.h> +#include <net/ipv6.h> + +#include "mvpp2.h" +#include "mvpp2_prs.h" + +/* Update parser tcam and sram hw entries */ +static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) +{ + int i; + + if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1) + return -EINVAL; + + /* Clear entry invalidation bit */ + pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK; + + /* Write sram index - indirect access */ + mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); + for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) + mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]); + + /* Write tcam index - indirect access */ + mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); + for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) + mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]); + + return 0; +} + +/* Initialize tcam entry from hw */ +int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe, + int tid) +{ + int i; + + if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1) + return -EINVAL; + + memset(pe, 0, sizeof(*pe)); + pe->index = tid; + + /* Write tcam index - indirect access */ + mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); + + pe->tcam[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv, + MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD)); + if (pe->tcam[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK) + return MVPP2_PRS_TCAM_ENTRY_INVALID; + + for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) + pe->tcam[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i)); + + /* Write sram index - indirect access */ + mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); + for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) + pe->sram[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i)); + + return 0; +} + +/* Invalidate tcam hw entry */ +static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index) +{ + /* Write index - indirect access */ + mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index); + mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD), + MVPP2_PRS_TCAM_INV_MASK); +} + +/* Enable shadow table entry and set its lookup ID */ +static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu) +{ + priv->prs_shadow[index].valid = true; + priv->prs_shadow[index].lu = lu; +} + +/* Update ri fields in shadow table entry */ +static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index, + unsigned int ri, unsigned int ri_mask) +{ + priv->prs_shadow[index].ri_mask = ri_mask; + priv->prs_shadow[index].ri = ri; +} + +/* Update lookup field in tcam sw entry */ +static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu) +{ + pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU(MVPP2_PRS_LU_MASK); + pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK); + pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU(lu & MVPP2_PRS_LU_MASK); + pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK); +} + +/* Update mask for single port in tcam sw entry */ +static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe, + unsigned int port, bool add) +{ + if (add) + pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(BIT(port)); + else + pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(BIT(port)); +} + +/* Update port map in tcam sw entry */ +static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe, + 
unsigned int ports) +{ + pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT(MVPP2_PRS_PORT_MASK); + pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(MVPP2_PRS_PORT_MASK); + pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(~ports & MVPP2_PRS_PORT_MASK); +} + +/* Obtain port map from tcam sw entry */ +unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe) +{ + return (~pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] >> 24) & MVPP2_PRS_PORT_MASK; +} + +/* Set byte of data and its enable bits in tcam sw entry */ +static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe, + unsigned int offs, unsigned char byte, + unsigned char enable) +{ + int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE; + + pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(0xff << pos); + pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(MVPP2_PRS_TCAM_EN(0xff) << pos); + pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= byte << pos; + pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= MVPP2_PRS_TCAM_EN(enable << pos); +} + +/* Get byte of data and its enable bits from tcam sw entry */ +void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe, + unsigned int offs, unsigned char *byte, + unsigned char *enable) +{ + int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE; + + *byte = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> pos) & 0xff; + *enable = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> (pos + 16)) & 0xff; +} + +/* Compare tcam data bytes with a pattern */ +static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs, + u16 data) +{ + u16 tcam_data; + + tcam_data = pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] & 0xffff; + return tcam_data == data; +} + +/* Update ai bits in tcam sw entry */ +static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe, + unsigned int bits, unsigned int enable) +{ + int i; + + for (i = 0; i < MVPP2_PRS_AI_BITS; i++) { + if (!(enable & BIT(i))) + continue; + + if (bits & BIT(i)) + pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= BIT(i); + else + pe->tcam[MVPP2_PRS_TCAM_AI_WORD] &= ~BIT(i); + } + + pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= MVPP2_PRS_TCAM_AI_EN(enable); +} + +/* Get ai bits from tcam sw entry */ +static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe) +{ + return pe->tcam[MVPP2_PRS_TCAM_AI_WORD] & MVPP2_PRS_AI_MASK; +} + +/* Set ethertype in tcam sw entry */ +static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset, + unsigned short ethertype) +{ + mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff); + mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff); +} + +/* Set vid in tcam sw entry */ +static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset, + unsigned short vid) +{ + mvpp2_prs_tcam_data_byte_set(pe, offset + 0, (vid & 0xf00) >> 8, 0xf); + mvpp2_prs_tcam_data_byte_set(pe, offset + 1, vid & 0xff, 0xff); +} + +/* Set bits in sram sw entry */ +static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num, + u32 val) +{ + pe->sram[MVPP2_BIT_TO_WORD(bit_num)] |= (val << (MVPP2_BIT_IN_WORD(bit_num))); +} + +/* Clear bits in sram sw entry */ +static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num, + u32 val) +{ + pe->sram[MVPP2_BIT_TO_WORD(bit_num)] &= ~(val << (MVPP2_BIT_IN_WORD(bit_num))); +} + +/* Update ri bits in sram sw entry */ +static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe, + unsigned int bits, unsigned int mask) +{ + unsigned int i; + + for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) { + if (!(mask & BIT(i))) + continue; + + if 
(bits & BIT(i)) + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_OFFS + i, + 1); + else + mvpp2_prs_sram_bits_clear(pe, + MVPP2_PRS_SRAM_RI_OFFS + i, + 1); + + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1); + } +} + +/* Obtain ri bits from sram sw entry */ +static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe) +{ + return pe->sram[MVPP2_PRS_SRAM_RI_WORD]; +} + +/* Update ai bits in sram sw entry */ +static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe, + unsigned int bits, unsigned int mask) +{ + unsigned int i; + + for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) { + if (!(mask & BIT(i))) + continue; + + if (bits & BIT(i)) + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_OFFS + i, + 1); + else + mvpp2_prs_sram_bits_clear(pe, + MVPP2_PRS_SRAM_AI_OFFS + i, + 1); + + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1); + } +} + +/* Read ai bits from sram sw entry */ +static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe) +{ + u8 bits; + /* ai is stored on bits 90->97; so it spreads across two u32 */ + int ai_off = MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_AI_OFFS); + int ai_shift = MVPP2_BIT_IN_WORD(MVPP2_PRS_SRAM_AI_OFFS); + + bits = (pe->sram[ai_off] >> ai_shift) | + (pe->sram[ai_off + 1] << (32 - ai_shift)); + + return bits; +} + +/* In sram sw entry set lookup ID field of the tcam key to be used in the next + * lookup interation + */ +static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe, + unsigned int lu) +{ + int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS; + + mvpp2_prs_sram_bits_clear(pe, sram_next_off, + MVPP2_PRS_SRAM_NEXT_LU_MASK); + mvpp2_prs_sram_bits_set(pe, sram_next_off, lu); +} + +/* In the sram sw entry set sign and value of the next lookup offset + * and the offset value generated to the classifier + */ +static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift, + unsigned int op) +{ + /* Set sign */ + if (shift < 0) { + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1); + shift = 0 - shift; + } else { + mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1); + } + + /* Set value */ + pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] |= + shift & MVPP2_PRS_SRAM_SHIFT_MASK; + + /* Reset and set operation */ + mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK); + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op); + + /* Set base offset as current */ + mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1); +} + +/* In the sram sw entry set sign and value of the user defined offset + * generated to the classifier + */ +static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe, + unsigned int type, int offset, + unsigned int op) +{ + /* Set sign */ + if (offset < 0) { + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1); + offset = 0 - offset; + } else { + mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1); + } + + /* Set value */ + mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS, + MVPP2_PRS_SRAM_UDF_MASK); + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, + offset & MVPP2_PRS_SRAM_UDF_MASK); + + /* Set offset type */ + mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, + MVPP2_PRS_SRAM_UDF_TYPE_MASK); + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type); + + /* Set offset operation */ + mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, + MVPP2_PRS_SRAM_OP_SEL_UDF_MASK); + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, 
+ op & MVPP2_PRS_SRAM_OP_SEL_UDF_MASK); + + /* Set base offset as current */ + mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1); +} + +/* Find parser flow entry */ +static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow) +{ + struct mvpp2_prs_entry pe; + int tid; + + /* Go through the all entires with MVPP2_PRS_LU_FLOWS */ + for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) { + u8 bits; + + if (!priv->prs_shadow[tid].valid || + priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS) + continue; + + mvpp2_prs_init_from_hw(priv, &pe, tid); + bits = mvpp2_prs_sram_ai_get(&pe); + + /* Sram store classification lookup ID in AI bits [5:0] */ + if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow) + return tid; + } + + return -ENOENT; +} + +/* Return first free tcam index, seeking from start to end */ +static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start, + unsigned char end) +{ + int tid; + + if (start > end) + swap(start, end); + + if (end >= MVPP2_PRS_TCAM_SRAM_SIZE) + end = MVPP2_PRS_TCAM_SRAM_SIZE - 1; + + for (tid = start; tid <= end; tid++) { + if (!priv->prs_shadow[tid].valid) + return tid; + } + + return -EINVAL; +} + +/* Drop flow control pause frames */ +static void mvpp2_prs_drop_fc(struct mvpp2 *priv) +{ + unsigned char da[ETH_ALEN] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 }; + struct mvpp2_prs_entry pe; + unsigned int len; + + memset(&pe, 0, sizeof(pe)); + + /* For all ports - drop flow control frames */ + pe.index = MVPP2_PE_FC_DROP; + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); + + /* Set match on DA */ + len = ETH_ALEN; + while (len--) + mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff); + + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK, + MVPP2_PRS_RI_DROP_MASK); + + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + + /* Mask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); + mvpp2_prs_hw_write(priv, &pe); +} + +/* Enable/disable dropping all mac da's */ +static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add) +{ + struct mvpp2_prs_entry pe; + + if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) { + /* Entry exist - update port only */ + mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL); + } else { + /* Entry doesn't exist - create new */ + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); + pe.index = MVPP2_PE_DROP_ALL; + + /* Non-promiscuous mode for all ports - DROP unknown packets */ + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK, + MVPP2_PRS_RI_DROP_MASK); + + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + + /* Update shadow table */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); + + /* Mask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, 0); + } + + /* Update port mask */ + mvpp2_prs_tcam_port_set(&pe, port, add); + + mvpp2_prs_hw_write(priv, &pe); +} + +/* Set port to unicast or multicast promiscuous mode */ +void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, + enum mvpp2_prs_l2_cast l2_cast, bool add) +{ + struct mvpp2_prs_entry pe; + unsigned char cast_match; + unsigned int ri; + int tid; + + if (l2_cast == MVPP2_PRS_L2_UNI_CAST) { + cast_match = MVPP2_PRS_UCAST_VAL; + tid = MVPP2_PE_MAC_UC_PROMISCUOUS; + ri = MVPP2_PRS_RI_L2_UCAST; + } else { + cast_match = MVPP2_PRS_MCAST_VAL; + tid = 
MVPP2_PE_MAC_MC_PROMISCUOUS; + ri = MVPP2_PRS_RI_L2_MCAST; + } + + /* promiscuous mode - Accept unknown unicast or multicast packets */ + if (priv->prs_shadow[tid].valid) { + mvpp2_prs_init_from_hw(priv, &pe, tid); + } else { + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); + pe.index = tid; + + /* Continue - set next lookup */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA); + + /* Set result info bits */ + mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK); + + /* Match UC or MC addresses */ + mvpp2_prs_tcam_data_byte_set(&pe, 0, cast_match, + MVPP2_PRS_CAST_MASK); + + /* Shift to ethertype */ + mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Mask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, 0); + + /* Update shadow table */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); + } + + /* Update port mask */ + mvpp2_prs_tcam_port_set(&pe, port, add); + + mvpp2_prs_hw_write(priv, &pe); +} + +/* Set entry for dsa packets */ +static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add, + bool tagged, bool extend) +{ + struct mvpp2_prs_entry pe; + int tid, shift; + + if (extend) { + tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED; + shift = 8; + } else { + tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED; + shift = 4; + } + + if (priv->prs_shadow[tid].valid) { + /* Entry exist - update port only */ + mvpp2_prs_init_from_hw(priv, &pe, tid); + } else { + /* Entry doesn't exist - create new */ + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA); + pe.index = tid; + + /* Update shadow table */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA); + + if (tagged) { + /* Set tagged bit in DSA tag */ + mvpp2_prs_tcam_data_byte_set(&pe, 0, + MVPP2_PRS_TCAM_DSA_TAGGED_BIT, + MVPP2_PRS_TCAM_DSA_TAGGED_BIT); + + /* Set ai bits for next iteration */ + if (extend) + mvpp2_prs_sram_ai_update(&pe, 1, + MVPP2_PRS_SRAM_AI_MASK); + else + mvpp2_prs_sram_ai_update(&pe, 0, + MVPP2_PRS_SRAM_AI_MASK); + + /* Set result info bits to 'single vlan' */ + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE, + MVPP2_PRS_RI_VLAN_MASK); + /* If packet is tagged continue check vid filtering */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID); + } else { + /* Shift 4 bytes for DSA tag or 8 bytes for EDSA tag*/ + mvpp2_prs_sram_shift_set(&pe, shift, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Set result info bits to 'no vlans' */ + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE, + MVPP2_PRS_RI_VLAN_MASK); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); + } + + /* Mask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, 0); + } + + /* Update port mask */ + mvpp2_prs_tcam_port_set(&pe, port, add); + + mvpp2_prs_hw_write(priv, &pe); +} + +/* Set entry for dsa ethertype */ +static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port, + bool add, bool tagged, bool extend) +{ + struct mvpp2_prs_entry pe; + int tid, shift, port_mask; + + if (extend) { + tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED : + MVPP2_PE_ETYPE_EDSA_UNTAGGED; + port_mask = 0; + shift = 8; + } else { + tid = tagged ? 
MVPP2_PE_ETYPE_DSA_TAGGED : + MVPP2_PE_ETYPE_DSA_UNTAGGED; + port_mask = MVPP2_PRS_PORT_MASK; + shift = 4; + } + + if (priv->prs_shadow[tid].valid) { + /* Entry exist - update port only */ + mvpp2_prs_init_from_hw(priv, &pe, tid); + } else { + /* Entry doesn't exist - create new */ + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA); + pe.index = tid; + + /* Set ethertype */ + mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA); + mvpp2_prs_match_etype(&pe, 2, 0); + + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK, + MVPP2_PRS_RI_DSA_MASK); + /* Shift ethertype + 2 byte reserved + tag*/ + mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Update shadow table */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA); + + if (tagged) { + /* Set tagged bit in DSA tag */ + mvpp2_prs_tcam_data_byte_set(&pe, + MVPP2_ETH_TYPE_LEN + 2 + 3, + MVPP2_PRS_TCAM_DSA_TAGGED_BIT, + MVPP2_PRS_TCAM_DSA_TAGGED_BIT); + /* Clear all ai bits for next iteration */ + mvpp2_prs_sram_ai_update(&pe, 0, + MVPP2_PRS_SRAM_AI_MASK); + /* If packet is tagged continue check vlans */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN); + } else { + /* Set result info bits to 'no vlans' */ + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE, + MVPP2_PRS_RI_VLAN_MASK); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); + } + /* Mask/unmask all ports, depending on dsa type */ + mvpp2_prs_tcam_port_map_set(&pe, port_mask); + } + + /* Update port mask */ + mvpp2_prs_tcam_port_set(&pe, port, add); + + mvpp2_prs_hw_write(priv, &pe); +} + +/* Search for existing single/triple vlan entry */ +static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai) +{ + struct mvpp2_prs_entry pe; + int tid; + + /* Go through the all entries with MVPP2_PRS_LU_VLAN */ + for (tid = MVPP2_PE_FIRST_FREE_TID; + tid <= MVPP2_PE_LAST_FREE_TID; tid++) { + unsigned int ri_bits, ai_bits; + bool match; + + if (!priv->prs_shadow[tid].valid || + priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) + continue; + + mvpp2_prs_init_from_hw(priv, &pe, tid); + match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid); + if (!match) + continue; + + /* Get vlan type */ + ri_bits = mvpp2_prs_sram_ri_get(&pe); + ri_bits &= MVPP2_PRS_RI_VLAN_MASK; + + /* Get current ai value from tcam */ + ai_bits = mvpp2_prs_tcam_ai_get(&pe); + /* Clear double vlan bit */ + ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT; + + if (ai != ai_bits) + continue; + + if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE || + ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE) + return tid; + } + + return -ENOENT; +} + +/* Add/update single/triple vlan entry */ +static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai, + unsigned int port_map) +{ + struct mvpp2_prs_entry pe; + int tid_aux, tid; + int ret = 0; + + memset(&pe, 0, sizeof(pe)); + + tid = mvpp2_prs_vlan_find(priv, tpid, ai); + + if (tid < 0) { + /* Create new tcam entry */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID, + MVPP2_PE_FIRST_FREE_TID); + if (tid < 0) + return tid; + + /* Get last double vlan tid */ + for (tid_aux = MVPP2_PE_LAST_FREE_TID; + tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) { + unsigned int ri_bits; + + if (!priv->prs_shadow[tid_aux].valid || + priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN) + continue; + + mvpp2_prs_init_from_hw(priv, &pe, tid_aux); + ri_bits = mvpp2_prs_sram_ri_get(&pe); + if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) == + MVPP2_PRS_RI_VLAN_DOUBLE) + break; + } + + if (tid <= tid_aux) + return -EINVAL; + + 
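+	/* No matching entry exists: initialize a new one at the free index
+	 * validated above (it must come after every double-VLAN entry).
+	 */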
memset(&pe, 0, sizeof(pe)); + pe.index = tid; + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); + + mvpp2_prs_match_etype(&pe, 0, tpid); + + /* VLAN tag detected, proceed with VID filtering */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID); + + /* Clear all ai bits for next iteration */ + mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); + + if (ai == MVPP2_PRS_SINGLE_VLAN_AI) { + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE, + MVPP2_PRS_RI_VLAN_MASK); + } else { + ai |= MVPP2_PRS_DBL_VLAN_AI_BIT; + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_TRIPLE, + MVPP2_PRS_RI_VLAN_MASK); + } + mvpp2_prs_tcam_ai_update(&pe, ai, MVPP2_PRS_SRAM_AI_MASK); + + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); + } else { + mvpp2_prs_init_from_hw(priv, &pe, tid); + } + /* Update ports' mask */ + mvpp2_prs_tcam_port_map_set(&pe, port_map); + + mvpp2_prs_hw_write(priv, &pe); + + return ret; +} + +/* Get first free double vlan ai number */ +static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv) +{ + int i; + + for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) { + if (!priv->prs_double_vlans[i]) + return i; + } + + return -EINVAL; +} + +/* Search for existing double vlan entry */ +static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1, + unsigned short tpid2) +{ + struct mvpp2_prs_entry pe; + int tid; + + /* Go through the all entries with MVPP2_PRS_LU_VLAN */ + for (tid = MVPP2_PE_FIRST_FREE_TID; + tid <= MVPP2_PE_LAST_FREE_TID; tid++) { + unsigned int ri_mask; + bool match; + + if (!priv->prs_shadow[tid].valid || + priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) + continue; + + mvpp2_prs_init_from_hw(priv, &pe, tid); + + match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid1) && + mvpp2_prs_tcam_data_cmp(&pe, 4, tpid2); + + if (!match) + continue; + + ri_mask = mvpp2_prs_sram_ri_get(&pe) & MVPP2_PRS_RI_VLAN_MASK; + if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE) + return tid; + } + + return -ENOENT; +} + +/* Add or update double vlan entry */ +static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, + unsigned short tpid2, + unsigned int port_map) +{ + int tid_aux, tid, ai, ret = 0; + struct mvpp2_prs_entry pe; + + memset(&pe, 0, sizeof(pe)); + + tid = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2); + + if (tid < 0) { + /* Create new tcam entry */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + /* Set ai value for new double vlan entry */ + ai = mvpp2_prs_double_vlan_ai_free_get(priv); + if (ai < 0) + return ai; + + /* Get first single/triple vlan tid */ + for (tid_aux = MVPP2_PE_FIRST_FREE_TID; + tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) { + unsigned int ri_bits; + + if (!priv->prs_shadow[tid_aux].valid || + priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN) + continue; + + mvpp2_prs_init_from_hw(priv, &pe, tid_aux); + ri_bits = mvpp2_prs_sram_ri_get(&pe); + ri_bits &= MVPP2_PRS_RI_VLAN_MASK; + if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE || + ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE) + break; + } + + if (tid >= tid_aux) + return -ERANGE; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); + pe.index = tid; + + priv->prs_double_vlans[ai] = true; + + mvpp2_prs_match_etype(&pe, 0, tpid1); + mvpp2_prs_match_etype(&pe, 4, tpid2); + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN); + /* Shift 4 bytes - skip outer vlan tag */ + mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + mvpp2_prs_sram_ri_update(&pe, 
MVPP2_PRS_RI_VLAN_DOUBLE, + MVPP2_PRS_RI_VLAN_MASK); + mvpp2_prs_sram_ai_update(&pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT, + MVPP2_PRS_SRAM_AI_MASK); + + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); + } else { + mvpp2_prs_init_from_hw(priv, &pe, tid); + } + + /* Update ports' mask */ + mvpp2_prs_tcam_port_map_set(&pe, port_map); + mvpp2_prs_hw_write(priv, &pe); + + return ret; +} + +/* IPv4 header parsing for fragmentation and L4 offset */ +static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto, + unsigned int ri, unsigned int ri_mask) +{ + struct mvpp2_prs_entry pe; + int tid; + + if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) && + (proto != IPPROTO_IGMP)) + return -EINVAL; + + /* Not fragmented packet */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); + pe.index = tid; + + /* Set next lu to IPv4 */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); + mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + /* Set L4 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, + sizeof(struct iphdr) - 4, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, + MVPP2_PRS_IPV4_DIP_AI_BIT); + mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK); + + mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, + MVPP2_PRS_TCAM_PROTO_MASK_L); + mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, + MVPP2_PRS_TCAM_PROTO_MASK); + + mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK); + mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + /* Fragmented packet */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + pe.index = tid; + /* Clear ri before updating */ + pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0; + pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; + mvpp2_prs_sram_ri_update(&pe, ri, ri_mask); + + mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE, + ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK); + + mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0); + mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* IPv4 L3 multicast or broadcast */ +static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast) +{ + struct mvpp2_prs_entry pe; + int mask, tid; + + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); + pe.index = tid; + + switch (l3_cast) { + case MVPP2_PRS_L3_MULTI_CAST: + mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC, + MVPP2_PRS_IPV4_MC_MASK); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST, + MVPP2_PRS_RI_L3_ADDR_MASK); + break; + case MVPP2_PRS_L3_BROAD_CAST: + mask = MVPP2_PRS_IPV4_BC_MASK; + mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask); + mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask); + mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask); + mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask); + mvpp2_prs_sram_ri_update(&pe, 
MVPP2_PRS_RI_L3_BCAST, + MVPP2_PRS_RI_L3_ADDR_MASK); + break; + default: + return -EINVAL; + } + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, + MVPP2_PRS_IPV4_DIP_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Set entries for protocols over IPv6 */ +static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto, + unsigned int ri, unsigned int ri_mask) +{ + struct mvpp2_prs_entry pe; + int tid; + + if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) && + (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP)) + return -EINVAL; + + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); + pe.index = tid; + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_ri_update(&pe, ri, ri_mask); + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, + sizeof(struct ipv6hdr) - 6, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK); + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, + MVPP2_PRS_IPV6_NO_EXT_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Write HW */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* IPv6 L3 multicast entry */ +static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast) +{ + struct mvpp2_prs_entry pe; + int tid; + + if (l3_cast != MVPP2_PRS_L3_MULTI_CAST) + return -EINVAL; + + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); + pe.index = tid; + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST, + MVPP2_PRS_RI_L3_ADDR_MASK); + mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, + MVPP2_PRS_IPV6_NO_EXT_AI_BIT); + /* Shift back to IPv6 NH */ + mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC, + MVPP2_PRS_IPV6_MC_MASK); + mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Parser per-port initialization */ +static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first, + int lu_max, int offset) +{ + u32 val; + + /* Set lookup ID */ + val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG); + val &= ~MVPP2_PRS_PORT_LU_MASK(port); + val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first); + mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val); + + /* Set maximum number of loops for packet received from port */ + val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port)); + 
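+	/* Read-modify-write: clear only this port's loop-limit field before
+	 * setting the new value.
+	 */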
val &= ~MVPP2_PRS_MAX_LOOP_MASK(port); + val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max); + mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val); + + /* Set initial offset for packet header extraction for the first + * searching loop + */ + val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port)); + val &= ~MVPP2_PRS_INIT_OFF_MASK(port); + val |= MVPP2_PRS_INIT_OFF_VAL(port, offset); + mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val); +} + +/* Default flow entries initialization for all ports */ +static void mvpp2_prs_def_flow_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + int port; + + for (port = 0; port < MVPP2_MAX_PORTS; port++) { + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port; + + /* Mask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, 0); + + /* Set flow ID*/ + mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_hw_write(priv, &pe); + } +} + +/* Set default entry for Marvell Header field */ +static void mvpp2_prs_mh_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + + memset(&pe, 0, sizeof(pe)); + + pe.index = MVPP2_PE_MH_DEFAULT; + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH); + mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC); + + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH); + mvpp2_prs_hw_write(priv, &pe); +} + +/* Set default entires (place holder) for promiscuous, non-promiscuous and + * multicast MAC addresses + */ +static void mvpp2_prs_mac_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + + memset(&pe, 0, sizeof(pe)); + + /* Non-promiscuous mode for all ports - DROP unknown packets */ + pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS; + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); + + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK, + MVPP2_PRS_RI_DROP_MASK); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); + mvpp2_prs_hw_write(priv, &pe); + + /* Create dummy entries for drop all and promiscuous modes */ + mvpp2_prs_drop_fc(priv); + mvpp2_prs_mac_drop_all_set(priv, 0, false); + mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false); + mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false); +} + +/* Set default entries for various types of dsa packets */ +static void mvpp2_prs_dsa_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + + /* None tagged EDSA entry - place holder */ + mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED, + MVPP2_PRS_EDSA); + + /* Tagged EDSA entry - place holder */ + mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); + + /* None tagged DSA entry - place holder */ + mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED, + MVPP2_PRS_DSA); + + /* Tagged DSA entry - place holder */ + mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); + + /* None tagged EDSA ethertype entry - place holder*/ + 
mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); + + /* Tagged EDSA ethertype entry - place holder*/ + mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false, + MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); + + /* None tagged DSA ethertype entry */ + mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); + + /* Tagged DSA ethertype entry */ + mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true, + MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); + + /* Set default entry, in case DSA or EDSA tag not found */ + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA); + pe.index = MVPP2_PE_DSA_DEFAULT; + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN); + + /* Shift 0 bytes */ + mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); + + /* Clear all sram ai bits for next iteration */ + mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); + + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + mvpp2_prs_hw_write(priv, &pe); +} + +/* Initialize parser entries for VID filtering */ +static void mvpp2_prs_vid_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + + memset(&pe, 0, sizeof(pe)); + + /* Set default vid entry */ + pe.index = MVPP2_PE_VID_FLTR_DEFAULT; + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); + + mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_EDSA_VID_AI_BIT); + + /* Skip VLAN header - Set offset to 4 bytes */ + mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Clear all ai bits for next iteration */ + mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); + + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); + mvpp2_prs_hw_write(priv, &pe); + + /* Set default vid entry for extended DSA*/ + memset(&pe, 0, sizeof(pe)); + + /* Set default vid entry */ + pe.index = MVPP2_PE_VID_EDSA_FLTR_DEFAULT; + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); + + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_EDSA_VID_AI_BIT, + MVPP2_PRS_EDSA_VID_AI_BIT); + + /* Skip VLAN header - Set offset to 8 bytes */ + mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_EDSA_LEN, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Clear all ai bits for next iteration */ + mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); + + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); + mvpp2_prs_hw_write(priv, &pe); +} + +/* Match basic ethertypes */ +static int mvpp2_prs_etype_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + int tid; + + /* Ethertype: PPPoE */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); + pe.index = tid; + + mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES); + + mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK, + MVPP2_PRS_RI_PPPOE_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); + 
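+	/* PPPoE frames need a further lookup pass (MVPP2_PRS_LU_PPPOE),
+	 * so this shadow entry is not marked as final.
+	 */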
priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; + priv->prs_shadow[pe.index].finish = false; + mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK, + MVPP2_PRS_RI_PPPOE_MASK); + mvpp2_prs_hw_write(priv, &pe); + + /* Ethertype: ARP */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); + pe.index = tid; + + mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP); + + /* Generate flow in the next iteration*/ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP, + MVPP2_PRS_RI_L3_PROTO_MASK); + /* Set L3 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); + priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; + priv->prs_shadow[pe.index].finish = true; + mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP, + MVPP2_PRS_RI_L3_PROTO_MASK); + mvpp2_prs_hw_write(priv, &pe); + + /* Ethertype: LBTD */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); + pe.index = tid; + + mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE); + + /* Generate flow in the next iteration*/ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC | + MVPP2_PRS_RI_UDF3_RX_SPECIAL, + MVPP2_PRS_RI_CPU_CODE_MASK | + MVPP2_PRS_RI_UDF3_MASK); + /* Set L3 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); + priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; + priv->prs_shadow[pe.index].finish = true; + mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC | + MVPP2_PRS_RI_UDF3_RX_SPECIAL, + MVPP2_PRS_RI_CPU_CODE_MASK | + MVPP2_PRS_RI_UDF3_MASK); + mvpp2_prs_hw_write(priv, &pe); + + /* Ethertype: IPv4 without options */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); + pe.index = tid; + + mvpp2_prs_match_etype(&pe, 0, ETH_P_IP); + mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL, + MVPP2_PRS_IPV4_HEAD_MASK | + MVPP2_PRS_IPV4_IHL_MASK); + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, + MVPP2_PRS_RI_L3_PROTO_MASK); + /* Skip eth_type + 4 bytes of IP header */ + mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + /* Set L3 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); + priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; + priv->prs_shadow[pe.index].finish = false; + mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4, + MVPP2_PRS_RI_L3_PROTO_MASK); 
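+	/* Commit the IPv4 (no options) entry. 'pe' is reused below, at a new
+	 * index, for the IPv4-with-options variant: the IHL match is relaxed
+	 * and the result-info bits are replaced, everything else is kept.
+	 */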
+ mvpp2_prs_hw_write(priv, &pe); + + /* Ethertype: IPv4 with options */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + pe.index = tid; + + mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_IPV4_HEAD, + MVPP2_PRS_IPV4_HEAD_MASK); + + /* Clear ri before updating */ + pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0; + pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT, + MVPP2_PRS_RI_L3_PROTO_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); + priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; + priv->prs_shadow[pe.index].finish = false; + mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT, + MVPP2_PRS_RI_L3_PROTO_MASK); + mvpp2_prs_hw_write(priv, &pe); + + /* Ethertype: IPv6 without options */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); + pe.index = tid; + + mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6); + + /* Skip DIP of IPV6 header */ + mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 + + MVPP2_MAX_L3_ADDR_SIZE, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6, + MVPP2_PRS_RI_L3_PROTO_MASK); + /* Set L3 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); + priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; + priv->prs_shadow[pe.index].finish = false; + mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6, + MVPP2_PRS_RI_L3_PROTO_MASK); + mvpp2_prs_hw_write(priv, &pe); + + /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */ + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); + pe.index = MVPP2_PE_ETH_TYPE_UN; + + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Generate flow in the next iteration*/ + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN, + MVPP2_PRS_RI_L3_PROTO_MASK); + /* Set L3 offset even it's unknown L3 */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); + priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; + priv->prs_shadow[pe.index].finish = true; + mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN, + MVPP2_PRS_RI_L3_PROTO_MASK); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Configure vlan entries and detect up to 2 successive VLAN tags. 
+ * Possible options: + * 0x8100, 0x88A8 + * 0x8100, 0x8100 + * 0x8100 + * 0x88A8 + */ +static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + int err; + + priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool), + MVPP2_PRS_DBL_VLANS_MAX, + GFP_KERNEL); + if (!priv->prs_double_vlans) + return -ENOMEM; + + /* Double VLAN: 0x8100, 0x88A8 */ + err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD, + MVPP2_PRS_PORT_MASK); + if (err) + return err; + + /* Double VLAN: 0x8100, 0x8100 */ + err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q, + MVPP2_PRS_PORT_MASK); + if (err) + return err; + + /* Single VLAN: 0x88a8 */ + err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI, + MVPP2_PRS_PORT_MASK); + if (err) + return err; + + /* Single VLAN: 0x8100 */ + err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI, + MVPP2_PRS_PORT_MASK); + if (err) + return err; + + /* Set default double vlan entry */ + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); + pe.index = MVPP2_PE_VLAN_DBL; + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID); + + /* Clear ai for next iterations */ + mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE, + MVPP2_PRS_RI_VLAN_MASK); + + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT, + MVPP2_PRS_DBL_VLAN_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); + mvpp2_prs_hw_write(priv, &pe); + + /* Set default vlan none entry */ + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); + pe.index = MVPP2_PE_VLAN_NONE; + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE, + MVPP2_PRS_RI_VLAN_MASK); + + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Set entries for PPPoE ethertype */ +static int mvpp2_prs_pppoe_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + int tid; + + /* IPv4 over PPPoE with options */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE); + pe.index = tid; + + mvpp2_prs_match_etype(&pe, 0, PPP_IP); + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT, + MVPP2_PRS_RI_L3_PROTO_MASK); + /* Skip eth_type + 4 bytes of IP header */ + mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + /* Set L3 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); + mvpp2_prs_hw_write(priv, &pe); + + /* IPv4 over PPPoE without options */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + pe.index = tid; + + mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL, + MVPP2_PRS_IPV4_HEAD_MASK | + MVPP2_PRS_IPV4_IHL_MASK); + + /* Clear ri 
before updating */ + pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0; + pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, + MVPP2_PRS_RI_L3_PROTO_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); + mvpp2_prs_hw_write(priv, &pe); + + /* IPv6 over PPPoE */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE); + pe.index = tid; + + mvpp2_prs_match_etype(&pe, 0, PPP_IPV6); + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6, + MVPP2_PRS_RI_L3_PROTO_MASK); + /* Jump to DIP of IPV6 header */ + mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 + + MVPP2_MAX_L3_ADDR_SIZE, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + /* Set L3 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); + mvpp2_prs_hw_write(priv, &pe); + + /* Non-IP over PPPoE */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE); + pe.index = tid; + + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN, + MVPP2_PRS_RI_L3_PROTO_MASK); + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + /* Set L3 offset even if it's unknown L3 */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Initialize entries for IPv4 */ +static int mvpp2_prs_ip4_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + int err; + + /* Set entries for TCP, UDP and IGMP over IPv4 */ + err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_RI_L4_PROTO_MASK); + if (err) + return err; + + err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_RI_L4_PROTO_MASK); + if (err) + return err; + + err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP, + MVPP2_PRS_RI_CPU_CODE_RX_SPEC | + MVPP2_PRS_RI_UDF3_RX_SPECIAL, + MVPP2_PRS_RI_CPU_CODE_MASK | + MVPP2_PRS_RI_UDF3_MASK); + if (err) + return err; + + /* IPv4 Broadcast */ + err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST); + if (err) + return err; + + /* IPv4 Multicast */ + err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST); + if (err) + return err; + + /* Default IPv4 entry for unknown protocols */ + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); + pe.index = MVPP2_PE_IP4_PROTO_UN; + + /* Set next lu to IPv4 */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); + mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + /* Set L4 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, + sizeof(struct iphdr) - 4, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, + MVPP2_PRS_IPV4_DIP_AI_BIT); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER, + MVPP2_PRS_RI_L4_PROTO_MASK); + + mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT); + 
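/* [Editor's note, added in this review - not part of the upstream
 * patch:] at this point the parser already sits 4 bytes into the IPv4
 * header (the L2/PPPoE entries above shift past the ethertype plus 4
 * bytes of IP header), so the 12-byte shift above lands on the
 * destination address for the second IPv4 pass (requested via
 * MVPP2_PRS_IPV4_DIP_AI_BIT), and the UDF offset of
 * sizeof(struct iphdr) - 4 = 16 marks the L4 header at byte 20 of an
 * option-less IPv4 header.
 */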
/* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + /* Default IPv4 entry for unicast address */ + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); + pe.index = MVPP2_PE_IP4_ADDR_UN; + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST, + MVPP2_PRS_RI_L3_ADDR_MASK); + + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, + MVPP2_PRS_IPV4_DIP_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Initialize entries for IPv6 */ +static int mvpp2_prs_ip6_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + int tid, err; + + /* Set entries for TCP, UDP and ICMP over IPv6 */ + err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP, + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_RI_L4_PROTO_MASK); + if (err) + return err; + + err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP, + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_RI_L4_PROTO_MASK); + if (err) + return err; + + err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6, + MVPP2_PRS_RI_CPU_CODE_RX_SPEC | + MVPP2_PRS_RI_UDF3_RX_SPECIAL, + MVPP2_PRS_RI_CPU_CODE_MASK | + MVPP2_PRS_RI_UDF3_MASK); + if (err) + return err; + + /* IPv4 is the last header. This is similar case as 6-TCP or 17-UDP */ + /* Result Info: UDF7=1, DS lite */ + err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP, + MVPP2_PRS_RI_UDF7_IP6_LITE, + MVPP2_PRS_RI_UDF7_MASK); + if (err) + return err; + + /* IPv6 multicast */ + err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST); + if (err) + return err; + + /* Entry for checking hop limit */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); + pe.index = tid; + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN | + MVPP2_PRS_RI_DROP_MASK, + MVPP2_PRS_RI_L3_PROTO_MASK | + MVPP2_PRS_RI_DROP_MASK); + + mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK); + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, + MVPP2_PRS_IPV6_NO_EXT_AI_BIT); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + /* Default IPv6 entry for unknown protocols */ + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); + pe.index = MVPP2_PE_IP6_PROTO_UN; + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER, + MVPP2_PRS_RI_L4_PROTO_MASK); + /* Set L4 offset relatively to our current place */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, + sizeof(struct ipv6hdr) - 4, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, + MVPP2_PRS_IPV6_NO_EXT_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, 
MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + /* Default IPv6 entry for unknown ext protocols */ + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); + pe.index = MVPP2_PE_IP6_EXT_PROTO_UN; + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER, + MVPP2_PRS_RI_L4_PROTO_MASK); + + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT, + MVPP2_PRS_IPV6_EXT_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + /* Default IPv6 entry for unicast address */ + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); + pe.index = MVPP2_PE_IP6_ADDR_UN; + + /* Finished: go to IPv6 again */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST, + MVPP2_PRS_RI_L3_ADDR_MASK); + mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, + MVPP2_PRS_IPV6_NO_EXT_AI_BIT); + /* Shift back to IPV6 NH */ + mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Find tcam entry with matched pair <vid,port> */ +static int mvpp2_prs_vid_range_find(struct mvpp2_port *port, u16 vid, u16 mask) +{ + unsigned char byte[2], enable[2]; + struct mvpp2_prs_entry pe; + u16 rvid, rmask; + int tid; + + /* Go through the all entries with MVPP2_PRS_LU_VID */ + for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id); + tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) { + if (!port->priv->prs_shadow[tid].valid || + port->priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID) + continue; + + mvpp2_prs_init_from_hw(port->priv, &pe, tid); + + mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]); + mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]); + + rvid = ((byte[0] & 0xf) << 8) + byte[1]; + rmask = ((enable[0] & 0xf) << 8) + enable[1]; + + if (rvid != vid || rmask != mask) + continue; + + return tid; + } + + return -ENOENT; +} + +/* Write parser entry for VID filtering */ +int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid) +{ + unsigned int vid_start = MVPP2_PE_VID_FILT_RANGE_START + + port->id * MVPP2_PRS_VLAN_FILT_MAX; + unsigned int mask = 0xfff, reg_val, shift; + struct mvpp2 *priv = port->priv; + struct mvpp2_prs_entry pe; + int tid; + + memset(&pe, 0, sizeof(pe)); + + /* Scan TCAM and see if entry with this <vid,port> already exist */ + tid = mvpp2_prs_vid_range_find(port, vid, mask); + + reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id)); + if (reg_val & MVPP2_DSA_EXTENDED) + shift = MVPP2_VLAN_TAG_EDSA_LEN; + else + shift = MVPP2_VLAN_TAG_LEN; + + /* No such entry */ + if (tid < 0) { + + /* Go through all entries from first to last in vlan range */ + tid = mvpp2_prs_tcam_first_free(priv, vid_start, + vid_start + + MVPP2_PRS_VLAN_FILT_MAX_ENTRY); + + /* There isn't room for a new VID filter */ + if (tid < 0) + return tid; 
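/* [Editor's note, review annotation only:] the 12-bit VID is carried in
 * TCAM header bytes 2 and 3 (MVPP2_PRS_VID_TCAM_BYTE): judging from
 * mvpp2_prs_vid_range_find() above, the upper four bits sit in the low
 * nibble of byte 2 and the lower eight bits in byte 3
 * (((byte[0] & 0xf) << 8) + byte[1]).
 */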
+ + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); + pe.index = tid; + + /* Mask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, 0); + } else { + mvpp2_prs_init_from_hw(priv, &pe, tid); + } + + /* Enable the current port */ + mvpp2_prs_tcam_port_set(&pe, port->id, true); + + /* Continue - set next lookup */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); + + /* Skip VLAN header - Set offset to 4 or 8 bytes */ + mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Set match on VID */ + mvpp2_prs_match_vid(&pe, MVPP2_PRS_VID_TCAM_BYTE, vid); + + /* Clear all ai bits for next iteration */ + mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); + + /* Update shadow table */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Write parser entry for VID filtering */ +void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid) +{ + struct mvpp2 *priv = port->priv; + int tid; + + /* Scan TCAM and see if entry with this <vid,port> already exist */ + tid = mvpp2_prs_vid_range_find(port, vid, 0xfff); + + /* No such entry */ + if (tid < 0) + return; + + mvpp2_prs_hw_inv(priv, tid); + priv->prs_shadow[tid].valid = false; +} + +/* Remove all existing VID filters on this port */ +void mvpp2_prs_vid_remove_all(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + int tid; + + for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id); + tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) { + if (priv->prs_shadow[tid].valid) { + mvpp2_prs_hw_inv(priv, tid); + priv->prs_shadow[tid].valid = false; + } + } +} + +/* Remove VID filering entry for this port */ +void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port) +{ + unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id); + struct mvpp2 *priv = port->priv; + + /* Invalidate the guard entry */ + mvpp2_prs_hw_inv(priv, tid); + + priv->prs_shadow[tid].valid = false; +} + +/* Add guard entry that drops packets when no VID is matched on this port */ +void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port) +{ + unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id); + struct mvpp2 *priv = port->priv; + unsigned int reg_val, shift; + struct mvpp2_prs_entry pe; + + if (priv->prs_shadow[tid].valid) + return; + + memset(&pe, 0, sizeof(pe)); + + pe.index = tid; + + reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id)); + if (reg_val & MVPP2_DSA_EXTENDED) + shift = MVPP2_VLAN_TAG_EDSA_LEN; + else + shift = MVPP2_VLAN_TAG_LEN; + + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); + + /* Mask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, 0); + + /* Update port mask */ + mvpp2_prs_tcam_port_set(&pe, port->id, true); + + /* Continue - set next lookup */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); + + /* Skip VLAN header - Set offset to 4 or 8 bytes */ + mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Drop VLAN packets that don't belong to any VIDs on this port */ + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK, + MVPP2_PRS_RI_DROP_MASK); + + /* Clear all ai bits for next iteration */ + mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); + + /* Update shadow table */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); + mvpp2_prs_hw_write(priv, &pe); +} + +/* Parser default initialization */ +int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv) +{ + int err, index, i; + + /* Enable tcam table */ + mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK); + + /* Clear all tcam and sram 
entries */ + for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) { + mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index); + for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) + mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0); + + mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index); + for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) + mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0); + } + + /* Invalidate all tcam entries */ + for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) + mvpp2_prs_hw_inv(priv, index); + + priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE, + sizeof(*priv->prs_shadow), + GFP_KERNEL); + if (!priv->prs_shadow) + return -ENOMEM; + + /* Always start from lookup = 0 */ + for (index = 0; index < MVPP2_MAX_PORTS; index++) + mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH, + MVPP2_PRS_PORT_LU_MAX, 0); + + mvpp2_prs_def_flow_init(priv); + + mvpp2_prs_mh_init(priv); + + mvpp2_prs_mac_init(priv); + + mvpp2_prs_dsa_init(priv); + + mvpp2_prs_vid_init(priv); + + err = mvpp2_prs_etype_init(priv); + if (err) + return err; + + err = mvpp2_prs_vlan_init(pdev, priv); + if (err) + return err; + + err = mvpp2_prs_pppoe_init(priv); + if (err) + return err; + + err = mvpp2_prs_ip6_init(priv); + if (err) + return err; + + err = mvpp2_prs_ip4_init(priv); + if (err) + return err; + + return 0; +} + +/* Compare MAC DA with tcam entry data */ +static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe, + const u8 *da, unsigned char *mask) +{ + unsigned char tcam_byte, tcam_mask; + int index; + + for (index = 0; index < ETH_ALEN; index++) { + mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask); + if (tcam_mask != mask[index]) + return false; + + if ((tcam_mask & tcam_byte) != (da[index] & mask[index])) + return false; + } + + return true; +} + +/* Find tcam entry with matched pair <MAC DA, port> */ +static int +mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da, + unsigned char *mask, int udf_type) +{ + struct mvpp2_prs_entry pe; + int tid; + + /* Go through the all entires with MVPP2_PRS_LU_MAC */ + for (tid = MVPP2_PE_MAC_RANGE_START; + tid <= MVPP2_PE_MAC_RANGE_END; tid++) { + unsigned int entry_pmap; + + if (!priv->prs_shadow[tid].valid || + (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) || + (priv->prs_shadow[tid].udf != udf_type)) + continue; + + mvpp2_prs_init_from_hw(priv, &pe, tid); + entry_pmap = mvpp2_prs_tcam_port_map_get(&pe); + + if (mvpp2_prs_mac_range_equals(&pe, da, mask) && + entry_pmap == pmap) + return tid; + } + + return -ENOENT; +} + +/* Update parser's mac da entry */ +int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add) +{ + unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + struct mvpp2 *priv = port->priv; + unsigned int pmap, len, ri; + struct mvpp2_prs_entry pe; + int tid; + + memset(&pe, 0, sizeof(pe)); + + /* Scan TCAM and see if entry with this <MAC DA, port> already exist */ + tid = mvpp2_prs_mac_da_range_find(priv, BIT(port->id), da, mask, + MVPP2_PRS_UDF_MAC_DEF); + + /* No such entry */ + if (tid < 0) { + if (!add) + return 0; + + /* Create new TCAM entry */ + /* Go through the all entries from first to last */ + tid = mvpp2_prs_tcam_first_free(priv, + MVPP2_PE_MAC_RANGE_START, + MVPP2_PE_MAC_RANGE_END); + if (tid < 0) + return tid; + + pe.index = tid; + + /* Mask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, 0); + } else { + mvpp2_prs_init_from_hw(priv, &pe, tid); + } + + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); + + /* Update port mask */ + 
mvpp2_prs_tcam_port_set(&pe, port->id, add); + + /* Invalidate the entry if no ports are left enabled */ + pmap = mvpp2_prs_tcam_port_map_get(&pe); + if (pmap == 0) { + if (add) + return -EINVAL; + + mvpp2_prs_hw_inv(priv, pe.index); + priv->prs_shadow[pe.index].valid = false; + return 0; + } + + /* Continue - set next lookup */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA); + + /* Set match on DA */ + len = ETH_ALEN; + while (len--) + mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff); + + /* Set result info bits */ + if (is_broadcast_ether_addr(da)) { + ri = MVPP2_PRS_RI_L2_BCAST; + } else if (is_multicast_ether_addr(da)) { + ri = MVPP2_PRS_RI_L2_MCAST; + } else { + ri = MVPP2_PRS_RI_L2_UCAST; + + if (ether_addr_equal(da, port->dev->dev_addr)) + ri |= MVPP2_PRS_RI_MAC_ME_MASK; + } + + mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK | + MVPP2_PRS_RI_MAC_ME_MASK); + mvpp2_prs_shadow_ri_set(priv, pe.index, ri, MVPP2_PRS_RI_L2_CAST_MASK | + MVPP2_PRS_RI_MAC_ME_MASK); + + /* Shift to ethertype */ + mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Update shadow table and hw entry */ + priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_MAC_DEF; + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da) +{ + struct mvpp2_port *port = netdev_priv(dev); + int err; + + /* Remove old parser entry */ + err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, false); + if (err) + return err; + + /* Add new parser entry */ + err = mvpp2_prs_mac_da_accept(port, da, true); + if (err) + return err; + + /* Set addr in the device */ + ether_addr_copy(dev->dev_addr, da); + + return 0; +} + +void mvpp2_prs_mac_del_all(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + struct mvpp2_prs_entry pe; + unsigned long pmap; + int index, tid; + + for (tid = MVPP2_PE_MAC_RANGE_START; + tid <= MVPP2_PE_MAC_RANGE_END; tid++) { + unsigned char da[ETH_ALEN], da_mask[ETH_ALEN]; + + if (!priv->prs_shadow[tid].valid || + (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) || + (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF)) + continue; + + mvpp2_prs_init_from_hw(priv, &pe, tid); + + pmap = mvpp2_prs_tcam_port_map_get(&pe); + + /* We only want entries active on this port */ + if (!test_bit(port->id, &pmap)) + continue; + + /* Read mac addr from entry */ + for (index = 0; index < ETH_ALEN; index++) + mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index], + &da_mask[index]); + + /* Special cases : Don't remove broadcast and port's own + * address + */ + if (is_broadcast_ether_addr(da) || + ether_addr_equal(da, port->dev->dev_addr)) + continue; + + /* Remove entry from TCAM */ + mvpp2_prs_mac_da_accept(port, da, false); + } +} + +int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) +{ + switch (type) { + case MVPP2_TAG_TYPE_EDSA: + /* Add port to EDSA entries */ + mvpp2_prs_dsa_tag_set(priv, port, true, + MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); + mvpp2_prs_dsa_tag_set(priv, port, true, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); + /* Remove port from DSA entries */ + mvpp2_prs_dsa_tag_set(priv, port, false, + MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); + mvpp2_prs_dsa_tag_set(priv, port, false, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); + break; + + case MVPP2_TAG_TYPE_DSA: + /* Add port to DSA entries */ + mvpp2_prs_dsa_tag_set(priv, port, true, + MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); + mvpp2_prs_dsa_tag_set(priv, port, true, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); + 
/* Remove port from EDSA entries */ + mvpp2_prs_dsa_tag_set(priv, port, false, + MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); + mvpp2_prs_dsa_tag_set(priv, port, false, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); + break; + + case MVPP2_TAG_TYPE_MH: + case MVPP2_TAG_TYPE_NONE: + /* Remove port form EDSA and DSA entries */ + mvpp2_prs_dsa_tag_set(priv, port, false, + MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); + mvpp2_prs_dsa_tag_set(priv, port, false, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); + mvpp2_prs_dsa_tag_set(priv, port, false, + MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); + mvpp2_prs_dsa_tag_set(priv, port, false, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); + break; + + default: + if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA)) + return -EINVAL; + } + + return 0; +} + +int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask) +{ + struct mvpp2_prs_entry pe; + u8 *ri_byte, *ri_byte_mask; + int tid, i; + + memset(&pe, 0, sizeof(pe)); + + tid = mvpp2_prs_tcam_first_free(priv, + MVPP2_PE_LAST_FREE_TID, + MVPP2_PE_FIRST_FREE_TID); + if (tid < 0) + return tid; + + pe.index = tid; + + ri_byte = (u8 *)&ri; + ri_byte_mask = (u8 *)&ri_mask; + + mvpp2_prs_sram_ai_update(&pe, flow, MVPP2_PRS_FLOW_ID_MASK); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1); + + for (i = 0; i < 4; i++) { + mvpp2_prs_tcam_data_byte_set(&pe, i, ri_byte[i], + ri_byte_mask[i]); + } + + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Set prs flow for the port */ +int mvpp2_prs_def_flow(struct mvpp2_port *port) +{ + struct mvpp2_prs_entry pe; + int tid; + + memset(&pe, 0, sizeof(pe)); + + tid = mvpp2_prs_flow_find(port->priv, port->id); + + /* Such entry not exist */ + if (tid < 0) { + /* Go through the all entires from last to first */ + tid = mvpp2_prs_tcam_first_free(port->priv, + MVPP2_PE_LAST_FREE_TID, + MVPP2_PE_FIRST_FREE_TID); + if (tid < 0) + return tid; + + pe.index = tid; + + /* Set flow ID*/ + mvpp2_prs_sram_ai_update(&pe, port->id, MVPP2_PRS_FLOW_ID_MASK); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1); + + /* Update shadow table */ + mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS); + } else { + mvpp2_prs_init_from_hw(port->priv, &pe, tid); + } + + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id)); + mvpp2_prs_hw_write(port->priv, &pe); + + return 0; +} + +int mvpp2_prs_hits(struct mvpp2 *priv, int index) +{ + u32 val; + + if (index > MVPP2_PRS_TCAM_SRAM_SIZE) + return -EINVAL; + + mvpp2_write(priv, MVPP2_PRS_TCAM_HIT_IDX_REG, index); + + val = mvpp2_read(priv, MVPP2_PRS_TCAM_HIT_CNT_REG); + + val &= MVPP2_PRS_TCAM_HIT_CNT_MASK; + + return val; +} diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h new file mode 100644 index 000000000..4b68dd374 --- /dev/null +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h @@ -0,0 +1,333 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Header Parser definitions for Marvell PPv2 Network Controller + * + * Copyright (C) 2014 Marvell + * + * Marcin Wojtas <mw@semihalf.com> + */ +#ifndef _MVPP2_PRS_H_ +#define _MVPP2_PRS_H_ + +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/platform_device.h> + +#include "mvpp2.h" + +/* Parser constants */ +#define MVPP2_PRS_TCAM_SRAM_SIZE 256 +#define MVPP2_PRS_TCAM_WORDS 6 +#define MVPP2_PRS_SRAM_WORDS 4 +#define 
MVPP2_PRS_FLOW_ID_SIZE 64 +#define MVPP2_PRS_FLOW_ID_MASK 0x3f +#define MVPP2_PRS_TCAM_ENTRY_INVALID 1 +#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5) +#define MVPP2_PRS_IPV4_HEAD 0x40 +#define MVPP2_PRS_IPV4_HEAD_MASK 0xf0 +#define MVPP2_PRS_IPV4_MC 0xe0 +#define MVPP2_PRS_IPV4_MC_MASK 0xf0 +#define MVPP2_PRS_IPV4_BC_MASK 0xff +#define MVPP2_PRS_IPV4_IHL 0x5 +#define MVPP2_PRS_IPV4_IHL_MASK 0xf +#define MVPP2_PRS_IPV6_MC 0xff +#define MVPP2_PRS_IPV6_MC_MASK 0xff +#define MVPP2_PRS_IPV6_HOP_MASK 0xff +#define MVPP2_PRS_TCAM_PROTO_MASK 0xff +#define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f +#define MVPP2_PRS_DBL_VLANS_MAX 100 +#define MVPP2_PRS_CAST_MASK BIT(0) +#define MVPP2_PRS_MCAST_VAL BIT(0) +#define MVPP2_PRS_UCAST_VAL 0x0 + +/* Tcam structure: + * - lookup ID - 4 bits + * - port ID - 1 byte + * - additional information - 1 byte + * - header data - 8 bytes + * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0). + */ +#define MVPP2_PRS_AI_BITS 8 +#define MVPP2_PRS_AI_MASK 0xff +#define MVPP2_PRS_PORT_MASK 0xff +#define MVPP2_PRS_LU_MASK 0xf + +/* TCAM entries in registers are accessed using 16 data bits + 16 enable bits */ +#define MVPP2_PRS_BYTE_TO_WORD(byte) ((byte) / 2) +#define MVPP2_PRS_BYTE_IN_WORD(byte) ((byte) % 2) + +#define MVPP2_PRS_TCAM_EN(data) ((data) << 16) +#define MVPP2_PRS_TCAM_AI_WORD 4 +#define MVPP2_PRS_TCAM_AI(ai) (ai) +#define MVPP2_PRS_TCAM_AI_EN(ai) MVPP2_PRS_TCAM_EN(MVPP2_PRS_TCAM_AI(ai)) +#define MVPP2_PRS_TCAM_PORT_WORD 4 +#define MVPP2_PRS_TCAM_PORT(p) ((p) << 8) +#define MVPP2_PRS_TCAM_PORT_EN(p) MVPP2_PRS_TCAM_EN(MVPP2_PRS_TCAM_PORT(p)) +#define MVPP2_PRS_TCAM_LU_WORD 5 +#define MVPP2_PRS_TCAM_LU(lu) (lu) +#define MVPP2_PRS_TCAM_LU_EN(lu) MVPP2_PRS_TCAM_EN(MVPP2_PRS_TCAM_LU(lu)) +#define MVPP2_PRS_TCAM_INV_WORD 5 + +#define MVPP2_PRS_VID_TCAM_BYTE 2 + +/* TCAM range for unicast and multicast filtering. We have 25 entries per port, + * with 4 dedicated to UC filtering and the rest to multicast filtering. + * Additionnally we reserve one entry for the broadcast address, and one for + * each port's own address. 
+ */ +#define MVPP2_PRS_MAC_UC_MC_FILT_MAX 25 +#define MVPP2_PRS_MAC_RANGE_SIZE 80 + +/* Number of entries per port dedicated to UC and MC filtering */ +#define MVPP2_PRS_MAC_UC_FILT_MAX 4 +#define MVPP2_PRS_MAC_MC_FILT_MAX (MVPP2_PRS_MAC_UC_MC_FILT_MAX - \ + MVPP2_PRS_MAC_UC_FILT_MAX) + +/* There is a TCAM range reserved for VLAN filtering entries, range size is 33 + * 10 VLAN ID filter entries per port + * 1 default VLAN filter entry per port + * It is assumed that there are 3 ports for filter, not including loopback port + */ +#define MVPP2_PRS_VLAN_FILT_MAX 11 +#define MVPP2_PRS_VLAN_FILT_RANGE_SIZE 33 + +#define MVPP2_PRS_VLAN_FILT_MAX_ENTRY (MVPP2_PRS_VLAN_FILT_MAX - 2) +#define MVPP2_PRS_VLAN_FILT_DFLT_ENTRY (MVPP2_PRS_VLAN_FILT_MAX - 1) + +/* Tcam entries ID */ +#define MVPP2_PE_DROP_ALL 0 +#define MVPP2_PE_FIRST_FREE_TID 1 + +/* MAC filtering range */ +#define MVPP2_PE_MAC_RANGE_END (MVPP2_PE_VID_FILT_RANGE_START - 1) +#define MVPP2_PE_MAC_RANGE_START (MVPP2_PE_MAC_RANGE_END - \ + MVPP2_PRS_MAC_RANGE_SIZE + 1) +/* VLAN filtering range */ +#define MVPP2_PE_VID_FILT_RANGE_END (MVPP2_PRS_TCAM_SRAM_SIZE - 31) +#define MVPP2_PE_VID_FILT_RANGE_START (MVPP2_PE_VID_FILT_RANGE_END - \ + MVPP2_PRS_VLAN_FILT_RANGE_SIZE + 1) +#define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_MAC_RANGE_START - 1) +#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30) +#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 29) +#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28) +#define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 27) +#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 22) +#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 21) +#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 20) +#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 19) +#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18) +#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17) +#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16) +#define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15) +#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14) +#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 13) +#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 12) +#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 11) +#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 10) +#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 9) +#define MVPP2_PE_VID_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 8) +#define MVPP2_PE_VID_EDSA_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 7) +#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 6) +#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 5) +#define MVPP2_PE_FC_DROP (MVPP2_PRS_TCAM_SRAM_SIZE - 4) +#define MVPP2_PE_MAC_MC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 3) +#define MVPP2_PE_MAC_UC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2) +#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1) + +#define MVPP2_PRS_VID_PORT_FIRST(port) (MVPP2_PE_VID_FILT_RANGE_START + \ + ((port) * MVPP2_PRS_VLAN_FILT_MAX)) +#define MVPP2_PRS_VID_PORT_LAST(port) (MVPP2_PRS_VID_PORT_FIRST(port) \ + + MVPP2_PRS_VLAN_FILT_MAX_ENTRY) +/* Index of default vid filter for given port */ +#define MVPP2_PRS_VID_PORT_DFLT(port) (MVPP2_PRS_VID_PORT_FIRST(port) \ + + MVPP2_PRS_VLAN_FILT_DFLT_ENTRY) + +/* Sram structure + * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0). 
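 *
 * [Editor's note, added in this review - not part of the upstream
 *  patch:] the SRAM side of an entry is spread over the four 32-bit
 *  MVPP2_PRS_SRAM_WORDS registers; the *_OFFS values below are absolute
 *  bit positions within that word, e.g. the result-info bits occupy
 *  bits 0-31, the next-lookup ID bits 106-109, and
 *  MVPP2_PRS_SRAM_LU_GEN_BIT (111) is the "finished, generate flow id"
 *  flag used throughout the init code above.
 *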
+ */ +#define MVPP2_PRS_SRAM_RI_OFFS 0 +#define MVPP2_PRS_SRAM_RI_WORD 0 +#define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32 +#define MVPP2_PRS_SRAM_RI_CTRL_WORD 1 +#define MVPP2_PRS_SRAM_RI_CTRL_BITS 32 +#define MVPP2_PRS_SRAM_SHIFT_OFFS 64 +#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72 +#define MVPP2_PRS_SRAM_SHIFT_MASK 0xff +#define MVPP2_PRS_SRAM_UDF_OFFS 73 +#define MVPP2_PRS_SRAM_UDF_BITS 8 +#define MVPP2_PRS_SRAM_UDF_MASK 0xff +#define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81 +#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82 +#define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7 +#define MVPP2_PRS_SRAM_UDF_TYPE_L3 1 +#define MVPP2_PRS_SRAM_UDF_TYPE_L4 4 +#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85 +#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3 +#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1 +#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2 +#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3 +#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87 +#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2 +#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3 +#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0 +#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2 +#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3 +#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89 +#define MVPP2_PRS_SRAM_AI_OFFS 90 +#define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98 +#define MVPP2_PRS_SRAM_AI_CTRL_BITS 8 +#define MVPP2_PRS_SRAM_AI_MASK 0xff +#define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106 +#define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf +#define MVPP2_PRS_SRAM_LU_DONE_BIT 110 +#define MVPP2_PRS_SRAM_LU_GEN_BIT 111 + +/* Sram result info bits assignment */ +#define MVPP2_PRS_RI_MAC_ME_MASK 0x1 +#define MVPP2_PRS_RI_DSA_MASK 0x2 +#define MVPP2_PRS_RI_VLAN_MASK (BIT(2) | BIT(3)) +#define MVPP2_PRS_RI_VLAN_NONE 0x0 +#define MVPP2_PRS_RI_VLAN_SINGLE BIT(2) +#define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3) +#define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3)) +#define MVPP2_PRS_RI_CPU_CODE_MASK 0x70 +#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4) +#define MVPP2_PRS_RI_L2_CAST_MASK (BIT(9) | BIT(10)) +#define MVPP2_PRS_RI_L2_UCAST 0x0 +#define MVPP2_PRS_RI_L2_MCAST BIT(9) +#define MVPP2_PRS_RI_L2_BCAST BIT(10) +#define MVPP2_PRS_RI_PPPOE_MASK 0x800 +#define MVPP2_PRS_RI_L3_PROTO_MASK (BIT(12) | BIT(13) | BIT(14)) +#define MVPP2_PRS_RI_L3_UN 0x0 +#define MVPP2_PRS_RI_L3_IP4 BIT(12) +#define MVPP2_PRS_RI_L3_IP4_OPT BIT(13) +#define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13)) +#define MVPP2_PRS_RI_L3_IP6 BIT(14) +#define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14)) +#define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14)) +#define MVPP2_PRS_RI_L3_ADDR_MASK (BIT(15) | BIT(16)) +#define MVPP2_PRS_RI_L3_UCAST 0x0 +#define MVPP2_PRS_RI_L3_MCAST BIT(15) +#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16)) +#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000 +#define MVPP2_PRS_RI_IP_FRAG_TRUE BIT(17) +#define MVPP2_PRS_RI_UDF3_MASK 0x300000 +#define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21) +#define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000 +#define MVPP2_PRS_RI_L4_TCP BIT(22) +#define MVPP2_PRS_RI_L4_UDP BIT(23) +#define MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23)) +#define MVPP2_PRS_RI_UDF7_MASK 0x60000000 +#define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29) +#define MVPP2_PRS_RI_DROP_MASK 0x80000000 + +#define MVPP2_PRS_IP_MASK (MVPP2_PRS_RI_L3_PROTO_MASK | \ + MVPP2_PRS_RI_IP_FRAG_MASK | \ + MVPP2_PRS_RI_L4_PROTO_MASK) + +/* Sram additional info bits assignment */ +#define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0) +#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0) +#define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1) +#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2) +#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3) +#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4) 
+#define MVPP2_PRS_SINGLE_VLAN_AI 0 +#define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7) +#define MVPP2_PRS_EDSA_VID_AI_BIT BIT(0) + +/* DSA/EDSA type */ +#define MVPP2_PRS_TAGGED true +#define MVPP2_PRS_UNTAGGED false +#define MVPP2_PRS_EDSA true +#define MVPP2_PRS_DSA false + +/* MAC entries, shadow udf */ +enum mvpp2_prs_udf { + MVPP2_PRS_UDF_MAC_DEF, + MVPP2_PRS_UDF_MAC_RANGE, + MVPP2_PRS_UDF_L2_DEF, + MVPP2_PRS_UDF_L2_DEF_COPY, + MVPP2_PRS_UDF_L2_USER, +}; + +/* Lookup ID */ +enum mvpp2_prs_lookup { + MVPP2_PRS_LU_MH, + MVPP2_PRS_LU_MAC, + MVPP2_PRS_LU_DSA, + MVPP2_PRS_LU_VLAN, + MVPP2_PRS_LU_VID, + MVPP2_PRS_LU_L2, + MVPP2_PRS_LU_PPPOE, + MVPP2_PRS_LU_IP4, + MVPP2_PRS_LU_IP6, + MVPP2_PRS_LU_FLOWS, + MVPP2_PRS_LU_LAST, +}; + +struct mvpp2_prs_entry { + u32 index; + u32 tcam[MVPP2_PRS_TCAM_WORDS]; + u32 sram[MVPP2_PRS_SRAM_WORDS]; +}; + +struct mvpp2_prs_result_info { + u32 ri; + u32 ri_mask; +}; + +struct mvpp2_prs_shadow { + bool valid; + bool finish; + + /* Lookup ID */ + int lu; + + /* User defined offset */ + int udf; + + /* Result info */ + u32 ri; + u32 ri_mask; +}; + +int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv); + +int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe, + int tid); + +unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe); + +void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe, + unsigned int offs, unsigned char *byte, + unsigned char *enable); + +int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add); + +int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type); + +int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask); + +int mvpp2_prs_def_flow(struct mvpp2_port *port); + +void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port); + +void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port); + +int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid); + +void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid); + +void mvpp2_prs_vid_remove_all(struct mvpp2_port *port); + +void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, + enum mvpp2_prs_l2_cast l2_cast, bool add); + +void mvpp2_prs_mac_del_all(struct mvpp2_port *port); + +int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da); + +int mvpp2_prs_hits(struct mvpp2 *priv, int index); + +#endif diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_tai.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_tai.c new file mode 100644 index 000000000..95862aff4 --- /dev/null +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_tai.c @@ -0,0 +1,457 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Marvell PP2.2 TAI support + * + * Note: + * Do NOT use the event capture support. + * Do Not even set the MPP muxes to allow PTP_EVENT_REQ to be used. + * It will disrupt the operation of this driver, and there is nothing + * that this driver can do to prevent that. Even using PTP_EVENT_REQ + * as an output will be seen as a trigger input, which can't be masked. + * When ever a trigger input is seen, the action in the TCFCR0_TCF + * field will be performed - whether it is a set, increment, decrement + * read, or frequency update. + * + * Other notes (useful, not specified in the documentation): + * - PTP_PULSE_OUT (PTP_EVENT_REQ MPP) + * It looks like the hardware can't generate a pulse at nsec=0. (The + * output doesn't trigger if the nsec field is zero.) 
+ * Note: when configured as an output via the register at 0xfX441120, + * the input is still very much alive, and will trigger the current TCF + * function. + * - PTP_CLK_OUT (PTP_TRIG_GEN MPP) + * This generates a "PPS" signal determined by the CCC registers. It + * seems this is not aligned to the TOD counter in any way (it may be + * initially, but if you specify a non-round second interval, it won't, + * and you can't easily get it back.) + * - PTP_PCLK_OUT + * This generates a 50% duty cycle clock based on the TOD counter, and + * seems it can be set to any period of 1ns resolution. It is probably + * limited by the TOD step size. Its period is defined by the PCLK_CCC + * registers. Again, its alignment to the second is questionable. + * + * Consequently, we support none of these. + */ +#include <linux/io.h> +#include <linux/ptp_clock_kernel.h> +#include <linux/slab.h> + +#include "mvpp2.h" + +#define CR0_SW_NRESET BIT(0) + +#define TCFCR0_PHASE_UPDATE_ENABLE BIT(8) +#define TCFCR0_TCF_MASK (7 << 2) +#define TCFCR0_TCF_UPDATE (0 << 2) +#define TCFCR0_TCF_FREQUPDATE (1 << 2) +#define TCFCR0_TCF_INCREMENT (2 << 2) +#define TCFCR0_TCF_DECREMENT (3 << 2) +#define TCFCR0_TCF_CAPTURE (4 << 2) +#define TCFCR0_TCF_NOP (7 << 2) +#define TCFCR0_TCF_TRIGGER BIT(0) + +#define TCSR_CAPTURE_1_VALID BIT(1) +#define TCSR_CAPTURE_0_VALID BIT(0) + +struct mvpp2_tai { + struct ptp_clock_info caps; + struct ptp_clock *ptp_clock; + void __iomem *base; + spinlock_t lock; + u64 period; // nanosecond period in 32.32 fixed point + /* This timestamp is updated every two seconds */ + struct timespec64 stamp; +}; + +static void mvpp2_tai_modify(void __iomem *reg, u32 mask, u32 set) +{ + u32 val; + + val = readl_relaxed(reg) & ~mask; + val |= set & mask; + writel(val, reg); +} + +static void mvpp2_tai_write(u32 val, void __iomem *reg) +{ + writel_relaxed(val & 0xffff, reg); +} + +static u32 mvpp2_tai_read(void __iomem *reg) +{ + return readl_relaxed(reg) & 0xffff; +} + +static struct mvpp2_tai *ptp_to_tai(struct ptp_clock_info *ptp) +{ + return container_of(ptp, struct mvpp2_tai, caps); +} + +static void mvpp22_tai_read_ts(struct timespec64 *ts, void __iomem *base) +{ + ts->tv_sec = (u64)mvpp2_tai_read(base + 0) << 32 | + mvpp2_tai_read(base + 4) << 16 | + mvpp2_tai_read(base + 8); + + ts->tv_nsec = mvpp2_tai_read(base + 12) << 16 | + mvpp2_tai_read(base + 16); + + /* Read and discard fractional part */ + readl_relaxed(base + 20); + readl_relaxed(base + 24); +} + +static void mvpp2_tai_write_tlv(const struct timespec64 *ts, u32 frac, + void __iomem *base) +{ + mvpp2_tai_write(ts->tv_sec >> 32, base + MVPP22_TAI_TLV_SEC_HIGH); + mvpp2_tai_write(ts->tv_sec >> 16, base + MVPP22_TAI_TLV_SEC_MED); + mvpp2_tai_write(ts->tv_sec, base + MVPP22_TAI_TLV_SEC_LOW); + mvpp2_tai_write(ts->tv_nsec >> 16, base + MVPP22_TAI_TLV_NANO_HIGH); + mvpp2_tai_write(ts->tv_nsec, base + MVPP22_TAI_TLV_NANO_LOW); + mvpp2_tai_write(frac >> 16, base + MVPP22_TAI_TLV_FRAC_HIGH); + mvpp2_tai_write(frac, base + MVPP22_TAI_TLV_FRAC_LOW); +} + +static void mvpp2_tai_op(u32 op, void __iomem *base) +{ + /* Trigger the operation. Note that an external unmaskable + * event on PTP_EVENT_REQ will also trigger this action. + */ + mvpp2_tai_modify(base + MVPP22_TAI_TCFCR0, + TCFCR0_TCF_MASK | TCFCR0_TCF_TRIGGER, + op | TCFCR0_TCF_TRIGGER); + mvpp2_tai_modify(base + MVPP22_TAI_TCFCR0, TCFCR0_TCF_MASK, + TCFCR0_TCF_NOP); +} + +/* The adjustment has a range of +0.5ns to -0.5ns in 2^32 steps, so has units + * of 2^-32 ns. 
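 *
 * [Editor's worked example, added in this review - not part of the
 *  upstream patch:] with the fixed 3 ns nominal period this driver
 *  programs (tai->period = 3ULL << 32), a request of scaled_ppm = 65536
 *  (i.e. +1 ppm) makes mvpp22_calc_frac_ppm() below return 12884, which
 *  is roughly 3e-6 ns - 1 ppm of the 3 ns period - expressed in these
 *  2^-32 ns units.
 *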
+ * + * units(s) = 1 / (2^32 * 10^9) + * fractional = abs_scaled_ppm / (2^16 * 10^6) + * + * What we want to achieve: + * freq_adjusted = freq_nominal * (1 + fractional) + * freq_delta = freq_adjusted - freq_nominal => positive = faster + * freq_delta = freq_nominal * (1 + fractional) - freq_nominal + * So: freq_delta = freq_nominal * fractional + * + * However, we are dealing with periods, so: + * period_adjusted = period_nominal / (1 + fractional) + * period_delta = period_nominal - period_adjusted => positive = faster + * period_delta = period_nominal * fractional / (1 + fractional) + * + * Hence: + * period_delta = period_nominal * abs_scaled_ppm / + * (2^16 * 10^6 + abs_scaled_ppm) + * + * To avoid overflow, we reduce both sides of the divide operation by a factor + * of 16. + */ +static u64 mvpp22_calc_frac_ppm(struct mvpp2_tai *tai, long abs_scaled_ppm) +{ + u64 val = tai->period * abs_scaled_ppm >> 4; + + return div_u64(val, (1000000 << 12) + (abs_scaled_ppm >> 4)); +} + +static s32 mvpp22_calc_max_adj(struct mvpp2_tai *tai) +{ + return 1000000; +} + +static int mvpp22_tai_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct mvpp2_tai *tai = ptp_to_tai(ptp); + unsigned long flags; + void __iomem *base; + bool neg_adj; + s32 frac; + u64 val; + + neg_adj = scaled_ppm < 0; + if (neg_adj) + scaled_ppm = -scaled_ppm; + + val = mvpp22_calc_frac_ppm(tai, scaled_ppm); + + /* Convert to a signed 32-bit adjustment */ + if (neg_adj) { + /* -S32_MIN warns, -val < S32_MIN fails, so go for the easy + * solution. + */ + if (val > 0x80000000) + return -ERANGE; + + frac = -val; + } else { + if (val > S32_MAX) + return -ERANGE; + + frac = val; + } + + base = tai->base; + spin_lock_irqsave(&tai->lock, flags); + mvpp2_tai_write(frac >> 16, base + MVPP22_TAI_TLV_FRAC_HIGH); + mvpp2_tai_write(frac, base + MVPP22_TAI_TLV_FRAC_LOW); + mvpp2_tai_op(TCFCR0_TCF_FREQUPDATE, base); + spin_unlock_irqrestore(&tai->lock, flags); + + return 0; +} + +static int mvpp22_tai_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct mvpp2_tai *tai = ptp_to_tai(ptp); + struct timespec64 ts; + unsigned long flags; + void __iomem *base; + u32 tcf; + + /* We can't deal with S64_MIN */ + if (delta == S64_MIN) + return -ERANGE; + + if (delta < 0) { + delta = -delta; + tcf = TCFCR0_TCF_DECREMENT; + } else { + tcf = TCFCR0_TCF_INCREMENT; + } + + ts = ns_to_timespec64(delta); + + base = tai->base; + spin_lock_irqsave(&tai->lock, flags); + mvpp2_tai_write_tlv(&ts, 0, base); + mvpp2_tai_op(tcf, base); + spin_unlock_irqrestore(&tai->lock, flags); + + return 0; +} + +static int mvpp22_tai_gettimex64(struct ptp_clock_info *ptp, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct mvpp2_tai *tai = ptp_to_tai(ptp); + unsigned long flags; + void __iomem *base; + u32 tcsr; + int ret; + + base = tai->base; + spin_lock_irqsave(&tai->lock, flags); + /* XXX: the only way to read the PTP time is for the CPU to trigger + * an event. However, there is no way to distinguish between the CPU + * triggered event, and an external event on PTP_EVENT_REQ. So this + * is incompatible with external use of PTP_EVENT_REQ. 
+ */ + ptp_read_system_prets(sts); + mvpp2_tai_modify(base + MVPP22_TAI_TCFCR0, + TCFCR0_TCF_MASK | TCFCR0_TCF_TRIGGER, + TCFCR0_TCF_CAPTURE | TCFCR0_TCF_TRIGGER); + ptp_read_system_postts(sts); + mvpp2_tai_modify(base + MVPP22_TAI_TCFCR0, TCFCR0_TCF_MASK, + TCFCR0_TCF_NOP); + + tcsr = readl(base + MVPP22_TAI_TCSR); + if (tcsr & TCSR_CAPTURE_1_VALID) { + mvpp22_tai_read_ts(ts, base + MVPP22_TAI_TCV1_SEC_HIGH); + ret = 0; + } else if (tcsr & TCSR_CAPTURE_0_VALID) { + mvpp22_tai_read_ts(ts, base + MVPP22_TAI_TCV0_SEC_HIGH); + ret = 0; + } else { + /* We don't seem to have a reading... */ + ret = -EBUSY; + } + spin_unlock_irqrestore(&tai->lock, flags); + + return ret; +} + +static int mvpp22_tai_settime64(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct mvpp2_tai *tai = ptp_to_tai(ptp); + unsigned long flags; + void __iomem *base; + + base = tai->base; + spin_lock_irqsave(&tai->lock, flags); + mvpp2_tai_write_tlv(ts, 0, base); + + /* Trigger an update to load the value from the TLV registers + * into the TOD counter. Note that an external unmaskable event on + * PTP_EVENT_REQ will also trigger this action. + */ + mvpp2_tai_modify(base + MVPP22_TAI_TCFCR0, + TCFCR0_PHASE_UPDATE_ENABLE | + TCFCR0_TCF_MASK | TCFCR0_TCF_TRIGGER, + TCFCR0_TCF_UPDATE | TCFCR0_TCF_TRIGGER); + mvpp2_tai_modify(base + MVPP22_TAI_TCFCR0, TCFCR0_TCF_MASK, + TCFCR0_TCF_NOP); + spin_unlock_irqrestore(&tai->lock, flags); + + return 0; +} + +static long mvpp22_tai_aux_work(struct ptp_clock_info *ptp) +{ + struct mvpp2_tai *tai = ptp_to_tai(ptp); + + mvpp22_tai_gettimex64(ptp, &tai->stamp, NULL); + + return msecs_to_jiffies(2000); +} + +static void mvpp22_tai_set_step(struct mvpp2_tai *tai) +{ + void __iomem *base = tai->base; + u32 nano, frac; + + nano = upper_32_bits(tai->period); + frac = lower_32_bits(tai->period); + + /* As the fractional nanosecond is a signed offset, if the MSB (sign) + * bit is set, we have to increment the whole nanoseconds. + */ + if (frac >= 0x80000000) + nano += 1; + + mvpp2_tai_write(nano, base + MVPP22_TAI_TOD_STEP_NANO_CR); + mvpp2_tai_write(frac >> 16, base + MVPP22_TAI_TOD_STEP_FRAC_HIGH); + mvpp2_tai_write(frac, base + MVPP22_TAI_TOD_STEP_FRAC_LOW); +} + +static void mvpp22_tai_init(struct mvpp2_tai *tai) +{ + void __iomem *base = tai->base; + + mvpp22_tai_set_step(tai); + + /* Release the TAI reset */ + mvpp2_tai_modify(base + MVPP22_TAI_CR0, CR0_SW_NRESET, CR0_SW_NRESET); +} + +int mvpp22_tai_ptp_clock_index(struct mvpp2_tai *tai) +{ + return ptp_clock_index(tai->ptp_clock); +} + +void mvpp22_tai_tstamp(struct mvpp2_tai *tai, u32 tstamp, + struct skb_shared_hwtstamps *hwtstamp) +{ + struct timespec64 ts; + int delta; + + /* The tstamp consists of 2 bits of seconds and 30 bits of nanoseconds. + * We use our stored timestamp (tai->stamp) to form a full timestamp, + * and we must read the seconds exactly once. + */ + ts.tv_sec = READ_ONCE(tai->stamp.tv_sec); + ts.tv_nsec = tstamp & 0x3fffffff; + + /* Calculate the delta in seconds between our stored timestamp and + * the value read from the queue. Allow timestamps one second in the + * past, otherwise consider them to be in the future. 
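 *
 * [Editor's example, added in this review - not part of the upstream
 *  patch:] if the stored stamp has (tv_sec & 3) == 2 and the queue
 *  timestamp carries seconds bits 01b, the code below computes
 *  delta = (1 - 2) & 3 = 3, folds it to -1 and treats the packet as one
 *  second older than the stored stamp; seconds bits 11b would instead
 *  give delta = +1, i.e. one second newer.
 *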
+ */ + delta = ((tstamp >> 30) - (ts.tv_sec & 3)) & 3; + if (delta == 3) + delta -= 4; + ts.tv_sec += delta; + + memset(hwtstamp, 0, sizeof(*hwtstamp)); + hwtstamp->hwtstamp = timespec64_to_ktime(ts); +} + +void mvpp22_tai_start(struct mvpp2_tai *tai) +{ + long delay; + + delay = mvpp22_tai_aux_work(&tai->caps); + + ptp_schedule_worker(tai->ptp_clock, delay); +} + +void mvpp22_tai_stop(struct mvpp2_tai *tai) +{ + ptp_cancel_worker_sync(tai->ptp_clock); +} + +static void mvpp22_tai_remove(void *priv) +{ + struct mvpp2_tai *tai = priv; + + if (!IS_ERR(tai->ptp_clock)) + ptp_clock_unregister(tai->ptp_clock); +} + +int mvpp22_tai_probe(struct device *dev, struct mvpp2 *priv) +{ + struct mvpp2_tai *tai; + int ret; + + tai = devm_kzalloc(dev, sizeof(*tai), GFP_KERNEL); + if (!tai) + return -ENOMEM; + + spin_lock_init(&tai->lock); + + tai->base = priv->iface_base; + + /* The step size consists of three registers - a 16-bit nanosecond step + * size, and a 32-bit fractional nanosecond step size split over two + * registers. The fractional nanosecond step size has units of 2^-32ns. + * + * To calculate this, we calculate: + * (10^9 + freq / 2) / (freq * 2^-32) + * which gives us the nanosecond step to the nearest integer in 16.32 + * fixed point format, and the fractional part of the step size with + * the MSB inverted. With rounding of the fractional nanosecond, and + * simplification, this becomes: + * (10^9 << 32 + freq << 31 + (freq + 1) >> 1) / freq + * + * So: + * div = (10^9 << 32 + freq << 31 + (freq + 1) >> 1) / freq + * nano = upper_32_bits(div); + * frac = lower_32_bits(div) ^ 0x80000000; + * Will give the values for the registers. + * + * This is all seems perfect, but alas it is not when considering the + * whole story. The system is clocked from 25MHz, which is multiplied + * by a PLL to 1GHz, and then divided by three, giving 333333333Hz + * (recurring). This gives exactly 3ns, but using 333333333Hz with + * the above gives an error of 13*2^-32ns. + * + * Consequently, we use the period rather than calculating from the + * frequency. + */ + tai->period = 3ULL << 32; + + mvpp22_tai_init(tai); + + tai->caps.owner = THIS_MODULE; + strscpy(tai->caps.name, "Marvell PP2.2", sizeof(tai->caps.name)); + tai->caps.max_adj = mvpp22_calc_max_adj(tai); + tai->caps.adjfine = mvpp22_tai_adjfine; + tai->caps.adjtime = mvpp22_tai_adjtime; + tai->caps.gettimex64 = mvpp22_tai_gettimex64; + tai->caps.settime64 = mvpp22_tai_settime64; + tai->caps.do_aux_work = mvpp22_tai_aux_work; + + ret = devm_add_action(dev, mvpp22_tai_remove, tai); + if (ret) + return ret; + + tai->ptp_clock = ptp_clock_register(&tai->caps, dev); + if (IS_ERR(tai->ptp_clock)) + return PTR_ERR(tai->ptp_clock); + + priv->tai = tai; + + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig new file mode 100644 index 000000000..543a1d047 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig @@ -0,0 +1,41 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Marvell OcteonTX2 drivers configuration +# + +config OCTEONTX2_MBOX + tristate + +config OCTEONTX2_AF + tristate "Marvell OcteonTX2 RVU Admin Function driver" + select OCTEONTX2_MBOX + depends on (64BIT && COMPILE_TEST) || ARM64 + depends on PCI + help + This driver supports Marvell's OcteonTX2 Resource Virtualization + Unit's admin function manager which manages all RVU HW resources + and provides a medium to other PF/VFs to configure HW. Should be + enabled for other RVU device drivers to work. 
+ +config NDC_DIS_DYNAMIC_CACHING + bool "Disable caching of dynamic entries in NDC" + depends on OCTEONTX2_AF + default n + help + This config option disables caching of dynamic entries such as NIX SQEs, + NPA stack pages, etc. in NDC. Also locks down NIX SQ/CQ/RQ/RSS and + NPA Aura/Pool contexts. + +config OCTEONTX2_PF + tristate "Marvell OcteonTX2 NIC Physical Function driver" + select OCTEONTX2_MBOX + depends on (64BIT && COMPILE_TEST) || ARM64 + depends on PCI + help + This driver supports Marvell's OcteonTX2 NIC physical function. + +config OCTEONTX2_VF + tristate "Marvell OcteonTX2 NIC Virtual Function driver" + depends on OCTEONTX2_PF + help + This driver supports Marvell's OcteonTX2 NIC virtual function. diff --git a/drivers/net/ethernet/marvell/octeontx2/Makefile b/drivers/net/ethernet/marvell/octeontx2/Makefile new file mode 100644 index 000000000..0064a69e0 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for Marvell OcteonTX2 device drivers. +# + +obj-$(CONFIG_OCTEONTX2_MBOX) += af/ +obj-$(CONFIG_OCTEONTX2_AF) += af/ +obj-$(CONFIG_OCTEONTX2_PF) += nic/ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile new file mode 100644 index 000000000..2f7a861d0 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for Marvell's OcteonTX2 RVU Admin Function driver +# + +ccflags-y += -I$(src) +obj-$(CONFIG_OCTEONTX2_MBOX) += octeontx2_mbox.o +obj-$(CONFIG_OCTEONTX2_AF) += octeontx2_af.o + +octeontx2_mbox-y := mbox.o rvu_trace.o +octeontx2_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \ + rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c new file mode 100644 index 000000000..c0a0a3127 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -0,0 +1,1027 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 CGX driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation.
+ */ + +#include <linux/acpi.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/phy.h> +#include <linux/of.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> + +#include "cgx.h" + +#define DRV_NAME "octeontx2-cgx" +#define DRV_STRING "Marvell OcteonTX2 CGX/MAC Driver" + +/** + * struct lmac + * @wq_cmd_cmplt: waitq to keep the process blocked until cmd completion + * @cmd_lock: Lock to serialize the command interface + * @resp: command response + * @link_info: link related information + * @event_cb: callback for linkchange events + * @event_cb_lock: lock for serializing callback with unregister + * @cmd_pend: flag set before new command is started + * flag cleared after command response is received + * @cgx: parent cgx port + * @lmac_id: lmac port id + * @name: lmac port name + */ +struct lmac { + wait_queue_head_t wq_cmd_cmplt; + struct mutex cmd_lock; + u64 resp; + struct cgx_link_user_info link_info; + struct cgx_event_cb event_cb; + spinlock_t event_cb_lock; + bool cmd_pend; + struct cgx *cgx; + u8 lmac_id; + char *name; +}; + +struct cgx { + void __iomem *reg_base; + struct pci_dev *pdev; + u8 cgx_id; + u8 lmac_count; + struct lmac *lmac_idmap[MAX_LMAC_PER_CGX]; + struct work_struct cgx_cmd_work; + struct workqueue_struct *cgx_cmd_workq; + struct list_head cgx_list; +}; + +static LIST_HEAD(cgx_list); + +/* Convert firmware speed encoding to user format(Mbps) */ +static u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX]; + +/* Convert firmware lmac type encoding to string */ +static char *cgx_lmactype_string[LMAC_MODE_MAX]; + +/* CGX PHY management internal APIs */ +static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en); + +/* Supported devices */ +static const struct pci_device_id cgx_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) }, + { 0, } /* end of table */ +}; + +MODULE_DEVICE_TABLE(pci, cgx_id_table); + +static void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val) +{ + writeq(val, cgx->reg_base + (lmac << 18) + offset); +} + +static u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset) +{ + return readq(cgx->reg_base + (lmac << 18) + offset); +} + +static inline struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx) +{ + if (!cgx || lmac_id >= MAX_LMAC_PER_CGX) + return NULL; + + return cgx->lmac_idmap[lmac_id]; +} + +int cgx_get_cgxcnt_max(void) +{ + struct cgx *cgx_dev; + int idmax = -ENODEV; + + list_for_each_entry(cgx_dev, &cgx_list, cgx_list) + if (cgx_dev->cgx_id > idmax) + idmax = cgx_dev->cgx_id; + + if (idmax < 0) + return 0; + + return idmax + 1; +} + +int cgx_get_lmac_cnt(void *cgxd) +{ + struct cgx *cgx = cgxd; + + if (!cgx) + return -ENODEV; + + return cgx->lmac_count; +} + +void *cgx_get_pdata(int cgx_id) +{ + struct cgx *cgx_dev; + + list_for_each_entry(cgx_dev, &cgx_list, cgx_list) { + if (cgx_dev->cgx_id == cgx_id) + return cgx_dev; + } + return NULL; +} + +int cgx_get_cgxid(void *cgxd) +{ + struct cgx *cgx = cgxd; + + if (!cgx) + return -EINVAL; + + return cgx->cgx_id; +} + +u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id) +{ + struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + u64 cfg; + + cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_CFG); + + return (cfg & CMR_P2X_SEL_MASK) >> CMR_P2X_SEL_SHIFT; +} + +/* Ensure the required lock for event queue(where asynchronous events are + * posted) is acquired before calling this API. 
Else an asynchronous event(with + * latest link status) can reach the destination before this function returns + * and could make the link status appear wrong. + */ +int cgx_get_link_info(void *cgxd, int lmac_id, + struct cgx_link_user_info *linfo) +{ + struct lmac *lmac = lmac_pdata(lmac_id, cgxd); + + if (!lmac) + return -ENODEV; + + *linfo = lmac->link_info; + return 0; +} + +static u64 mac2u64 (u8 *mac_addr) +{ + u64 mac = 0; + int index; + + for (index = ETH_ALEN - 1; index >= 0; index--) + mac |= ((u64)*mac_addr++) << (8 * index); + return mac; +} + +int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr) +{ + struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + u64 cfg; + + /* copy 6bytes from macaddr */ + /* memcpy(&cfg, mac_addr, 6); */ + + cfg = mac2u64 (mac_addr); + + cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (lmac_id * 0x8)), + cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49)); + + cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); + cfg |= CGX_DMAC_CTL0_CAM_ENABLE; + cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); + + return 0; +} + +u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id) +{ + struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + u64 cfg; + + cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8); + return cfg & CGX_RX_DMAC_ADR_MASK; +} + +int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind) +{ + struct cgx *cgx = cgxd; + + if (!cgx || lmac_id >= cgx->lmac_count) + return -ENODEV; + + cgx_write(cgx, lmac_id, CGXX_CMRX_RX_ID_MAP, (pkind & 0x3F)); + return 0; +} + +static inline u8 cgx_get_lmac_type(struct cgx *cgx, int lmac_id) +{ + u64 cfg; + + cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG); + return (cfg >> CGX_LMAC_TYPE_SHIFT) & CGX_LMAC_TYPE_MASK; +} + +/* Configure CGX LMAC in internal loopback mode */ +int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable) +{ + struct cgx *cgx = cgxd; + u8 lmac_type; + u64 cfg; + + if (!cgx || lmac_id >= cgx->lmac_count) + return -ENODEV; + + lmac_type = cgx_get_lmac_type(cgx, lmac_id); + if (lmac_type == LMAC_MODE_SGMII || lmac_type == LMAC_MODE_QSGMII) { + cfg = cgx_read(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL); + if (enable) + cfg |= CGXX_GMP_PCS_MRX_CTL_LBK; + else + cfg &= ~CGXX_GMP_PCS_MRX_CTL_LBK; + cgx_write(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL, cfg); + } else { + cfg = cgx_read(cgx, lmac_id, CGXX_SPUX_CONTROL1); + if (enable) + cfg |= CGXX_SPUX_CONTROL1_LBK; + else + cfg &= ~CGXX_SPUX_CONTROL1_LBK; + cgx_write(cgx, lmac_id, CGXX_SPUX_CONTROL1, cfg); + } + return 0; +} + +void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable) +{ + struct cgx *cgx = cgx_get_pdata(cgx_id); + u64 cfg = 0; + + if (!cgx) + return; + + if (enable) { + /* Enable promiscuous mode on LMAC */ + cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); + cfg &= ~(CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE); + cfg |= CGX_DMAC_BCAST_MODE; + cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); + + cfg = cgx_read(cgx, 0, + (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8)); + cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE; + cgx_write(cgx, 0, + (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg); + } else { + /* Disable promiscuous mode */ + cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); + cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE; + cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); + cfg = cgx_read(cgx, 0, + (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8)); + cfg |= CGX_DMAC_CAM_ADDR_ENABLE; + cgx_write(cgx, 0, + (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg); + } +} + +/* Enable or disable forwarding received pause frames to Tx 
block */ +void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable) +{ + struct cgx *cgx = cgxd; + u64 cfg; + + if (!cgx) + return; + + if (enable) { + cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL); + cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK; + cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg); + + cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL); + cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK; + cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg); + } else { + cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL); + cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK; + cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg); + + cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL); + cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK; + cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg); + } +} + +int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat) +{ + struct cgx *cgx = cgxd; + + if (!cgx || lmac_id >= cgx->lmac_count) + return -ENODEV; + *rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8)); + return 0; +} + +int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat) +{ + struct cgx *cgx = cgxd; + + if (!cgx || lmac_id >= cgx->lmac_count) + return -ENODEV; + *tx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (idx * 8)); + return 0; +} + +int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable) +{ + struct cgx *cgx = cgxd; + u64 cfg; + + if (!cgx || lmac_id >= cgx->lmac_count) + return -ENODEV; + + cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG); + if (enable) + cfg |= DATA_PKT_RX_EN | DATA_PKT_TX_EN; + else + cfg &= ~(DATA_PKT_RX_EN | DATA_PKT_TX_EN); + cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg); + return 0; +} + +int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable) +{ + struct cgx *cgx = cgxd; + u64 cfg, last; + + if (!cgx || lmac_id >= cgx->lmac_count) + return -ENODEV; + + cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG); + last = cfg; + if (enable) + cfg |= DATA_PKT_TX_EN; + else + cfg &= ~DATA_PKT_TX_EN; + + if (cfg != last) + cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg); + return !!(last & DATA_PKT_TX_EN); +} + +int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id, + u8 *tx_pause, u8 *rx_pause) +{ + struct cgx *cgx = cgxd; + u64 cfg; + + if (!cgx || lmac_id >= cgx->lmac_count) + return -ENODEV; + + cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL); + *rx_pause = !!(cfg & CGX_SMUX_RX_FRM_CTL_CTL_BCK); + + cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL); + *tx_pause = !!(cfg & CGX_SMUX_TX_CTL_L2P_BP_CONV); + return 0; +} + +int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id, + u8 tx_pause, u8 rx_pause) +{ + struct cgx *cgx = cgxd; + u64 cfg; + + if (!cgx || lmac_id >= cgx->lmac_count) + return -ENODEV; + + cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL); + cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK; + cfg |= rx_pause ? CGX_SMUX_RX_FRM_CTL_CTL_BCK : 0x0; + cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg); + + cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL); + cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV; + cfg |= tx_pause ? 
CGX_SMUX_TX_CTL_L2P_BP_CONV : 0x0; + cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg); + + cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP); + if (tx_pause) { + cfg &= ~CGX_CMR_RX_OVR_BP_EN(lmac_id); + } else { + cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id); + cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id); + } + cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg); + return 0; +} + +static void cgx_lmac_pause_frm_config(struct cgx *cgx, int lmac_id, bool enable) +{ + u64 cfg; + + if (!cgx || lmac_id >= cgx->lmac_count) + return; + if (enable) { + /* Enable receive pause frames */ + cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL); + cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK; + cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg); + + cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL); + cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK; + cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg); + + /* Enable pause frames transmission */ + cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL); + cfg |= CGX_SMUX_TX_CTL_L2P_BP_CONV; + cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg); + + /* Set pause time and interval */ + cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_TIME, + DEFAULT_PAUSE_TIME); + cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL); + cfg &= ~0xFFFFULL; + cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL, + cfg | (DEFAULT_PAUSE_TIME / 2)); + + cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_TIME, + DEFAULT_PAUSE_TIME); + + cfg = cgx_read(cgx, lmac_id, + CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL); + cfg &= ~0xFFFFULL; + cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL, + cfg | (DEFAULT_PAUSE_TIME / 2)); + } else { + /* ALL pause frames received are completely ignored */ + cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL); + cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK; + cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg); + + cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL); + cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK; + cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg); + + /* Disable pause frames transmission */ + cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL); + cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV; + cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg); + } +} + +void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable) +{ + struct cgx *cgx = cgxd; + u64 cfg; + + if (!cgx) + return; + + if (enable) { + /* Enable inbound PTP timestamping */ + cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL); + cfg |= CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE; + cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg); + + cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL); + cfg |= CGX_SMUX_RX_FRM_CTL_PTP_MODE; + cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg); + } else { + /* Disable inbound PTP stamping */ + cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL); + cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE; + cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg); + + cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL); + cfg &= ~CGX_SMUX_RX_FRM_CTL_PTP_MODE; + cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg); + } +} + +/* CGX Firmware interface low level support */ +static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac) +{ + struct cgx *cgx = lmac->cgx; + struct device *dev; + int err = 0; + u64 cmd; + + /* Ensure no other command is in progress */ + err = mutex_lock_interruptible(&lmac->cmd_lock); + if (err) + return err; + + /* Ensure command register is free */ + cmd = cgx_read(cgx, lmac->lmac_id, CGX_COMMAND_REG); + if (FIELD_GET(CMDREG_OWN, cmd) != CGX_CMD_OWN_NS) { + err = -EBUSY; + goto unlock; + } + + /* 
Update ownership in command request */ + req = FIELD_SET(CMDREG_OWN, CGX_CMD_OWN_FIRMWARE, req); + + /* Mark this lmac as pending, before we start */ + lmac->cmd_pend = true; + + /* Start command in hardware */ + cgx_write(cgx, lmac->lmac_id, CGX_COMMAND_REG, req); + + /* Ensure command is completed without errors */ + if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend, + msecs_to_jiffies(CGX_CMD_TIMEOUT))) { + dev = &cgx->pdev->dev; + dev_err(dev, "cgx port %d:%d cmd timeout\n", + cgx->cgx_id, lmac->lmac_id); + err = -EIO; + goto unlock; + } + + /* we have a valid command response */ + smp_rmb(); /* Ensure the latest updates are visible */ + *resp = lmac->resp; + +unlock: + mutex_unlock(&lmac->cmd_lock); + + return err; +} + +static inline int cgx_fwi_cmd_generic(u64 req, u64 *resp, + struct cgx *cgx, int lmac_id) +{ + struct lmac *lmac; + int err; + + lmac = lmac_pdata(lmac_id, cgx); + if (!lmac) + return -ENODEV; + + err = cgx_fwi_cmd_send(req, resp, lmac); + + /* Check for valid response */ + if (!err) { + if (FIELD_GET(EVTREG_STAT, *resp) == CGX_STAT_FAIL) + return -EIO; + else + return 0; + } + + return err; +} + +static inline void cgx_link_usertable_init(void) +{ + cgx_speed_mbps[CGX_LINK_NONE] = 0; + cgx_speed_mbps[CGX_LINK_10M] = 10; + cgx_speed_mbps[CGX_LINK_100M] = 100; + cgx_speed_mbps[CGX_LINK_1G] = 1000; + cgx_speed_mbps[CGX_LINK_2HG] = 2500; + cgx_speed_mbps[CGX_LINK_5G] = 5000; + cgx_speed_mbps[CGX_LINK_10G] = 10000; + cgx_speed_mbps[CGX_LINK_20G] = 20000; + cgx_speed_mbps[CGX_LINK_25G] = 25000; + cgx_speed_mbps[CGX_LINK_40G] = 40000; + cgx_speed_mbps[CGX_LINK_50G] = 50000; + cgx_speed_mbps[CGX_LINK_100G] = 100000; + + cgx_lmactype_string[LMAC_MODE_SGMII] = "SGMII"; + cgx_lmactype_string[LMAC_MODE_XAUI] = "XAUI"; + cgx_lmactype_string[LMAC_MODE_RXAUI] = "RXAUI"; + cgx_lmactype_string[LMAC_MODE_10G_R] = "10G_R"; + cgx_lmactype_string[LMAC_MODE_40G_R] = "40G_R"; + cgx_lmactype_string[LMAC_MODE_QSGMII] = "QSGMII"; + cgx_lmactype_string[LMAC_MODE_25G_R] = "25G_R"; + cgx_lmactype_string[LMAC_MODE_50G_R] = "50G_R"; + cgx_lmactype_string[LMAC_MODE_100G_R] = "100G_R"; + cgx_lmactype_string[LMAC_MODE_USXGMII] = "USXGMII"; +} + +static inline void link_status_user_format(u64 lstat, + struct cgx_link_user_info *linfo, + struct cgx *cgx, u8 lmac_id) +{ + char *lmac_string; + + linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat); + linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat); + linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)]; + linfo->lmac_type_id = cgx_get_lmac_type(cgx, lmac_id); + lmac_string = cgx_lmactype_string[linfo->lmac_type_id]; + strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1); +} + +/* Hardware event handlers */ +static inline void cgx_link_change_handler(u64 lstat, + struct lmac *lmac) +{ + struct cgx_link_user_info *linfo; + struct cgx *cgx = lmac->cgx; + struct cgx_link_event event; + struct device *dev; + int err_type; + + dev = &cgx->pdev->dev; + + link_status_user_format(lstat, &event.link_uinfo, cgx, lmac->lmac_id); + err_type = FIELD_GET(RESP_LINKSTAT_ERRTYPE, lstat); + + event.cgx_id = cgx->cgx_id; + event.lmac_id = lmac->lmac_id; + + /* update the local copy of link status */ + lmac->link_info = event.link_uinfo; + linfo = &lmac->link_info; + + /* Ensure callback doesn't get unregistered until we finish it */ + spin_lock(&lmac->event_cb_lock); + + if (!lmac->event_cb.notify_link_chg) { + dev_dbg(dev, "cgx port %d:%d Link change handler null", + cgx->cgx_id, lmac->lmac_id); + if (err_type != CGX_ERR_NONE) 
{ + dev_err(dev, "cgx port %d:%d Link error %d\n", + cgx->cgx_id, lmac->lmac_id, err_type); + } + dev_info(dev, "cgx port %d:%d Link is %s %d Mbps\n", + cgx->cgx_id, lmac->lmac_id, + linfo->link_up ? "UP" : "DOWN", linfo->speed); + goto err; + } + + if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data)) + dev_err(dev, "event notification failure\n"); +err: + spin_unlock(&lmac->event_cb_lock); +} + +static inline bool cgx_cmdresp_is_linkevent(u64 event) +{ + u8 id; + + id = FIELD_GET(EVTREG_ID, event); + if (id == CGX_CMD_LINK_BRING_UP || + id == CGX_CMD_LINK_BRING_DOWN) + return true; + else + return false; +} + +static inline bool cgx_event_is_linkevent(u64 event) +{ + if (FIELD_GET(EVTREG_ID, event) == CGX_EVT_LINK_CHANGE) + return true; + else + return false; +} + +static irqreturn_t cgx_fwi_event_handler(int irq, void *data) +{ + struct lmac *lmac = data; + struct cgx *cgx; + u64 event; + + cgx = lmac->cgx; + + event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG); + + if (!FIELD_GET(EVTREG_ACK, event)) + return IRQ_NONE; + + switch (FIELD_GET(EVTREG_EVT_TYPE, event)) { + case CGX_EVT_CMD_RESP: + /* Copy the response. Since only one command is active at a + * time, there is no way a response can get overwritten + */ + lmac->resp = event; + /* Ensure response is updated before thread context starts */ + smp_wmb(); + + /* There wont be separate events for link change initiated from + * software; Hence report the command responses as events + */ + if (cgx_cmdresp_is_linkevent(event)) + cgx_link_change_handler(event, lmac); + + /* Release thread waiting for completion */ + lmac->cmd_pend = false; + wake_up_interruptible(&lmac->wq_cmd_cmplt); + break; + case CGX_EVT_ASYNC: + if (cgx_event_is_linkevent(event)) + cgx_link_change_handler(event, lmac); + break; + } + + /* Any new event or command response will be posted by firmware + * only after the current status is acked. + * Ack the interrupt register as well. 
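+ * Writing zero to CGX_EVENT_REG clears EVTREG_ACK so firmware can post
+ * the next event; writing FW_CGX_INT acks the CMRX interrupt line.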
+ */ + cgx_write(lmac->cgx, lmac->lmac_id, CGX_EVENT_REG, 0); + cgx_write(lmac->cgx, lmac->lmac_id, CGXX_CMRX_INT, FW_CGX_INT); + + return IRQ_HANDLED; +} + +/* APIs for PHY management using CGX firmware interface */ + +/* callback registration for hardware events like link change */ +int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id) +{ + struct cgx *cgx = cgxd; + struct lmac *lmac; + + lmac = lmac_pdata(lmac_id, cgx); + if (!lmac) + return -ENODEV; + + lmac->event_cb = *cb; + + return 0; +} + +int cgx_lmac_evh_unregister(void *cgxd, int lmac_id) +{ + struct lmac *lmac; + unsigned long flags; + struct cgx *cgx = cgxd; + + lmac = lmac_pdata(lmac_id, cgx); + if (!lmac) + return -ENODEV; + + spin_lock_irqsave(&lmac->event_cb_lock, flags); + lmac->event_cb.notify_link_chg = NULL; + lmac->event_cb.data = NULL; + spin_unlock_irqrestore(&lmac->event_cb_lock, flags); + + return 0; +} + +int cgx_get_fwdata_base(u64 *base) +{ + u64 req = 0, resp; + struct cgx *cgx; + int err; + + cgx = list_first_entry_or_null(&cgx_list, struct cgx, cgx_list); + if (!cgx) + return -ENXIO; + + req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FWD_BASE, req); + err = cgx_fwi_cmd_generic(req, &resp, cgx, 0); + if (!err) + *base = FIELD_GET(RESP_FWD_BASE, resp); + + return err; +} + +static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable) +{ + u64 req = 0; + u64 resp; + + if (enable) + req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_UP, req); + else + req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req); + + return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id); +} + +static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx) +{ + u64 req = 0; + + req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req); + return cgx_fwi_cmd_generic(req, resp, cgx, 0); +} + +static int cgx_lmac_verify_fwi_version(struct cgx *cgx) +{ + struct device *dev = &cgx->pdev->dev; + int major_ver, minor_ver; + u64 resp; + int err; + + if (!cgx->lmac_count) + return 0; + + err = cgx_fwi_read_version(&resp, cgx); + if (err) + return err; + + major_ver = FIELD_GET(RESP_MAJOR_VER, resp); + minor_ver = FIELD_GET(RESP_MINOR_VER, resp); + dev_dbg(dev, "Firmware command interface version = %d.%d\n", + major_ver, minor_ver); + if (major_ver != CGX_FIRMWARE_MAJOR_VER) + return -EIO; + else + return 0; +} + +static void cgx_lmac_linkup_work(struct work_struct *work) +{ + struct cgx *cgx = container_of(work, struct cgx, cgx_cmd_work); + struct device *dev = &cgx->pdev->dev; + int i, err; + + /* Do Link up for all the lmacs */ + for (i = 0; i < cgx->lmac_count; i++) { + err = cgx_fwi_link_change(cgx, i, true); + if (err) + dev_info(dev, "cgx port %d:%d Link up command failed\n", + cgx->cgx_id, i); + } +} + +int cgx_lmac_linkup_start(void *cgxd) +{ + struct cgx *cgx = cgxd; + + if (!cgx) + return -ENODEV; + + queue_work(cgx->cgx_cmd_workq, &cgx->cgx_cmd_work); + + return 0; +} + +static int cgx_lmac_init(struct cgx *cgx) +{ + struct lmac *lmac; + int i, err; + + cgx->lmac_count = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7; + if (cgx->lmac_count > MAX_LMAC_PER_CGX) + cgx->lmac_count = MAX_LMAC_PER_CGX; + + for (i = 0; i < cgx->lmac_count; i++) { + lmac = kcalloc(1, sizeof(struct lmac), GFP_KERNEL); + if (!lmac) + return -ENOMEM; + lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL); + if (!lmac->name) { + err = -ENOMEM; + goto err_lmac_free; + } + sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i); + lmac->lmac_id = i; + lmac->cgx = cgx; + init_waitqueue_head(&lmac->wq_cmd_cmplt); + mutex_init(&lmac->cmd_lock); 
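+ /* event_cb_lock serializes the link-change callback with unregister */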
+ spin_lock_init(&lmac->event_cb_lock); + err = request_irq(pci_irq_vector(cgx->pdev, + CGX_LMAC_FWI + i * 9), + cgx_fwi_event_handler, 0, lmac->name, lmac); + if (err) + goto err_irq; + + /* Enable interrupt */ + cgx_write(cgx, lmac->lmac_id, CGXX_CMRX_INT_ENA_W1S, + FW_CGX_INT); + + /* Add reference */ + cgx->lmac_idmap[i] = lmac; + cgx_lmac_pause_frm_config(cgx, i, true); + } + + return cgx_lmac_verify_fwi_version(cgx); + +err_irq: + kfree(lmac->name); +err_lmac_free: + kfree(lmac); + return err; +} + +static int cgx_lmac_exit(struct cgx *cgx) +{ + struct lmac *lmac; + int i; + + if (cgx->cgx_cmd_workq) { + flush_workqueue(cgx->cgx_cmd_workq); + destroy_workqueue(cgx->cgx_cmd_workq); + cgx->cgx_cmd_workq = NULL; + } + + /* Free all lmac related resources */ + for (i = 0; i < cgx->lmac_count; i++) { + cgx_lmac_pause_frm_config(cgx, i, false); + lmac = cgx->lmac_idmap[i]; + if (!lmac) + continue; + free_irq(pci_irq_vector(cgx->pdev, CGX_LMAC_FWI + i * 9), lmac); + kfree(lmac->name); + kfree(lmac); + } + + return 0; +} + +static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct device *dev = &pdev->dev; + struct cgx *cgx; + int err, nvec; + + cgx = devm_kzalloc(dev, sizeof(*cgx), GFP_KERNEL); + if (!cgx) + return -ENOMEM; + cgx->pdev = pdev; + + pci_set_drvdata(pdev, cgx); + + err = pci_enable_device(pdev); + if (err) { + dev_err(dev, "Failed to enable PCI device\n"); + pci_set_drvdata(pdev, NULL); + return err; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + dev_err(dev, "PCI request regions failed 0x%x\n", err); + goto err_disable_device; + } + + /* MAP configuration registers */ + cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); + if (!cgx->reg_base) { + dev_err(dev, "CGX: Cannot map CSR memory space, aborting\n"); + err = -ENOMEM; + goto err_release_regions; + } + + nvec = CGX_NVEC; + err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX); + if (err < 0 || err != nvec) { + dev_err(dev, "Request for %d msix vectors failed, err %d\n", + nvec, err); + goto err_release_regions; + } + + cgx->cgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) + & CGX_ID_MASK; + + /* init wq for processing linkup requests */ + INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work); + cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", 0, 0); + if (!cgx->cgx_cmd_workq) { + dev_err(dev, "alloc workqueue failed for cgx cmd"); + err = -ENOMEM; + goto err_free_irq_vectors; + } + + list_add(&cgx->cgx_list, &cgx_list); + + cgx_link_usertable_init(); + + err = cgx_lmac_init(cgx); + if (err) + goto err_release_lmac; + + return 0; + +err_release_lmac: + cgx_lmac_exit(cgx); + list_del(&cgx->cgx_list); +err_free_irq_vectors: + pci_free_irq_vectors(pdev); +err_release_regions: + pci_release_regions(pdev); +err_disable_device: + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + return err; +} + +static void cgx_remove(struct pci_dev *pdev) +{ + struct cgx *cgx = pci_get_drvdata(pdev); + + cgx_lmac_exit(cgx); + list_del(&cgx->cgx_list); + pci_free_irq_vectors(pdev); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); +} + +struct pci_driver cgx_driver = { + .name = DRV_NAME, + .id_table = cgx_id_table, + .probe = cgx_probe, + .remove = cgx_remove, +}; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h new file mode 100644 index 000000000..e176a6c65 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h @@ -0,0 +1,150 @@ +/* 
SPDX-License-Identifier: GPL-2.0 */ +/* Marvell OcteonTx2 CGX driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef CGX_H +#define CGX_H + +#include "mbox.h" +#include "cgx_fw_if.h" + + /* PCI device IDs */ +#define PCI_DEVID_OCTEONTX2_CGX 0xA059 + +/* PCI BAR nos */ +#define PCI_CFG_REG_BAR_NUM 0 + +#define CGX_ID_MASK 0x7 +#define MAX_LMAC_PER_CGX 4 +#define CGX_FIFO_LEN 65536 /* 64K for both Rx & Tx */ +#define CGX_OFFSET(x) ((x) * MAX_LMAC_PER_CGX) + +/* Registers */ +#define CGXX_CMRX_CFG 0x00 +#define CMR_P2X_SEL_MASK GENMASK_ULL(61, 59) +#define CMR_P2X_SEL_SHIFT 59ULL +#define CMR_P2X_SEL_NIX0 1ULL +#define CMR_P2X_SEL_NIX1 2ULL +#define DATA_PKT_TX_EN BIT_ULL(53) +#define DATA_PKT_RX_EN BIT_ULL(54) +#define CGX_LMAC_TYPE_SHIFT 40 +#define CGX_LMAC_TYPE_MASK 0xF +#define CGXX_CMRX_INT 0x040 +#define FW_CGX_INT BIT_ULL(1) +#define CGXX_CMRX_INT_ENA_W1S 0x058 +#define CGXX_CMRX_RX_ID_MAP 0x060 +#define CGXX_CMRX_RX_STAT0 0x070 +#define CGXX_CMRX_RX_LMACS 0x128 +#define CGXX_CMRX_RX_DMAC_CTL0 0x1F8 +#define CGX_DMAC_CTL0_CAM_ENABLE BIT_ULL(3) +#define CGX_DMAC_CAM_ACCEPT BIT_ULL(3) +#define CGX_DMAC_MCAST_MODE BIT_ULL(1) +#define CGX_DMAC_BCAST_MODE BIT_ULL(0) +#define CGXX_CMRX_RX_DMAC_CAM0 0x200 +#define CGX_DMAC_CAM_ADDR_ENABLE BIT_ULL(48) +#define CGXX_CMRX_RX_DMAC_CAM1 0x400 +#define CGX_RX_DMAC_ADR_MASK GENMASK_ULL(47, 0) +#define CGXX_CMRX_TX_STAT0 0x700 +#define CGXX_SCRATCH0_REG 0x1050 +#define CGXX_SCRATCH1_REG 0x1058 +#define CGX_CONST 0x2000 +#define CGXX_SPUX_CONTROL1 0x10000 +#define CGXX_SPUX_CONTROL1_LBK BIT_ULL(14) +#define CGXX_GMP_PCS_MRX_CTL 0x30000 +#define CGXX_GMP_PCS_MRX_CTL_LBK BIT_ULL(14) + +#define CGXX_SMUX_RX_FRM_CTL 0x20020 +#define CGX_SMUX_RX_FRM_CTL_CTL_BCK BIT_ULL(3) +#define CGX_SMUX_RX_FRM_CTL_PTP_MODE BIT_ULL(12) +#define CGXX_GMP_GMI_RXX_FRM_CTL 0x38028 +#define CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK BIT_ULL(3) +#define CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE BIT_ULL(12) +#define CGXX_SMUX_TX_CTL 0x20178 +#define CGXX_SMUX_TX_PAUSE_PKT_TIME 0x20110 +#define CGXX_SMUX_TX_PAUSE_PKT_INTERVAL 0x20120 +#define CGXX_GMP_GMI_TX_PAUSE_PKT_TIME 0x38230 +#define CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL 0x38248 +#define CGX_SMUX_TX_CTL_L2P_BP_CONV BIT_ULL(7) +#define CGXX_CMR_RX_OVR_BP 0x130 +#define CGX_CMR_RX_OVR_BP_EN(X) BIT_ULL(((X) + 8)) +#define CGX_CMR_RX_OVR_BP_BP(X) BIT_ULL(((X) + 4)) + +#define CGX_COMMAND_REG CGXX_SCRATCH1_REG +#define CGX_EVENT_REG CGXX_SCRATCH0_REG +#define CGX_CMD_TIMEOUT 2200 /* msecs */ +#define DEFAULT_PAUSE_TIME 0x7FF + +#define CGX_NVEC 37 +#define CGX_LMAC_FWI 0 + +enum cgx_nix_stat_type { + NIX_STATS_RX, + NIX_STATS_TX, +}; + +enum LMAC_TYPE { + LMAC_MODE_SGMII = 0, + LMAC_MODE_XAUI = 1, + LMAC_MODE_RXAUI = 2, + LMAC_MODE_10G_R = 3, + LMAC_MODE_40G_R = 4, + LMAC_MODE_QSGMII = 6, + LMAC_MODE_25G_R = 7, + LMAC_MODE_50G_R = 8, + LMAC_MODE_100G_R = 9, + LMAC_MODE_USXGMII = 10, + LMAC_MODE_MAX, +}; + +struct cgx_link_event { + struct cgx_link_user_info link_uinfo; + u8 cgx_id; + u8 lmac_id; +}; + +/** + * struct cgx_event_cb + * @notify_link_chg: callback for link change notification + * @data: data passed to callback function + */ +struct cgx_event_cb { + int (*notify_link_chg)(struct cgx_link_event *event, void *data); + void *data; +}; + +extern struct pci_driver cgx_driver; + +int cgx_get_cgxcnt_max(void); +int 
cgx_get_cgxid(void *cgxd); +int cgx_get_lmac_cnt(void *cgxd); +void *cgx_get_pdata(int cgx_id); +int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind); +int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id); +int cgx_lmac_evh_unregister(void *cgxd, int lmac_id); +int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat); +int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat); +int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable); +int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable); +int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr); +u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id); +void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable); +void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable); +int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable); +int cgx_get_link_info(void *cgxd, int lmac_id, + struct cgx_link_user_info *linfo); +int cgx_lmac_linkup_start(void *cgxd); +int cgx_get_fwdata_base(u64 *base); +int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id, + u8 *tx_pause, u8 *rx_pause); +int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id, + u8 tx_pause, u8 rx_pause); +void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable); +u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id); + +#endif /* CGX_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h new file mode 100644 index 000000000..c3702fa58 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h @@ -0,0 +1,202 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell OcteonTx2 CGX driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __CGX_FW_INTF_H__ +#define __CGX_FW_INTF_H__ + +#include <linux/bitops.h> +#include <linux/bitfield.h> + +#define CGX_FIRMWARE_MAJOR_VER 1 +#define CGX_FIRMWARE_MINOR_VER 0 + +#define CGX_EVENT_ACK 1UL + +/* CGX error types. set for cmd response status as CGX_STAT_FAIL */ +enum cgx_error_type { + CGX_ERR_NONE, + CGX_ERR_LMAC_NOT_ENABLED, + CGX_ERR_LMAC_MODE_INVALID, + CGX_ERR_REQUEST_ID_INVALID, + CGX_ERR_PREV_ACK_NOT_CLEAR, + CGX_ERR_PHY_LINK_DOWN, + CGX_ERR_PCS_RESET_FAIL, + CGX_ERR_AN_CPT_FAIL, + CGX_ERR_TX_NOT_IDLE, + CGX_ERR_RX_NOT_IDLE, + CGX_ERR_SPUX_BR_BLKLOCK_FAIL, + CGX_ERR_SPUX_RX_ALIGN_FAIL, + CGX_ERR_SPUX_TX_FAULT, + CGX_ERR_SPUX_RX_FAULT, + CGX_ERR_SPUX_RESET_FAIL, + CGX_ERR_SPUX_AN_RESET_FAIL, + CGX_ERR_SPUX_USX_AN_RESET_FAIL, + CGX_ERR_SMUX_RX_LINK_NOT_OK, + CGX_ERR_PCS_RECV_LINK_FAIL, + CGX_ERR_TRAINING_FAIL, + CGX_ERR_RX_EQU_FAIL, + CGX_ERR_SPUX_BER_FAIL, + CGX_ERR_SPUX_RSFEC_ALGN_FAIL, /* = 22 */ +}; + +/* LINK speed types */ +enum cgx_link_speed { + CGX_LINK_NONE, + CGX_LINK_10M, + CGX_LINK_100M, + CGX_LINK_1G, + CGX_LINK_2HG, + CGX_LINK_5G, + CGX_LINK_10G, + CGX_LINK_20G, + CGX_LINK_25G, + CGX_LINK_40G, + CGX_LINK_50G, + CGX_LINK_100G, + CGX_LINK_SPEED_MAX, +}; + +/* REQUEST ID types. 
Input to firmware */ +enum cgx_cmd_id { + CGX_CMD_NONE, + CGX_CMD_GET_FW_VER, + CGX_CMD_GET_MAC_ADDR, + CGX_CMD_SET_MTU, + CGX_CMD_GET_LINK_STS, /* optional to user */ + CGX_CMD_LINK_BRING_UP, + CGX_CMD_LINK_BRING_DOWN, + CGX_CMD_INTERNAL_LBK, + CGX_CMD_EXTERNAL_LBK, + CGX_CMD_HIGIG, + CGX_CMD_LINK_STATE_CHANGE, + CGX_CMD_MODE_CHANGE, /* hot plug support */ + CGX_CMD_INTF_SHUTDOWN, + CGX_CMD_GET_MKEX_PRFL_SIZE, + CGX_CMD_GET_MKEX_PRFL_ADDR, + CGX_CMD_GET_FWD_BASE, /* get base address of shared FW data */ +}; + +/* async event ids */ +enum cgx_evt_id { + CGX_EVT_NONE, + CGX_EVT_LINK_CHANGE, +}; + +/* event types - cause of interrupt */ +enum cgx_evt_type { + CGX_EVT_ASYNC, + CGX_EVT_CMD_RESP +}; + +enum cgx_stat { + CGX_STAT_SUCCESS, + CGX_STAT_FAIL +}; + +enum cgx_cmd_own { + CGX_CMD_OWN_NS, + CGX_CMD_OWN_FIRMWARE, +}; + +/* m - bit mask + * y - value to be written in the bitrange + * x - input value whose bitrange to be modified + */ +#define FIELD_SET(m, y, x) \ + (((x) & ~(m)) | \ + FIELD_PREP((m), (y))) + +/* scratchx(0) CSR used for ATF->non-secure SW communication. + * This acts as the status register + * Provides details on command ack/status, command response, error details + */ +#define EVTREG_ACK BIT_ULL(0) +#define EVTREG_EVT_TYPE BIT_ULL(1) +#define EVTREG_STAT BIT_ULL(2) +#define EVTREG_ID GENMASK_ULL(8, 3) + +/* Response to command IDs with command status as CGX_STAT_FAIL + * + * Not applicable for commands : + * CGX_CMD_LINK_BRING_UP/DOWN/CGX_EVT_LINK_CHANGE + */ +#define EVTREG_ERRTYPE GENMASK_ULL(18, 9) + +/* Response to cmd ID as CGX_CMD_GET_FW_VER with cmd status as + * CGX_STAT_SUCCESS + */ +#define RESP_MAJOR_VER GENMASK_ULL(12, 9) +#define RESP_MINOR_VER GENMASK_ULL(16, 13) + +/* Response to cmd ID as CGX_CMD_GET_MAC_ADDR with cmd status as + * CGX_STAT_SUCCESS + */ +#define RESP_MAC_ADDR GENMASK_ULL(56, 9) + +/* Response to cmd ID as CGX_CMD_GET_MKEX_PRFL_SIZE with cmd status as + * CGX_STAT_SUCCESS + */ +#define RESP_MKEX_PRFL_SIZE GENMASK_ULL(63, 9) + +/* Response to cmd ID as CGX_CMD_GET_MKEX_PRFL_ADDR with cmd status as + * CGX_STAT_SUCCESS + */ +#define RESP_MKEX_PRFL_ADDR GENMASK_ULL(63, 9) + +/* Response to cmd ID as CGX_CMD_GET_FWD_BASE with cmd status as + * CGX_STAT_SUCCESS + */ +#define RESP_FWD_BASE GENMASK_ULL(56, 9) + +/* Response to cmd ID - CGX_CMD_LINK_BRING_UP/DOWN, event ID CGX_EVT_LINK_CHANGE + * status can be either CGX_STAT_FAIL or CGX_STAT_SUCCESS + * + * In case of CGX_STAT_FAIL, it indicates CGX configuration failed + * when processing link up/down/change command. + * Both err_type and current link status will be updated + * + * In case of CGX_STAT_SUCCESS, err_type will be CGX_ERR_NONE and current + * link status will be updated + */ +struct cgx_lnk_sts { + uint64_t reserved1:9; + uint64_t link_up:1; + uint64_t full_duplex:1; + uint64_t speed:4; /* cgx_link_speed */ + uint64_t err_type:10; + uint64_t reserved2:39; +}; + +#define RESP_LINKSTAT_UP GENMASK_ULL(9, 9) +#define RESP_LINKSTAT_FDUPLEX GENMASK_ULL(10, 10) +#define RESP_LINKSTAT_SPEED GENMASK_ULL(14, 11) +#define RESP_LINKSTAT_ERRTYPE GENMASK_ULL(24, 15) + +/* scratchx(1) CSR used for non-secure SW->ATF communication + * This CSR acts as a command register + */ +#define CMDREG_OWN BIT_ULL(0) +#define CMDREG_ID GENMASK_ULL(7, 2) + +/* Any command using enable/disable as an argument need + * to set this bitfield. + * Ex: Loopback, HiGig... 
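+ * i.e. CGX_CMD_INTERNAL_LBK, CGX_CMD_EXTERNAL_LBK and CGX_CMD_HIGIG.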
+ */ +#define CMDREG_ENABLE BIT_ULL(8) + +/* command argument to be passed for cmd ID - CGX_CMD_SET_MTU */ +#define CMDMTU_SIZE GENMASK_ULL(23, 8) + +/* command argument to be passed for cmd ID - CGX_CMD_LINK_CHANGE */ +#define CMDLINKCHANGE_LINKUP BIT_ULL(8) +#define CMDLINKCHANGE_FULLDPLX BIT_ULL(9) +#define CMDLINKCHANGE_SPEED GENMASK_ULL(13, 10) + +#endif /* __CGX_FW_INTF_H__ */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h new file mode 100644 index 000000000..f48eb66ed --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h @@ -0,0 +1,220 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef COMMON_H +#define COMMON_H + +#include "rvu_struct.h" + +#define OTX2_ALIGN 128 /* Align to cacheline */ + +#define Q_SIZE_16 0ULL /* 16 entries */ +#define Q_SIZE_64 1ULL /* 64 entries */ +#define Q_SIZE_256 2ULL +#define Q_SIZE_1K 3ULL +#define Q_SIZE_4K 4ULL +#define Q_SIZE_16K 5ULL +#define Q_SIZE_64K 6ULL +#define Q_SIZE_256K 7ULL +#define Q_SIZE_1M 8ULL /* Million entries */ +#define Q_SIZE_MIN Q_SIZE_16 +#define Q_SIZE_MAX Q_SIZE_1M + +#define Q_COUNT(x) (16ULL << (2 * x)) +#define Q_SIZE(x, n) ((ilog2(x) - (n)) / 2) + +/* Admin queue info */ + +/* Since we intend to add only one instruction at a time, + * keep queue size to it's minimum. + */ +#define AQ_SIZE Q_SIZE_16 +/* HW head & tail pointer mask */ +#define AQ_PTR_MASK 0xFFFFF + +struct qmem { + void *base; + dma_addr_t iova; + int alloc_sz; + u16 entry_sz; + u8 align; + u32 qsize; +}; + +static inline int qmem_alloc(struct device *dev, struct qmem **q, + int qsize, int entry_sz) +{ + struct qmem *qmem; + int aligned_addr; + + if (!qsize) + return -EINVAL; + + *q = devm_kzalloc(dev, sizeof(*qmem), GFP_KERNEL); + if (!*q) + return -ENOMEM; + qmem = *q; + + qmem->entry_sz = entry_sz; + qmem->alloc_sz = (qsize * entry_sz) + OTX2_ALIGN; + qmem->base = dma_alloc_coherent(dev, qmem->alloc_sz, + &qmem->iova, GFP_KERNEL); + if (!qmem->base) + return -ENOMEM; + + qmem->qsize = qsize; + + aligned_addr = ALIGN((u64)qmem->iova, OTX2_ALIGN); + qmem->align = (aligned_addr - qmem->iova); + qmem->base += qmem->align; + qmem->iova += qmem->align; + return 0; +} + +static inline void qmem_free(struct device *dev, struct qmem *qmem) +{ + if (!qmem) + return; + + if (qmem->base) + dma_free_coherent(dev, qmem->alloc_sz, + qmem->base - qmem->align, + qmem->iova - qmem->align); + devm_kfree(dev, qmem); +} + +struct admin_queue { + struct qmem *inst; + struct qmem *res; + spinlock_t lock; /* Serialize inst enqueue from PFs */ +}; + +/* NPA aura count */ +enum npa_aura_sz { + NPA_AURA_SZ_0, + NPA_AURA_SZ_128, + NPA_AURA_SZ_256, + NPA_AURA_SZ_512, + NPA_AURA_SZ_1K, + NPA_AURA_SZ_2K, + NPA_AURA_SZ_4K, + NPA_AURA_SZ_8K, + NPA_AURA_SZ_16K, + NPA_AURA_SZ_32K, + NPA_AURA_SZ_64K, + NPA_AURA_SZ_128K, + NPA_AURA_SZ_256K, + NPA_AURA_SZ_512K, + NPA_AURA_SZ_1M, + NPA_AURA_SZ_MAX, +}; + +#define NPA_AURA_COUNT(x) (1ULL << ((x) + 6)) + +/* NPA AQ result structure for init/read/write of aura HW contexts */ +struct npa_aq_aura_res { + struct npa_aq_res_s res; + struct npa_aura_s aura_ctx; + struct npa_aura_s ctx_mask; +}; + +/* NPA AQ result structure for init/read/write of pool HW contexts */ 
+struct npa_aq_pool_res { + struct npa_aq_res_s res; + struct npa_pool_s pool_ctx; + struct npa_pool_s ctx_mask; +}; + +/* NIX Transmit schedulers */ +enum nix_scheduler { + NIX_TXSCH_LVL_SMQ = 0x0, + NIX_TXSCH_LVL_MDQ = 0x0, + NIX_TXSCH_LVL_TL4 = 0x1, + NIX_TXSCH_LVL_TL3 = 0x2, + NIX_TXSCH_LVL_TL2 = 0x3, + NIX_TXSCH_LVL_TL1 = 0x4, + NIX_TXSCH_LVL_CNT = 0x5, +}; + +#define TXSCH_RR_QTM_MAX ((1 << 24) - 1) +#define TXSCH_TL1_DFLT_RR_QTM TXSCH_RR_QTM_MAX +#define TXSCH_TL1_DFLT_RR_PRIO (0x1ull) +#define MAX_SCHED_WEIGHT 0xFF +#define DFLT_RR_WEIGHT 71 +#define DFLT_RR_QTM ((DFLT_RR_WEIGHT * TXSCH_RR_QTM_MAX) \ + / MAX_SCHED_WEIGHT) + +/* Min/Max packet sizes, excluding FCS */ +#define NIC_HW_MIN_FRS 40 +#define NIC_HW_MAX_FRS 9212 +#define SDP_HW_MAX_FRS 65535 + +/* NIX RX action operation*/ +#define NIX_RX_ACTIONOP_DROP (0x0ull) +#define NIX_RX_ACTIONOP_UCAST (0x1ull) +#define NIX_RX_ACTIONOP_UCAST_IPSEC (0x2ull) +#define NIX_RX_ACTIONOP_MCAST (0x3ull) +#define NIX_RX_ACTIONOP_RSS (0x4ull) + +/* NIX TX action operation*/ +#define NIX_TX_ACTIONOP_DROP (0x0ull) +#define NIX_TX_ACTIONOP_UCAST_DEFAULT (0x1ull) +#define NIX_TX_ACTIONOP_UCAST_CHAN (0x2ull) +#define NIX_TX_ACTIONOP_MCAST (0x3ull) +#define NIX_TX_ACTIONOP_DROP_VIOL (0x5ull) + +#define NPC_MCAM_KEY_X1 0 +#define NPC_MCAM_KEY_X2 1 +#define NPC_MCAM_KEY_X4 2 + +#define NIX_INTF_RX 0 +#define NIX_INTF_TX 1 + +#define NIX_INTF_TYPE_CGX 0 +#define NIX_INTF_TYPE_LBK 1 + +#define MAX_LMAC_PKIND 12 +#define NIX_LINK_CGX_LMAC(a, b) (0 + 4 * (a) + (b)) +#define NIX_LINK_LBK(a) (12 + (a)) +#define NIX_CHAN_CGX_LMAC_CHX(a, b, c) (0x800 + 0x100 * (a) + 0x10 * (b) + (c)) +#define NIX_CHAN_LBK_CHX(a, b) (0 + 0x100 * (a) + (b)) + +/* NIX LSO format indices. + * As of now TSO is the only one using, so statically assigning indices. + */ +#define NIX_LSO_FORMAT_IDX_TSOV4 0 +#define NIX_LSO_FORMAT_IDX_TSOV6 1 + +/* RSS info */ +#define MAX_RSS_GROUPS 8 +/* Group 0 has to be used in default pkt forwarding MCAM entries + * reserved for NIXLFs. Groups 1-7 can be used for RSS for ntuple + * filters. + */ +#define DEFAULT_RSS_CONTEXT_GROUP 0 +#define MAX_RSS_INDIR_TBL_SIZE 256 /* 1 << Max adder bits */ + +/* NDC info */ +enum ndc_idx_e { + NIX0_RX = 0x0, + NIX0_TX = 0x1, + NPA0_U = 0x2, +}; + +enum ndc_ctype_e { + CACHING = 0x0, + BYPASS = 0x1, +}; + +#define NDC_MAX_PORT 6 +#define NDC_READ_TRANS 0 +#define NDC_WRITE_TRANS 1 + +#endif /* COMMON_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c new file mode 100644 index 000000000..bbabb8e64 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c @@ -0,0 +1,375 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/pci.h> + +#include "rvu_reg.h" +#include "mbox.h" +#include "rvu_trace.h" + +static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN); + +void __otx2_mbox_reset(struct otx2_mbox *mbox, int devid) +{ + void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE); + struct otx2_mbox_dev *mdev = &mbox->dev[devid]; + struct mbox_hdr *tx_hdr, *rx_hdr; + + tx_hdr = hw_mbase + mbox->tx_start; + rx_hdr = hw_mbase + mbox->rx_start; + + mdev->msg_size = 0; + mdev->rsp_size = 0; + tx_hdr->num_msgs = 0; + tx_hdr->msg_size = 0; + rx_hdr->num_msgs = 0; + rx_hdr->msg_size = 0; +} +EXPORT_SYMBOL(__otx2_mbox_reset); + +void otx2_mbox_reset(struct otx2_mbox *mbox, int devid) +{ + struct otx2_mbox_dev *mdev = &mbox->dev[devid]; + + spin_lock(&mdev->mbox_lock); + __otx2_mbox_reset(mbox, devid); + spin_unlock(&mdev->mbox_lock); +} +EXPORT_SYMBOL(otx2_mbox_reset); + +void otx2_mbox_destroy(struct otx2_mbox *mbox) +{ + mbox->reg_base = NULL; + mbox->hwbase = NULL; + + kfree(mbox->dev); + mbox->dev = NULL; +} +EXPORT_SYMBOL(otx2_mbox_destroy); + +int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev, + void *reg_base, int direction, int ndevs) +{ + struct otx2_mbox_dev *mdev; + int devid; + + switch (direction) { + case MBOX_DIR_AFPF: + case MBOX_DIR_PFVF: + mbox->tx_start = MBOX_DOWN_TX_START; + mbox->rx_start = MBOX_DOWN_RX_START; + mbox->tx_size = MBOX_DOWN_TX_SIZE; + mbox->rx_size = MBOX_DOWN_RX_SIZE; + break; + case MBOX_DIR_PFAF: + case MBOX_DIR_VFPF: + mbox->tx_start = MBOX_DOWN_RX_START; + mbox->rx_start = MBOX_DOWN_TX_START; + mbox->tx_size = MBOX_DOWN_RX_SIZE; + mbox->rx_size = MBOX_DOWN_TX_SIZE; + break; + case MBOX_DIR_AFPF_UP: + case MBOX_DIR_PFVF_UP: + mbox->tx_start = MBOX_UP_TX_START; + mbox->rx_start = MBOX_UP_RX_START; + mbox->tx_size = MBOX_UP_TX_SIZE; + mbox->rx_size = MBOX_UP_RX_SIZE; + break; + case MBOX_DIR_PFAF_UP: + case MBOX_DIR_VFPF_UP: + mbox->tx_start = MBOX_UP_RX_START; + mbox->rx_start = MBOX_UP_TX_START; + mbox->tx_size = MBOX_UP_RX_SIZE; + mbox->rx_size = MBOX_UP_TX_SIZE; + break; + default: + return -ENODEV; + } + + switch (direction) { + case MBOX_DIR_AFPF: + case MBOX_DIR_AFPF_UP: + mbox->trigger = RVU_AF_AFPF_MBOX0; + mbox->tr_shift = 4; + break; + case MBOX_DIR_PFAF: + case MBOX_DIR_PFAF_UP: + mbox->trigger = RVU_PF_PFAF_MBOX1; + mbox->tr_shift = 0; + break; + case MBOX_DIR_PFVF: + case MBOX_DIR_PFVF_UP: + mbox->trigger = RVU_PF_VFX_PFVF_MBOX0; + mbox->tr_shift = 12; + break; + case MBOX_DIR_VFPF: + case MBOX_DIR_VFPF_UP: + mbox->trigger = RVU_VF_VFPF_MBOX1; + mbox->tr_shift = 0; + break; + default: + return -ENODEV; + } + + mbox->reg_base = reg_base; + mbox->hwbase = hwbase; + mbox->pdev = pdev; + + mbox->dev = kcalloc(ndevs, sizeof(struct otx2_mbox_dev), GFP_KERNEL); + if (!mbox->dev) { + otx2_mbox_destroy(mbox); + return -ENOMEM; + } + + mbox->ndevs = ndevs; + for (devid = 0; devid < ndevs; devid++) { + mdev = &mbox->dev[devid]; + mdev->mbase = mbox->hwbase + (devid * MBOX_SIZE); + spin_lock_init(&mdev->mbox_lock); + /* Init header to reset value */ + otx2_mbox_reset(mbox, devid); + } + + return 0; +} +EXPORT_SYMBOL(otx2_mbox_init); + +int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(MBOX_RSP_TIMEOUT); + struct otx2_mbox_dev *mdev = &mbox->dev[devid]; + struct device *sender = &mbox->pdev->dev; + + while (!time_after(jiffies, timeout)) { + if (mdev->num_msgs == mdev->msgs_acked) + return 0; 
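+ /* Not all responses received yet; back off briefly and re-check */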
+ usleep_range(800, 1000); + } + dev_dbg(sender, "timed out while waiting for rsp\n"); + return -EIO; +} +EXPORT_SYMBOL(otx2_mbox_wait_for_rsp); + +int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid) +{ + struct otx2_mbox_dev *mdev = &mbox->dev[devid]; + unsigned long timeout = jiffies + 1 * HZ; + + while (!time_after(jiffies, timeout)) { + if (mdev->num_msgs == mdev->msgs_acked) + return 0; + cpu_relax(); + } + return -EIO; +} +EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp); + +void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid) +{ + void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE); + struct otx2_mbox_dev *mdev = &mbox->dev[devid]; + struct mbox_hdr *tx_hdr, *rx_hdr; + + tx_hdr = hw_mbase + mbox->tx_start; + rx_hdr = hw_mbase + mbox->rx_start; + + /* If bounce buffer is implemented copy mbox messages from + * bounce buffer to hw mbox memory. + */ + if (mdev->mbase != hw_mbase) + memcpy(hw_mbase + mbox->tx_start + msgs_offset, + mdev->mbase + mbox->tx_start + msgs_offset, + mdev->msg_size); + + spin_lock(&mdev->mbox_lock); + + tx_hdr->msg_size = mdev->msg_size; + + /* Reset header for next messages */ + mdev->msg_size = 0; + mdev->rsp_size = 0; + mdev->msgs_acked = 0; + + /* Sync mbox data into memory */ + smp_wmb(); + + /* num_msgs != 0 signals to the peer that the buffer has a number of + * messages. So this should be written after writing all the messages + * to the shared memory. + */ + tx_hdr->num_msgs = mdev->num_msgs; + rx_hdr->num_msgs = 0; + + trace_otx2_msg_send(mbox->pdev, tx_hdr->num_msgs, tx_hdr->msg_size); + + spin_unlock(&mdev->mbox_lock); + + /* The interrupt should be fired after num_msgs is written + * to the shared memory + */ + writeq(1, (void __iomem *)mbox->reg_base + + (mbox->trigger | (devid << mbox->tr_shift))); +} +EXPORT_SYMBOL(otx2_mbox_msg_send); + +struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid, + int size, int size_rsp) +{ + struct otx2_mbox_dev *mdev = &mbox->dev[devid]; + struct mbox_msghdr *msghdr = NULL; + + spin_lock(&mdev->mbox_lock); + size = ALIGN(size, MBOX_MSG_ALIGN); + size_rsp = ALIGN(size_rsp, MBOX_MSG_ALIGN); + /* Check if there is space in mailbox */ + if ((mdev->msg_size + size) > mbox->tx_size - msgs_offset) + goto exit; + if ((mdev->rsp_size + size_rsp) > mbox->rx_size - msgs_offset) + goto exit; + + if (mdev->msg_size == 0) + mdev->num_msgs = 0; + mdev->num_msgs++; + + msghdr = mdev->mbase + mbox->tx_start + msgs_offset + mdev->msg_size; + + /* Clear the whole msg region */ + memset(msghdr, 0, size); + /* Init message header with reset values */ + msghdr->ver = OTX2_MBOX_VERSION; + mdev->msg_size += size; + mdev->rsp_size += size_rsp; + msghdr->next_msgoff = mdev->msg_size + msgs_offset; +exit: + spin_unlock(&mdev->mbox_lock); + + return msghdr; +} +EXPORT_SYMBOL(otx2_mbox_alloc_msg_rsp); + +struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid, + struct mbox_msghdr *msg) +{ + unsigned long imsg = mbox->tx_start + msgs_offset; + unsigned long irsp = mbox->rx_start + msgs_offset; + struct otx2_mbox_dev *mdev = &mbox->dev[devid]; + u16 msgs; + + spin_lock(&mdev->mbox_lock); + + if (mdev->num_msgs != mdev->msgs_acked) + goto error; + + for (msgs = 0; msgs < mdev->msgs_acked; msgs++) { + struct mbox_msghdr *pmsg = mdev->mbase + imsg; + struct mbox_msghdr *prsp = mdev->mbase + irsp; + + if (msg == pmsg) { + if (pmsg->id != prsp->id) + goto error; + spin_unlock(&mdev->mbox_lock); + return prsp; + } + + imsg = mbox->tx_start + pmsg->next_msgoff; + irsp = mbox->rx_start + 
prsp->next_msgoff; + } + +error: + spin_unlock(&mdev->mbox_lock); + return ERR_PTR(-ENODEV); +} +EXPORT_SYMBOL(otx2_mbox_get_rsp); + +int otx2_mbox_check_rsp_msgs(struct otx2_mbox *mbox, int devid) +{ + unsigned long ireq = mbox->tx_start + msgs_offset; + unsigned long irsp = mbox->rx_start + msgs_offset; + struct otx2_mbox_dev *mdev = &mbox->dev[devid]; + int rc = -ENODEV; + u16 msgs; + + spin_lock(&mdev->mbox_lock); + + if (mdev->num_msgs != mdev->msgs_acked) + goto exit; + + for (msgs = 0; msgs < mdev->msgs_acked; msgs++) { + struct mbox_msghdr *preq = mdev->mbase + ireq; + struct mbox_msghdr *prsp = mdev->mbase + irsp; + + if (preq->id != prsp->id) { + trace_otx2_msg_check(mbox->pdev, preq->id, + prsp->id, prsp->rc); + goto exit; + } + if (prsp->rc) { + rc = prsp->rc; + trace_otx2_msg_check(mbox->pdev, preq->id, + prsp->id, prsp->rc); + goto exit; + } + + ireq = mbox->tx_start + preq->next_msgoff; + irsp = mbox->rx_start + prsp->next_msgoff; + } + rc = 0; +exit: + spin_unlock(&mdev->mbox_lock); + return rc; +} +EXPORT_SYMBOL(otx2_mbox_check_rsp_msgs); + +int +otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, u16 pcifunc, u16 id) +{ + struct msg_rsp *rsp; + + rsp = (struct msg_rsp *) + otx2_mbox_alloc_msg(mbox, devid, sizeof(*rsp)); + if (!rsp) + return -ENOMEM; + rsp->hdr.id = id; + rsp->hdr.sig = OTX2_MBOX_RSP_SIG; + rsp->hdr.rc = MBOX_MSG_INVALID; + rsp->hdr.pcifunc = pcifunc; + return 0; +} +EXPORT_SYMBOL(otx2_reply_invalid_msg); + +bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid) +{ + struct otx2_mbox_dev *mdev = &mbox->dev[devid]; + bool ret; + + spin_lock(&mdev->mbox_lock); + ret = mdev->num_msgs != 0; + spin_unlock(&mdev->mbox_lock); + + return ret; +} +EXPORT_SYMBOL(otx2_mbox_nonempty); + +const char *otx2_mbox_id2name(u16 id) +{ + switch (id) { +#define M(_name, _id, _1, _2, _3) case _id: return # _name; + MBOX_MESSAGES +#undef M + default: + return "INVALID ID"; + } +} +EXPORT_SYMBOL(otx2_mbox_id2name); + +MODULE_AUTHOR("Marvell International Ltd."); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h new file mode 100644 index 000000000..263a21129 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -0,0 +1,884 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef MBOX_H +#define MBOX_H + +#include <linux/etherdevice.h> +#include <linux/sizes.h> + +#include "rvu_struct.h" +#include "common.h" + +#define MBOX_SIZE SZ_64K + +/* AF/PF: PF initiated, PF/VF VF initiated */ +#define MBOX_DOWN_RX_START 0 +#define MBOX_DOWN_RX_SIZE (46 * SZ_1K) +#define MBOX_DOWN_TX_START (MBOX_DOWN_RX_START + MBOX_DOWN_RX_SIZE) +#define MBOX_DOWN_TX_SIZE (16 * SZ_1K) +/* AF/PF: AF initiated, PF/VF PF initiated */ +#define MBOX_UP_RX_START (MBOX_DOWN_TX_START + MBOX_DOWN_TX_SIZE) +#define MBOX_UP_RX_SIZE SZ_1K +#define MBOX_UP_TX_START (MBOX_UP_RX_START + MBOX_UP_RX_SIZE) +#define MBOX_UP_TX_SIZE SZ_1K + +#if MBOX_UP_TX_SIZE + MBOX_UP_TX_START != MBOX_SIZE +# error "incorrect mailbox area sizes" +#endif + +#define INTR_MASK(pfvfs) ((pfvfs < 64) ? 
(BIT_ULL(pfvfs) - 1) : (~0ull)) + +#define MBOX_RSP_TIMEOUT 2000 /* Time(ms) to wait for mbox response */ + +#define MBOX_MSG_ALIGN 16 /* Align mbox msg start to 16bytes */ + +/* Mailbox directions */ +#define MBOX_DIR_AFPF 0 /* AF replies to PF */ +#define MBOX_DIR_PFAF 1 /* PF sends messages to AF */ +#define MBOX_DIR_PFVF 2 /* PF replies to VF */ +#define MBOX_DIR_VFPF 3 /* VF sends messages to PF */ +#define MBOX_DIR_AFPF_UP 4 /* AF sends messages to PF */ +#define MBOX_DIR_PFAF_UP 5 /* PF replies to AF */ +#define MBOX_DIR_PFVF_UP 6 /* PF sends messages to VF */ +#define MBOX_DIR_VFPF_UP 7 /* VF replies to PF */ + +struct otx2_mbox_dev { + void *mbase; /* This dev's mbox region */ + spinlock_t mbox_lock; + u16 msg_size; /* Total msg size to be sent */ + u16 rsp_size; /* Total rsp size to be sure the reply is ok */ + u16 num_msgs; /* No of msgs sent or waiting for response */ + u16 msgs_acked; /* No of msgs for which response is received */ +}; + +struct otx2_mbox { + struct pci_dev *pdev; + void *hwbase; /* Mbox region advertised by HW */ + void *reg_base;/* CSR base for this dev */ + u64 trigger; /* Trigger mbox notification */ + u16 tr_shift; /* Mbox trigger shift */ + u64 rx_start; /* Offset of Rx region in mbox memory */ + u64 tx_start; /* Offset of Tx region in mbox memory */ + u16 rx_size; /* Size of Rx region */ + u16 tx_size; /* Size of Tx region */ + u16 ndevs; /* The number of peers */ + struct otx2_mbox_dev *dev; +}; + +/* Header which preceeds all mbox messages */ +struct mbox_hdr { + u64 msg_size; /* Total msgs size embedded */ + u16 num_msgs; /* No of msgs embedded */ +}; + +/* Header which preceeds every msg and is also part of it */ +struct mbox_msghdr { + u16 pcifunc; /* Who's sending this msg */ + u16 id; /* Mbox message ID */ +#define OTX2_MBOX_REQ_SIG (0xdead) +#define OTX2_MBOX_RSP_SIG (0xbeef) + u16 sig; /* Signature, for validating corrupted msgs */ +#define OTX2_MBOX_VERSION (0x0001) + u16 ver; /* Version of msg's structure for this ID */ + u16 next_msgoff; /* Offset of next msg within mailbox region */ + int rc; /* Msg process'ed response code */ +}; + +void otx2_mbox_reset(struct otx2_mbox *mbox, int devid); +void __otx2_mbox_reset(struct otx2_mbox *mbox, int devid); +void otx2_mbox_destroy(struct otx2_mbox *mbox); +int otx2_mbox_init(struct otx2_mbox *mbox, void __force *hwbase, + struct pci_dev *pdev, void __force *reg_base, + int direction, int ndevs); +void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid); +int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid); +int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid); +struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid, + int size, int size_rsp); +struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid, + struct mbox_msghdr *msg); +int otx2_mbox_check_rsp_msgs(struct otx2_mbox *mbox, int devid); +int otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, + u16 pcifunc, u16 id); +bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid); +const char *otx2_mbox_id2name(u16 id); +static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox, + int devid, int size) +{ + return otx2_mbox_alloc_msg_rsp(mbox, devid, size, 0); +} + +/* Mailbox message types */ +#define MBOX_MSG_MASK 0xFFFF +#define MBOX_MSG_INVALID 0xFFFE +#define MBOX_MSG_MAX 0xFFFF + +#define MBOX_MESSAGES \ +/* Generic mbox IDs (range 0x000 - 0x1FF) */ \ +M(READY, 0x001, ready, msg_req, ready_msg_rsp) \ +M(ATTACH_RESOURCES, 0x002, attach_resources, 
rsrc_attach, msg_rsp) \ +M(DETACH_RESOURCES, 0x003, detach_resources, rsrc_detach, msg_rsp) \ +M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \ +M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \ +M(PTP_OP, 0x007, ptp_op, ptp_req, ptp_rsp) \ +M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \ +/* CGX mbox IDs (range 0x200 - 0x3FF) */ \ +M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \ +M(CGX_STOP_RXTX, 0x201, cgx_stop_rxtx, msg_req, msg_rsp) \ +M(CGX_STATS, 0x202, cgx_stats, msg_req, cgx_stats_rsp) \ +M(CGX_MAC_ADDR_SET, 0x203, cgx_mac_addr_set, cgx_mac_addr_set_or_get, \ + cgx_mac_addr_set_or_get) \ +M(CGX_MAC_ADDR_GET, 0x204, cgx_mac_addr_get, cgx_mac_addr_set_or_get, \ + cgx_mac_addr_set_or_get) \ +M(CGX_PROMISC_ENABLE, 0x205, cgx_promisc_enable, msg_req, msg_rsp) \ +M(CGX_PROMISC_DISABLE, 0x206, cgx_promisc_disable, msg_req, msg_rsp) \ +M(CGX_START_LINKEVENTS, 0x207, cgx_start_linkevents, msg_req, msg_rsp) \ +M(CGX_STOP_LINKEVENTS, 0x208, cgx_stop_linkevents, msg_req, msg_rsp) \ +M(CGX_GET_LINKINFO, 0x209, cgx_get_linkinfo, msg_req, cgx_link_info_msg) \ +M(CGX_INTLBK_ENABLE, 0x20A, cgx_intlbk_enable, msg_req, msg_rsp) \ +M(CGX_INTLBK_DISABLE, 0x20B, cgx_intlbk_disable, msg_req, msg_rsp) \ +M(CGX_PTP_RX_ENABLE, 0x20C, cgx_ptp_rx_enable, msg_req, msg_rsp) \ +M(CGX_PTP_RX_DISABLE, 0x20D, cgx_ptp_rx_disable, msg_req, msg_rsp) \ +M(CGX_CFG_PAUSE_FRM, 0x20E, cgx_cfg_pause_frm, cgx_pause_frm_cfg, \ + cgx_pause_frm_cfg) \ +/* NPA mbox IDs (range 0x400 - 0x5FF) */ \ +M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \ + npa_lf_alloc_req, npa_lf_alloc_rsp) \ +M(NPA_LF_FREE, 0x401, npa_lf_free, msg_req, msg_rsp) \ +M(NPA_AQ_ENQ, 0x402, npa_aq_enq, npa_aq_enq_req, npa_aq_enq_rsp) \ +M(NPA_HWCTX_DISABLE, 0x403, npa_hwctx_disable, hwctx_disable_req, msg_rsp)\ +/* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */ \ +/* TIM mbox IDs (range 0x800 - 0x9FF) */ \ +/* CPT mbox IDs (range 0xA00 - 0xBFF) */ \ +/* NPC mbox IDs (range 0x6000 - 0x7FFF) */ \ +M(NPC_MCAM_ALLOC_ENTRY, 0x6000, npc_mcam_alloc_entry, npc_mcam_alloc_entry_req,\ + npc_mcam_alloc_entry_rsp) \ +M(NPC_MCAM_FREE_ENTRY, 0x6001, npc_mcam_free_entry, \ + npc_mcam_free_entry_req, msg_rsp) \ +M(NPC_MCAM_WRITE_ENTRY, 0x6002, npc_mcam_write_entry, \ + npc_mcam_write_entry_req, msg_rsp) \ +M(NPC_MCAM_ENA_ENTRY, 0x6003, npc_mcam_ena_entry, \ + npc_mcam_ena_dis_entry_req, msg_rsp) \ +M(NPC_MCAM_DIS_ENTRY, 0x6004, npc_mcam_dis_entry, \ + npc_mcam_ena_dis_entry_req, msg_rsp) \ +M(NPC_MCAM_SHIFT_ENTRY, 0x6005, npc_mcam_shift_entry, npc_mcam_shift_entry_req,\ + npc_mcam_shift_entry_rsp) \ +M(NPC_MCAM_ALLOC_COUNTER, 0x6006, npc_mcam_alloc_counter, \ + npc_mcam_alloc_counter_req, \ + npc_mcam_alloc_counter_rsp) \ +M(NPC_MCAM_FREE_COUNTER, 0x6007, npc_mcam_free_counter, \ + npc_mcam_oper_counter_req, msg_rsp) \ +M(NPC_MCAM_UNMAP_COUNTER, 0x6008, npc_mcam_unmap_counter, \ + npc_mcam_unmap_counter_req, msg_rsp) \ +M(NPC_MCAM_CLEAR_COUNTER, 0x6009, npc_mcam_clear_counter, \ + npc_mcam_oper_counter_req, msg_rsp) \ +M(NPC_MCAM_COUNTER_STATS, 0x600a, npc_mcam_counter_stats, \ + npc_mcam_oper_counter_req, \ + npc_mcam_oper_counter_rsp) \ +M(NPC_MCAM_ALLOC_AND_WRITE_ENTRY, 0x600b, npc_mcam_alloc_and_write_entry, \ + npc_mcam_alloc_and_write_entry_req, \ + npc_mcam_alloc_and_write_entry_rsp) \ +M(NPC_GET_KEX_CFG, 0x600c, npc_get_kex_cfg, \ + msg_req, npc_get_kex_cfg_rsp) \ +/* NIX mbox IDs (range 0x8000 - 0xFFFF) */ \ +M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc, \ + nix_lf_alloc_req, nix_lf_alloc_rsp) \ +M(NIX_LF_FREE, 0x8001, nix_lf_free, msg_req, msg_rsp) \ 
+M(NIX_AQ_ENQ, 0x8002, nix_aq_enq, nix_aq_enq_req, nix_aq_enq_rsp) \ +M(NIX_HWCTX_DISABLE, 0x8003, nix_hwctx_disable, \ + hwctx_disable_req, msg_rsp) \ +M(NIX_TXSCH_ALLOC, 0x8004, nix_txsch_alloc, \ + nix_txsch_alloc_req, nix_txsch_alloc_rsp) \ +M(NIX_TXSCH_FREE, 0x8005, nix_txsch_free, nix_txsch_free_req, msg_rsp) \ +M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_cfg, nix_txschq_config, msg_rsp) \ +M(NIX_STATS_RST, 0x8007, nix_stats_rst, msg_req, msg_rsp) \ +M(NIX_VTAG_CFG, 0x8008, nix_vtag_cfg, nix_vtag_config, msg_rsp) \ +M(NIX_RSS_FLOWKEY_CFG, 0x8009, nix_rss_flowkey_cfg, \ + nix_rss_flowkey_cfg, \ + nix_rss_flowkey_cfg_rsp) \ +M(NIX_SET_MAC_ADDR, 0x800a, nix_set_mac_addr, nix_set_mac_addr, msg_rsp) \ +M(NIX_SET_RX_MODE, 0x800b, nix_set_rx_mode, nix_rx_mode, msg_rsp) \ +M(NIX_SET_HW_FRS, 0x800c, nix_set_hw_frs, nix_frs_cfg, msg_rsp) \ +M(NIX_LF_START_RX, 0x800d, nix_lf_start_rx, msg_req, msg_rsp) \ +M(NIX_LF_STOP_RX, 0x800e, nix_lf_stop_rx, msg_req, msg_rsp) \ +M(NIX_MARK_FORMAT_CFG, 0x800f, nix_mark_format_cfg, \ + nix_mark_format_cfg, \ + nix_mark_format_cfg_rsp) \ +M(NIX_SET_RX_CFG, 0x8010, nix_set_rx_cfg, nix_rx_cfg, msg_rsp) \ +M(NIX_LSO_FORMAT_CFG, 0x8011, nix_lso_format_cfg, \ + nix_lso_format_cfg, \ + nix_lso_format_cfg_rsp) \ +M(NIX_RXVLAN_ALLOC, 0x8012, nix_rxvlan_alloc, msg_req, msg_rsp) \ +M(NIX_LF_PTP_TX_ENABLE, 0x8013, nix_lf_ptp_tx_enable, msg_req, msg_rsp) \ +M(NIX_LF_PTP_TX_DISABLE, 0x8014, nix_lf_ptp_tx_disable, msg_req, msg_rsp) \ +M(NIX_BP_ENABLE, 0x8016, nix_bp_enable, nix_bp_cfg_req, \ + nix_bp_cfg_rsp) \ +M(NIX_BP_DISABLE, 0x8017, nix_bp_disable, nix_bp_cfg_req, msg_rsp) \ +M(NIX_GET_MAC_ADDR, 0x8018, nix_get_mac_addr, msg_req, nix_get_mac_addr_rsp) \ + +/* Messages initiated by AF (range 0xC00 - 0xDFF) */ +#define MBOX_UP_CGX_MESSAGES \ +M(CGX_LINK_EVENT, 0xC00, cgx_link_event, cgx_link_info_msg, msg_rsp) + +enum { +#define M(_name, _id, _1, _2, _3) MBOX_MSG_ ## _name = _id, +MBOX_MESSAGES +MBOX_UP_CGX_MESSAGES +#undef M +}; + +/* Mailbox message formats */ + +#define RVU_DEFAULT_PF_FUNC 0xFFFF + +/* Generic request msg used for those mbox messages which + * don't send any data in the request. + */ +struct msg_req { + struct mbox_msghdr hdr; +}; + +/* Generic response msg used as an ack or response for those mbox + * messages which don't have a specific rsp msg format. + */ +struct msg_rsp { + struct mbox_msghdr hdr; +}; + +/* RVU mailbox error codes + * Range 256 - 300. + */ +enum rvu_af_status { + RVU_INVALID_VF_ID = -256, +}; + +struct ready_msg_rsp { + struct mbox_msghdr hdr; + u16 sclk_freq; /* SCLK frequency (in MHz) */ + u16 rclk_freq; /* RCLK frequency (in MHz) */ +}; + +/* Structure for requesting resource provisioning. + * 'modify' flag to be used when either requesting more + * or detaching part of a certain resource type. + * Rest of the fields specify how many of what type to + * be attached. + */ +struct rsrc_attach { + struct mbox_msghdr hdr; + u8 modify:1; + u8 npalf:1; + u8 nixlf:1; + u16 sso; + u16 ssow; + u16 timlfs; + u16 cptlfs; +}; + +/* Structure for relinquishing resources. + * 'partial' flag to be used when relinquishing all resources + * but only of a certain type. If not set, all resources of all + * types provisioned to the RVU function will be detached. 
+ */ +struct rsrc_detach { + struct mbox_msghdr hdr; + u8 partial:1; + u8 npalf:1; + u8 nixlf:1; + u8 sso:1; + u8 ssow:1; + u8 timlfs:1; + u8 cptlfs:1; +}; + +#define MSIX_VECTOR_INVALID 0xFFFF +#define MAX_RVU_BLKLF_CNT 256 + +struct msix_offset_rsp { + struct mbox_msghdr hdr; + u16 npa_msixoff; + u16 nix_msixoff; + u8 sso; + u8 ssow; + u8 timlfs; + u8 cptlfs; + u16 sso_msixoff[MAX_RVU_BLKLF_CNT]; + u16 ssow_msixoff[MAX_RVU_BLKLF_CNT]; + u16 timlf_msixoff[MAX_RVU_BLKLF_CNT]; + u16 cptlf_msixoff[MAX_RVU_BLKLF_CNT]; +}; + +struct get_hw_cap_rsp { + struct mbox_msghdr hdr; + u8 nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */ + u8 nix_shaping; /* Is shaping and coloring supported */ +}; + +/* CGX mbox message formats */ + +struct cgx_stats_rsp { + struct mbox_msghdr hdr; +#define CGX_RX_STATS_COUNT 13 +#define CGX_TX_STATS_COUNT 18 + u64 rx_stats[CGX_RX_STATS_COUNT]; + u64 tx_stats[CGX_TX_STATS_COUNT]; +}; + +/* Structure for requesting the operation for + * setting/getting mac address in the CGX interface + */ +struct cgx_mac_addr_set_or_get { + struct mbox_msghdr hdr; + u8 mac_addr[ETH_ALEN]; +}; + +struct cgx_link_user_info { + uint64_t link_up:1; + uint64_t full_duplex:1; + uint64_t lmac_type_id:4; + uint64_t speed:20; /* speed in Mbps */ +#define LMACTYPE_STR_LEN 16 + char lmac_type[LMACTYPE_STR_LEN]; +}; + +struct cgx_link_info_msg { + struct mbox_msghdr hdr; + struct cgx_link_user_info link_info; +}; + +struct cgx_pause_frm_cfg { + struct mbox_msghdr hdr; + u8 set; + /* set = 1 if the request is to config pause frames */ + /* set = 0 if the request is to fetch pause frames config */ + u8 rx_pause; + u8 tx_pause; +}; + +/* NPA mbox message formats */ + +/* NPA mailbox error codes + * Range 301 - 400. + */ +enum npa_af_status { + NPA_AF_ERR_PARAM = -301, + NPA_AF_ERR_AQ_FULL = -302, + NPA_AF_ERR_AQ_ENQUEUE = -303, + NPA_AF_ERR_AF_LF_INVALID = -304, + NPA_AF_ERR_AF_LF_ALLOC = -305, + NPA_AF_ERR_LF_RESET = -306, +}; + +/* For NPA LF context alloc and init */ +struct npa_lf_alloc_req { + struct mbox_msghdr hdr; + int node; + int aura_sz; /* No of auras */ + u32 nr_pools; /* No of pools */ + u64 way_mask; +}; + +struct npa_lf_alloc_rsp { + struct mbox_msghdr hdr; + u32 stack_pg_ptrs; /* No of ptrs per stack page */ + u32 stack_pg_bytes; /* Size of stack page */ + u16 qints; /* NPA_AF_CONST::QINTS */ +}; + +/* NPA AQ enqueue msg */ +struct npa_aq_enq_req { + struct mbox_msghdr hdr; + u32 aura_id; + u8 ctype; + u8 op; + union { + /* Valid when op == WRITE/INIT and ctype == AURA. + * LF fills the pool_id in aura.pool_addr. AF will translate + * the pool_id to pool context pointer. + */ + struct npa_aura_s aura; + /* Valid when op == WRITE/INIT and ctype == POOL */ + struct npa_pool_s pool; + }; + /* Mask data when op == WRITE (1=write, 0=don't write) */ + union { + /* Valid when op == WRITE and ctype == AURA */ + struct npa_aura_s aura_mask; + /* Valid when op == WRITE and ctype == POOL */ + struct npa_pool_s pool_mask; + }; +}; + +struct npa_aq_enq_rsp { + struct mbox_msghdr hdr; + union { + /* Valid when op == READ and ctype == AURA */ + struct npa_aura_s aura; + /* Valid when op == READ and ctype == POOL */ + struct npa_pool_s pool; + }; +}; + +/* Disable all contexts of type 'ctype' */ +struct hwctx_disable_req { + struct mbox_msghdr hdr; + u8 ctype; +}; + +/* NIX mbox message formats */ + +/* NIX mailbox error codes + * Range 401 - 500. 
+ */ +enum nix_af_status { + NIX_AF_ERR_PARAM = -401, + NIX_AF_ERR_AQ_FULL = -402, + NIX_AF_ERR_AQ_ENQUEUE = -403, + NIX_AF_ERR_AF_LF_INVALID = -404, + NIX_AF_ERR_AF_LF_ALLOC = -405, + NIX_AF_ERR_TLX_ALLOC_FAIL = -406, + NIX_AF_ERR_TLX_INVALID = -407, + NIX_AF_ERR_RSS_SIZE_INVALID = -408, + NIX_AF_ERR_RSS_GRPS_INVALID = -409, + NIX_AF_ERR_FRS_INVALID = -410, + NIX_AF_ERR_RX_LINK_INVALID = -411, + NIX_AF_INVAL_TXSCHQ_CFG = -412, + NIX_AF_SMQ_FLUSH_FAILED = -413, + NIX_AF_ERR_LF_RESET = -414, + NIX_AF_ERR_RSS_NOSPC_FIELD = -415, + NIX_AF_ERR_RSS_NOSPC_ALGO = -416, + NIX_AF_ERR_MARK_CFG_FAIL = -417, + NIX_AF_ERR_LSO_CFG_FAIL = -418, + NIX_AF_INVAL_NPA_PF_FUNC = -419, + NIX_AF_INVAL_SSO_PF_FUNC = -420, +}; + +/* For NIX LF context alloc and init */ +struct nix_lf_alloc_req { + struct mbox_msghdr hdr; + int node; + u32 rq_cnt; /* No of receive queues */ + u32 sq_cnt; /* No of send queues */ + u32 cq_cnt; /* No of completion queues */ + u8 xqe_sz; + u16 rss_sz; + u8 rss_grps; + u16 npa_func; + u16 sso_func; + u64 rx_cfg; /* See NIX_AF_LF(0..127)_RX_CFG */ + u64 way_mask; +}; + +struct nix_lf_alloc_rsp { + struct mbox_msghdr hdr; + u16 sqb_size; + u16 rx_chan_base; + u16 tx_chan_base; + u8 rx_chan_cnt; /* total number of RX channels */ + u8 tx_chan_cnt; /* total number of TX channels */ + u8 lso_tsov4_idx; + u8 lso_tsov6_idx; + u8 mac_addr[ETH_ALEN]; + u8 lf_rx_stats; /* NIX_AF_CONST1::LF_RX_STATS */ + u8 lf_tx_stats; /* NIX_AF_CONST1::LF_TX_STATS */ + u16 cints; /* NIX_AF_CONST2::CINTS */ + u16 qints; /* NIX_AF_CONST2::QINTS */ +}; + +/* NIX AQ enqueue msg */ +struct nix_aq_enq_req { + struct mbox_msghdr hdr; + u32 qidx; + u8 ctype; + u8 op; + union { + struct nix_rq_ctx_s rq; + struct nix_sq_ctx_s sq; + struct nix_cq_ctx_s cq; + struct nix_rsse_s rss; + struct nix_rx_mce_s mce; + }; + union { + struct nix_rq_ctx_s rq_mask; + struct nix_sq_ctx_s sq_mask; + struct nix_cq_ctx_s cq_mask; + struct nix_rsse_s rss_mask; + struct nix_rx_mce_s mce_mask; + }; +}; + +struct nix_aq_enq_rsp { + struct mbox_msghdr hdr; + union { + struct nix_rq_ctx_s rq; + struct nix_sq_ctx_s sq; + struct nix_cq_ctx_s cq; + struct nix_rsse_s rss; + struct nix_rx_mce_s mce; + }; +}; + +/* Tx scheduler/shaper mailbox messages */ + +#define MAX_TXSCHQ_PER_FUNC 128 + +struct nix_txsch_alloc_req { + struct mbox_msghdr hdr; + /* Scheduler queue count request at each level */ + u16 schq_contig[NIX_TXSCH_LVL_CNT]; /* No of contiguous queues */ + u16 schq[NIX_TXSCH_LVL_CNT]; /* No of non-contiguous queues */ +}; + +struct nix_txsch_alloc_rsp { + struct mbox_msghdr hdr; + /* Scheduler queue count allocated at each level */ + u16 schq_contig[NIX_TXSCH_LVL_CNT]; + u16 schq[NIX_TXSCH_LVL_CNT]; + /* Scheduler queue list allocated at each level */ + u16 schq_contig_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC]; + u16 schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC]; + u8 aggr_level; /* Traffic aggregation scheduler level */ + u8 aggr_lvl_rr_prio; /* Aggregation lvl's RR_PRIO config */ + u8 link_cfg_lvl; /* LINKX_CFG CSRs mapped to TL3 or TL2's index ? 
*/ +}; + +struct nix_txsch_free_req { + struct mbox_msghdr hdr; +#define TXSCHQ_FREE_ALL BIT_ULL(0) + u16 flags; + /* Scheduler queue level to be freed */ + u16 schq_lvl; + /* List of scheduler queues to be freed */ + u16 schq; +}; + +struct nix_txschq_config { + struct mbox_msghdr hdr; + u8 lvl; /* SMQ/MDQ/TL4/TL3/TL2/TL1 */ +#define TXSCHQ_IDX_SHIFT 16 +#define TXSCHQ_IDX_MASK (BIT_ULL(10) - 1) +#define TXSCHQ_IDX(reg, shift) (((reg) >> (shift)) & TXSCHQ_IDX_MASK) + u8 num_regs; +#define MAX_REGS_PER_MBOX_MSG 20 + u64 reg[MAX_REGS_PER_MBOX_MSG]; + u64 regval[MAX_REGS_PER_MBOX_MSG]; +}; + +struct nix_vtag_config { + struct mbox_msghdr hdr; + /* '0' for 4 octet VTAG, '1' for 8 octet VTAG */ + u8 vtag_size; + /* cfg_type is '0' for tx vlan cfg + * cfg_type is '1' for rx vlan cfg + */ + u8 cfg_type; + union { + /* valid when cfg_type is '0' */ + struct { + /* tx vlan0 tag(C-VLAN) */ + u64 vlan0; + /* tx vlan1 tag(S-VLAN) */ + u64 vlan1; + /* insert tx vlan tag */ + u8 insert_vlan :1; + /* insert tx double vlan tag */ + u8 double_vlan :1; + } tx; + + /* valid when cfg_type is '1' */ + struct { + /* rx vtag type index, valid values are in 0..7 range */ + u8 vtag_type; + /* rx vtag strip */ + u8 strip_vtag :1; + /* rx vtag capture */ + u8 capture_vtag :1; + } rx; + }; +}; + +struct nix_rss_flowkey_cfg { + struct mbox_msghdr hdr; + int mcam_index; /* MCAM entry index to modify */ +#define NIX_FLOW_KEY_TYPE_PORT BIT(0) +#define NIX_FLOW_KEY_TYPE_IPV4 BIT(1) +#define NIX_FLOW_KEY_TYPE_IPV6 BIT(2) +#define NIX_FLOW_KEY_TYPE_TCP BIT(3) +#define NIX_FLOW_KEY_TYPE_UDP BIT(4) +#define NIX_FLOW_KEY_TYPE_SCTP BIT(5) +#define NIX_FLOW_KEY_TYPE_NVGRE BIT(6) +#define NIX_FLOW_KEY_TYPE_VXLAN BIT(7) +#define NIX_FLOW_KEY_TYPE_GENEVE BIT(8) +#define NIX_FLOW_KEY_TYPE_ETH_DMAC BIT(9) +#define NIX_FLOW_KEY_TYPE_IPV6_EXT BIT(10) +#define NIX_FLOW_KEY_TYPE_GTPU BIT(11) +#define NIX_FLOW_KEY_TYPE_INNR_IPV4 BIT(12) +#define NIX_FLOW_KEY_TYPE_INNR_IPV6 BIT(13) +#define NIX_FLOW_KEY_TYPE_INNR_TCP BIT(14) +#define NIX_FLOW_KEY_TYPE_INNR_UDP BIT(15) +#define NIX_FLOW_KEY_TYPE_INNR_SCTP BIT(16) +#define NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC BIT(17) +#define NIX_FLOW_KEY_TYPE_VLAN BIT(20) + u32 flowkey_cfg; /* Flowkey types selected */ + u8 group; /* RSS context or group */ +}; + +struct nix_rss_flowkey_cfg_rsp { + struct mbox_msghdr hdr; + u8 alg_idx; /* Selected algo index */ +}; + +struct nix_set_mac_addr { + struct mbox_msghdr hdr; + u8 mac_addr[ETH_ALEN]; /* MAC address to be set for this pcifunc */ +}; + +struct nix_get_mac_addr_rsp { + struct mbox_msghdr hdr; + u8 mac_addr[ETH_ALEN]; +}; + +struct nix_mark_format_cfg { + struct mbox_msghdr hdr; + u8 offset; + u8 y_mask; + u8 y_val; + u8 r_mask; + u8 r_val; +}; + +struct nix_mark_format_cfg_rsp { + struct mbox_msghdr hdr; + u8 mark_format_idx; +}; + +struct nix_rx_mode { + struct mbox_msghdr hdr; +#define NIX_RX_MODE_UCAST BIT(0) +#define NIX_RX_MODE_PROMISC BIT(1) +#define NIX_RX_MODE_ALLMULTI BIT(2) + u16 mode; +}; + +struct nix_rx_cfg { + struct mbox_msghdr hdr; +#define NIX_RX_OL3_VERIFY BIT(0) +#define NIX_RX_OL4_VERIFY BIT(1) + u8 len_verify; /* Outer L3/L4 len check */ +#define NIX_RX_CSUM_OL4_VERIFY BIT(0) + u8 csum_verify; /* Outer L4 checksum verification */ +}; + +struct nix_frs_cfg { + struct mbox_msghdr hdr; + u8 update_smq; /* Update SMQ's min/max lens */ + u8 update_minlen; /* Set minlen also */ + u8 sdp_link; /* Set SDP RX link */ + u16 maxlen; + u16 minlen; +}; + +struct nix_lso_format_cfg { + struct mbox_msghdr hdr; + u64 field_mask; +#define 
NIX_LSO_FIELD_MAX 8 + u64 fields[NIX_LSO_FIELD_MAX]; +}; + +struct nix_lso_format_cfg_rsp { + struct mbox_msghdr hdr; + u8 lso_format_idx; +}; + +struct nix_bp_cfg_req { + struct mbox_msghdr hdr; + u16 chan_base; /* Starting channel number */ + u8 chan_cnt; /* Number of channels */ + u8 bpid_per_chan; + /* bpid_per_chan = 0 assigns single bp id for range of channels */ + /* bpid_per_chan = 1 assigns separate bp id for each channel */ +}; + +/* PF can be mapped to either CGX or LBK interface, + * so maximum 64 channels are possible. + */ +#define NIX_MAX_BPID_CHAN 64 +struct nix_bp_cfg_rsp { + struct mbox_msghdr hdr; + u16 chan_bpid[NIX_MAX_BPID_CHAN]; /* Channel and bpid mapping */ + u8 chan_cnt; /* Number of channel for which bpids are assigned */ +}; + +/* NPC mbox message structs */ + +#define NPC_MCAM_ENTRY_INVALID 0xFFFF +#define NPC_MCAM_INVALID_MAP 0xFFFF + +/* NPC mailbox error codes + * Range 701 - 800. + */ +enum npc_af_status { + NPC_MCAM_INVALID_REQ = -701, + NPC_MCAM_ALLOC_DENIED = -702, + NPC_MCAM_ALLOC_FAILED = -703, + NPC_MCAM_PERM_DENIED = -704, +}; + +struct npc_mcam_alloc_entry_req { + struct mbox_msghdr hdr; +#define NPC_MAX_NONCONTIG_ENTRIES 256 + u8 contig; /* Contiguous entries ? */ +#define NPC_MCAM_ANY_PRIO 0 +#define NPC_MCAM_LOWER_PRIO 1 +#define NPC_MCAM_HIGHER_PRIO 2 + u8 priority; /* Lower or higher w.r.t ref_entry */ + u16 ref_entry; + u16 count; /* Number of entries requested */ +}; + +struct npc_mcam_alloc_entry_rsp { + struct mbox_msghdr hdr; + u16 entry; /* Entry allocated or start index if contiguous. + * Invalid incase of non-contiguous. + */ + u16 count; /* Number of entries allocated */ + u16 free_count; /* Number of entries available */ + u16 entry_list[NPC_MAX_NONCONTIG_ENTRIES]; +}; + +struct npc_mcam_free_entry_req { + struct mbox_msghdr hdr; + u16 entry; /* Entry index to be freed */ + u8 all; /* If all entries allocated to this PFVF to be freed */ +}; + +struct mcam_entry { +#define NPC_MAX_KWS_IN_KEY 7 /* Number of keywords in max keywidth */ + u64 kw[NPC_MAX_KWS_IN_KEY]; + u64 kw_mask[NPC_MAX_KWS_IN_KEY]; + u64 action; + u64 vtag_action; +}; + +struct npc_mcam_write_entry_req { + struct mbox_msghdr hdr; + struct mcam_entry entry_data; + u16 entry; /* MCAM entry to write this match key */ + u16 cntr; /* Counter for this MCAM entry */ + u8 intf; /* Rx or Tx interface */ + u8 enable_entry;/* Enable this MCAM entry ? */ + u8 set_cntr; /* Set counter for this entry ? */ +}; + +/* Enable/Disable a given entry */ +struct npc_mcam_ena_dis_entry_req { + struct mbox_msghdr hdr; + u16 entry; +}; + +struct npc_mcam_shift_entry_req { + struct mbox_msghdr hdr; +#define NPC_MCAM_MAX_SHIFTS 64 + u16 curr_entry[NPC_MCAM_MAX_SHIFTS]; + u16 new_entry[NPC_MCAM_MAX_SHIFTS]; + u16 shift_count; /* Number of entries to shift */ +}; + +struct npc_mcam_shift_entry_rsp { + struct mbox_msghdr hdr; + u16 failed_entry_idx; /* Index in 'curr_entry', not entry itself */ +}; + +struct npc_mcam_alloc_counter_req { + struct mbox_msghdr hdr; + u8 contig; /* Contiguous counters ? */ +#define NPC_MAX_NONCONTIG_COUNTERS 64 + u16 count; /* Number of counters requested */ +}; + +struct npc_mcam_alloc_counter_rsp { + struct mbox_msghdr hdr; + u16 cntr; /* Counter allocated or start index if contiguous. + * Invalid incase of non-contiguous. 
+ */ + u16 count; /* Number of counters allocated */ + u16 cntr_list[NPC_MAX_NONCONTIG_COUNTERS]; +}; + +struct npc_mcam_oper_counter_req { + struct mbox_msghdr hdr; + u16 cntr; /* Free a counter or clear/fetch it's stats */ +}; + +struct npc_mcam_oper_counter_rsp { + struct mbox_msghdr hdr; + u64 stat; /* valid only while fetching counter's stats */ +}; + +struct npc_mcam_unmap_counter_req { + struct mbox_msghdr hdr; + u16 cntr; + u16 entry; /* Entry and counter to be unmapped */ + u8 all; /* Unmap all entries using this counter ? */ +}; + +struct npc_mcam_alloc_and_write_entry_req { + struct mbox_msghdr hdr; + struct mcam_entry entry_data; + u16 ref_entry; + u8 priority; /* Lower or higher w.r.t ref_entry */ + u8 intf; /* Rx or Tx interface */ + u8 enable_entry;/* Enable this MCAM entry ? */ + u8 alloc_cntr; /* Allocate counter and map ? */ +}; + +struct npc_mcam_alloc_and_write_entry_rsp { + struct mbox_msghdr hdr; + u16 entry; + u16 cntr; +}; + +struct npc_get_kex_cfg_rsp { + struct mbox_msghdr hdr; + u64 rx_keyx_cfg; /* NPC_AF_INTF(0)_KEX_CFG */ + u64 tx_keyx_cfg; /* NPC_AF_INTF(1)_KEX_CFG */ +#define NPC_MAX_INTF 2 +#define NPC_MAX_LID 8 +#define NPC_MAX_LT 16 +#define NPC_MAX_LD 2 +#define NPC_MAX_LFL 16 + /* NPC_AF_KEX_LDATA(0..1)_FLAGS_CFG */ + u64 kex_ld_flags[NPC_MAX_LD]; + /* NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG */ + u64 intf_lid_lt_ld[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD]; + /* NPC_AF_INTF(0..1)_LDATA(0..1)_FLAGS(0..15)_CFG */ + u64 intf_ld_flags[NPC_MAX_INTF][NPC_MAX_LD][NPC_MAX_LFL]; +#define MKEX_NAME_LEN 128 + u8 mkex_pfl_name[MKEX_NAME_LEN]; +}; + +enum ptp_op { + PTP_OP_ADJFINE = 0, + PTP_OP_GET_CLOCK = 1, +}; + +struct ptp_req { + struct mbox_msghdr hdr; + u8 op; + s64 scaled_ppm; +}; + +struct ptp_rsp { + struct mbox_msghdr hdr; + u64 clk; +}; + +#endif /* MBOX_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h new file mode 100644 index 000000000..dc34e564c --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h @@ -0,0 +1,391 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef NPC_H +#define NPC_H + +enum NPC_LID_E { + NPC_LID_LA = 0, + NPC_LID_LB, + NPC_LID_LC, + NPC_LID_LD, + NPC_LID_LE, + NPC_LID_LF, + NPC_LID_LG, + NPC_LID_LH, +}; + +#define NPC_LT_NA 0 + +enum npc_kpu_la_ltype { + NPC_LT_LA_8023 = 1, + NPC_LT_LA_ETHER, + NPC_LT_LA_IH_NIX_ETHER, + NPC_LT_LA_IH_8_ETHER, + NPC_LT_LA_IH_4_ETHER, + NPC_LT_LA_IH_2_ETHER, + NPC_LT_LA_HIGIG2_ETHER, + NPC_LT_LA_IH_NIX_HIGIG2_ETHER, + NPC_LT_LA_CUSTOM0 = 0xE, + NPC_LT_LA_CUSTOM1 = 0xF, +}; + +enum npc_kpu_lb_ltype { + NPC_LT_LB_ETAG = 1, + NPC_LT_LB_CTAG, + NPC_LT_LB_STAG_QINQ, + NPC_LT_LB_BTAG, + NPC_LT_LB_ITAG, + NPC_LT_LB_DSA, + NPC_LT_LB_DSA_VLAN, + NPC_LT_LB_EDSA, + NPC_LT_LB_EDSA_VLAN, + NPC_LT_LB_EXDSA, + NPC_LT_LB_EXDSA_VLAN, + NPC_LT_LB_FDSA, + NPC_LT_LB_CUSTOM0 = 0xE, + NPC_LT_LB_CUSTOM1 = 0xF, +}; + +enum npc_kpu_lc_ltype { + NPC_LT_LC_IP = 1, + NPC_LT_LC_IP_OPT, + NPC_LT_LC_IP6, + NPC_LT_LC_IP6_EXT, + NPC_LT_LC_ARP, + NPC_LT_LC_RARP, + NPC_LT_LC_MPLS, + NPC_LT_LC_NSH, + NPC_LT_LC_PTP, + NPC_LT_LC_FCOE, + NPC_LT_LC_CUSTOM0 = 0xE, + NPC_LT_LC_CUSTOM1 = 0xF, +}; + +/* Don't modify Ltypes up to SCTP, otherwise it will + * affect flow tag calculation and thus RSS. + */ +enum npc_kpu_ld_ltype { + NPC_LT_LD_TCP = 1, + NPC_LT_LD_UDP, + NPC_LT_LD_ICMP, + NPC_LT_LD_SCTP, + NPC_LT_LD_ICMP6, + NPC_LT_LD_CUSTOM0, + NPC_LT_LD_CUSTOM1, + NPC_LT_LD_IGMP = 8, + NPC_LT_LD_AH, + NPC_LT_LD_GRE, + NPC_LT_LD_NVGRE, + NPC_LT_LD_NSH, + NPC_LT_LD_TU_MPLS_IN_NSH, + NPC_LT_LD_TU_MPLS_IN_IP, +}; + +enum npc_kpu_le_ltype { + NPC_LT_LE_VXLAN = 1, + NPC_LT_LE_GENEVE, + NPC_LT_LE_ESP, + NPC_LT_LE_GTPU = 4, + NPC_LT_LE_VXLANGPE, + NPC_LT_LE_GTPC, + NPC_LT_LE_NSH, + NPC_LT_LE_TU_MPLS_IN_GRE, + NPC_LT_LE_TU_NSH_IN_GRE, + NPC_LT_LE_TU_MPLS_IN_UDP, + NPC_LT_LE_CUSTOM0 = 0xE, + NPC_LT_LE_CUSTOM1 = 0xF, +}; + +enum npc_kpu_lf_ltype { + NPC_LT_LF_TU_ETHER = 1, + NPC_LT_LF_TU_PPP, + NPC_LT_LF_TU_MPLS_IN_VXLANGPE, + NPC_LT_LF_TU_NSH_IN_VXLANGPE, + NPC_LT_LF_TU_MPLS_IN_NSH, + NPC_LT_LF_TU_3RD_NSH, + NPC_LT_LF_CUSTOM0 = 0xE, + NPC_LT_LF_CUSTOM1 = 0xF, +}; + +enum npc_kpu_lg_ltype { + NPC_LT_LG_TU_IP = 1, + NPC_LT_LG_TU_IP6, + NPC_LT_LG_TU_ARP, + NPC_LT_LG_TU_ETHER_IN_NSH, + NPC_LT_LG_CUSTOM0 = 0xE, + NPC_LT_LG_CUSTOM1 = 0xF, +}; + +/* Don't modify Ltypes up to SCTP, otherwise it will + * affect flow tag calculation and thus RSS. + */ +enum npc_kpu_lh_ltype { + NPC_LT_LH_TU_TCP = 1, + NPC_LT_LH_TU_UDP, + NPC_LT_LH_TU_ICMP, + NPC_LT_LH_TU_SCTP, + NPC_LT_LH_TU_ICMP6, + NPC_LT_LH_TU_IGMP = 8, + NPC_LT_LH_TU_ESP, + NPC_LT_LH_TU_AH, + NPC_LT_LH_CUSTOM0 = 0xE, + NPC_LT_LH_CUSTOM1 = 0xF, +}; + +/* NPC port kind defines how the incoming or outgoing packets + * are processed. NPC accepts packets from up to 64 pkinds. + * Software assigns pkind for each incoming port such as CGX + * Ethernet interfaces, LBK interfaces, etc. 
+ */ +enum npc_pkind_type { + NPC_TX_DEF_PKIND = 63ULL, /* NIX-TX PKIND */ +}; + +struct npc_kpu_profile_cam { + u8 state; + u8 state_mask; + u16 dp0; + u16 dp0_mask; + u16 dp1; + u16 dp1_mask; + u16 dp2; + u16 dp2_mask; +}; + +struct npc_kpu_profile_action { + u8 errlev; + u8 errcode; + u8 dp0_offset; + u8 dp1_offset; + u8 dp2_offset; + u8 bypass_count; + u8 parse_done; + u8 next_state; + u8 ptr_advance; + u8 cap_ena; + u8 lid; + u8 ltype; + u8 flags; + u8 offset; + u8 mask; + u8 right; + u8 shift; +}; + +struct npc_kpu_profile { + int cam_entries; + int action_entries; + const struct npc_kpu_profile_cam *cam; + const struct npc_kpu_profile_action *action; +}; + +/* NPC KPU register formats */ +struct npc_kpu_cam { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 rsvd_63_56 : 8; + u64 state : 8; + u64 dp2_data : 16; + u64 dp1_data : 16; + u64 dp0_data : 16; +#else + u64 dp0_data : 16; + u64 dp1_data : 16; + u64 dp2_data : 16; + u64 state : 8; + u64 rsvd_63_56 : 8; +#endif +}; + +struct npc_kpu_action0 { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 rsvd_63_57 : 7; + u64 byp_count : 3; + u64 capture_ena : 1; + u64 parse_done : 1; + u64 next_state : 8; + u64 rsvd_43 : 1; + u64 capture_lid : 3; + u64 capture_ltype : 4; + u64 capture_flags : 8; + u64 ptr_advance : 8; + u64 var_len_offset : 8; + u64 var_len_mask : 8; + u64 var_len_right : 1; + u64 var_len_shift : 3; +#else + u64 var_len_shift : 3; + u64 var_len_right : 1; + u64 var_len_mask : 8; + u64 var_len_offset : 8; + u64 ptr_advance : 8; + u64 capture_flags : 8; + u64 capture_ltype : 4; + u64 capture_lid : 3; + u64 rsvd_43 : 1; + u64 next_state : 8; + u64 parse_done : 1; + u64 capture_ena : 1; + u64 byp_count : 3; + u64 rsvd_63_57 : 7; +#endif +}; + +struct npc_kpu_action1 { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 rsvd_63_36 : 28; + u64 errlev : 4; + u64 errcode : 8; + u64 dp2_offset : 8; + u64 dp1_offset : 8; + u64 dp0_offset : 8; +#else + u64 dp0_offset : 8; + u64 dp1_offset : 8; + u64 dp2_offset : 8; + u64 errcode : 8; + u64 errlev : 4; + u64 rsvd_63_36 : 28; +#endif +}; + +struct npc_kpu_pkind_cpi_def { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 ena : 1; + u64 rsvd_62_59 : 4; + u64 lid : 3; + u64 ltype_match : 4; + u64 ltype_mask : 4; + u64 flags_match : 8; + u64 flags_mask : 8; + u64 add_offset : 8; + u64 add_mask : 8; + u64 rsvd_15 : 1; + u64 add_shift : 3; + u64 rsvd_11_10 : 2; + u64 cpi_base : 10; +#else + u64 cpi_base : 10; + u64 rsvd_11_10 : 2; + u64 add_shift : 3; + u64 rsvd_15 : 1; + u64 add_mask : 8; + u64 add_offset : 8; + u64 flags_mask : 8; + u64 flags_match : 8; + u64 ltype_mask : 4; + u64 ltype_match : 4; + u64 lid : 3; + u64 rsvd_62_59 : 4; + u64 ena : 1; +#endif +}; + +struct nix_rx_action { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 rsvd_63_61 :3; + u64 flow_key_alg :5; + u64 match_id :16; + u64 index :20; + u64 pf_func :16; + u64 op :4; +#else + u64 op :4; + u64 pf_func :16; + u64 index :20; + u64 match_id :16; + u64 flow_key_alg :5; + u64 rsvd_63_61 :3; +#endif +}; + +/* NPC_AF_INTFX_KEX_CFG field masks */ +#define NPC_PARSE_NIBBLE GENMASK_ULL(30, 0) + +/* NPC_PARSE_KEX_S nibble definitions for each field */ +#define NPC_PARSE_NIBBLE_CHAN GENMASK_ULL(2, 0) +#define NPC_PARSE_NIBBLE_ERRLEV BIT_ULL(3) +#define NPC_PARSE_NIBBLE_ERRCODE GENMASK_ULL(5, 4) +#define NPC_PARSE_NIBBLE_L2L3_BCAST BIT_ULL(6) +#define NPC_PARSE_NIBBLE_LA_FLAGS GENMASK_ULL(8, 7) +#define NPC_PARSE_NIBBLE_LA_LTYPE BIT_ULL(9) +#define NPC_PARSE_NIBBLE_LB_FLAGS GENMASK_ULL(11, 10) +#define NPC_PARSE_NIBBLE_LB_LTYPE BIT_ULL(12) +#define 
NPC_PARSE_NIBBLE_LC_FLAGS GENMASK_ULL(14, 13) +#define NPC_PARSE_NIBBLE_LC_LTYPE BIT_ULL(15) +#define NPC_PARSE_NIBBLE_LD_FLAGS GENMASK_ULL(17, 16) +#define NPC_PARSE_NIBBLE_LD_LTYPE BIT_ULL(18) +#define NPC_PARSE_NIBBLE_LE_FLAGS GENMASK_ULL(20, 19) +#define NPC_PARSE_NIBBLE_LE_LTYPE BIT_ULL(21) +#define NPC_PARSE_NIBBLE_LF_FLAGS GENMASK_ULL(23, 22) +#define NPC_PARSE_NIBBLE_LF_LTYPE BIT_ULL(24) +#define NPC_PARSE_NIBBLE_LG_FLAGS GENMASK_ULL(26, 25) +#define NPC_PARSE_NIBBLE_LG_LTYPE BIT_ULL(27) +#define NPC_PARSE_NIBBLE_LH_FLAGS GENMASK_ULL(29, 28) +#define NPC_PARSE_NIBBLE_LH_LTYPE BIT_ULL(30) + +/* NIX Receive Vtag Action Structure */ +#define VTAG0_VALID_BIT BIT_ULL(15) +#define VTAG0_TYPE_MASK GENMASK_ULL(14, 12) +#define VTAG0_LID_MASK GENMASK_ULL(10, 8) +#define VTAG0_RELPTR_MASK GENMASK_ULL(7, 0) + +struct npc_mcam_kex { + /* MKEX Profile Header */ + u64 mkex_sign; /* "mcam-kex-profile" (8 bytes/ASCII characters) */ + u8 name[MKEX_NAME_LEN]; /* MKEX Profile name */ + u64 cpu_model; /* Format as profiled by CPU hardware */ + u64 kpu_version; /* KPU firmware/profile version */ + u64 reserved; /* Reserved for extension */ + + /* MKEX Profile Data */ + u64 keyx_cfg[NPC_MAX_INTF]; /* NPC_AF_INTF(0..1)_KEX_CFG */ + /* NPC_AF_KEX_LDATA(0..1)_FLAGS_CFG */ + u64 kex_ld_flags[NPC_MAX_LD]; + /* NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG */ + u64 intf_lid_lt_ld[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD]; + /* NPC_AF_INTF(0..1)_LDATA(0..1)_FLAGS(0..15)_CFG */ + u64 intf_ld_flags[NPC_MAX_INTF][NPC_MAX_LD][NPC_MAX_LFL]; +} __packed; + +struct npc_lt_def { + u8 ltype_mask; + u8 ltype_match; + u8 lid; +} __packed; + +struct npc_lt_def_ipsec { + u8 ltype_mask; + u8 ltype_match; + u8 lid; + u8 spi_offset; + u8 spi_nz; +} __packed; + +struct npc_lt_def_cfg { + struct npc_lt_def rx_ol2; + struct npc_lt_def rx_oip4; + struct npc_lt_def rx_iip4; + struct npc_lt_def rx_oip6; + struct npc_lt_def rx_iip6; + struct npc_lt_def rx_otcp; + struct npc_lt_def rx_itcp; + struct npc_lt_def rx_oudp; + struct npc_lt_def rx_iudp; + struct npc_lt_def rx_osctp; + struct npc_lt_def rx_isctp; + struct npc_lt_def_ipsec rx_ipsec[2]; + struct npc_lt_def pck_ol2; + struct npc_lt_def pck_oip4; + struct npc_lt_def pck_oip6; + struct npc_lt_def pck_iip4; +}; + +#endif /* NPC_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h new file mode 100644 index 000000000..0e4af93be --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h @@ -0,0 +1,13525 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef NPC_PROFILE_H +#define NPC_PROFILE_H + +#define NPC_KPU_PROFILE_VER 0x0000000100050000 + +#define NPC_IH_W 0x8000 +#define NPC_IH_UTAG 0x2000 + +#define NPC_ETYPE_IP 0x0800 +#define NPC_ETYPE_IP6 0x86dd +#define NPC_ETYPE_ARP 0x0806 +#define NPC_ETYPE_RARP 0x8035 +#define NPC_ETYPE_MPLSU 0x8847 +#define NPC_ETYPE_MPLSM 0x8848 +#define NPC_ETYPE_ETAG 0x893f +#define NPC_ETYPE_CTAG 0x8100 +#define NPC_ETYPE_SBTAG 0x88a8 +#define NPC_ETYPE_ITAG 0x88e7 +#define NPC_ETYPE_PTP 0x88f7 +#define NPC_ETYPE_FCOE 0x8906 +#define NPC_ETYPE_QINQ 0x9100 +#define NPC_ETYPE_TRANS_ETH_BR 0x6558 +#define NPC_ETYPE_PPP 0x880b +#define NPC_ETYPE_NSH 0x894f +#define NPC_ETYPE_DSA 0xdada + +#define NPC_IPNH_HOP 0 +#define NPC_IPNH_ICMP 1 +#define NPC_IPNH_IGMP 2 +#define NPC_IPNH_IP 4 +#define NPC_IPNH_TCP 6 +#define NPC_IPNH_UDP 17 +#define NPC_IPNH_IP6 41 +#define NPC_IPNH_ROUT 43 +#define NPC_IPNH_FRAG 44 +#define NPC_IPNH_GRE 47 +#define NPC_IPNH_ESP 50 +#define NPC_IPNH_AH 51 +#define NPC_IPNH_ICMP6 58 +#define NPC_IPNH_NONH 59 +#define NPC_IPNH_DEST 60 +#define NPC_IPNH_SCTP 132 +#define NPC_IPNH_MOBILITY 135 +#define NPC_IPNH_MPLS 137 +#define NPC_IPNH_HOSTID 139 +#define NPC_IPNH_SHIM6 140 + +#define NPC_UDP_PORT_PTP_E 319 +#define NPC_UDP_PORT_PTP_G 320 +#define NPC_UDP_PORT_GTPC 2123 +#define NPC_UDP_PORT_GTPU 2152 +#define NPC_UDP_PORT_VXLAN 4789 +#define NPC_UDP_PORT_VXLANGPE 4790 +#define NPC_UDP_PORT_GENEVE 6081 +#define NPC_UDP_PORT_MPLS 6635 +#define NPC_UDP_PORT_ESP 4500 + +#define NPC_VXLANGPE_NP_IP 0x1 +#define NPC_VXLANGPE_NP_IP6 0x2 +#define NPC_VXLANGPE_NP_ETH 0x3 +#define NPC_VXLANGPE_NP_NSH 0x4 +#define NPC_VXLANGPE_NP_MPLS 0x5 +#define NPC_VXLANGPE_NP_GBP 0x6 +#define NPC_VXLANGPE_NP_VBNG 0x7 + +#define NPC_NSH_NP_IP 0x1 +#define NPC_NSH_NP_IP6 0x2 +#define NPC_NSH_NP_ETH 0x3 +#define NPC_NSH_NP_NSH 0x4 +#define NPC_NSH_NP_MPLS 0x5 + +#define NPC_TCP_PORT_HTTP 80 +#define NPC_TCP_PORT_HTTPS 443 +#define NPC_TCP_PORT_PPTP 1723 + +#define NPC_MPLS_S 0x0100 + +#define NPC_IP_TTL_MASK 0xff00 +#define NPC_IP_VER_4 0x4000 +#define NPC_IP_VER_6 0x6000 +#define NPC_IP_VER_MASK 0xf000 +#define NPC_IP_HDR_LEN_5 0x0500 +#define NPC_IP_HDR_LEN_MASK 0x0f00 +#define NPC_IP_HDR_MF 0x2000 +#define NPC_IP_HDR_FRAGOFF 0x1fff + +#define NPC_IP6_HOP_MASK 0x00ff +#define NPC_IP6_FRAG_FRAGOFF 0xfff8 + +#define NPC_GRE_F_CSUM (0x1 << 15) +#define NPC_GRE_F_ROUTE (0x1 << 14) +#define NPC_GRE_F_KEY (0x1 << 13) +#define NPC_GRE_F_SEQ (0x1 << 12) +#define NPC_GRE_F_ACK (0x1 << 7) +#define NPC_GRE_FLAG_MASK (NPC_GRE_F_CSUM | NPC_GRE_F_ROUTE | \ + NPC_GRE_F_KEY | NPC_GRE_F_SEQ | NPC_GRE_F_ACK) +#define NPC_GRE_VER_MASK 0x0003 +#define NPC_GRE_VER_1 0x0001 + +#define NPC_VXLAN_I 0x0800 + +#define NPC_VXLANGPE_VER (0x3 << 12) +#define NPC_VXLANGPE_I (0x1 << 11) +#define NPC_VXLANGPE_P (0x1 << 10) +#define NPC_VXLANGPE_B (0x1 << 9) +#define NPC_VXLANGPE_NP_MASK 0x00ff + +#define NPC_NSH_NP_MASK 0x00ff + +#define NPC_GENEVE_F_OAM (0x1 << 7) +#define NPC_GENEVE_F_CRI_OPT (0x1 << 6) + +#define NPC_GTP_PT_GTP (0x1 << 12) +#define NPC_GTP_PT_MASK (0x1 << 12) +#define NPC_GTP_VER1 (0x1 << 13) +#define NPC_GTP_VER_MASK (0x7 << 13) +#define NPC_GTP_MT_G_PDU 0xff +#define NPC_GTP_MT_MASK 0xff + +#define NPC_TCP_FLAGS_FIN 0x0001 +#define NPC_TCP_FLAGS_SYN 0x0002 +#define NPC_TCP_FLAGS_RST 0x0004 +#define NPC_TCP_FLAGS_PSH 0x0008 +#define NPC_TCP_FLAGS_ACK 0x0010 +#define NPC_TCP_FLAGS_URG 0x0020 +#define NPC_TCP_FLAGS_MASK 0x003f + +#define NPC_TCP_DATA_OFFSET_5 0x5000 +#define NPC_TCP_DATA_OFFSET_MASK 0xf000 + 
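[Editor's note] The protocol constants above are the match patterns from which the KPU CAM tables later in this file are built. As a rough illustration of how such a table entry reads: using the value/mask layout of struct npc_kpu_profile_cam from npc.h and the usual ternary-match convention (this is a sketch, not the exact hardware encoding; kpu_cam_hit and kpu_cam_sketch are hypothetical names introduced only for this example), the first kpu1_cam_entries entry { NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000, 0x0000, 0x0000 } can be read as "in parser state KPU1_ETHER, match an Ethertype word of 0x0800 (IPv4) and ignore the other two extracted words".

#include <stdbool.h>
#include <stdint.h>

/* Mirrors the value/mask pairs of struct npc_kpu_profile_cam (npc.h). */
struct kpu_cam_sketch {
	uint8_t  state, state_mask;
	uint16_t dp0, dp0_mask;
	uint16_t dp1, dp1_mask;
	uint16_t dp2, dp2_mask;
};

/* Hypothetical helper: ternary match of the current parser state plus the
 * three 16-bit data words a KPU stage extracted from the packet. A zero
 * mask means "don't care", as in the CAM tables below.
 */
static bool kpu_cam_hit(const struct kpu_cam_sketch *e, uint8_t state,
			uint16_t dp0, uint16_t dp1, uint16_t dp2)
{
	return (state & e->state_mask) == (e->state & e->state_mask) &&
	       (dp0 & e->dp0_mask) == (e->dp0 & e->dp0_mask) &&
	       (dp1 & e->dp1_mask) == (e->dp1 & e->dp1_mask) &&
	       (dp2 & e->dp2_mask) == (e->dp2 & e->dp2_mask);
}

On a hit, the paired entry in the corresponding action table (struct npc_kpu_profile_action) supplies the next parser state, the pointer advance and the layer id/type to record, which is how the kpu*_cam_entries and kpu*_action_entries arrays below drive the parse from one KPU stage to the next.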
+#define NPC_DSA_EXTEND 0x1000 +#define NPC_DSA_EDSA 0x8000 +#define NPC_DSA_FDSA 0xc000 + +#define NPC_KEXOF_DMAC 8 +#define MKEX_SIGN 0x19bbfdbd15f /* strtoull of "mkexprof" with base:36 */ +#define KEX_LD_CFG(bytesm1, hdr_ofs, ena, flags_ena, key_ofs) \ + (((bytesm1) << 16) | ((hdr_ofs) << 8) | ((ena) << 7) | \ + ((flags_ena) << 6) | ((key_ofs) & 0x3F)) + +/* Rx parse key extract nibble enable */ +#define NPC_PARSE_NIBBLE_INTF_RX (NPC_PARSE_NIBBLE_CHAN | \ + NPC_PARSE_NIBBLE_LA_LTYPE | \ + NPC_PARSE_NIBBLE_LB_LTYPE | \ + NPC_PARSE_NIBBLE_LC_LTYPE | \ + NPC_PARSE_NIBBLE_LD_LTYPE | \ + NPC_PARSE_NIBBLE_LE_LTYPE) +/* Tx parse key extract nibble enable */ +#define NPC_PARSE_NIBBLE_INTF_TX (NPC_PARSE_NIBBLE_LA_LTYPE | \ + NPC_PARSE_NIBBLE_LB_LTYPE | \ + NPC_PARSE_NIBBLE_LC_LTYPE | \ + NPC_PARSE_NIBBLE_LD_LTYPE | \ + NPC_PARSE_NIBBLE_LE_LTYPE) + +enum npc_kpu_parser_state { + NPC_S_NA = 0, + NPC_S_KPU1_ETHER, + NPC_S_KPU1_IH_NIX, + NPC_S_KPU1_IH, + NPC_S_KPU1_EXDSA, + NPC_S_KPU1_HIGIG2, + NPC_S_KPU1_IH_NIX_HIGIG2, + NPC_S_KPU2_CTAG, + NPC_S_KPU2_CTAG2, + NPC_S_KPU2_SBTAG, + NPC_S_KPU2_QINQ, + NPC_S_KPU2_ETAG, + NPC_S_KPU2_ITAG, + NPC_S_KPU2_PREHEADER, + NPC_S_KPU2_EXDSA, + NPC_S_KPU3_CTAG, + NPC_S_KPU3_STAG, + NPC_S_KPU3_QINQ, + NPC_S_KPU3_ITAG, + NPC_S_KPU3_CTAG_C, + NPC_S_KPU3_STAG_C, + NPC_S_KPU3_QINQ_C, + NPC_S_KPU3_DSA, + NPC_S_KPU4_MPLS, + NPC_S_KPU4_NSH, + NPC_S_KPU4_FDSA, + NPC_S_KPU5_IP, + NPC_S_KPU5_IP6, + NPC_S_KPU5_ARP, + NPC_S_KPU5_RARP, + NPC_S_KPU5_PTP, + NPC_S_KPU5_FCOE, + NPC_S_KPU5_MPLS, + NPC_S_KPU5_MPLS_PL, + NPC_S_KPU5_NSH, + NPC_S_KPU6_IP6_EXT, + NPC_S_KPU6_IP6_HOP_DEST, + NPC_S_KPU6_IP6_ROUT, + NPC_S_KPU6_IP6_FRAG, + NPC_S_KPU7_IP6_EXT, + NPC_S_KPU7_IP6_ROUT, + NPC_S_KPU7_IP6_FRAG, + NPC_S_KPU8_TCP, + NPC_S_KPU8_UDP, + NPC_S_KPU8_SCTP, + NPC_S_KPU8_ICMP, + NPC_S_KPU8_IGMP, + NPC_S_KPU8_ICMP6, + NPC_S_KPU8_GRE, + NPC_S_KPU8_AH, + NPC_S_KPU9_TU_MPLS_IN_GRE, + NPC_S_KPU9_TU_MPLS_IN_NSH, + NPC_S_KPU9_TU_MPLS_IN_IP, + NPC_S_KPU9_TU_MPLS_IN_UDP, + NPC_S_KPU9_TU_NSH_IN_GRE, + NPC_S_KPU9_VXLAN, + NPC_S_KPU9_VXLANGPE, + NPC_S_KPU9_GENEVE, + NPC_S_KPU9_GTPC, + NPC_S_KPU9_GTPU, + NPC_S_KPU9_ESP, + NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, + NPC_S_KPU10_TU_MPLS_PL, + NPC_S_KPU10_TU_MPLS, + NPC_S_KPU10_TU_NSH_IN_VXLANGPE, + NPC_S_KPU11_TU_ETHER, + NPC_S_KPU11_TU_PPP, + NPC_S_KPU11_TU_MPLS_IN_NSH, + NPC_S_KPU11_TU_MPLS_PL, + NPC_S_KPU11_TU_MPLS, + NPC_S_KPU11_TU_ETHER_IN_NSH, + NPC_S_KPU12_TU_IP, + NPC_S_KPU12_TU_IP6, + NPC_S_KPU12_TU_ARP, + NPC_S_KPU13_TU_IP6_EXT, + NPC_S_KPU14_TU_IP6_EXT, + NPC_S_KPU15_TU_TCP, + NPC_S_KPU15_TU_UDP, + NPC_S_KPU15_TU_SCTP, + NPC_S_KPU15_TU_ICMP, + NPC_S_KPU15_TU_IGMP, + NPC_S_KPU15_TU_ICMP6, + NPC_S_KPU15_TU_ESP, + NPC_S_KPU15_TU_AH, + NPC_S_KPU16_HTTP_DATA, + NPC_S_KPU16_HTTPS_DATA, + NPC_S_KPU16_PPTP_DATA, + NPC_S_KPU16_TCP_DATA, + NPC_S_KPU16_UDP_DATA, + NPC_S_KPU16_UDP_PTP, + NPC_S_LAST /* has to be the last item */ +}; + +enum npc_kpu_la_uflag { + NPC_F_LA_U_HAS_TAG = 0x10, + NPC_F_LA_U_HAS_IH_NIX = 0x20, + NPC_F_LA_U_HAS_HIGIG2 = 0x40, +}; +enum npc_kpu_la_lflag { + NPC_F_LA_L_UNK_ETYPE = 1, + NPC_F_LA_L_WITH_VLAN, + NPC_F_LA_L_WITH_ETAG, + NPC_F_LA_L_WITH_ITAG, + NPC_F_LA_L_WITH_MPLS, + NPC_F_LA_L_WITH_NSH, +}; + +enum npc_kpu_lb_uflag { + NPC_F_LB_U_UNK_ETYPE = 0x80, + NPC_F_LB_U_MORE_TAG = 0x40, +}; +enum npc_kpu_lb_lflag { + NPC_F_LB_L_WITH_CTAG = 1, + NPC_F_LB_L_WITH_CTAG_UNK, + NPC_F_LB_L_WITH_STAG_CTAG, + NPC_F_LB_L_WITH_STAG_STAG, + NPC_F_LB_L_WITH_QINQ_CTAG, + NPC_F_LB_L_WITH_QINQ_QINQ, + NPC_F_LB_L_WITH_ITAG, + NPC_F_LB_L_WITH_ITAG_STAG, + 
NPC_F_LB_L_WITH_ITAG_CTAG, + NPC_F_LB_L_WITH_ITAG_UNK, + NPC_F_LB_L_WITH_BTAG_ITAG, + NPC_F_LB_L_WITH_STAG, + NPC_F_LB_L_WITH_QINQ, + NPC_F_LB_L_DSA, + NPC_F_LB_L_DSA_VLAN, + NPC_F_LB_L_EDSA, + NPC_F_LB_L_EDSA_VLAN, + NPC_F_LB_L_EXDSA, + NPC_F_LB_L_EXDSA_VLAN, + NPC_F_LB_L_FDSA, +}; + +enum npc_kpu_lc_uflag { + NPC_F_LC_U_UNK_PROTO = 0x10, + NPC_F_LC_U_IP_FRAG = 0x20, + NPC_F_LC_U_IP6_FRAG = 0x40, +}; +enum npc_kpu_lc_lflag { + NPC_F_LC_L_IP_IN_IP = 1, + NPC_F_LC_L_6TO4, + NPC_F_LC_L_MPLS_IN_IP, + NPC_F_LC_L_IP6_TUN_IP6, + NPC_F_LC_L_IP6_MPLS_IN_IP, + NPC_F_LC_L_MPLS_4_LABELS, + NPC_F_LC_L_MPLS_3_LABELS, + NPC_F_LC_L_MPLS_2_LABELS, + NPC_F_LC_L_EXT_HOP, + NPC_F_LC_L_EXT_DEST, + NPC_F_LC_L_EXT_ROUT, + NPC_F_LC_L_EXT_MOBILITY, + NPC_F_LC_L_EXT_HOSTID, + NPC_F_LC_L_EXT_SHIM6, +}; + +enum npc_kpu_ld_lflag { + NPC_F_LD_L_TCP_UNK_PORT = 1, + NPC_F_LD_L_TCP_HAS_OPTIONS, + NPC_F_LD_L_TCP_UNK_PORT_HAS_OPTIONS, + NPC_F_LD_L_UDP_UNK_PORT, + NPC_F_LD_L_GRE_NVGRE, + NPC_F_LD_L_GRE_HAS_SRE, + NPC_F_LD_L_GRE_HAS_CSUM, + NPC_F_LD_L_GRE_HAS_KEY, + NPC_F_LD_L_GRE_HAS_SEQ, + NPC_F_LD_L_GRE_HAS_CSUM_KEY, + NPC_F_LD_L_GRE_HAS_CSUM_SEQ, + NPC_F_LD_L_GRE_HAS_KEY_SEQ, + NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ, + NPC_F_LD_L_GRE_HAS_ROUTE, + NPC_F_LD_L_GRE_UNK_PROTO, + NPC_F_LD_L_GRE_VER1, + NPC_F_LD_L_GRE_VER1_HAS_SEQ, + NPC_F_LD_L_GRE_VER1_HAS_ACK, + NPC_F_LD_L_GRE_VER1_HAS_SEQ_ACK, + NPC_F_LD_L_GRE_VER1_UNK_PROTO, + NPC_F_LD_L_MPLS_4_LABELS, + NPC_F_LD_L_MPLS_3_LABELS, + NPC_F_LD_L_MPLS_2_LABELS, +}; + +enum npc_kpu_le_lflag { + NPC_F_LE_L_VXLAN_NOVNI, + NPC_F_LE_L_VXLANGPE_NOVNI, + NPC_F_LE_L_VXLANGPE_UNK, + NPC_F_LE_L_VXLANGPE_NONP, + NPC_F_LE_L_GENEVE_OAM, + NPC_F_LE_L_GENEVE_CRI_OPT, + NPC_F_LE_L_GENEVE_OAM_CRI_OPT, + NPC_F_LE_L_GTPU_G_PDU, + NPC_F_LE_L_GTPU_UNK, +}; + +enum npc_kpu_lf_uflag { + NPC_F_LF_U_UNK_ETYPE = 0x10, + NPC_F_LF_U_HAS_TAG = 0x20, +}; + +enum npc_kpu_lf_lflag { + NPC_F_LF_L_WITH_CTAG = 1, + NPC_F_LF_L_WITH_STAG_CTAG, + NPC_F_LF_L_WITH_STAG, + NPC_F_LF_L_WITH_QINQ_CTAG, + NPC_F_LF_L_WITH_QINQ, +}; + +enum npc_kpu_lg_uflag { + NPC_F_LG_U_UNK_IP_PROTO = 0x10, + NPC_F_LG_U_IP_HAS_OPTIONS = 0x20, + NPC_F_LG_U_IP6_HAS_EXT = 0x40, +}; + +enum npc_kpu_lh_uflag { + NPC_F_LH_U_TCP_HAS_OPTIONS = 0x80, +}; + +enum npc_kpu_lh_lflag { + NPC_F_LH_L_TCP_HTTP = 1, + NPC_F_LH_L_TCP_HTTPS, + NPC_F_LH_L_TCP_PPTP, + NPC_F_LH_L_TCP_UNK_PORT, + NPC_F_LH_L_UDP_UNK_PORT, +}; + +enum npc_kpu_err_code { + NPC_EC_NOERR = 0, /* has to be zero */ + NPC_EC_UNK, + NPC_EC_IH_LENGTH, + NPC_EC_EDSA_UNK, + NPC_EC_L2_K1, + NPC_EC_L2_K2, + NPC_EC_L2_K3, + NPC_EC_L2_K3_ETYPE_UNK, + NPC_EC_L2_K4, + NPC_EC_MPLS_2MANY, + NPC_EC_MPLS_UNK, + NPC_EC_NSH_UNK, + NPC_EC_IP_TTL_0, + NPC_EC_IP_FRAG_OFFSET_1, + NPC_EC_IP_VER, + NPC_EC_IP6_HOP_0, + NPC_EC_IP6_VER, + NPC_EC_TCP_FLAGS_FIN_ONLY, + NPC_EC_TCP_FLAGS_ZERO, + NPC_EC_TCP_FLAGS_RST_FIN, + NPC_EC_TCP_FLAGS_URG_SYN, + NPC_EC_TCP_FLAGS_RST_SYN, + NPC_EC_TCP_FLAGS_SYN_FIN, + NPC_EC_VXLAN, + NPC_EC_NVGRE, + NPC_EC_GRE, + NPC_EC_GRE_VER1, + NPC_EC_L4, + NPC_EC_OIP4_CSUM, + NPC_EC_IIP4_CSUM, + NPC_EC_LAST /* has to be the last item */ +}; + +enum NPC_ERRLEV_E { + NPC_ERRLEV_RE = 0, + NPC_ERRLEV_LA = 1, + NPC_ERRLEV_LB = 2, + NPC_ERRLEV_LC = 3, + NPC_ERRLEV_LD = 4, + NPC_ERRLEV_LE = 5, + NPC_ERRLEV_LF = 6, + NPC_ERRLEV_LG = 7, + NPC_ERRLEV_LH = 8, + NPC_ERRLEV_R9 = 9, + NPC_ERRLEV_R10 = 10, + NPC_ERRLEV_R11 = 11, + NPC_ERRLEV_R12 = 12, + NPC_ERRLEV_R13 = 13, + NPC_ERRLEV_R14 = 14, + NPC_ERRLEV_NIX = 15, + NPC_ERRLEV_ENUM_LAST = 16, +}; + +static const struct npc_kpu_profile_action 
ikpu_action_entries[] = { + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, 
NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + 
NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 36, 40, 44, 0, 0, + NPC_S_KPU1_IH_NIX_HIGIG2, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 28, 32, 36, 0, 0, + NPC_S_KPU1_HIGIG2, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 14, 20, 0, 0, + NPC_S_KPU1_EXDSA, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 20, 24, 28, 0, 0, + NPC_S_KPU1_IH_NIX, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + + }, +}; + +static const struct npc_kpu_profile_cam kpu1_cam_entries[] = { + { + NPC_S_KPU1_ETHER, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, + NPC_ETYPE_QINQ, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, + NPC_ETYPE_ETAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, + NPC_ETYPE_DSA, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, + 0x0000, + 0xfc00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, + 0x0400, + 0xfe00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 
0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX, 0xff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX, 0xff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX, 0xff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX, 0xff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX, 0xff, + NPC_ETYPE_QINQ, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX, 0xff, + NPC_ETYPE_ETAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX, 0xff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH, 0xff, + NPC_IH_W|NPC_IH_UTAG, + NPC_IH_W|NPC_IH_UTAG, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH, 0xff, + NPC_IH_W, + NPC_IH_W|NPC_IH_UTAG, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH, 0xff, + 0x0000, + NPC_IH_W|NPC_IH_UTAG, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_EXDSA, 0xff, + 0x0000, + 0x0000, + NPC_DSA_EXTEND, + NPC_DSA_EXTEND, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_EXDSA, 0xff, + NPC_DSA_FDSA, + NPC_DSA_FDSA, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_EXDSA, 0xff, + 0x0000, + NPC_DSA_EXTEND, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_HIGIG2, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_HIGIG2, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_HIGIG2, 0xff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_HIGIG2, 0xff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_HIGIG2, 0xff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_HIGIG2, 0xff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_HIGIG2, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_HIGIG2, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_HIGIG2, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_HIGIG2, 0xff, + NPC_ETYPE_QINQ, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_HIGIG2, 0xff, + NPC_ETYPE_ETAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_HIGIG2, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_HIGIG2, 0xff, + NPC_ETYPE_MPLSU, + 
0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_HIGIG2, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_HIGIG2, 0xff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_HIGIG2, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX_HIGIG2, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX_HIGIG2, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX_HIGIG2, 0xff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX_HIGIG2, 0xff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX_HIGIG2, 0xff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX_HIGIG2, 0xff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX_HIGIG2, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX_HIGIG2, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX_HIGIG2, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX_HIGIG2, 0xff, + NPC_ETYPE_QINQ, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX_HIGIG2, 0xff, + NPC_ETYPE_ETAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX_HIGIG2, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX_HIGIG2, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX_HIGIG2, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX_HIGIG2, 0xff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX_HIGIG2, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_NA, 0X00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, +}; + +static const struct npc_kpu_profile_cam kpu2_cam_entries[] = { + { + NPC_S_KPU2_CTAG, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CTAG, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CTAG, 0xff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CTAG, 0xff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CTAG, 0xff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CTAG, 0xff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CTAG, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CTAG, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CTAG, 0xff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CTAG, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + 
NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_SBTAG, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + NPC_ETYPE_IP, + 0xffff, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + NPC_ETYPE_IP6, + 0xffff, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + NPC_ETYPE_ARP, + 0xffff, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + NPC_ETYPE_RARP, + 0xffff, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + NPC_ETYPE_PTP, + 0xffff, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + NPC_ETYPE_FCOE, + 0xffff, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + NPC_ETYPE_MPLSU, + 0xffff, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + NPC_ETYPE_MPLSM, + 0xffff, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + NPC_ETYPE_NSH, + 0xffff, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + NPC_ETYPE_SBTAG, + 0xffff, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + NPC_ETYPE_CTAG, + 0xffff, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + 
0x0000, + 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, + NPC_ETYPE_QINQ, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, + NPC_ETYPE_QINQ, + 0xffff, + NPC_ETYPE_QINQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, + NPC_ETYPE_QINQ, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + NPC_ETYPE_IP, + 0xffff, + }, + { + NPC_S_KPU2_ETAG, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + NPC_ETYPE_IP6, + 0xffff, + }, + { + NPC_S_KPU2_ETAG, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + NPC_ETYPE_ARP, + 0xffff, + }, + { + NPC_S_KPU2_ETAG, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + NPC_ETYPE_SBTAG, + 0xffff, + }, + { + NPC_S_KPU2_ETAG, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + NPC_ETYPE_CTAG, + 0xffff, + }, + { + NPC_S_KPU2_ETAG, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ITAG, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ITAG, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ITAG, 0xff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ITAG, 0xff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ITAG, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + }, + { + NPC_S_KPU2_ITAG, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, + }, + { + NPC_S_KPU2_ITAG, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_ARP, + 0xffff, + }, + { + NPC_S_KPU2_ITAG, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ITAG, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ITAG, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ITAG, 0xff, + NPC_ETYPE_SBTAG, + 
0xffff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ITAG, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ITAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ITAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ITAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ITAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ITAG, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CTAG2, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CTAG2, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CTAG2, 0xff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CTAG2, 0xff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CTAG2, 0xff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CTAG2, 0xff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CTAG2, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CTAG2, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CTAG2, 0xff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CTAG2, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CTAG2, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_PREHEADER, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_PREHEADER, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_PREHEADER, 0xff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_PREHEADER, 0xff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_PREHEADER, 0xff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_PREHEADER, 0xff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_PREHEADER, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_PREHEADER, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_PREHEADER, 0xff, + NPC_ETYPE_QINQ, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_PREHEADER, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_PREHEADER, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_PREHEADER, 0xff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_EXDSA, 0xff, + NPC_DSA_EDSA, + NPC_DSA_EDSA, + 0x0000, + 0x0000, + NPC_ETYPE_IP, + 0xffff, + }, + { + NPC_S_KPU2_EXDSA, 0xff, + NPC_DSA_EDSA, + NPC_DSA_EDSA, + 0x0000, + 0x0000, + NPC_ETYPE_IP6, + 0xffff, + }, + { + NPC_S_KPU2_EXDSA, 0xff, + NPC_DSA_EDSA, + NPC_DSA_EDSA, + 0x0000, + 0x0000, + NPC_ETYPE_ARP, + 0xffff, + }, + { + NPC_S_KPU2_EXDSA, 0xff, + NPC_DSA_EDSA, + NPC_DSA_EDSA, + 0x0000, + 0x0000, + NPC_ETYPE_RARP, + 0xffff, + }, + { + 
NPC_S_KPU2_EXDSA, 0xff, + NPC_DSA_EDSA, + NPC_DSA_EDSA, + 0x0000, + 0x0000, + NPC_ETYPE_PTP, + 0xffff, + }, + { + NPC_S_KPU2_EXDSA, 0xff, + NPC_DSA_EDSA, + NPC_DSA_EDSA, + 0x0000, + 0x0000, + NPC_ETYPE_FCOE, + 0xffff, + }, + { + NPC_S_KPU2_EXDSA, 0xff, + NPC_DSA_EDSA, + NPC_DSA_EDSA, + 0x0000, + 0x0000, + NPC_ETYPE_CTAG, + 0xffff, + }, + { + NPC_S_KPU2_EXDSA, 0xff, + NPC_DSA_EDSA, + NPC_DSA_EDSA, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_EXDSA, 0xff, + 0x0000, + NPC_DSA_EDSA, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_EXDSA, 0xff, + 0x0000, + NPC_DSA_EDSA, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_EXDSA, 0xff, + 0x0000, + NPC_DSA_EDSA, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_EXDSA, 0xff, + 0x0000, + NPC_DSA_EDSA, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_EXDSA, 0xff, + 0x0000, + NPC_DSA_EDSA, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_EXDSA, 0xff, + 0x0000, + NPC_DSA_EDSA, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_EXDSA, 0xff, + 0x0000, + NPC_DSA_EDSA, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_EXDSA, 0xff, + 0x0000, + NPC_DSA_EDSA, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_NA, 0X00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, +}; + +static const struct npc_kpu_profile_cam kpu3_cam_entries[] = { + { + NPC_S_KPU3_CTAG, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_CTAG, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_CTAG, 0xff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_CTAG, 0xff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_CTAG, 0xff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_CTAG, 0xff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_CTAG, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_CTAG, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_CTAG, 0xff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_CTAG, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, 
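+	/*
+	 * Descriptive note (added comment): each entry in these tables is a
+	 * struct npc_kpu_profile_cam - a parser state and state mask followed
+	 * by three 16-bit data words (dp0-dp2), each paired with its own mask.
+	 * A mask of 0x0000 makes the corresponding word a don't-care; e.g. the
+	 * NPC_S_KPU3_STAG entries that follow match only on dp0, the EtherType
+	 * seen after the outer S-tag.
+	 */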
+ { + NPC_S_KPU3_STAG, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_ITAG, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_ITAG, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_ITAG, 0xff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_ITAG, 0xff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_ITAG, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + }, + { + NPC_S_KPU3_ITAG, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, + }, + { + NPC_S_KPU3_ITAG, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_ARP, + 0xffff, + }, + { + NPC_S_KPU3_ITAG, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 
0x0000, + }, + { + NPC_S_KPU3_ITAG, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_ITAG, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_ITAG, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_ITAG, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_ITAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_ITAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_ITAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_ITAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_ITAG, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_CTAG_C, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_CTAG_C, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_CTAG_C, 0xff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_CTAG_C, 0xff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_CTAG_C, 0xff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_CTAG_C, 0xff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_CTAG_C, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_CTAG_C, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_CTAG_C, 0xff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_CTAG_C, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG_C, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG_C, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG_C, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG_C, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG_C, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG_C, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG_C, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG_C, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG_C, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG_C, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG_C, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG_C, 0xff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG_C, 0xff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG_C, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG_C, 0xff, + 
NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG_C, 0xff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG_C, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ_C, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ_C, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ_C, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ_C, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ_C, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ_C, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ_C, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ_C, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ_C, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ_C, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ_C, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ_C, 0xff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ_C, 0xff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ_C, 0xff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ_C, 0xff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ_C, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ_C, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ_C, 0xff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ_C, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_DSA, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_DSA, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_DSA, 0xff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_DSA, 0xff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_DSA, 0xff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_DSA, 0xff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_DSA, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_DSA, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_DSA, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_DSA, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_DSA, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_DSA, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + }, + 
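+	/*
+	 * Descriptive note (added comment): the remaining NPC_S_KPU3_DSA
+	 * entries are catch-alls - a DSA frame carrying an inner C-tag with an
+	 * unlisted EtherType, a plain DSA frame, and the final NPC_S_NA entry
+	 * that ends this KPU's CAM table.
+	 */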
{ + NPC_S_KPU3_DSA, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_DSA, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_NA, 0X00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, +}; + +static const struct npc_kpu_profile_cam kpu4_cam_entries[] = { + { + NPC_S_KPU4_MPLS, 0xff, + NPC_MPLS_S, + NPC_MPLS_S, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU4_MPLS, 0xff, + 0x0000, + NPC_MPLS_S, + NPC_MPLS_S, + NPC_MPLS_S, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU4_MPLS, 0xff, + 0x0000, + NPC_MPLS_S, + 0x0000, + NPC_MPLS_S, + NPC_MPLS_S, + NPC_MPLS_S, + }, + { + NPC_S_KPU4_MPLS, 0xff, + 0x0000, + NPC_MPLS_S, + 0x0000, + NPC_MPLS_S, + 0x0000, + NPC_MPLS_S, + }, + { + NPC_S_KPU4_NSH, 0xff, + NPC_NSH_NP_IP, + NPC_NSH_NP_MASK, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU4_NSH, 0xff, + NPC_NSH_NP_IP6, + NPC_NSH_NP_MASK, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU4_NSH, 0xff, + NPC_NSH_NP_ETH, + NPC_NSH_NP_MASK, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU4_NSH, 0xff, + NPC_NSH_NP_MPLS, + NPC_NSH_NP_MASK, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU4_NSH, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU4_FDSA, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU4_FDSA, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU4_FDSA, 0xff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU4_FDSA, 0xff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU4_FDSA, 0xff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU4_FDSA, 0xff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU4_FDSA, 0xff, + 0x0000, + NPC_DSA_FDSA, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_NA, 0X00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, +}; + +static const struct npc_kpu_profile_cam kpu5_cam_entries[] = { + { + NPC_S_KPU5_IP, 0xff, + 0x0000, + NPC_IP_TTL_MASK, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0001, + NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_TCP, + 0x00ff, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_UDP, + 0x00ff, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_SCTP, + 0x00ff, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_ICMP, + 0x00ff, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_IGMP, + 0x00ff, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_ESP, + 0x00ff, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_AH, + 0x00ff, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + 
NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_GRE, + 0x00ff, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_IP, + 0x00ff, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_IP6, + 0x00ff, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_MPLS, + 0x00ff, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + 0x0000, + 0x0000, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + 0x0000, + 0x0000, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_TCP, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_UDP, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_SCTP, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_ICMP, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_IGMP, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_ESP, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_AH, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_GRE, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_IP, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_IP6, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_MPLS, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + 0x0000, + 0x0000, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + 0x0000, + 0x0000, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_ARP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_RARP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_PTP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_FCOE, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, + 0x0000, + NPC_IP6_HOP_MASK, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, + NPC_IPNH_TCP << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 
0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, + NPC_IPNH_UDP << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, + NPC_IPNH_SCTP << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, + NPC_IPNH_ICMP << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, + NPC_IPNH_ICMP6 << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, + NPC_IPNH_GRE << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, + NPC_IPNH_IP6 << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, + NPC_IPNH_MPLS << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, + NPC_IPNH_HOP << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, + NPC_IPNH_DEST << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, + NPC_IPNH_ROUT << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, + NPC_IPNH_FRAG << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, + NPC_IPNH_ESP << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, + NPC_IPNH_AH << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, + NPC_IPNH_MOBILITY << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, + NPC_IPNH_HOSTID << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, + NPC_IPNH_SHIM6 << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, + 0x0000, + 0x0000, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_MPLS, 0xff, + NPC_MPLS_S, + NPC_MPLS_S, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_MPLS, 0xff, + NPC_MPLS_S, + NPC_MPLS_S, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_MPLS, 0xff, + NPC_MPLS_S, + NPC_MPLS_S, + 0x0000, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_MPLS, 0xff, + NPC_MPLS_S, + NPC_MPLS_S, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_MPLS, 0xff, + 0x0000, + NPC_MPLS_S, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_MPLS_PL, 0xff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_MPLS_PL, 0xff, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_MPLS_PL, 0xff, + 0x0000, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_MPLS_PL, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_NA, 0X00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, +}; + +static const struct npc_kpu_profile_cam kpu6_cam_entries[] = { + { + NPC_S_KPU6_IP6_EXT, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_FRAG, 0xff, + NPC_IPNH_TCP << 8, + 0xff00, + 0x0000, + NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_FRAG, 0xff, + NPC_IPNH_UDP << 8, + 0xff00, + 0x0000, + 
NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_FRAG, 0xff, + NPC_IPNH_SCTP << 8, + 0xff00, + 0x0000, + NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_FRAG, 0xff, + NPC_IPNH_ICMP << 8, + 0xff00, + 0x0000, + NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_FRAG, 0xff, + NPC_IPNH_ICMP6 << 8, + 0xff00, + 0x0000, + NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_FRAG, 0xff, + NPC_IPNH_ESP << 8, + 0xff00, + 0x0000, + NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_FRAG, 0xff, + NPC_IPNH_AH << 8, + 0xff00, + 0x0000, + NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_FRAG, 0xff, + NPC_IPNH_GRE << 8, + 0xff00, + 0x0000, + NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_FRAG, 0xff, + NPC_IPNH_IP6 << 8, + 0xff00, + 0x0000, + NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_FRAG, 0xff, + NPC_IPNH_MPLS << 8, + 0xff00, + 0x0000, + NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_FRAG, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_HOP_DEST, 0xff, + NPC_IPNH_TCP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_HOP_DEST, 0xff, + NPC_IPNH_UDP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_HOP_DEST, 0xff, + NPC_IPNH_SCTP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_HOP_DEST, 0xff, + NPC_IPNH_ICMP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_HOP_DEST, 0xff, + NPC_IPNH_ICMP6 << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_HOP_DEST, 0xff, + NPC_IPNH_ESP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_HOP_DEST, 0xff, + NPC_IPNH_AH << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_HOP_DEST, 0xff, + NPC_IPNH_GRE << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_HOP_DEST, 0xff, + NPC_IPNH_IP6 << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_HOP_DEST, 0xff, + NPC_IPNH_MPLS << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_HOP_DEST, 0xff, + NPC_IPNH_ROUT << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_HOP_DEST, 0xff, + NPC_IPNH_FRAG << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_HOP_DEST, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_ROUT, 0xff, + NPC_IPNH_TCP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_ROUT, 0xff, + NPC_IPNH_UDP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_ROUT, 0xff, + NPC_IPNH_SCTP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_ROUT, 0xff, + NPC_IPNH_ICMP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_ROUT, 0xff, + NPC_IPNH_ICMP6 << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_ROUT, 0xff, + NPC_IPNH_ESP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_ROUT, 0xff, + NPC_IPNH_AH << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_ROUT, 0xff, + NPC_IPNH_GRE << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_ROUT, 0xff, + NPC_IPNH_IP6 << 8, + 0xff00, + 0x0000, + 
0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_ROUT, 0xff, + NPC_IPNH_MPLS << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_ROUT, 0xff, + NPC_IPNH_FRAG << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_ROUT, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_NA, 0X00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, +}; + +static const struct npc_kpu_profile_cam kpu7_cam_entries[] = { + { + NPC_S_KPU7_IP6_EXT, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_ROUT, 0xff, + NPC_IPNH_TCP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_ROUT, 0xff, + NPC_IPNH_UDP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_ROUT, 0xff, + NPC_IPNH_SCTP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_ROUT, 0xff, + NPC_IPNH_ICMP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_ROUT, 0xff, + NPC_IPNH_ICMP6 << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_ROUT, 0xff, + NPC_IPNH_ESP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_ROUT, 0xff, + NPC_IPNH_AH << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_ROUT, 0xff, + NPC_IPNH_GRE << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_ROUT, 0xff, + NPC_IPNH_IP6 << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_ROUT, 0xff, + NPC_IPNH_MPLS << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_ROUT, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_FRAG, 0xff, + NPC_IPNH_TCP << 8, + 0xff00, + 0x0000, + NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_FRAG, 0xff, + NPC_IPNH_UDP << 8, + 0xff00, + 0x0000, + NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_FRAG, 0xff, + NPC_IPNH_SCTP << 8, + 0xff00, + 0x0000, + NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_FRAG, 0xff, + NPC_IPNH_ICMP << 8, + 0xff00, + 0x0000, + NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_FRAG, 0xff, + NPC_IPNH_ICMP6 << 8, + 0xff00, + 0x0000, + NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_FRAG, 0xff, + NPC_IPNH_ESP << 8, + 0xff00, + 0x0000, + NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_FRAG, 0xff, + NPC_IPNH_AH << 8, + 0xff00, + 0x0000, + NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_FRAG, 0xff, + NPC_IPNH_GRE << 8, + 0xff00, + 0x0000, + NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_FRAG, 0xff, + NPC_IPNH_IP6 << 8, + 0xff00, + 0x0000, + NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_FRAG, 0xff, + NPC_IPNH_MPLS << 8, + 0xff00, + 0x0000, + NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_FRAG, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_NA, 0X00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, +}; + +static const struct npc_kpu_profile_cam kpu8_cam_entries[] = { + { + NPC_S_KPU8_TCP, 0xff, + 0x0000, + 0x0000, + NPC_TCP_FLAGS_FIN, + NPC_TCP_FLAGS_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_TCP, 0xff, + 0x0000, + 0x0000, + 0x0000, + NPC_TCP_FLAGS_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_TCP, 0xff, + 0x0000, + 0x0000, 
+ NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_FIN, + NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_FIN, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_TCP, 0xff, + 0x0000, + 0x0000, + NPC_TCP_FLAGS_URG|NPC_TCP_FLAGS_SYN, + NPC_TCP_FLAGS_URG|NPC_TCP_FLAGS_SYN, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_TCP, 0xff, + 0x0000, + 0x0000, + NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_SYN, + NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_SYN, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_TCP, 0xff, + 0x0000, + 0x0000, + NPC_TCP_FLAGS_SYN|NPC_TCP_FLAGS_FIN, + NPC_TCP_FLAGS_SYN|NPC_TCP_FLAGS_FIN, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_TCP, 0xff, + NPC_TCP_PORT_HTTP, + 0xffff, + NPC_TCP_DATA_OFFSET_5, + NPC_TCP_DATA_OFFSET_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_TCP, 0xff, + NPC_TCP_PORT_HTTPS, + 0xffff, + NPC_TCP_DATA_OFFSET_5, + NPC_TCP_DATA_OFFSET_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_TCP, 0xff, + NPC_TCP_PORT_PPTP, + 0xffff, + NPC_TCP_DATA_OFFSET_5, + NPC_TCP_DATA_OFFSET_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_TCP, 0xff, + 0x0000, + 0x0000, + NPC_TCP_DATA_OFFSET_5, + NPC_TCP_DATA_OFFSET_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_TCP, 0xff, + NPC_TCP_PORT_HTTP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_TCP, 0xff, + NPC_TCP_PORT_HTTPS, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_TCP, 0xff, + NPC_TCP_PORT_PPTP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_TCP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_UDP, 0xff, + NPC_UDP_PORT_VXLAN, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_UDP, 0xff, + NPC_UDP_PORT_VXLANGPE, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_UDP, 0xff, + NPC_UDP_PORT_GENEVE, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_UDP, 0xff, + NPC_UDP_PORT_GTPC, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_UDP, 0xff, + NPC_UDP_PORT_GTPU, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_UDP, 0xff, + NPC_UDP_PORT_PTP_E, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_UDP, 0xff, + NPC_UDP_PORT_PTP_G, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_UDP, 0xff, + NPC_UDP_PORT_MPLS, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_UDP, 0xff, + NPC_UDP_PORT_ESP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_UDP, 0xff, + 0x0000, + 0x0000, + NPC_UDP_PORT_ESP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_UDP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_SCTP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_ICMP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_IGMP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_ICMP6, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_AH, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_TRANS_ETH_BR, + 0xffff, + NPC_GRE_F_KEY, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_TRANS_ETH_BR, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + NPC_GRE_F_CSUM, + 0xffff, + 0x0000, + 0x0000, + }, + { + 
NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + NPC_GRE_F_KEY, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + NPC_GRE_F_CSUM|NPC_GRE_F_KEY, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + NPC_GRE_F_CSUM|NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + NPC_GRE_F_KEY|NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + NPC_GRE_F_CSUM, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + NPC_GRE_F_KEY, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + NPC_GRE_F_CSUM|NPC_GRE_F_KEY, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + NPC_GRE_F_CSUM|NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + NPC_GRE_F_KEY|NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_NSH, + 0xffff, + NPC_GRE_F_CSUM, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_NSH, + 0xffff, + NPC_GRE_F_KEY, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_NSH, + 0xffff, + NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_NSH, + 0xffff, + NPC_GRE_F_CSUM|NPC_GRE_F_KEY, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_NSH, + 0xffff, + NPC_GRE_F_CSUM|NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_NSH, + 0xffff, + NPC_GRE_F_KEY|NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_NSH, + 0xffff, + NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_IP, + 0xffff, + NPC_GRE_F_CSUM, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_IP, + 0xffff, + NPC_GRE_F_KEY, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_IP, + 0xffff, + NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_IP, + 0xffff, + NPC_GRE_F_CSUM|NPC_GRE_F_KEY, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_IP, + 0xffff, + NPC_GRE_F_CSUM|NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_IP, + 0xffff, + NPC_GRE_F_KEY|NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_IP, + 0xffff, + NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 
0x0000, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_IP6, + 0xffff, + NPC_GRE_F_CSUM, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_IP6, + 0xffff, + NPC_GRE_F_KEY, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_IP6, + 0xffff, + NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_IP6, + 0xffff, + NPC_GRE_F_CSUM|NPC_GRE_F_KEY, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_IP6, + 0xffff, + NPC_GRE_F_CSUM|NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_IP6, + 0xffff, + NPC_GRE_F_KEY|NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_IP6, + 0xffff, + NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + 0x0000, + 0xffff, + NPC_GRE_F_ROUTE, + 0x4fff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + 0x0000, + 0xffff, + 0x0000, + 0x4fff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + 0x0000, + 0xffff, + 0x0000, + 0x0003, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_PPP, + 0xffff, + NPC_GRE_F_KEY|NPC_GRE_VER_1, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_PPP, + 0xffff, + NPC_GRE_F_KEY|NPC_GRE_F_SEQ|NPC_GRE_VER_1, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_PPP, + 0xffff, + NPC_GRE_F_KEY|NPC_GRE_F_ACK|NPC_GRE_VER_1, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_PPP, + 0xffff, + NPC_GRE_F_KEY|NPC_GRE_F_SEQ|NPC_GRE_F_ACK|NPC_GRE_VER_1, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + 0x0000, + 0xffff, + 0x2001, + 0xef7f, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + 0x0000, + 0xffff, + 0x0001, + 0x0003, + 0x0000, + 0x0000, + }, + { + NPC_S_NA, 0X00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, +}; + +static const struct npc_kpu_profile_cam kpu9_cam_entries[] = { + { + NPC_S_KPU9_TU_MPLS_IN_GRE, 0xff, + NPC_MPLS_S, + NPC_MPLS_S, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU9_TU_MPLS_IN_GRE, 0xff, + 0x0000, + NPC_MPLS_S, + NPC_MPLS_S, + NPC_MPLS_S, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU9_TU_MPLS_IN_GRE, 0xff, + 0x0000, + NPC_MPLS_S, + 0x0000, + NPC_MPLS_S, + NPC_MPLS_S, + NPC_MPLS_S, + }, + { + NPC_S_KPU9_TU_MPLS_IN_GRE, 0xff, + 0x0000, + NPC_MPLS_S, + 0x0000, + NPC_MPLS_S, + 0x0000, + NPC_MPLS_S, + }, + { + NPC_S_KPU9_TU_MPLS_IN_NSH, 0xff, + NPC_MPLS_S, + NPC_MPLS_S, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU9_TU_MPLS_IN_NSH, 0xff, + 0x0000, + NPC_MPLS_S, + NPC_MPLS_S, + NPC_MPLS_S, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU9_TU_MPLS_IN_NSH, 0xff, + 0x0000, + NPC_MPLS_S, + 0x0000, + NPC_MPLS_S, + NPC_MPLS_S, + NPC_MPLS_S, + }, + { + NPC_S_KPU9_TU_MPLS_IN_NSH, 0xff, + 0x0000, + NPC_MPLS_S, + 0x0000, + NPC_MPLS_S, + 0x0000, + NPC_MPLS_S, + }, + { + NPC_S_KPU9_TU_MPLS_IN_IP, 0xff, + NPC_MPLS_S, + NPC_MPLS_S, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU9_TU_MPLS_IN_IP, 0xff, + 0x0000, + NPC_MPLS_S, + NPC_MPLS_S, + NPC_MPLS_S, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU9_TU_MPLS_IN_IP, 0xff, + 0x0000, + NPC_MPLS_S, + 0x0000, + NPC_MPLS_S, + NPC_MPLS_S, + NPC_MPLS_S, + }, + { + NPC_S_KPU9_TU_MPLS_IN_IP, 0xff, + 0x0000, + NPC_MPLS_S, + 0x0000, + NPC_MPLS_S, + 0x0000, + NPC_MPLS_S, + }, + { + NPC_S_KPU9_TU_NSH_IN_GRE, 0xff, + NPC_NSH_NP_IP, + NPC_NSH_NP_MASK, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + 
NPC_S_KPU9_TU_NSH_IN_GRE, 0xff, + NPC_NSH_NP_IP6, + NPC_NSH_NP_MASK, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU9_TU_NSH_IN_GRE, 0xff, + NPC_NSH_NP_ETH, + NPC_NSH_NP_MASK, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU9_TU_NSH_IN_GRE, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU9_VXLAN, 0xff, + 0x0000, + 0x0000, + NPC_VXLAN_I, + NPC_VXLAN_I, + 0x0000, + 0xffff, + }, + { + NPC_S_KPU9_VXLAN, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0xffff, + 0x0000, + 0xffff, + }, + { + NPC_S_KPU9_VXLAN, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU9_VXLANGPE, 0xff, + 0x0000, + 0x0000, + NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_NP_IP, + NPC_VXLANGPE_NP_MASK, + }, + { + NPC_S_KPU9_VXLANGPE, 0xff, + 0x0000, + 0x0000, + NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_NP_IP6, + NPC_VXLANGPE_NP_MASK, + }, + { + NPC_S_KPU9_VXLANGPE, 0xff, + 0x0000, + 0x0000, + NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_NP_ETH, + NPC_VXLANGPE_NP_MASK, + }, + { + NPC_S_KPU9_VXLANGPE, 0xff, + 0x0000, + 0x0000, + NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_NP_NSH, + NPC_VXLANGPE_NP_MASK, + }, + { + NPC_S_KPU9_VXLANGPE, 0xff, + 0x0000, + 0x0000, + NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_NP_MPLS, + NPC_VXLANGPE_NP_MASK, + }, + { + NPC_S_KPU9_VXLANGPE, 0xff, + 0x0000, + 0x0000, + NPC_VXLANGPE_P, + NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_NP_IP, + NPC_VXLANGPE_NP_MASK, + }, + { + NPC_S_KPU9_VXLANGPE, 0xff, + 0x0000, + 0x0000, + NPC_VXLANGPE_P, + NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_NP_IP6, + NPC_VXLANGPE_NP_MASK, + }, + { + NPC_S_KPU9_VXLANGPE, 0xff, + 0x0000, + 0x0000, + NPC_VXLANGPE_P, + NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_NP_ETH, + NPC_VXLANGPE_NP_MASK, + }, + { + NPC_S_KPU9_VXLANGPE, 0xff, + 0x0000, + 0x0000, + NPC_VXLANGPE_P, + NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_NP_NSH, + NPC_VXLANGPE_NP_MASK, + }, + { + NPC_S_KPU9_VXLANGPE, 0xff, + 0x0000, + 0x0000, + NPC_VXLANGPE_P, + NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_NP_MPLS, + NPC_VXLANGPE_NP_MASK, + }, + { + NPC_S_KPU9_VXLANGPE, 0xff, + 0x0000, + 0x0000, + NPC_VXLANGPE_P, + NPC_VXLANGPE_P, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU9_VXLANGPE, 0xff, + 0x0000, + 0x0000, + 0x0000, + NPC_VXLANGPE_P, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU9_GENEVE, 0xff, + 0x0000, + 0x0000, + 0x0000, + NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + NPC_ETYPE_TRANS_ETH_BR, + 0xffff, + }, + { + NPC_S_KPU9_GENEVE, 0xff, + 0x0000, + 0x0000, + NPC_GENEVE_F_OAM, + NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + NPC_ETYPE_TRANS_ETH_BR, + 0xffff, + }, + { + NPC_S_KPU9_GENEVE, 0xff, + 0x0000, + 0x0000, + NPC_GENEVE_F_CRI_OPT, + NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + NPC_ETYPE_TRANS_ETH_BR, + 0xffff, + }, + { + NPC_S_KPU9_GENEVE, 0xff, + 0x0000, + 0x0000, + NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + NPC_ETYPE_TRANS_ETH_BR, + 0xffff, + }, + { + NPC_S_KPU9_GENEVE, 0xff, + 0x0000, + 0x0000, + 0x0000, + NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + NPC_ETYPE_IP, + 0xffff, + }, + { + NPC_S_KPU9_GENEVE, 0xff, + 0x0000, + 0x0000, + NPC_GENEVE_F_OAM, + NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + NPC_ETYPE_IP, + 0xffff, + }, + { + NPC_S_KPU9_GENEVE, 0xff, + 0x0000, + 0x0000, + NPC_GENEVE_F_CRI_OPT, + NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + 
NPC_ETYPE_IP, + 0xffff, + }, + { + NPC_S_KPU9_GENEVE, 0xff, + 0x0000, + 0x0000, + NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + NPC_ETYPE_IP, + 0xffff, + }, + { + NPC_S_KPU9_GENEVE, 0xff, + 0x0000, + 0x0000, + 0x0000, + NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + NPC_ETYPE_IP6, + 0xffff, + }, + { + NPC_S_KPU9_GENEVE, 0xff, + 0x0000, + 0x0000, + NPC_GENEVE_F_OAM, + NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + NPC_ETYPE_IP6, + 0xffff, + }, + { + NPC_S_KPU9_GENEVE, 0xff, + 0x0000, + 0x0000, + NPC_GENEVE_F_CRI_OPT, + NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + NPC_ETYPE_IP6, + 0xffff, + }, + { + NPC_S_KPU9_GENEVE, 0xff, + 0x0000, + 0x0000, + NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + NPC_ETYPE_IP6, + 0xffff, + }, + { + NPC_S_KPU9_GTPC, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU9_GTPU, 0xff, + 0x0000, + 0x0000, + NPC_GTP_PT_GTP | NPC_GTP_VER1 | NPC_GTP_MT_G_PDU, + NPC_GTP_PT_MASK | NPC_GTP_VER_MASK | NPC_GTP_MT_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU9_GTPU, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU9_TU_MPLS_IN_UDP, 0xff, + NPC_MPLS_S, + NPC_MPLS_S, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU9_TU_MPLS_IN_UDP, 0xff, + 0x0000, + NPC_MPLS_S, + NPC_MPLS_S, + NPC_MPLS_S, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU9_TU_MPLS_IN_UDP, 0xff, + 0x0000, + NPC_MPLS_S, + 0x0000, + NPC_MPLS_S, + NPC_MPLS_S, + NPC_MPLS_S, + }, + { + NPC_S_KPU9_TU_MPLS_IN_UDP, 0xff, + 0x0000, + NPC_MPLS_S, + 0x0000, + NPC_MPLS_S, + 0x0000, + NPC_MPLS_S, + }, + { + NPC_S_KPU9_ESP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_NA, 0X00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, +}; + +static const struct npc_kpu_profile_cam kpu10_cam_entries[] = { + { + NPC_S_KPU10_TU_MPLS, 0xff, + NPC_MPLS_S, + NPC_MPLS_S, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU10_TU_MPLS, 0xff, + NPC_MPLS_S, + NPC_MPLS_S, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU10_TU_MPLS, 0xff, + NPC_MPLS_S, + NPC_MPLS_S, + 0x0000, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU10_TU_MPLS, 0xff, + NPC_MPLS_S, + NPC_MPLS_S, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU10_TU_MPLS, 0xff, + 0x0000, + NPC_MPLS_S, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU10_TU_MPLS_PL, 0xff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU10_TU_MPLS_PL, 0xff, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU10_TU_MPLS_PL, 0xff, + 0x0000, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU10_TU_MPLS_PL, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 0xff, + NPC_MPLS_S, + NPC_MPLS_S, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 0xff, + 0x0000, + NPC_MPLS_S, + NPC_MPLS_S, + NPC_MPLS_S, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 0xff, + 0x0000, + NPC_MPLS_S, + 0x0000, + NPC_MPLS_S, + NPC_MPLS_S, + NPC_MPLS_S, + }, + { + NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 0xff, + 0x0000, + NPC_MPLS_S, + 0x0000, + NPC_MPLS_S, + 0x0000, + NPC_MPLS_S, + }, + { + NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 0xff, + NPC_NSH_NP_IP, + NPC_NSH_NP_MASK, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 0xff, + NPC_NSH_NP_IP6, + 
NPC_NSH_NP_MASK, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 0xff, + NPC_NSH_NP_ETH, + NPC_NSH_NP_MASK, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_NA, 0X00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, +}; + +static const struct npc_kpu_profile_cam kpu11_cam_entries[] = { + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_ARP, + 0xffff, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_QINQ, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_QINQ, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_QINQ, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_ARP, + 0xffff, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_QINQ, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_QINQ, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_QINQ, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_QINQ, + 0xffff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_QINQ, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_PPP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_MPLS, 0xff, + NPC_MPLS_S, + NPC_MPLS_S, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_MPLS, 0xff, + NPC_MPLS_S, + NPC_MPLS_S, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_MPLS, 0xff, 
+ NPC_MPLS_S, + NPC_MPLS_S, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_MPLS, 0xff, + 0x0000, + NPC_MPLS_S, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_MPLS_PL, 0xff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_MPLS_PL, 0xff, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_MPLS_PL, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER_IN_NSH, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_NA, 0X00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, +}; + +static const struct npc_kpu_profile_cam kpu12_cam_entries[] = { + { + NPC_S_KPU12_TU_IP, 0xff, + NPC_IPNH_TCP, + 0x00ff, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, + NPC_IPNH_UDP, + 0x00ff, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, + NPC_IPNH_SCTP, + 0x00ff, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, + NPC_IPNH_ICMP, + 0x00ff, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, + NPC_IPNH_IGMP, + 0x00ff, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, + NPC_IPNH_ESP, + 0x00ff, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, + NPC_IPNH_AH, + 0x00ff, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, + 0x0000, + 0x0000, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, + NPC_IPNH_TCP, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, + NPC_IPNH_UDP, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, + NPC_IPNH_SCTP, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, + NPC_IPNH_ICMP, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, + NPC_IPNH_IGMP, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, + NPC_IPNH_ESP, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, + NPC_IPNH_AH, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, + 0x0000, + 0x0000, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_ARP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP6, 0xff, + NPC_IPNH_TCP << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP6, 0xff, + NPC_IPNH_UDP << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP6, 0xff, + NPC_IPNH_SCTP << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP6, 0xff, + NPC_IPNH_ICMP << 8, + 
0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP6, 0xff, + NPC_IPNH_ICMP6 << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP6, 0xff, + NPC_IPNH_ESP << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP6, 0xff, + NPC_IPNH_AH << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP6, 0xff, + 0x0000, + 0x0000, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP6, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_NA, 0X00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, +}; + +static const struct npc_kpu_profile_cam kpu13_cam_entries[] = { + { + NPC_S_KPU13_TU_IP6_EXT, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, +}; + +static const struct npc_kpu_profile_cam kpu14_cam_entries[] = { + { + NPC_S_KPU14_TU_IP6_EXT, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, +}; + +static const struct npc_kpu_profile_cam kpu15_cam_entries[] = { + { + NPC_S_KPU15_TU_TCP, 0xff, + 0x0000, + 0x0000, + NPC_TCP_FLAGS_FIN, + NPC_TCP_FLAGS_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU15_TU_TCP, 0xff, + 0x0000, + 0x0000, + 0x0000, + NPC_TCP_FLAGS_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU15_TU_TCP, 0xff, + 0x0000, + 0x0000, + NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_FIN, + NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_FIN, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU15_TU_TCP, 0xff, + 0x0000, + 0x0000, + NPC_TCP_FLAGS_URG|NPC_TCP_FLAGS_SYN, + NPC_TCP_FLAGS_URG|NPC_TCP_FLAGS_SYN, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU15_TU_TCP, 0xff, + 0x0000, + 0x0000, + NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_SYN, + NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_SYN, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU15_TU_TCP, 0xff, + 0x0000, + 0x0000, + NPC_TCP_FLAGS_SYN|NPC_TCP_FLAGS_FIN, + NPC_TCP_FLAGS_SYN|NPC_TCP_FLAGS_FIN, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU15_TU_TCP, 0xff, + NPC_TCP_PORT_HTTP, + 0xffff, + NPC_TCP_DATA_OFFSET_5, + NPC_TCP_DATA_OFFSET_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU15_TU_TCP, 0xff, + NPC_TCP_PORT_HTTPS, + 0xffff, + NPC_TCP_DATA_OFFSET_5, + NPC_TCP_DATA_OFFSET_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU15_TU_TCP, 0xff, + NPC_TCP_PORT_PPTP, + 0xffff, + NPC_TCP_DATA_OFFSET_5, + NPC_TCP_DATA_OFFSET_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU15_TU_TCP, 0xff, + 0x0000, + 0x0000, + NPC_TCP_DATA_OFFSET_5, + NPC_TCP_DATA_OFFSET_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU15_TU_TCP, 0xff, + NPC_TCP_PORT_HTTP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU15_TU_TCP, 0xff, + NPC_TCP_PORT_HTTPS, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU15_TU_TCP, 0xff, + NPC_TCP_PORT_PPTP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU15_TU_TCP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU15_TU_UDP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU15_TU_SCTP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU15_TU_ICMP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU15_TU_IGMP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU15_TU_ICMP6, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU15_TU_ESP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 
0x0000, + }, + { + NPC_S_KPU15_TU_AH, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_NA, 0X00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, +}; + +static const struct npc_kpu_profile_cam kpu16_cam_entries[] = { + { + NPC_S_KPU16_TCP_DATA, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU16_HTTP_DATA, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU16_HTTPS_DATA, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU16_PPTP_DATA, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU16_UDP_DATA, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU16_UDP_PTP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, +}; + +static const struct npc_kpu_profile_action kpu1_action_entries[] = { + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 3, 0, + NPC_S_KPU5_IP, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 3, 0, + NPC_S_KPU5_IP6, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU5_ARP, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU5_RARP, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU5_PTP, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU5_FCOE, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 12, 0, 0, 0, + NPC_S_KPU2_CTAG2, 12, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 8, 0, 0, 0, + NPC_S_KPU2_CTAG, 12, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 8, 22, 0, 0, + NPC_S_KPU2_SBTAG, 12, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 8, 0, 0, 0, + NPC_S_KPU2_QINQ, 12, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 12, 26, 0, 0, + NPC_S_KPU2_ETAG, 12, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 18, 22, 26, 0, 0, + NPC_S_KPU2_ITAG, 12, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ITAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 2, 0, + NPC_S_KPU4_MPLS, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + NPC_F_LA_L_WITH_MPLS, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 2, 0, + NPC_S_KPU4_MPLS, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + NPC_F_LA_L_WITH_MPLS, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 2, 0, + NPC_S_KPU4_NSH, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + NPC_F_LA_L_WITH_NSH, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 12, 0, 1, 0, + NPC_S_KPU3_DSA, 12, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LA, NPC_LT_LA_8023, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + 
NPC_S_NA, 0, 1, + NPC_LID_LA, NPC_LT_LA_8023, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + NPC_F_LA_L_UNK_ETYPE, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 3, 0, + NPC_S_KPU5_IP, 22, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, + NPC_F_LA_U_HAS_IH_NIX, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 3, 0, + NPC_S_KPU5_IP6, 22, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, + NPC_F_LA_U_HAS_IH_NIX, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU5_ARP, 22, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, + NPC_F_LA_U_HAS_IH_NIX, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU5_RARP, 22, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, + NPC_F_LA_U_HAS_IH_NIX, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU5_PTP, 22, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, + NPC_F_LA_U_HAS_IH_NIX, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU5_FCOE, 22, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, + NPC_F_LA_U_HAS_IH_NIX, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 12, 0, 0, 0, + NPC_S_KPU2_CTAG2, 20, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG + | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 8, 0, 0, 0, + NPC_S_KPU2_CTAG, 20, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG + | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 8, 22, 0, 0, + NPC_S_KPU2_SBTAG, 20, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG + | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 8, 0, 0, 0, + NPC_S_KPU2_QINQ, 20, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG + | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 12, 26, 0, 0, + NPC_S_KPU2_ETAG, 20, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG + | NPC_F_LA_L_WITH_ETAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 18, 22, 26, 0, 0, + NPC_S_KPU2_ITAG, 20, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG + | NPC_F_LA_L_WITH_ITAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 2, 0, + NPC_S_KPU4_MPLS, 22, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_L_WITH_MPLS, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 2, 0, + NPC_S_KPU4_MPLS, 22, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_L_WITH_MPLS, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 2, 0, + NPC_S_KPU4_NSH, 22, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_L_WITH_NSH, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_L_UNK_ETYPE, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 14, 16, 0, 0, + NPC_S_KPU2_PREHEADER, 8, 1, + NPC_LID_LA, NPC_LT_LA_IH_8_ETHER, + 0, + 1, 0xff, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 14, 16, 0, 0, + NPC_S_KPU2_PREHEADER, 4, 1, + NPC_LID_LA, NPC_LT_LA_IH_4_ETHER, + 0, + 1, 0xff, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 14, 16, 0, 0, + NPC_S_KPU2_PREHEADER, 2, 1, + NPC_LID_LA, NPC_LT_LA_IH_2_ETHER, 
+ 0, + 1, 0xff, 0, 0, + }, + { + NPC_ERRLEV_LA, NPC_EC_IH_LENGTH, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + NPC_F_LA_L_UNK_ETYPE, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 8, 16, 0, 0, + NPC_S_KPU2_EXDSA, 12, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 8, 16, 2, 0, + NPC_S_KPU4_FDSA, 12, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LA, NPC_EC_EDSA_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 3, 0, + NPC_S_KPU5_IP, 30, 1, + NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER, + NPC_F_LA_U_HAS_HIGIG2, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 3, 0, + NPC_S_KPU5_IP6, 30, 1, + NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER, + NPC_F_LA_U_HAS_HIGIG2, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU5_ARP, 30, 1, + NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER, + NPC_F_LA_U_HAS_HIGIG2, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU5_RARP, 30, 1, + NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER, + NPC_F_LA_U_HAS_HIGIG2, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU5_PTP, 30, 1, + NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER, + NPC_F_LA_U_HAS_HIGIG2, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU5_FCOE, 30, 1, + NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER, + NPC_F_LA_U_HAS_HIGIG2, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 12, 0, 0, 0, + NPC_S_KPU2_CTAG2, 28, 1, + NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER, + NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG + | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 8, 0, 0, 0, + NPC_S_KPU2_CTAG, 28, 1, + NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER, + NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG + | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 8, 22, 0, 0, + NPC_S_KPU2_SBTAG, 28, 1, + NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER, + NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG + | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 8, 0, 0, 0, + NPC_S_KPU2_QINQ, 28, 1, + NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER, + NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG + | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 12, 26, 0, 0, + NPC_S_KPU2_ETAG, 28, 1, + NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER, + NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG + | NPC_F_LA_L_WITH_ETAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 18, 22, 26, 0, 0, + NPC_S_KPU2_ITAG, 28, 1, + NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER, + NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG + | NPC_F_LA_L_WITH_ITAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 2, 0, + NPC_S_KPU4_MPLS, 30, 1, + NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER, + NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_L_WITH_MPLS, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 2, 0, + NPC_S_KPU4_MPLS, 30, 1, + NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER, + NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_L_WITH_MPLS, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 2, 0, + NPC_S_KPU4_NSH, 30, 1, + NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER, + NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_L_WITH_NSH, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER, + NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_L_UNK_ETYPE, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 3, 0, + 
NPC_S_KPU5_IP, 38, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 3, 0, + NPC_S_KPU5_IP6, 38, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU5_ARP, 38, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU5_RARP, 38, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU5_PTP, 38, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU5_FCOE, 38, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 12, 0, 0, 0, + NPC_S_KPU2_CTAG2, 36, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2 + | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 8, 0, 0, 0, + NPC_S_KPU2_CTAG, 36, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2 + | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 8, 22, 0, 0, + NPC_S_KPU2_SBTAG, 36, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2 + | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 8, 0, 0, 0, + NPC_S_KPU2_QINQ, 36, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2 + | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 12, 26, 0, 0, + NPC_S_KPU2_ETAG, 36, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2 + | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 18, 22, 26, 0, 0, + NPC_S_KPU2_ITAG, 36, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2 + | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ITAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 2, 0, + NPC_S_KPU4_MPLS, 38, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2 + | NPC_F_LA_L_WITH_MPLS, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 2, 0, + NPC_S_KPU4_MPLS, 38, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2 + | NPC_F_LA_L_WITH_MPLS, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 2, 0, + NPC_S_KPU4_NSH, 38, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2 + | NPC_F_LA_L_WITH_NSH, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2 + | NPC_F_LA_L_UNK_ETYPE, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LA, NPC_EC_L2_K1, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, +}; + +static const struct npc_kpu_profile_action 
kpu2_action_entries[] = { + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU5_IP, 6, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_IP6, 6, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_ARP, 6, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_RARP, 6, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_PTP, 6, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_FCOE, 6, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 1, 0, + NPC_S_KPU4_MPLS, 6, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 1, 0, + NPC_S_KPU4_MPLS, 6, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 1, 0, + NPC_S_KPU4_NSH, 6, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + NPC_F_LB_U_UNK_ETYPE, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU5_IP, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_IP6, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_ARP, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_RARP, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_PTP, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_FCOE, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 1, 0, + NPC_S_KPU4_MPLS, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 1, 0, + NPC_S_KPU4_MPLS, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 1, 0, + NPC_S_KPU4_NSH, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG_UNK, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 0, 0, 0, + NPC_S_KPU3_CTAG, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 0, 0, 0, + NPC_S_KPU3_STAG, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_STAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU5_IP, 24, 1, + NPC_LID_LB, 
NPC_LT_LB_BTAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_IP6, 24, 1, + NPC_LID_LB, NPC_LT_LB_BTAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_ARP, 24, 1, + NPC_LID_LB, NPC_LT_LB_BTAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_RARP, 24, 1, + NPC_LID_LB, NPC_LT_LB_BTAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_PTP, 24, 1, + NPC_LID_LB, NPC_LT_LB_BTAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_FCOE, 24, 1, + NPC_LID_LB, NPC_LT_LB_BTAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 1, 0, + NPC_S_KPU4_MPLS, 24, 1, + NPC_LID_LB, NPC_LT_LB_BTAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 1, 0, + NPC_S_KPU4_MPLS, 24, 1, + NPC_LID_LB, NPC_LT_LB_BTAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 1, 0, + NPC_S_KPU4_NSH, 24, 1, + NPC_LID_LB, NPC_LT_LB_BTAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU3_STAG, 24, 1, + NPC_LID_LB, NPC_LT_LB_BTAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_STAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU3_CTAG, 24, 1, + NPC_LID_LB, NPC_LT_LB_BTAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LB, NPC_LT_LB_BTAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_UNK, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_UNK_ETYPE, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU5_IP, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_IP6, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_ARP, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_RARP, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_PTP, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_FCOE, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 1, 0, + NPC_S_KPU4_MPLS, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 1, 0, + NPC_S_KPU4_MPLS, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, 
NPC_EC_NOERR, + 2, 0, 0, 1, 0, + NPC_S_KPU4_NSH, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG_UNK, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 0, 0, 0, + NPC_S_KPU3_CTAG, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_QINQ_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 0, 0, 0, + NPC_S_KPU3_QINQ, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_QINQ_QINQ, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_UNK_ETYPE, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU5_IP, 10, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_IP6, 10, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_ARP, 10, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_RARP, 10, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_PTP, 10, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_FCOE, 10, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 1, 0, + NPC_S_KPU4_MPLS, 10, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, + 1, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 1, 0, + NPC_S_KPU4_MPLS, 10, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, + 2, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 1, 0, + NPC_S_KPU4_NSH, 10, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, + 2, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU3_CTAG, 10, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 16, 20, 24, 0, 0, + NPC_S_KPU3_ITAG, 14, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_BTAG_ITAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 0, 0, 0, + NPC_S_KPU3_STAG, 10, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 0, 0, 0, + NPC_S_KPU3_QINQ, 10, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_QINQ, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU5_IP, 28, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_IP6, 28, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_ARP, 28, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU3_STAG, 28, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_STAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU3_CTAG, 28, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, + 
NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_UNK, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, + NPC_F_LB_U_UNK_ETYPE, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU5_IP, 20, 1, + NPC_LID_LB, NPC_LT_LB_ITAG, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_IP6, 20, 1, + NPC_LID_LB, NPC_LT_LB_ITAG, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_ARP, 20, 1, + NPC_LID_LB, NPC_LT_LB_ITAG, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_RARP, 20, 1, + NPC_LID_LB, NPC_LT_LB_ITAG, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU5_IP, 28, 1, + NPC_LID_LB, NPC_LT_LB_ITAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_IP6, 28, 1, + NPC_LID_LB, NPC_LT_LB_ITAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_ARP, 28, 1, + NPC_LID_LB, NPC_LT_LB_ITAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU5_IP, 24, 1, + NPC_LID_LB, NPC_LT_LB_ITAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_IP6, 24, 1, + NPC_LID_LB, NPC_LT_LB_ITAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_ARP, 24, 1, + NPC_LID_LB, NPC_LT_LB_ITAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU5_IP, 24, 1, + NPC_LID_LB, NPC_LT_LB_ITAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_IP6, 24, 1, + NPC_LID_LB, NPC_LT_LB_ITAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_ARP, 24, 1, + NPC_LID_LB, NPC_LT_LB_ITAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU5_IP, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_IP6, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_ARP, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_RARP, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, 
+ NPC_S_KPU5_PTP, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_FCOE, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 1, 0, + NPC_S_KPU4_MPLS, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 1, 0, + NPC_S_KPU4_MPLS, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 1, 0, + NPC_S_KPU4_NSH, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 0, 0, 0, + NPC_S_KPU3_QINQ, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_QINQ_QINQ, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_UNK_ETYPE, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU5_IP, 14, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_IP6, 14, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_ARP, 14, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_RARP, 14, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_PTP, 14, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_FCOE, 14, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 0, 0, 0, + NPC_S_KPU3_CTAG_C, 14, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 20, 0, 0, + NPC_S_KPU3_STAG_C, 14, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 0, 0, 0, + NPC_S_KPU3_QINQ_C, 14, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 1, 0, + NPC_S_KPU4_MPLS, 14, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 1, 0, + NPC_S_KPU4_MPLS, 14, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 1, 0, + NPC_S_KPU4_NSH, 14, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU5_IP, 18, 1, + NPC_LID_LB, NPC_LT_LB_EDSA, + NPC_F_LB_L_EDSA, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_IP6, 18, 1, + NPC_LID_LB, NPC_LT_LB_EDSA, + NPC_F_LB_L_EDSA, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_ARP, 18, 1, + NPC_LID_LB, NPC_LT_LB_EDSA, + NPC_F_LB_L_EDSA, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU5_RARP, 18, 1, + NPC_LID_LB, NPC_LT_LB_EDSA, + NPC_F_LB_L_EDSA, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_PTP, 18, 1, + NPC_LID_LB, NPC_LT_LB_EDSA, + NPC_F_LB_L_EDSA, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_FCOE, 18, 1, + NPC_LID_LB, NPC_LT_LB_EDSA, + NPC_F_LB_L_EDSA, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 8, 0, 0, 0, + NPC_S_KPU3_CTAG, 16, 1, + NPC_LID_LB, NPC_LT_LB_EDSA_VLAN, + NPC_F_LB_L_EDSA_VLAN, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 
0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LB, NPC_LT_LB_EDSA, + NPC_F_LB_U_UNK_ETYPE | NPC_F_LB_L_EDSA, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU5_IP, 10, 1, + NPC_LID_LB, NPC_LT_LB_EXDSA, + NPC_F_LB_L_EXDSA, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_IP6, 10, 1, + NPC_LID_LB, NPC_LT_LB_EXDSA, + NPC_F_LB_L_EXDSA, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_ARP, 10, 1, + NPC_LID_LB, NPC_LT_LB_EXDSA, + NPC_F_LB_L_EXDSA, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU5_RARP, 10, 1, + NPC_LID_LB, NPC_LT_LB_EXDSA, + NPC_F_LB_L_EXDSA, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_PTP, 10, 1, + NPC_LID_LB, NPC_LT_LB_EXDSA, + NPC_F_LB_L_EXDSA, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_FCOE, 10, 1, + NPC_LID_LB, NPC_LT_LB_EXDSA, + NPC_F_LB_L_EXDSA, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 8, 0, 0, 0, + NPC_S_KPU3_CTAG, 8, 1, + NPC_LID_LB, NPC_LT_LB_EXDSA_VLAN, + NPC_F_LB_L_EXDSA_VLAN, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LB, NPC_LT_LB_EXDSA, + NPC_F_LB_U_UNK_ETYPE | NPC_F_LB_L_EXDSA, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LB, NPC_EC_L2_K3, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, +}; + +static const struct npc_kpu_profile_action kpu3_action_entries[] = { + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 1, 0, + NPC_S_KPU5_IP, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 1, 0, + NPC_S_KPU5_IP6, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_ARP, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_RARP, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_PTP, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_FCOE, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU4_MPLS, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU4_MPLS, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU4_NSH, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 1, 0, + NPC_S_KPU5_IP, 8, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 1, 0, + NPC_S_KPU5_IP6, 8, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_ARP, 8, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_RARP, 8, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_PTP, 8, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_FCOE, 8, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, 
NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU4_MPLS, 8, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU4_MPLS, 8, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU4_NSH, 8, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 1, 0, + NPC_S_KPU5_IP, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 1, 0, + NPC_S_KPU5_IP6, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_ARP, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_RARP, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU4_MPLS, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU4_MPLS, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU4_NSH, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 1, 0, + NPC_S_KPU5_IP, 8, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 1, 0, + NPC_S_KPU5_IP6, 8, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_ARP, 8, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_RARP, 8, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_PTP, 8, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_FCOE, 8, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU4_MPLS, 8, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU4_MPLS, 8, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU4_NSH, 8, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 1, 0, + NPC_S_KPU5_IP, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 1, 0, + NPC_S_KPU5_IP6, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_ARP, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_RARP, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_PTP, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_FCOE, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU4_MPLS, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU4_MPLS, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + 
NPC_S_KPU4_NSH, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU5_IP, 18, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_IP6, 18, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_ARP, 18, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_RARP, 18, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 1, 0, + NPC_S_KPU5_IP, 26, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 1, 0, + NPC_S_KPU5_IP6, 26, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_ARP, 26, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 1, 0, + NPC_S_KPU5_IP, 22, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 1, 0, + NPC_S_KPU5_IP6, 22, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_ARP, 22, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 1, 0, + NPC_S_KPU5_IP, 22, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 1, 0, + NPC_S_KPU5_IP6, 22, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_ARP, 22, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 1, 0, + NPC_S_KPU5_IP, 4, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 1, 0, + NPC_S_KPU5_IP6, 4, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_ARP, 4, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_RARP, 4, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_PTP, 4, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_FCOE, 4, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU4_MPLS, 4, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU4_MPLS, 4, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU4_NSH, 4, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LB, 
NPC_EC_L2_K3_ETYPE_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 1, 0, + NPC_S_KPU5_IP, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 1, 0, + NPC_S_KPU5_IP6, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_ARP, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_RARP, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_PTP, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_FCOE, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU4_MPLS, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU4_MPLS, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU4_NSH, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 1, 0, + NPC_S_KPU5_IP, 4, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 1, 0, + NPC_S_KPU5_IP6, 4, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_ARP, 4, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_RARP, 4, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU4_MPLS, 4, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU4_MPLS, 4, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU4_NSH, 4, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 1, 0, + NPC_S_KPU5_IP, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 1, 0, + NPC_S_KPU5_IP6, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_ARP, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_RARP, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_PTP, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_FCOE, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU4_MPLS, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU4_MPLS, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 
0, + NPC_S_KPU4_NSH, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 1, 0, + NPC_S_KPU5_IP, 4, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 1, 0, + NPC_S_KPU5_IP6, 4, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_ARP, 4, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_RARP, 4, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_PTP, 4, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_FCOE, 4, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU4_MPLS, 4, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU4_MPLS, 4, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU4_NSH, 4, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 1, 0, + NPC_S_KPU5_IP, 10, 1, + NPC_LID_LB, NPC_LT_LB_DSA, + NPC_F_LB_L_DSA, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 1, 0, + NPC_S_KPU5_IP6, 10, 1, + NPC_LID_LB, NPC_LT_LB_DSA, + NPC_F_LB_L_DSA, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_ARP, 10, 1, + NPC_LID_LB, NPC_LT_LB_DSA, + NPC_F_LB_L_DSA, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_RARP, 10, 1, + NPC_LID_LB, NPC_LT_LB_DSA, + NPC_F_LB_L_DSA, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_PTP, 10, 1, + NPC_LID_LB, NPC_LT_LB_DSA, + NPC_F_LB_L_DSA, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_FCOE, 10, 1, + NPC_LID_LB, NPC_LT_LB_DSA, + NPC_F_LB_L_DSA, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 1, 0, + NPC_S_KPU5_IP, 14, 1, + NPC_LID_LB, NPC_LT_LB_DSA_VLAN, + NPC_F_LB_L_DSA_VLAN, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 1, 0, + NPC_S_KPU5_IP6, 14, 1, + NPC_LID_LB, NPC_LT_LB_DSA_VLAN, + NPC_F_LB_L_DSA_VLAN, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_ARP, 14, 1, + NPC_LID_LB, NPC_LT_LB_DSA_VLAN, + NPC_F_LB_L_DSA_VLAN, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_RARP, 14, 1, + NPC_LID_LB, NPC_LT_LB_DSA_VLAN, + NPC_F_LB_L_DSA_VLAN, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_PTP, 14, 1, + NPC_LID_LB, NPC_LT_LB_DSA_VLAN, + NPC_F_LB_L_DSA_VLAN, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_FCOE, 14, 1, + NPC_LID_LB, NPC_LT_LB_DSA_VLAN, + NPC_F_LB_L_DSA_VLAN, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LB, NPC_LT_LB_DSA_VLAN, + NPC_F_LB_U_UNK_ETYPE | NPC_F_LB_L_DSA, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LB, NPC_LT_LB_DSA, + NPC_F_LB_U_UNK_ETYPE | NPC_F_LB_L_DSA_VLAN, + 0, 0, 0, 0, + }, + { 
+ NPC_ERRLEV_LB, NPC_EC_L2_K3, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, +}; + +static const struct npc_kpu_profile_action kpu4_action_entries[] = { + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU5_MPLS_PL, 4, 1, + NPC_LID_LC, NPC_LT_LC_MPLS, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU5_MPLS_PL, 8, 1, + NPC_LID_LC, NPC_LT_LC_MPLS, + NPC_F_LC_L_MPLS_2_LABELS, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU5_MPLS_PL, 12, 1, + NPC_LID_LC, NPC_LT_LC_MPLS, + NPC_F_LC_L_MPLS_3_LABELS, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 4, 0, 0, 0, + NPC_S_KPU5_MPLS, 12, 1, + NPC_LID_LC, NPC_LT_LC_MPLS, + NPC_F_LC_L_MPLS_4_LABELS, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 7, 0, + NPC_S_KPU12_TU_IP, 0, 1, + NPC_LID_LC, NPC_LT_LC_NSH, + 0, + 1, 0x3f, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 7, 0, + NPC_S_KPU12_TU_IP6, 0, 1, + NPC_LID_LC, NPC_LT_LC_NSH, + 0, + 1, 0x3f, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 6, 0, + NPC_S_KPU11_TU_ETHER, 0, 1, + NPC_LID_LC, NPC_LT_LC_NSH, + 0, + 1, 0x3f, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 4, 0, + NPC_S_KPU9_TU_MPLS_IN_NSH, 0, 1, + NPC_LID_LC, NPC_LT_LC_NSH, + 0, + 1, 0x3f, 0, 2, + }, + { + NPC_ERRLEV_LC, NPC_EC_NSH_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_NSH, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 0, 0, + NPC_S_KPU5_IP, 6, 1, + NPC_LID_LB, NPC_LT_LB_FDSA, + NPC_F_LB_L_FDSA, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 0, 0, + NPC_S_KPU5_IP6, 6, 1, + NPC_LID_LB, NPC_LT_LB_FDSA, + NPC_F_LB_L_FDSA, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU5_ARP, 6, 1, + NPC_LID_LB, NPC_LT_LB_FDSA, + NPC_F_LB_L_FDSA, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 0, 0, + NPC_S_KPU5_RARP, 6, 1, + NPC_LID_LB, NPC_LT_LB_FDSA, + NPC_F_LB_L_FDSA, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 0, 0, + NPC_S_KPU5_PTP, 6, 1, + NPC_LID_LB, NPC_LT_LB_FDSA, + NPC_F_LB_L_FDSA, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU5_FCOE, 6, 1, + NPC_LID_LB, NPC_LT_LB_FDSA, + NPC_F_LB_L_FDSA, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LB, NPC_LT_LB_FDSA, + NPC_F_LB_U_UNK_ETYPE | NPC_F_LB_L_FDSA, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LB, NPC_EC_L2_K4, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, +}; + +static const struct npc_kpu_profile_action kpu5_action_entries[] = { + { + NPC_ERRLEV_LC, NPC_EC_IP_TTL_0, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LC, NPC_EC_IP_FRAG_OFFSET_1, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP, + NPC_F_LC_U_IP_FRAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 12, 0, 2, 0, + NPC_S_KPU8_TCP, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 2, 0, + NPC_S_KPU8_UDP, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_SCTP, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_ICMP, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + 
NPC_S_KPU8_IGMP, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU9_ESP, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_AH, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 2, 0, + NPC_S_KPU8_GRE, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 6, 0, + NPC_S_KPU12_TU_IP, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, + NPC_F_LC_L_IP_IN_IP, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 6, 0, + NPC_S_KPU12_TU_IP6, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, + NPC_F_LC_L_6TO4, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 3, 0, + NPC_S_KPU9_TU_MPLS_IN_IP, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, + NPC_F_LC_L_MPLS_IN_IP, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP, + NPC_F_LC_U_UNK_PROTO, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP, + NPC_F_LC_U_IP_FRAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 12, 0, 2, 0, + NPC_S_KPU8_TCP, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP_OPT, + 0, + 0, 0xf, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 8, 10, 2, 0, + NPC_S_KPU8_UDP, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP_OPT, + 0, + 0, 0xf, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_SCTP, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP_OPT, + 0, + 0, 0xf, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_ICMP, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP_OPT, + 0, + 0, 0xf, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_IGMP, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP_OPT, + 0, + 0, 0xf, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU9_ESP, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP_OPT, + 0, + 0, 0xf, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_AH, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP_OPT, + 0, + 0, 0xf, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 2, 0, + NPC_S_KPU8_GRE, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP_OPT, + 0, + 0, 0xf, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 6, 0, + NPC_S_KPU12_TU_IP, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP_OPT, + NPC_F_LC_L_IP_IN_IP, + 0, 0xf, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 6, 0, + NPC_S_KPU12_TU_IP6, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP_OPT, + NPC_F_LC_L_6TO4, + 0, 0xf, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 3, 0, + NPC_S_KPU9_TU_MPLS_IN_IP, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP_OPT, + NPC_F_LC_L_MPLS_IN_IP, + 0, 0xf, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP_OPT, + NPC_F_LC_U_UNK_PROTO, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP_OPT, + NPC_F_LC_U_IP_FRAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LC, NPC_EC_IP_VER, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_ARP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_RARP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_PTP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, 
NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_FCOE, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LC, NPC_EC_IP6_HOP_0, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP6, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 12, 0, 2, 0, + NPC_S_KPU8_TCP, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 2, 0, + NPC_S_KPU8_UDP, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_SCTP, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_ICMP, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_ICMP6, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_GRE, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 6, 0, + NPC_S_KPU12_TU_IP6, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6, + NPC_F_LC_L_IP6_TUN_IP6, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 3, 0, + NPC_S_KPU9_TU_MPLS_IN_IP, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6, + NPC_F_LC_L_IP6_MPLS_IN_IP, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU6_IP6_HOP_DEST, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6_EXT, + NPC_F_LC_L_EXT_HOP, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU6_IP6_HOP_DEST, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6_EXT, + NPC_F_LC_L_EXT_DEST, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU6_IP6_ROUT, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6_EXT, + NPC_F_LC_L_EXT_ROUT, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 2, 0, 0, 0, + NPC_S_KPU6_IP6_FRAG, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6_EXT, + NPC_F_LC_U_IP6_FRAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU9_ESP, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6_EXT, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_AH, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6_EXT, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP6_EXT, + NPC_F_LC_L_EXT_MOBILITY, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP6_EXT, + NPC_F_LC_L_EXT_HOSTID, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP6_EXT, + NPC_F_LC_L_EXT_SHIM6, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP6, + NPC_F_LC_U_UNK_PROTO, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LC, NPC_EC_IP6_VER, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP6, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 6, 0, + NPC_S_KPU12_TU_IP, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 6, 0, + NPC_S_KPU12_TU_IP6, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 5, 0, + NPC_S_KPU11_TU_ETHER, 8, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 5, 0, + NPC_S_KPU11_TU_ETHER, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LB, NPC_EC_MPLS_2MANY, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + 
NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 6, 0, + NPC_S_KPU12_TU_IP, 0, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 6, 0, + NPC_S_KPU12_TU_IP6, 0, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 5, 0, + NPC_S_KPU11_TU_ETHER, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 5, 0, + NPC_S_KPU11_TU_ETHER, 0, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LC, NPC_EC_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, +}; + +static const struct npc_kpu_profile_action kpu6_action_entries[] = { + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 12, 0, 1, 0, + NPC_S_KPU8_TCP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 8, 10, 1, 0, + NPC_S_KPU8_UDP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_SCTP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_ICMP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_ICMP6, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU9_ESP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_AH, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_GRE, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 5, 0, + NPC_S_KPU12_TU_IP6, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 2, 0, + NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + 
NPC_S_KPU7_IP6_ROUT, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 2, 0, 0, 0, + NPC_S_KPU7_IP6_FRAG, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 12, 0, 1, 0, + NPC_S_KPU8_TCP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 8, 10, 1, 0, + NPC_S_KPU8_UDP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_SCTP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_ICMP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_ICMP6, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU9_ESP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_AH, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_GRE, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 5, 0, + NPC_S_KPU12_TU_IP6, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 2, 0, + NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 2, 0, 0, 0, + NPC_S_KPU7_IP6_FRAG, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LC, NPC_EC_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, +}; + +static const struct npc_kpu_profile_action kpu7_action_entries[] = { + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 12, 0, 0, 0, + NPC_S_KPU8_TCP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 8, 10, 0, 0, + NPC_S_KPU8_UDP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU8_SCTP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU8_ICMP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU8_ICMP6, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU9_ESP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU8_AH, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU8_GRE, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 4, 0, + NPC_S_KPU12_TU_IP6, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 1, 0, + NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + 
NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LC, NPC_EC_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, +}; + +static const struct npc_kpu_profile_action kpu8_action_entries[] = { + { + NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_FIN_ONLY, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_TCP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_ZERO, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_TCP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_RST_FIN, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_TCP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_URG_SYN, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_TCP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_RST_SYN, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_TCP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_SYN_FIN, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_TCP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 7, 0, + NPC_S_KPU16_HTTP_DATA, 20, 1, + NPC_LID_LD, NPC_LT_LD_TCP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 7, 0, + NPC_S_KPU16_HTTPS_DATA, 20, 1, + NPC_LID_LD, NPC_LT_LD_TCP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 7, 0, + NPC_S_KPU16_PPTP_DATA, 20, 1, + NPC_LID_LD, NPC_LT_LD_TCP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 7, 0, + NPC_S_KPU16_TCP_DATA, 20, 1, + NPC_LID_LD, NPC_LT_LD_TCP, + NPC_F_LD_L_TCP_UNK_PORT, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 7, 0, + NPC_S_KPU16_HTTP_DATA, 0, 1, + NPC_LID_LD, NPC_LT_LD_TCP, + NPC_F_LD_L_TCP_HAS_OPTIONS, + 12, 0xf0, 1, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 7, 0, + NPC_S_KPU16_HTTPS_DATA, 0, 1, + NPC_LID_LD, NPC_LT_LD_TCP, + NPC_F_LD_L_TCP_HAS_OPTIONS, + 12, 0xf0, 1, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 7, 0, + NPC_S_KPU16_PPTP_DATA, 0, 1, + NPC_LID_LD, NPC_LT_LD_TCP, + NPC_F_LD_L_TCP_HAS_OPTIONS, + 12, 0xf0, 1, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 7, 0, + NPC_S_KPU16_TCP_DATA, 0, 1, + NPC_LID_LD, NPC_LT_LD_TCP, + 
NPC_F_LD_L_TCP_UNK_PORT_HAS_OPTIONS, + 12, 0xf0, 1, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 2, 0, 0, + NPC_S_KPU9_VXLAN, 8, 1, + NPC_LID_LD, NPC_LT_LD_UDP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 2, 0, 0, + NPC_S_KPU9_VXLANGPE, 8, 1, + NPC_LID_LD, NPC_LT_LD_UDP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 2, 0, 0, + NPC_S_KPU9_GENEVE, 8, 1, + NPC_LID_LD, NPC_LT_LD_UDP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 2, 0, 0, + NPC_S_KPU9_GTPC, 8, 1, + NPC_LID_LD, NPC_LT_LD_UDP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 2, 0, 0, + NPC_S_KPU9_GTPU, 8, 1, + NPC_LID_LD, NPC_LT_LD_UDP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_KPU16_UDP_PTP, 0, 1, + NPC_LID_LD, NPC_LT_LD_UDP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_KPU16_UDP_PTP, 0, 1, + NPC_LID_LD, NPC_LT_LD_UDP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU9_TU_MPLS_IN_UDP, 8, 1, + NPC_LID_LD, NPC_LT_LD_UDP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU9_ESP, 8, 1, + NPC_LID_LD, NPC_LT_LD_UDP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU9_ESP, 8, 1, + NPC_LID_LD, NPC_LT_LD_UDP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 7, 0, + NPC_S_KPU16_UDP_DATA, 8, 1, + NPC_LID_LD, NPC_LT_LD_UDP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_SCTP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_ICMP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_IGMP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_ICMP6, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_AH, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 2, 0, + NPC_S_KPU11_TU_ETHER, 8, 1, + NPC_LID_LD, NPC_LT_LD_NVGRE, + NPC_F_LD_L_GRE_NVGRE, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LD, NPC_EC_NVGRE, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LD, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU9_TU_MPLS_IN_GRE, 4, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_KEY, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_SEQ, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM_KEY, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM_SEQ, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_KEY_SEQ, + 0, 0, 0, 0, + }, + { + 
NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU9_TU_MPLS_IN_GRE, 16, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU9_TU_MPLS_IN_GRE, 4, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_KEY, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_SEQ, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM_KEY, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM_SEQ, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_KEY_SEQ, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU9_TU_MPLS_IN_GRE, 16, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU9_TU_NSH_IN_GRE, 4, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU9_TU_NSH_IN_GRE, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU9_TU_NSH_IN_GRE, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_KEY, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU9_TU_NSH_IN_GRE, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_SEQ, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU9_TU_NSH_IN_GRE, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM_KEY, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU9_TU_NSH_IN_GRE, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM_SEQ, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU9_TU_NSH_IN_GRE, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_KEY_SEQ, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU9_TU_NSH_IN_GRE, 16, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 3, 0, + NPC_S_KPU12_TU_IP, 4, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 3, 0, + NPC_S_KPU12_TU_IP, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 3, 0, + NPC_S_KPU12_TU_IP, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_KEY, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 3, 0, + NPC_S_KPU12_TU_IP, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_SEQ, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 3, 0, + NPC_S_KPU12_TU_IP, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM_KEY, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 
3, 0, + NPC_S_KPU12_TU_IP, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM_SEQ, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 3, 0, + NPC_S_KPU12_TU_IP, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_KEY_SEQ, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 3, 0, + NPC_S_KPU12_TU_IP, 16, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 3, 0, + NPC_S_KPU12_TU_IP6, 4, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 3, 0, + NPC_S_KPU12_TU_IP6, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 3, 0, + NPC_S_KPU12_TU_IP6, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_KEY, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 3, 0, + NPC_S_KPU12_TU_IP6, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_SEQ, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 3, 0, + NPC_S_KPU12_TU_IP6, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM_KEY, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 3, 0, + NPC_S_KPU12_TU_IP6, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM_SEQ, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 3, 0, + NPC_S_KPU12_TU_IP6, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_KEY_SEQ, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 3, 0, + NPC_S_KPU12_TU_IP6, 16, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_ROUTE, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_UNK_PROTO, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LD, NPC_EC_GRE, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LD, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU11_TU_PPP, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_VER1, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU11_TU_PPP, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_VER1_HAS_SEQ, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU11_TU_PPP, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_VER1_HAS_ACK, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU11_TU_PPP, 16, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_VER1_HAS_SEQ_ACK, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_VER1_UNK_PROTO, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LD, NPC_EC_GRE_VER1, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LD, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LD, NPC_EC_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LD, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, +}; + +static const struct npc_kpu_profile_action kpu9_action_entries[] = { + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU10_TU_MPLS_PL, 4, 1, + NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_GRE, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU10_TU_MPLS_PL, 8, 1, + NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_GRE, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + 
NPC_S_KPU10_TU_MPLS_PL, 12, 1, + NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_GRE, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 4, 0, 0, 0, + NPC_S_KPU10_TU_MPLS, 12, 1, + NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_GRE, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU10_TU_MPLS_PL, 4, 1, + NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_NSH, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU10_TU_MPLS_PL, 8, 1, + NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_NSH, + NPC_F_LD_L_MPLS_2_LABELS, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU10_TU_MPLS_PL, 12, 1, + NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_NSH, + NPC_F_LD_L_MPLS_3_LABELS, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 4, 0, 0, 0, + NPC_S_KPU10_TU_MPLS, 12, 1, + NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_NSH, + NPC_F_LD_L_MPLS_4_LABELS, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU10_TU_MPLS_PL, 4, 1, + NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_IP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU10_TU_MPLS_PL, 8, 1, + NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_IP, + NPC_F_LD_L_MPLS_2_LABELS, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU10_TU_MPLS_PL, 12, 1, + NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_IP, + NPC_F_LD_L_MPLS_3_LABELS, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 4, 0, 0, 0, + NPC_S_KPU10_TU_MPLS, 12, 1, + NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_IP, + NPC_F_LD_L_MPLS_4_LABELS, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU12_TU_IP, 0, 1, + NPC_LID_LE, NPC_LT_LE_TU_NSH_IN_GRE, + 0, + 1, 0x3f, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU12_TU_IP6, 0, 1, + NPC_LID_LE, NPC_LT_LE_TU_NSH_IN_GRE, + 0, + 1, 0x3f, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 1, 0, + NPC_S_KPU11_TU_ETHER, 0, 1, + NPC_LID_LE, NPC_LT_LE_TU_NSH_IN_GRE, + 0, + 1, 0x3f, 0, 2, + }, + { + NPC_ERRLEV_LE, NPC_EC_NSH_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LE, NPC_LT_LE_TU_NSH_IN_GRE, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 1, 0, + NPC_S_KPU11_TU_ETHER, 8, 1, + NPC_LID_LE, NPC_LT_LE_VXLAN, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 1, 0, + NPC_S_KPU11_TU_ETHER, 8, 1, + NPC_LID_LE, NPC_LT_LE_VXLAN, + NPC_F_LE_L_VXLAN_NOVNI, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LE, NPC_EC_VXLAN, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LE, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU12_TU_IP, 8, 1, + NPC_LID_LE, NPC_LT_LE_VXLANGPE, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU12_TU_IP6, 8, 1, + NPC_LID_LE, NPC_LT_LE_VXLANGPE, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 1, 0, + NPC_S_KPU11_TU_ETHER, 8, 1, + NPC_LID_LE, NPC_LT_LE_VXLANGPE, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 8, 1, + NPC_LID_LE, NPC_LT_LE_VXLANGPE, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 8, 1, + NPC_LID_LE, NPC_LT_LE_VXLANGPE, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU12_TU_IP, 8, 1, + NPC_LID_LE, NPC_LT_LE_VXLANGPE, + NPC_F_LE_L_VXLANGPE_NOVNI, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU12_TU_IP6, 8, 1, + NPC_LID_LE, NPC_LT_LE_VXLANGPE, + 
NPC_F_LE_L_VXLANGPE_NOVNI, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 1, 0, + NPC_S_KPU11_TU_ETHER, 8, 1, + NPC_LID_LE, NPC_LT_LE_VXLANGPE, + NPC_F_LE_L_VXLANGPE_NOVNI, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 8, 1, + NPC_LID_LE, NPC_LT_LE_VXLANGPE, + NPC_F_LE_L_VXLANGPE_NOVNI, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 8, 1, + NPC_LID_LE, NPC_LT_LE_VXLANGPE, + NPC_F_LE_L_VXLANGPE_NOVNI, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LE, NPC_LT_LE_VXLANGPE, + NPC_F_LE_L_VXLANGPE_UNK, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LE, NPC_LT_LE_VXLANGPE, + NPC_F_LE_L_VXLANGPE_NONP, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 1, 0, + NPC_S_KPU11_TU_ETHER, 8, 1, + NPC_LID_LE, NPC_LT_LE_GENEVE, + 0, + 0, 0x3f, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 1, 0, + NPC_S_KPU11_TU_ETHER, 8, 1, + NPC_LID_LE, NPC_LT_LE_GENEVE, + NPC_F_LE_L_GENEVE_OAM, + 0, 0x3f, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 1, 0, + NPC_S_KPU11_TU_ETHER, 8, 1, + NPC_LID_LE, NPC_LT_LE_GENEVE, + NPC_F_LE_L_GENEVE_CRI_OPT, + 0, 0x3f, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 1, 0, + NPC_S_KPU11_TU_ETHER, 8, 1, + NPC_LID_LE, NPC_LT_LE_GENEVE, + NPC_F_LE_L_GENEVE_OAM_CRI_OPT, + 0, 0x3f, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU12_TU_IP, 8, 1, + NPC_LID_LE, NPC_LT_LE_GENEVE, + 0, + 0, 0x3f, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU12_TU_IP, 8, 1, + NPC_LID_LE, NPC_LT_LE_GENEVE, + NPC_F_LE_L_GENEVE_OAM, + 0, 0x3f, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU12_TU_IP, 8, 1, + NPC_LID_LE, NPC_LT_LE_GENEVE, + NPC_F_LE_L_GENEVE_CRI_OPT, + 0, 0x3f, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU12_TU_IP, 8, 1, + NPC_LID_LE, NPC_LT_LE_GENEVE, + NPC_F_LE_L_GENEVE_OAM_CRI_OPT, + 0, 0x3f, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU12_TU_IP6, 8, 1, + NPC_LID_LE, NPC_LT_LE_GENEVE, + 0, + 0, 0x3f, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU12_TU_IP6, 8, 1, + NPC_LID_LE, NPC_LT_LE_GENEVE, + NPC_F_LE_L_GENEVE_OAM, + 0, 0x3f, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU12_TU_IP6, 8, 1, + NPC_LID_LE, NPC_LT_LE_GENEVE, + NPC_F_LE_L_GENEVE_CRI_OPT, + 0, 0x3f, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU12_TU_IP6, 8, 1, + NPC_LID_LE, NPC_LT_LE_GENEVE, + NPC_F_LE_L_GENEVE_OAM_CRI_OPT, + 0, 0x3f, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LE, NPC_LT_LE_GTPC, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU12_TU_IP, 8, 1, + NPC_LID_LE, NPC_LT_LE_GTPU, + NPC_F_LE_L_GTPU_G_PDU, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LE, NPC_LT_LE_GTPU, + NPC_F_LE_L_GTPU_UNK, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU10_TU_MPLS_PL, 4, 1, + NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_UDP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU10_TU_MPLS_PL, 8, 1, + NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_UDP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + 
NPC_S_KPU10_TU_MPLS_PL, 12, 1, + NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_UDP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 4, 0, 0, 0, + NPC_S_KPU10_TU_MPLS, 12, 1, + NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_UDP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LE, NPC_LT_LE_ESP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LE, NPC_EC_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LE, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, +}; + +static const struct npc_kpu_profile_action kpu10_action_entries[] = { + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 1, 0, + NPC_S_KPU12_TU_IP, 4, 0, + NPC_LID_LF, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 1, 0, + NPC_S_KPU12_TU_IP6, 4, 0, + NPC_LID_LF, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU11_TU_ETHER, 8, 0, + NPC_LID_LF, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU11_TU_ETHER, 4, 0, + NPC_LID_LF, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LE, NPC_EC_MPLS_2MANY, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LF, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 1, 0, + NPC_S_KPU12_TU_IP, 0, 0, + NPC_LID_LF, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 1, 0, + NPC_S_KPU12_TU_IP6, 0, 0, + NPC_LID_LF, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU11_TU_ETHER, 4, 0, + NPC_LID_LF, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU11_TU_ETHER, 0, 0, + NPC_LID_LF, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU11_TU_MPLS_PL, 4, 1, + NPC_LID_LF, NPC_LT_LF_TU_MPLS_IN_VXLANGPE, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU11_TU_MPLS_PL, 8, 1, + NPC_LID_LF, NPC_LT_LF_TU_MPLS_IN_VXLANGPE, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU11_TU_MPLS_PL, 12, 1, + NPC_LID_LF, NPC_LT_LF_TU_MPLS_IN_VXLANGPE, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 4, 0, 0, 0, + NPC_S_KPU11_TU_MPLS, 12, 1, + NPC_LID_LF, NPC_LT_LF_TU_MPLS_IN_VXLANGPE, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 1, 0, + NPC_S_KPU12_TU_IP, 0, 1, + NPC_LID_LF, NPC_LT_LF_TU_NSH_IN_VXLANGPE, + 0, + 1, 0x3f, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 1, 0, + NPC_S_KPU12_TU_IP6, 0, 1, + NPC_LID_LF, NPC_LT_LF_TU_NSH_IN_VXLANGPE, + 0, + 1, 0x3f, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 0, 0, + NPC_S_KPU11_TU_ETHER_IN_NSH, 0, 1, + NPC_LID_LF, NPC_LT_LF_TU_NSH_IN_VXLANGPE, + 0, + 1, 0x3f, 0, 2, + }, + { + NPC_ERRLEV_LF, NPC_EC_NSH_UNK, + 6, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LF, NPC_LT_LF_TU_NSH_IN_VXLANGPE, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LE, NPC_EC_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LF, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, +}; + +static const struct npc_kpu_profile_action kpu11_action_entries[] = { + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 0, 0, + NPC_S_KPU12_TU_IP, 14, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 0, 0, + NPC_S_KPU12_TU_IP6, 14, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU12_TU_ARP, 14, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + 0, + 0, 0, 0, 0, + }, 
+ { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 0, 0, + NPC_S_KPU12_TU_IP, 18, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_L_WITH_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 0, 0, + NPC_S_KPU12_TU_IP6, 18, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_L_WITH_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU12_TU_ARP, 18, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_L_WITH_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_U_UNK_ETYPE | NPC_F_LF_L_WITH_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 0, 0, + NPC_S_KPU12_TU_IP, 22, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_L_WITH_STAG_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 0, 0, + NPC_S_KPU12_TU_IP6, 22, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_L_WITH_STAG_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU12_TU_ARP, 22, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_L_WITH_STAG_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_U_UNK_ETYPE | NPC_F_LF_L_WITH_STAG_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 0, 0, + NPC_S_KPU12_TU_IP, 18, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_L_WITH_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 0, 0, + NPC_S_KPU12_TU_IP6, 18, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_L_WITH_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU12_TU_ARP, 18, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_L_WITH_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_U_UNK_ETYPE | NPC_F_LF_L_WITH_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 0, 0, + NPC_S_KPU12_TU_IP, 22, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_L_WITH_QINQ_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 0, 0, + NPC_S_KPU12_TU_IP6, 22, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_L_WITH_QINQ_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU12_TU_ARP, 22, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_L_WITH_QINQ_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_U_UNK_ETYPE | NPC_F_LF_L_WITH_QINQ_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 0, 0, + NPC_S_KPU12_TU_IP, 18, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_L_WITH_QINQ, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 0, 0, + NPC_S_KPU12_TU_IP6, 18, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_L_WITH_QINQ, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU12_TU_ARP, 18, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_L_WITH_QINQ, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_U_UNK_ETYPE | NPC_F_LF_L_WITH_QINQ, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_U_UNK_ETYPE, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LF, NPC_LT_LF_TU_PPP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, 
NPC_EC_NOERR, + 8, 0, 6, 0, 0, + NPC_S_KPU12_TU_IP, 4, 0, + NPC_LID_LF, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 0, 0, + NPC_S_KPU12_TU_IP6, 4, 0, + NPC_LID_LF, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LF, NPC_EC_MPLS_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LF, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LF, NPC_EC_MPLS_2MANY, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LF, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 0, 0, + NPC_S_KPU12_TU_IP, 0, 0, + NPC_LID_LF, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 0, 0, + NPC_S_KPU12_TU_IP6, 0, 0, + NPC_LID_LF, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LF, NPC_EC_MPLS_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LF, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LG, NPC_LT_LG_TU_ETHER_IN_NSH, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LF, NPC_EC_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LF, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, +}; + +static const struct npc_kpu_profile_action kpu12_action_entries[] = { + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 12, 0, 2, 0, + NPC_S_KPU15_TU_TCP, 20, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 2, 0, + NPC_S_KPU15_TU_UDP, 20, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU15_TU_SCTP, 20, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU15_TU_ICMP, 20, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU15_TU_IGMP, 20, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU15_TU_ESP, 20, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU15_TU_AH, 20, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP, + NPC_F_LG_U_UNK_IP_PROTO, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 12, 0, 2, 0, + NPC_S_KPU15_TU_TCP, 0, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP, + NPC_F_LG_U_IP_HAS_OPTIONS, + 0, 0xf, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 2, 0, + NPC_S_KPU15_TU_UDP, 0, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP, + NPC_F_LG_U_IP_HAS_OPTIONS, + 0, 0xf, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU15_TU_SCTP, 0, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP, + NPC_F_LG_U_IP_HAS_OPTIONS, + 0, 0xf, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU15_TU_ICMP, 0, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP, + NPC_F_LG_U_IP_HAS_OPTIONS, + 0, 0xf, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU15_TU_IGMP, 0, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP, + NPC_F_LG_U_IP_HAS_OPTIONS, + 0, 0xf, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU15_TU_ESP, 0, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP, + NPC_F_LG_U_IP_HAS_OPTIONS, + 0, 0xf, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU15_TU_AH, 0, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP, + NPC_F_LG_U_IP_HAS_OPTIONS, + 0, 0xf, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP, + NPC_F_LG_U_IP_HAS_OPTIONS | 
NPC_F_LG_U_UNK_IP_PROTO, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LF, NPC_EC_IP_VER, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LG, NPC_LT_LG_TU_ARP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 12, 0, 2, 0, + NPC_S_KPU15_TU_TCP, 40, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP6, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 2, 0, + NPC_S_KPU15_TU_UDP, 40, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP6, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU15_TU_SCTP, 40, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP6, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU15_TU_ICMP, 40, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP6, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU15_TU_ICMP6, 40, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP6, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU15_TU_ESP, 40, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP6, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU15_TU_AH, 40, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP6, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU13_TU_IP6_EXT, 0, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP6, + NPC_F_LG_U_IP6_HAS_EXT, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LF, NPC_EC_IP6_VER, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP6, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LF, NPC_EC_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LG, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, +}; + +static const struct npc_kpu_profile_action kpu13_action_entries[] = { + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, +}; + +static const struct npc_kpu_profile_action kpu14_action_entries[] = { + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, +}; + +static const struct npc_kpu_profile_action kpu15_action_entries[] = { + { + NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_FIN_ONLY, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LH, NPC_LT_LH_TU_TCP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_ZERO, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LH, NPC_LT_LH_TU_TCP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_RST_FIN, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LH, NPC_LT_LH_TU_TCP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_URG_SYN, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LH, NPC_LT_LH_TU_TCP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_RST_SYN, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LH, NPC_LT_LH_TU_TCP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_SYN_FIN, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LH, NPC_LT_LH_TU_TCP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU16_HTTP_DATA, 20, 1, + NPC_LID_LH, NPC_LT_LH_TU_TCP, + NPC_F_LH_L_TCP_HTTP, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU16_HTTPS_DATA, 20, 1, + NPC_LID_LH, NPC_LT_LH_TU_TCP, + NPC_F_LH_L_TCP_HTTP, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU16_PPTP_DATA, 20, 1, + NPC_LID_LH, NPC_LT_LH_TU_TCP, + NPC_F_LH_L_TCP_PPTP, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU16_TCP_DATA, 20, 1, + NPC_LID_LH, NPC_LT_LH_TU_TCP, + 
NPC_F_LH_L_TCP_UNK_PORT, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU16_HTTP_DATA, 0, 1, + NPC_LID_LH, NPC_LT_LH_TU_TCP, + NPC_F_LH_U_TCP_HAS_OPTIONS | NPC_F_LH_L_TCP_HTTP, + 12, 0xf0, 1, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU16_HTTPS_DATA, 0, 1, + NPC_LID_LH, NPC_LT_LH_TU_TCP, + NPC_F_LH_U_TCP_HAS_OPTIONS | NPC_F_LH_L_TCP_HTTPS, + 12, 0xf0, 1, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU16_PPTP_DATA, 0, 1, + NPC_LID_LH, NPC_LT_LH_TU_TCP, + NPC_F_LH_U_TCP_HAS_OPTIONS | NPC_F_LH_L_TCP_PPTP, + 12, 0xf0, 1, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU16_TCP_DATA, 0, 1, + NPC_LID_LH, NPC_LT_LH_TU_TCP, + NPC_F_LH_U_TCP_HAS_OPTIONS | NPC_F_LH_L_TCP_UNK_PORT, + 12, 0xf0, 1, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU16_UDP_DATA, 8, 1, + NPC_LID_LH, NPC_LT_LH_TU_UDP, + NPC_F_LH_L_UDP_UNK_PORT, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LH, NPC_LT_LH_TU_SCTP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LH, NPC_LT_LH_TU_ICMP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LH, NPC_LT_LH_TU_IGMP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LH, NPC_LT_LH_TU_ICMP6, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LH, NPC_LT_LH_TU_ESP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LH, NPC_LT_LH_TU_AH, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LG, NPC_EC_L4, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LH, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, +}; + +static const struct npc_kpu_profile_action kpu16_action_entries[] = { + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LH, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LH, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LH, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LH, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LH, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LH, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, +}; + +static const struct npc_kpu_profile npc_kpu_profiles[] = { + { + ARRAY_SIZE(kpu1_cam_entries), + ARRAY_SIZE(kpu1_action_entries), + &kpu1_cam_entries[0], + &kpu1_action_entries[0], + }, + { + ARRAY_SIZE(kpu2_cam_entries), + ARRAY_SIZE(kpu2_action_entries), + &kpu2_cam_entries[0], + &kpu2_action_entries[0], + }, + { + ARRAY_SIZE(kpu3_cam_entries), + ARRAY_SIZE(kpu3_action_entries), + &kpu3_cam_entries[0], + &kpu3_action_entries[0], + }, + { + ARRAY_SIZE(kpu4_cam_entries), + ARRAY_SIZE(kpu4_action_entries), + &kpu4_cam_entries[0], + &kpu4_action_entries[0], + }, + { + ARRAY_SIZE(kpu5_cam_entries), + ARRAY_SIZE(kpu5_action_entries), + &kpu5_cam_entries[0], + &kpu5_action_entries[0], + }, + { + ARRAY_SIZE(kpu6_cam_entries), + ARRAY_SIZE(kpu6_action_entries), + &kpu6_cam_entries[0], + &kpu6_action_entries[0], + }, + { + ARRAY_SIZE(kpu7_cam_entries), + ARRAY_SIZE(kpu7_action_entries), + 
&kpu7_cam_entries[0], + &kpu7_action_entries[0], + }, + { + ARRAY_SIZE(kpu8_cam_entries), + ARRAY_SIZE(kpu8_action_entries), + &kpu8_cam_entries[0], + &kpu8_action_entries[0], + }, + { + ARRAY_SIZE(kpu9_cam_entries), + ARRAY_SIZE(kpu9_action_entries), + &kpu9_cam_entries[0], + &kpu9_action_entries[0], + }, + { + ARRAY_SIZE(kpu10_cam_entries), + ARRAY_SIZE(kpu10_action_entries), + &kpu10_cam_entries[0], + &kpu10_action_entries[0], + }, + { + ARRAY_SIZE(kpu11_cam_entries), + ARRAY_SIZE(kpu11_action_entries), + &kpu11_cam_entries[0], + &kpu11_action_entries[0], + }, + { + ARRAY_SIZE(kpu12_cam_entries), + ARRAY_SIZE(kpu12_action_entries), + &kpu12_cam_entries[0], + &kpu12_action_entries[0], + }, + { + ARRAY_SIZE(kpu13_cam_entries), + ARRAY_SIZE(kpu13_action_entries), + &kpu13_cam_entries[0], + &kpu13_action_entries[0], + }, + { + ARRAY_SIZE(kpu14_cam_entries), + ARRAY_SIZE(kpu14_action_entries), + &kpu14_cam_entries[0], + &kpu14_action_entries[0], + }, + { + ARRAY_SIZE(kpu15_cam_entries), + ARRAY_SIZE(kpu15_action_entries), + &kpu15_cam_entries[0], + &kpu15_action_entries[0], + }, + { + ARRAY_SIZE(kpu16_cam_entries), + ARRAY_SIZE(kpu16_action_entries), + &kpu16_cam_entries[0], + &kpu16_action_entries[0], + }, +}; + +static const struct npc_lt_def_cfg npc_lt_defaults = { + .rx_ol2 = { + .lid = NPC_LID_LA, + .ltype_match = NPC_LT_LA_ETHER, + .ltype_mask = 0x0F, + }, + .rx_oip4 = { + .lid = NPC_LID_LC, + .ltype_match = NPC_LT_LC_IP, + .ltype_mask = 0x0E, + }, + .rx_iip4 = { + .lid = NPC_LID_LG, + .ltype_match = NPC_LT_LG_TU_IP, + .ltype_mask = 0x0F, + }, + .rx_oip6 = { + .lid = NPC_LID_LC, + .ltype_match = NPC_LT_LC_IP6, + .ltype_mask = 0x0E, + }, + .rx_iip6 = { + .lid = NPC_LID_LG, + .ltype_match = NPC_LT_LG_TU_IP6, + .ltype_mask = 0x0F, + }, + .rx_otcp = { + .lid = NPC_LID_LD, + .ltype_match = NPC_LT_LD_TCP, + .ltype_mask = 0x0F, + }, + .rx_itcp = { + .lid = NPC_LID_LH, + .ltype_match = NPC_LT_LH_TU_TCP, + .ltype_mask = 0x0F, + }, + .rx_oudp = { + .lid = NPC_LID_LD, + .ltype_match = NPC_LT_LD_UDP, + .ltype_mask = 0x0F, + }, + .rx_iudp = { + .lid = NPC_LID_LH, + .ltype_match = NPC_LT_LH_TU_UDP, + .ltype_mask = 0x0F, + }, + .rx_osctp = { + .lid = NPC_LID_LD, + .ltype_match = NPC_LT_LD_SCTP, + .ltype_mask = 0x0F, + }, + .rx_isctp = { + .lid = NPC_LID_LH, + .ltype_match = NPC_LT_LH_TU_SCTP, + .ltype_mask = 0x0F, + }, + .rx_ipsec = { + { + .lid = NPC_LID_LE, + .ltype_match = NPC_LT_LE_ESP, + .ltype_mask = 0x0F, + }, + { + .spi_offset = 8, + .lid = NPC_LID_LH, + .ltype_match = NPC_LT_LH_TU_ESP, + .ltype_mask = 0x0F, + }, + }, + .pck_ol2 = { + .lid = NPC_LID_LA, + .ltype_match = NPC_LT_LA_ETHER, + .ltype_mask = 0x0F, + }, + .pck_oip4 = { + .lid = NPC_LID_LC, + .ltype_match = NPC_LT_LC_IP, + .ltype_mask = 0x0E, + }, + .pck_iip4 = { + .lid = NPC_LID_LG, + .ltype_match = NPC_LT_LG_TU_IP, + .ltype_mask = 0x0F, + }, +}; + +static const struct npc_mcam_kex npc_mkex_default = { + .mkex_sign = MKEX_SIGN, + .name = "default", + .kpu_version = NPC_KPU_PROFILE_VER, + .keyx_cfg = { + /* nibble: LA..LE (ltype only) + channel */ + [NIX_INTF_RX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_RX, + /* nibble: LA..LE (ltype only) */ + [NIX_INTF_TX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_TX, + }, + .intf_lid_lt_ld = { + /* Default RX MCAM KEX profile */ + [NIX_INTF_RX] = { + [NPC_LID_LA] = { + /* Layer A: Ethernet: */ + [NPC_LT_LA_ETHER] = { + /* DMAC: 6 bytes, KW1[47:0] */ + KEX_LD_CFG(0x05, 0x0, 0x1, 0x0, NPC_KEXOF_DMAC), + /* Ethertype: 2 bytes, KW0[47:32] */ + KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 
0x4), + }, + }, + [NPC_LID_LB] = { + /* Layer B: Single VLAN (CTAG) */ + /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */ + [NPC_LT_LB_CTAG] = { + KEX_LD_CFG(0x03, 0x2, 0x1, 0x0, 0x4), + }, + /* Layer B: Stacked VLAN (STAG|QinQ) */ + [NPC_LT_LB_STAG_QINQ] = { + /* Outer VLAN: 2 bytes, KW0[63:48] */ + KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x6), + /* Ethertype: 2 bytes, KW0[47:32] */ + KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, 0x4), + }, + [NPC_LT_LB_FDSA] = { + /* SWITCH PORT: 1 byte, KW0[63:48] */ + KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0x6), + /* Ethertype: 2 bytes, KW0[47:32] */ + KEX_LD_CFG(0x01, 0x4, 0x1, 0x0, 0x4), + }, + }, + [NPC_LID_LC] = { + /* Layer C: IPv4 */ + [NPC_LT_LC_IP] = { + /* SIP+DIP: 8 bytes, KW2[63:0] */ + KEX_LD_CFG(0x07, 0xc, 0x1, 0x0, 0x10), + /* TOS: 1 byte, KW1[63:56] */ + KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0xf), + }, + /* Layer C: IPv6 */ + [NPC_LT_LC_IP6] = { + /* Everything up to SADDR: 8 bytes, KW2[63:0] */ + KEX_LD_CFG(0x07, 0x0, 0x1, 0x0, 0x10), + }, + }, + [NPC_LID_LD] = { + /* Layer D:UDP */ + [NPC_LT_LD_UDP] = { + /* SPORT+DPORT: 4 bytes, KW3[31:0] */ + KEX_LD_CFG(0x3, 0x0, 0x1, 0x0, 0x18), + }, + /* Layer D:TCP */ + [NPC_LT_LD_TCP] = { + /* SPORT+DPORT: 4 bytes, KW3[31:0] */ + KEX_LD_CFG(0x3, 0x0, 0x1, 0x0, 0x18), + }, + }, + }, + + /* Default TX MCAM KEX profile */ + [NIX_INTF_TX] = { + [NPC_LID_LA] = { + /* Layer A: NIX_INST_HDR_S + Ethernet */ + /* NIX appends 8 bytes of NIX_INST_HDR_S at the + * start of each TX packet supplied to NPC. + */ + [NPC_LT_LA_IH_NIX_ETHER] = { + /* PF_FUNC: 2B , KW0 [47:32] */ + KEX_LD_CFG(0x01, 0x0, 0x1, 0x0, 0x4), + /* DMAC: 6 bytes, KW1[63:16] */ + KEX_LD_CFG(0x05, 0x8, 0x1, 0x0, 0xa), + }, + }, + [NPC_LID_LB] = { + /* Layer B: Single VLAN (CTAG) */ + [NPC_LT_LB_CTAG] = { + /* CTAG VLAN[2..3] KW0[63:48] */ + KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x6), + /* CTAG VLAN[2..3] KW1[15:0] */ + KEX_LD_CFG(0x01, 0x4, 0x1, 0x0, 0x8), + }, + /* Layer B: Stacked VLAN (STAG|QinQ) */ + [NPC_LT_LB_STAG_QINQ] = { + /* Outer VLAN: 2 bytes, KW0[63:48] */ + KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x6), + /* Outer VLAN: 2 Bytes, KW1[15:0] */ + KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, 0x8), + }, + }, + [NPC_LID_LC] = { + /* Layer C: IPv4 */ + [NPC_LT_LC_IP] = { + /* SIP+DIP: 8 bytes, KW2[63:0] */ + KEX_LD_CFG(0x07, 0xc, 0x1, 0x0, 0x10), + }, + /* Layer C: IPv6 */ + [NPC_LT_LC_IP6] = { + /* Everything up to SADDR: 8 bytes, KW2[63:0] */ + KEX_LD_CFG(0x07, 0x0, 0x1, 0x0, 0x10), + }, + }, + [NPC_LID_LD] = { + /* Layer D:UDP */ + [NPC_LT_LD_UDP] = { + /* SPORT+DPORT: 4 bytes, KW3[31:0] */ + KEX_LD_CFG(0x3, 0x0, 0x1, 0x0, 0x18), + }, + /* Layer D:TCP */ + [NPC_LT_LD_TCP] = { + /* SPORT+DPORT: 4 bytes, KW3[31:0] */ + KEX_LD_CFG(0x3, 0x0, 0x1, 0x0, 0x18), + }, + }, + }, + }, +}; + +#endif /* NPC_PROFILE_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c new file mode 100644 index 000000000..f69f4f35a --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c @@ -0,0 +1,275 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell PTP driver + * + * Copyright (C) 2020 Marvell International Ltd. 
+ */
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "ptp.h"
+#include "mbox.h"
+#include "rvu.h"
+
+#define DRV_NAME "Marvell PTP Driver"
+
+#define PCI_DEVID_OCTEONTX2_PTP 0xA00C
+#define PCI_SUBSYS_DEVID_OCTX2_98xx_PTP 0xB100
+#define PCI_SUBSYS_DEVID_OCTX2_96XX_PTP 0xB200
+#define PCI_SUBSYS_DEVID_OCTX2_95XX_PTP 0xB300
+#define PCI_SUBSYS_DEVID_OCTX2_LOKI_PTP 0xB400
+#define PCI_SUBSYS_DEVID_OCTX2_95MM_PTP 0xB500
+#define PCI_DEVID_OCTEONTX2_RST 0xA085
+
+#define PCI_PTP_BAR_NO 0
+#define PCI_RST_BAR_NO 0
+
+#define PTP_CLOCK_CFG 0xF00ULL
+#define PTP_CLOCK_CFG_PTP_EN BIT_ULL(0)
+#define PTP_CLOCK_LO 0xF08ULL
+#define PTP_CLOCK_HI 0xF10ULL
+#define PTP_CLOCK_COMP 0xF18ULL
+
+#define RST_BOOT 0x1600ULL
+#define RST_MUL_BITS GENMASK_ULL(38, 33)
+#define CLOCK_BASE_RATE 50000000ULL
+
+static u64 get_clock_rate(void)
+{
+ u64 cfg, ret = CLOCK_BASE_RATE * 16;
+ struct pci_dev *pdev;
+ void __iomem *base;
+
+ /* To get the input clock frequency with which the PTP co-processor
+ * block is running, the base frequency (50 MHz) needs to be multiplied
+ * with the multiplier bits present in the RST_BOOT register of the
+ * RESET block. Hence the code below gets the multiplier bits from the
+ * RESET PCI device present in the system.
+ */
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_RST, NULL);
+ if (!pdev)
+ goto error;
+
+ base = pci_ioremap_bar(pdev, PCI_RST_BAR_NO);
+ if (!base)
+ goto error_put_pdev;
+
+ cfg = readq(base + RST_BOOT);
+ ret = CLOCK_BASE_RATE * FIELD_GET(RST_MUL_BITS, cfg);
+
+ iounmap(base);
+
+error_put_pdev:
+ pci_dev_put(pdev);
+
+error:
+ return ret;
+}
+
+struct ptp *ptp_get(void)
+{
+ struct pci_dev *pdev;
+ struct ptp *ptp;
+
+ /* If the PTP pci device is found on the system and the ptp
+ * driver is bound to it, then the PTP pci device is returned
+ * to the caller (rvu driver).
+ */
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_PTP, NULL);
+ if (!pdev)
+ return ERR_PTR(-ENODEV);
+
+ ptp = pci_get_drvdata(pdev);
+ if (!ptp)
+ ptp = ERR_PTR(-EPROBE_DEFER);
+ if (IS_ERR(ptp))
+ pci_dev_put(pdev);
+
+ return ptp;
+}
+
+void ptp_put(struct ptp *ptp)
+{
+ if (!ptp)
+ return;
+
+ pci_dev_put(ptp->pdev);
+}
+
+static int ptp_adjfine(struct ptp *ptp, long scaled_ppm)
+{
+ bool neg_adj = false;
+ u64 comp;
+ u64 adj;
+ s64 ppb;
+
+ if (scaled_ppm < 0) {
+ neg_adj = true;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ /* The hardware adds the clock compensation value to the PTP clock
+ * on every coprocessor clock cycle. The typical convention is that it
+ * represents the number of nanoseconds between each cycle. In this
+ * convention the compensation value is in 64-bit fixed-point
+ * representation, where the upper 32 bits are the number of
+ * nanoseconds and the lower 32 bits are fractions of a nanosecond.
+ * The scaled_ppm represents the ratio in "parts per million" by which
+ * the compensation value should be corrected.
+ * To calculate the new compensation value we use 64-bit fixed-point
+ * arithmetic on the following formula
+ * comp = tbase + tbase * scaled_ppm / (1M * 2^16)
+ * where tbase is the basic compensation value calculated
+ * initially in the probe function.
+ */
+ comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
+ /* convert scaled_ppm to ppb */
+ ppb = 1 + scaled_ppm;
+ ppb *= 125;
+ ppb >>= 13;
+ adj = comp * ppb;
+ adj = div_u64(adj, 1000000000ull);
+ comp = neg_adj ? comp - adj : comp + adj;
+
+ writeq(comp, ptp->reg_base + PTP_CLOCK_COMP);
+
+ return 0;
+}
+
+static int ptp_get_clock(struct ptp *ptp, u64 *clk)
+{
+ /* Return the current PTP clock */
+ *clk = readq(ptp->reg_base + PTP_CLOCK_HI);
+
+ return 0;
+}
+
+static int ptp_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct ptp *ptp;
+ u64 clock_comp;
+ u64 clock_cfg;
+ int err;
+
+ ptp = devm_kzalloc(dev, sizeof(*ptp), GFP_KERNEL);
+ if (!ptp) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ ptp->pdev = pdev;
+
+ err = pcim_enable_device(pdev);
+ if (err)
+ goto error_free;
+
+ err = pcim_iomap_regions(pdev, 1 << PCI_PTP_BAR_NO, pci_name(pdev));
+ if (err)
+ goto error_free;
+
+ ptp->reg_base = pcim_iomap_table(pdev)[PCI_PTP_BAR_NO];
+
+ ptp->clock_rate = get_clock_rate();
+
+ /* Enable PTP clock */
+ clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
+ clock_cfg |= PTP_CLOCK_CFG_PTP_EN;
+ writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
+
+ clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
+ /* Initial compensation value to start the nanosecs counter */
+ writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP);
+
+ pci_set_drvdata(pdev, ptp);
+
+ return 0;
+
+error_free:
+ devm_kfree(dev, ptp);
+
+error:
+ /* For `ptp_get()` we need to differentiate between the case
+ * when the core has not tried to probe this device and the case when
+ * the probe failed. In the latter case we pretend that the
+ * initialization was successful and keep the error in
+ * `dev->driver_data`.
+ */
+ pci_set_drvdata(pdev, ERR_PTR(err));
+ return 0;
+}
+
+static void ptp_remove(struct pci_dev *pdev)
+{
+ struct ptp *ptp = pci_get_drvdata(pdev);
+ u64 clock_cfg;
+
+ if (IS_ERR_OR_NULL(ptp))
+ return;
+
+ /* Disable PTP clock */
+ clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
+ clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN;
+ writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
+}
+
+static const struct pci_device_id ptp_id_table[] = {
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_98xx_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_96XX_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_95XX_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_LOKI_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_95MM_PTP) },
+ { 0, }
+};
+
+struct pci_driver ptp_driver = {
+ .name = DRV_NAME,
+ .id_table = ptp_id_table,
+ .probe = ptp_probe,
+ .remove = ptp_remove,
+};
+
+int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req,
+ struct ptp_rsp *rsp)
+{
+ int err = 0;
+
+ /* This function is the PTP mailbox handler invoked by AF
+ * consumers/netdev drivers via the mailbox mechanism. It is used by
+ * the netdev driver to get the PTP clock and to set frequency
+ * adjustments. Since the mailbox can be called without knowing
+ * whether the driver is bound to the PTP device, the validation
+ * below is needed as a first step.
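+ *
+ * A rough caller-side sketch (assuming the mailbox helpers that the
+ * M() macros generate for the otx2 netdev driver, e.g.
+ * otx2_mbox_alloc_msg_ptp_op() and otx2_sync_mbox_msg(); the exact
+ * helper names and the pfvf handle are illustrative, not defined in
+ * this file):
+ *
+ *   req = otx2_mbox_alloc_msg_ptp_op(&pfvf->mbox);
+ *   req->op = PTP_OP_GET_CLOCK;
+ *   err = otx2_sync_mbox_msg(&pfvf->mbox);
+ *   (on success the ptp_rsp returned through the mailbox carries the
+ *    raw PTP_CLOCK_HI counter in rsp->clk)
+ *
+ * For PTP_OP_ADJFINE the handler simply forwards req->scaled_ppm to
+ * ptp_adjfine() above. As a worked example, with the default 800 MHz
+ * coprocessor clock (CLOCK_BASE_RATE * 16) the base compensation is
+ * tbase = (10^9 << 32) / (8 * 10^8), i.e. 1.25 ns per cycle in 32.32
+ * fixed point, and scaled_ppm = 65536 (one ppm) converts to roughly
+ * ppb = 1000, so the programmed PTP_CLOCK_COMP moves by about
+ * tbase / 10^6.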
+ */ + if (!rvu->ptp) + return -ENODEV; + + switch (req->op) { + case PTP_OP_ADJFINE: + err = ptp_adjfine(rvu->ptp, req->scaled_ppm); + break; + case PTP_OP_GET_CLOCK: + err = ptp_get_clock(rvu->ptp, &rsp->clk); + break; + default: + err = -EINVAL; + break; + } + + return err; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h new file mode 100644 index 000000000..878bc395d --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell PTP driver + * + * Copyright (C) 2020 Marvell International Ltd. + */ + +#ifndef PTP_H +#define PTP_H + +#include <linux/timecounter.h> +#include <linux/time64.h> +#include <linux/spinlock.h> + +struct ptp { + struct pci_dev *pdev; + void __iomem *reg_base; + u32 clock_rate; +}; + +struct ptp *ptp_get(void); +void ptp_put(struct ptp *ptp); + +extern struct pci_driver ptp_driver; + +#endif diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c new file mode 100644 index 000000000..acbc67074 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -0,0 +1,2819 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/irq.h> +#include <linux/pci.h> +#include <linux/sysfs.h> + +#include "cgx.h" +#include "rvu.h" +#include "rvu_reg.h" +#include "ptp.h" + +#include "rvu_trace.h" + +#define DRV_NAME "octeontx2-af" +#define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver" + +static int rvu_get_hwvf(struct rvu *rvu, int pcifunc); + +static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, + struct rvu_block *block, int lf); +static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, + struct rvu_block *block, int lf); +static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc); + +static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, + int type, int num, + void (mbox_handler)(struct work_struct *), + void (mbox_up_handler)(struct work_struct *)); +enum { + TYPE_AFVF, + TYPE_AFPF, +}; + +/* Supported devices */ +static const struct pci_device_id rvu_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF) }, + { 0, } /* end of table */ +}; + +MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>"); +MODULE_DESCRIPTION(DRV_STRING); +MODULE_LICENSE("GPL v2"); +MODULE_DEVICE_TABLE(pci, rvu_id_table); + +static char *mkex_profile; /* MKEX profile name */ +module_param(mkex_profile, charp, 0000); +MODULE_PARM_DESC(mkex_profile, "MKEX profile name string"); + +static void rvu_setup_hw_capabilities(struct rvu *rvu) +{ + struct rvu_hwinfo *hw = rvu->hw; + + hw->cap.nix_tx_aggr_lvl = NIX_TXSCH_LVL_TL1; + hw->cap.nix_fixed_txschq_mapping = false; + hw->cap.nix_shaping = true; + hw->cap.nix_tx_link_bp = true; + hw->cap.nix_rx_multicast = true; + + if (is_rvu_96xx_B0(rvu)) { + hw->cap.nix_fixed_txschq_mapping = true; + hw->cap.nix_txsch_per_cgx_lmac = 4; + hw->cap.nix_txsch_per_lbk_lmac = 132; + hw->cap.nix_txsch_per_sdp_lmac = 76; + hw->cap.nix_shaping = false; + hw->cap.nix_tx_link_bp = false; + if (is_rvu_96xx_A0(rvu)) + 
hw->cap.nix_rx_multicast = false; + } +} + +/* Poll a RVU block's register 'offset', for a 'zero' + * or 'nonzero' at bits specified by 'mask' + */ +int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero) +{ + unsigned long timeout = jiffies + usecs_to_jiffies(20000); + bool twice = false; + void __iomem *reg; + u64 reg_val; + + reg = rvu->afreg_base + ((block << 28) | offset); +again: + reg_val = readq(reg); + if (zero && !(reg_val & mask)) + return 0; + if (!zero && (reg_val & mask)) + return 0; + if (time_before(jiffies, timeout)) { + usleep_range(1, 5); + goto again; + } + /* In scenarios where CPU is scheduled out before checking + * 'time_before' (above) and gets scheduled in such that + * jiffies are beyond timeout value, then check again if HW is + * done with the operation in the meantime. + */ + if (!twice) { + twice = true; + goto again; + } + return -EBUSY; +} + +int rvu_alloc_rsrc(struct rsrc_bmap *rsrc) +{ + int id; + + if (!rsrc->bmap) + return -EINVAL; + + id = find_first_zero_bit(rsrc->bmap, rsrc->max); + if (id >= rsrc->max) + return -ENOSPC; + + __set_bit(id, rsrc->bmap); + + return id; +} + +int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc) +{ + int start; + + if (!rsrc->bmap) + return -EINVAL; + + start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0); + if (start >= rsrc->max) + return -ENOSPC; + + bitmap_set(rsrc->bmap, start, nrsrc); + return start; +} + +static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start) +{ + if (!rsrc->bmap) + return; + if (start >= rsrc->max) + return; + + bitmap_clear(rsrc->bmap, start, nrsrc); +} + +bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc) +{ + int start; + + if (!rsrc->bmap) + return false; + + start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0); + if (start >= rsrc->max) + return false; + + return true; +} + +void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id) +{ + if (!rsrc->bmap) + return; + + __clear_bit(id, rsrc->bmap); +} + +int rvu_rsrc_free_count(struct rsrc_bmap *rsrc) +{ + int used; + + if (!rsrc->bmap) + return 0; + + used = bitmap_weight(rsrc->bmap, rsrc->max); + return (rsrc->max - used); +} + +int rvu_alloc_bitmap(struct rsrc_bmap *rsrc) +{ + rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max), + sizeof(long), GFP_KERNEL); + if (!rsrc->bmap) + return -ENOMEM; + return 0; +} + +/* Get block LF's HW index from a PF_FUNC's block slot number */ +int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot) +{ + u16 match = 0; + int lf; + + mutex_lock(&rvu->rsrc_lock); + for (lf = 0; lf < block->lf.max; lf++) { + if (block->fn_map[lf] == pcifunc) { + if (slot == match) { + mutex_unlock(&rvu->rsrc_lock); + return lf; + } + match++; + } + } + mutex_unlock(&rvu->rsrc_lock); + return -ENODEV; +} + +/* Convert BLOCK_TYPE_E to a BLOCK_ADDR_E. + * Some silicon variants of OcteonTX2 supports + * multiple blocks of same type. + * + * @pcifunc has to be zero when no LF is yet attached. 
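+ *
+ * As an illustration of the lookup below: rvu_get_blkaddr(rvu,
+ * BLKTYPE_NIX, pcifunc) with a non-zero pcifunc reads
+ * RVU_PRIV_PFX_NIX0_CFG (for a PF) or RVU_PRIV_HWVFX_NIX0_CFG (for a
+ * VF) to check whether that function already owns a NIX LF on NIX0,
+ * while a zero pcifunc simply selects the first block of the
+ * requested type. In every case the address is returned only if
+ * is_block_implemented() confirms the block exists on this silicon.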
+ */ +int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc) +{ + int devnum, blkaddr = -ENODEV; + u64 cfg, reg; + bool is_pf; + + switch (blktype) { + case BLKTYPE_NPC: + blkaddr = BLKADDR_NPC; + goto exit; + case BLKTYPE_NPA: + blkaddr = BLKADDR_NPA; + goto exit; + case BLKTYPE_NIX: + /* For now assume NIX0 */ + if (!pcifunc) { + blkaddr = BLKADDR_NIX0; + goto exit; + } + break; + case BLKTYPE_SSO: + blkaddr = BLKADDR_SSO; + goto exit; + case BLKTYPE_SSOW: + blkaddr = BLKADDR_SSOW; + goto exit; + case BLKTYPE_TIM: + blkaddr = BLKADDR_TIM; + goto exit; + case BLKTYPE_CPT: + /* For now assume CPT0 */ + if (!pcifunc) { + blkaddr = BLKADDR_CPT0; + goto exit; + } + break; + } + + /* Check if this is a RVU PF or VF */ + if (pcifunc & RVU_PFVF_FUNC_MASK) { + is_pf = false; + devnum = rvu_get_hwvf(rvu, pcifunc); + } else { + is_pf = true; + devnum = rvu_get_pf(pcifunc); + } + + /* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' */ + if (blktype == BLKTYPE_NIX) { + reg = is_pf ? RVU_PRIV_PFX_NIX0_CFG : RVU_PRIV_HWVFX_NIX0_CFG; + cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16)); + if (cfg) + blkaddr = BLKADDR_NIX0; + } + + /* Check if the 'pcifunc' has a CPT LF from 'BLKADDR_CPT0' */ + if (blktype == BLKTYPE_CPT) { + reg = is_pf ? RVU_PRIV_PFX_CPT0_CFG : RVU_PRIV_HWVFX_CPT0_CFG; + cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16)); + if (cfg) + blkaddr = BLKADDR_CPT0; + } + +exit: + if (is_block_implemented(rvu->hw, blkaddr)) + return blkaddr; + return -ENODEV; +} + +static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf, + struct rvu_block *block, u16 pcifunc, + u16 lf, bool attach) +{ + int devnum, num_lfs = 0; + bool is_pf; + u64 reg; + + if (lf >= block->lf.max) { + dev_err(&rvu->pdev->dev, + "%s: FATAL: LF %d is >= %s's max lfs i.e %d\n", + __func__, lf, block->name, block->lf.max); + return; + } + + /* Check if this is for a RVU PF or VF */ + if (pcifunc & RVU_PFVF_FUNC_MASK) { + is_pf = false; + devnum = rvu_get_hwvf(rvu, pcifunc); + } else { + is_pf = true; + devnum = rvu_get_pf(pcifunc); + } + + block->fn_map[lf] = attach ? pcifunc : 0; + + switch (block->addr) { + case BLKADDR_NPA: + pfvf->npalf = attach ? true : false; + num_lfs = pfvf->npalf; + break; + case BLKADDR_NIX0: + case BLKADDR_NIX1: + pfvf->nixlf = attach ? true : false; + num_lfs = pfvf->nixlf; + break; + case BLKADDR_SSO: + attach ? pfvf->sso++ : pfvf->sso--; + num_lfs = pfvf->sso; + break; + case BLKADDR_SSOW: + attach ? pfvf->ssow++ : pfvf->ssow--; + num_lfs = pfvf->ssow; + break; + case BLKADDR_TIM: + attach ? pfvf->timlfs++ : pfvf->timlfs--; + num_lfs = pfvf->timlfs; + break; + case BLKADDR_CPT0: + attach ? pfvf->cptlfs++ : pfvf->cptlfs--; + num_lfs = pfvf->cptlfs; + break; + case BLKADDR_CPT1: + attach ? pfvf->cpt1_lfs++ : pfvf->cpt1_lfs--; + num_lfs = pfvf->cpt1_lfs; + break; + } + + reg = is_pf ? 
block->pf_lfcnt_reg : block->vf_lfcnt_reg; + rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs); +} + +inline int rvu_get_pf(u16 pcifunc) +{ + return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK; +} + +void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf) +{ + u64 cfg; + + /* Get numVFs attached to this PF and first HWVF */ + cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); + *numvfs = (cfg >> 12) & 0xFF; + *hwvf = cfg & 0xFFF; +} + +static int rvu_get_hwvf(struct rvu *rvu, int pcifunc) +{ + int pf, func; + u64 cfg; + + pf = rvu_get_pf(pcifunc); + func = pcifunc & RVU_PFVF_FUNC_MASK; + + /* Get first HWVF attached to this PF */ + cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); + + return ((cfg & 0xFFF) + func - 1); +} + +struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc) +{ + /* Check if it is a PF or VF */ + if (pcifunc & RVU_PFVF_FUNC_MASK) + return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)]; + else + return &rvu->pf[rvu_get_pf(pcifunc)]; +} + +static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc) +{ + int pf, vf, nvfs; + u64 cfg; + + pf = rvu_get_pf(pcifunc); + if (pf >= rvu->hw->total_pfs) + return false; + + if (!(pcifunc & RVU_PFVF_FUNC_MASK)) + return true; + + /* Check if VF is within number of VFs attached to this PF */ + vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1; + cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); + nvfs = (cfg >> 12) & 0xFF; + if (vf >= nvfs) + return false; + + return true; +} + +bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr) +{ + struct rvu_block *block; + + if (blkaddr < BLKADDR_RVUM || blkaddr >= BLK_COUNT) + return false; + + block = &hw->block[blkaddr]; + return block->implemented; +} + +static void rvu_check_block_implemented(struct rvu *rvu) +{ + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + int blkid; + u64 cfg; + + /* For each block check if 'implemented' bit is set */ + for (blkid = 0; blkid < BLK_COUNT; blkid++) { + block = &hw->block[blkid]; + cfg = rvupf_read64(rvu, RVU_PF_BLOCK_ADDRX_DISC(blkid)); + if (cfg & BIT_ULL(11)) + block->implemented = true; + } +} + +static void rvu_setup_rvum_blk_revid(struct rvu *rvu) +{ + rvu_write64(rvu, BLKADDR_RVUM, + RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM), + RVU_BLK_RVUM_REVID); +} + +static void rvu_clear_rvum_blk_revid(struct rvu *rvu) +{ + rvu_write64(rvu, BLKADDR_RVUM, + RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM), 0x00); +} + +int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf) +{ + int err; + + if (!block->implemented) + return 0; + + rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12)); + err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg, BIT_ULL(12), + true); + return err; +} + +static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg) +{ + struct rvu_block *block = &rvu->hw->block[blkaddr]; + + if (!block->implemented) + return; + + rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0)); + rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true); +} + +static void rvu_reset_all_blocks(struct rvu *rvu) +{ + /* Do a HW reset of all RVU blocks */ + rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST); + rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST); + rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST); + rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST); + rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST); + rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST); + rvu_block_reset(rvu, BLKADDR_NDC_NIX0_RX, NDC_AF_BLK_RST); + rvu_block_reset(rvu, BLKADDR_NDC_NIX0_TX, 
NDC_AF_BLK_RST); + rvu_block_reset(rvu, BLKADDR_NDC_NPA0, NDC_AF_BLK_RST); +} + +static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block) +{ + struct rvu_pfvf *pfvf; + u64 cfg; + int lf; + + for (lf = 0; lf < block->lf.max; lf++) { + cfg = rvu_read64(rvu, block->addr, + block->lfcfg_reg | (lf << block->lfshift)); + if (!(cfg & BIT_ULL(63))) + continue; + + /* Set this resource as being used */ + __set_bit(lf, block->lf.bmap); + + /* Get, to whom this LF is attached */ + pfvf = rvu_get_pfvf(rvu, (cfg >> 8) & 0xFFFF); + rvu_update_rsrc_map(rvu, pfvf, block, + (cfg >> 8) & 0xFFFF, lf, true); + + /* Set start MSIX vector for this LF within this PF/VF */ + rvu_set_msix_offset(rvu, pfvf, block, lf); + } +} + +static void rvu_check_min_msix_vec(struct rvu *rvu, int nvecs, int pf, int vf) +{ + int min_vecs; + + if (!vf) + goto check_pf; + + if (!nvecs) { + dev_warn(rvu->dev, + "PF%d:VF%d is configured with zero msix vectors, %d\n", + pf, vf - 1, nvecs); + } + return; + +check_pf: + if (pf == 0) + min_vecs = RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT; + else + min_vecs = RVU_PF_INT_VEC_CNT; + + if (!(nvecs < min_vecs)) + return; + dev_warn(rvu->dev, + "PF%d is configured with too few vectors, %d, min is %d\n", + pf, nvecs, min_vecs); +} + +static int rvu_setup_msix_resources(struct rvu *rvu) +{ + struct rvu_hwinfo *hw = rvu->hw; + int pf, vf, numvfs, hwvf, err; + int nvecs, offset, max_msix; + struct rvu_pfvf *pfvf; + u64 cfg, phy_addr; + dma_addr_t iova; + + for (pf = 0; pf < hw->total_pfs; pf++) { + cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); + /* If PF is not enabled, nothing to do */ + if (!((cfg >> 20) & 0x01)) + continue; + + rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf); + + pfvf = &rvu->pf[pf]; + /* Get num of MSIX vectors attached to this PF */ + cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_MSIX_CFG(pf)); + pfvf->msix.max = ((cfg >> 32) & 0xFFF) + 1; + rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, 0); + + /* Alloc msix bitmap for this PF */ + err = rvu_alloc_bitmap(&pfvf->msix); + if (err) + return err; + + /* Allocate memory for MSIX vector to RVU block LF mapping */ + pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max, + sizeof(u16), GFP_KERNEL); + if (!pfvf->msix_lfmap) + return -ENOMEM; + + /* For PF0 (AF) firmware will set msix vector offsets for + * AF, block AF and PF0_INT vectors, so jump to VFs. + */ + if (!pf) + goto setup_vfmsix; + + /* Set MSIX offset for PF's 'RVU_PF_INT_VEC' vectors. + * These are allocated on driver init and never freed, + * so no need to set 'msix_lfmap' for these. + */ + cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(pf)); + nvecs = (cfg >> 12) & 0xFF; + cfg &= ~0x7FFULL; + offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs); + rvu_write64(rvu, BLKADDR_RVUM, + RVU_PRIV_PFX_INT_CFG(pf), cfg | offset); +setup_vfmsix: + /* Alloc msix bitmap for VFs */ + for (vf = 0; vf < numvfs; vf++) { + pfvf = &rvu->hwvf[hwvf + vf]; + /* Get num of MSIX vectors attached to this VF */ + cfg = rvu_read64(rvu, BLKADDR_RVUM, + RVU_PRIV_PFX_MSIX_CFG(pf)); + pfvf->msix.max = (cfg & 0xFFF) + 1; + rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, vf + 1); + + /* Alloc msix bitmap for this VF */ + err = rvu_alloc_bitmap(&pfvf->msix); + if (err) + return err; + + pfvf->msix_lfmap = + devm_kcalloc(rvu->dev, pfvf->msix.max, + sizeof(u16), GFP_KERNEL); + if (!pfvf->msix_lfmap) + return -ENOMEM; + + /* Set MSIX offset for HWVF's 'RVU_VF_INT_VEC' vectors. 
+ * These are allocated on driver init and never freed, + * so no need to set 'msix_lfmap' for these. + */ + cfg = rvu_read64(rvu, BLKADDR_RVUM, + RVU_PRIV_HWVFX_INT_CFG(hwvf + vf)); + nvecs = (cfg >> 12) & 0xFF; + cfg &= ~0x7FFULL; + offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs); + rvu_write64(rvu, BLKADDR_RVUM, + RVU_PRIV_HWVFX_INT_CFG(hwvf + vf), + cfg | offset); + } + } + + /* HW interprets RVU_AF_MSIXTR_BASE address as an IOVA, hence + * create a IOMMU mapping for the physcial address configured by + * firmware and reconfig RVU_AF_MSIXTR_BASE with IOVA. + */ + cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST); + max_msix = cfg & 0xFFFFF; + if (rvu->fwdata && rvu->fwdata->msixtr_base) + phy_addr = rvu->fwdata->msixtr_base; + else + phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE); + + iova = dma_map_resource(rvu->dev, phy_addr, + max_msix * PCI_MSIX_ENTRY_SIZE, + DMA_BIDIRECTIONAL, 0); + + if (dma_mapping_error(rvu->dev, iova)) + return -ENOMEM; + + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova); + rvu->msix_base_iova = iova; + rvu->msixtr_base_phy = phy_addr; + + return 0; +} + +static void rvu_reset_msix(struct rvu *rvu) +{ + /* Restore msixtr base register */ + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, + rvu->msixtr_base_phy); +} + +static void rvu_free_hw_resources(struct rvu *rvu) +{ + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + struct rvu_pfvf *pfvf; + int id, max_msix; + u64 cfg; + + rvu_npa_freemem(rvu); + rvu_npc_freemem(rvu); + rvu_nix_freemem(rvu); + + /* Free block LF bitmaps */ + for (id = 0; id < BLK_COUNT; id++) { + block = &hw->block[id]; + kfree(block->lf.bmap); + } + + /* Free MSIX bitmaps */ + for (id = 0; id < hw->total_pfs; id++) { + pfvf = &rvu->pf[id]; + kfree(pfvf->msix.bmap); + } + + for (id = 0; id < hw->total_vfs; id++) { + pfvf = &rvu->hwvf[id]; + kfree(pfvf->msix.bmap); + } + + /* Unmap MSIX vector base IOVA mapping */ + if (!rvu->msix_base_iova) + return; + cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST); + max_msix = cfg & 0xFFFFF; + dma_unmap_resource(rvu->dev, rvu->msix_base_iova, + max_msix * PCI_MSIX_ENTRY_SIZE, + DMA_BIDIRECTIONAL, 0); + + rvu_reset_msix(rvu); + mutex_destroy(&rvu->rsrc_lock); +} + +static void rvu_setup_pfvf_macaddress(struct rvu *rvu) +{ + struct rvu_hwinfo *hw = rvu->hw; + int pf, vf, numvfs, hwvf; + struct rvu_pfvf *pfvf; + u64 *mac; + + for (pf = 0; pf < hw->total_pfs; pf++) { + if (!is_pf_cgxmapped(rvu, pf)) + continue; + /* Assign MAC address to PF */ + pfvf = &rvu->pf[pf]; + if (rvu->fwdata && pf < PF_MACNUM_MAX) { + mac = &rvu->fwdata->pf_macs[pf]; + if (*mac) + u64_to_ether_addr(*mac, pfvf->mac_addr); + else + eth_random_addr(pfvf->mac_addr); + } else { + eth_random_addr(pfvf->mac_addr); + } + + /* Assign MAC address to VFs */ + rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf); + for (vf = 0; vf < numvfs; vf++, hwvf++) { + pfvf = &rvu->hwvf[hwvf]; + if (rvu->fwdata && hwvf < VF_MACNUM_MAX) { + mac = &rvu->fwdata->vf_macs[hwvf]; + if (*mac) + u64_to_ether_addr(*mac, pfvf->mac_addr); + else + eth_random_addr(pfvf->mac_addr); + } else { + eth_random_addr(pfvf->mac_addr); + } + } + } +} + +static int rvu_fwdata_init(struct rvu *rvu) +{ + u64 fwdbase; + int err; + + /* Get firmware data base address */ + err = cgx_get_fwdata_base(&fwdbase); + if (err) + goto fail; + rvu->fwdata = ioremap_wc(fwdbase, sizeof(struct rvu_fwdata)); + if (!rvu->fwdata) + goto fail; + if (!is_rvu_fwdata_valid(rvu)) { + dev_err(rvu->dev, + "Mismatch in 'fwdata' struct btw kernel and 
firmware\n"); + iounmap(rvu->fwdata); + rvu->fwdata = NULL; + return -EINVAL; + } + return 0; +fail: + dev_info(rvu->dev, "Unable to fetch 'fwdata' from firmware\n"); + return -EIO; +} + +static void rvu_fwdata_exit(struct rvu *rvu) +{ + if (rvu->fwdata) + iounmap(rvu->fwdata); +} + +static int rvu_setup_hw_resources(struct rvu *rvu) +{ + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + int blkid, err; + u64 cfg; + + /* Get HW supported max RVU PF & VF count */ + cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST); + hw->total_pfs = (cfg >> 32) & 0xFF; + hw->total_vfs = (cfg >> 20) & 0xFFF; + hw->max_vfs_per_pf = (cfg >> 40) & 0xFF; + + /* Init NPA LF's bitmap */ + block = &hw->block[BLKADDR_NPA]; + if (!block->implemented) + goto nix; + cfg = rvu_read64(rvu, BLKADDR_NPA, NPA_AF_CONST); + block->lf.max = (cfg >> 16) & 0xFFF; + block->addr = BLKADDR_NPA; + block->type = BLKTYPE_NPA; + block->lfshift = 8; + block->lookup_reg = NPA_AF_RVU_LF_CFG_DEBUG; + block->pf_lfcnt_reg = RVU_PRIV_PFX_NPA_CFG; + block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NPA_CFG; + block->lfcfg_reg = NPA_PRIV_LFX_CFG; + block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG; + block->lfreset_reg = NPA_AF_LF_RST; + sprintf(block->name, "NPA"); + err = rvu_alloc_bitmap(&block->lf); + if (err) + return err; + +nix: + /* Init NIX LF's bitmap */ + block = &hw->block[BLKADDR_NIX0]; + if (!block->implemented) + goto sso; + cfg = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST2); + block->lf.max = cfg & 0xFFF; + block->addr = BLKADDR_NIX0; + block->type = BLKTYPE_NIX; + block->lfshift = 8; + block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG; + block->pf_lfcnt_reg = RVU_PRIV_PFX_NIX0_CFG; + block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIX0_CFG; + block->lfcfg_reg = NIX_PRIV_LFX_CFG; + block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG; + block->lfreset_reg = NIX_AF_LF_RST; + sprintf(block->name, "NIX"); + err = rvu_alloc_bitmap(&block->lf); + if (err) + return err; + +sso: + /* Init SSO group's bitmap */ + block = &hw->block[BLKADDR_SSO]; + if (!block->implemented) + goto ssow; + cfg = rvu_read64(rvu, BLKADDR_SSO, SSO_AF_CONST); + block->lf.max = cfg & 0xFFFF; + block->addr = BLKADDR_SSO; + block->type = BLKTYPE_SSO; + block->multislot = true; + block->lfshift = 3; + block->lookup_reg = SSO_AF_RVU_LF_CFG_DEBUG; + block->pf_lfcnt_reg = RVU_PRIV_PFX_SSO_CFG; + block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSO_CFG; + block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG; + block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG; + block->lfreset_reg = SSO_AF_LF_HWGRP_RST; + sprintf(block->name, "SSO GROUP"); + err = rvu_alloc_bitmap(&block->lf); + if (err) + return err; + +ssow: + /* Init SSO workslot's bitmap */ + block = &hw->block[BLKADDR_SSOW]; + if (!block->implemented) + goto tim; + block->lf.max = (cfg >> 56) & 0xFF; + block->addr = BLKADDR_SSOW; + block->type = BLKTYPE_SSOW; + block->multislot = true; + block->lfshift = 3; + block->lookup_reg = SSOW_AF_RVU_LF_HWS_CFG_DEBUG; + block->pf_lfcnt_reg = RVU_PRIV_PFX_SSOW_CFG; + block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSOW_CFG; + block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG; + block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG; + block->lfreset_reg = SSOW_AF_LF_HWS_RST; + sprintf(block->name, "SSOWS"); + err = rvu_alloc_bitmap(&block->lf); + if (err) + return err; + +tim: + /* Init TIM LF's bitmap */ + block = &hw->block[BLKADDR_TIM]; + if (!block->implemented) + goto cpt; + cfg = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_CONST); + block->lf.max = cfg & 0xFFFF; + block->addr = BLKADDR_TIM; + block->type = BLKTYPE_TIM; + block->multislot = true; + block->lfshift 
= 3; + block->lookup_reg = TIM_AF_RVU_LF_CFG_DEBUG; + block->pf_lfcnt_reg = RVU_PRIV_PFX_TIM_CFG; + block->vf_lfcnt_reg = RVU_PRIV_HWVFX_TIM_CFG; + block->lfcfg_reg = TIM_PRIV_LFX_CFG; + block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG; + block->lfreset_reg = TIM_AF_LF_RST; + sprintf(block->name, "TIM"); + err = rvu_alloc_bitmap(&block->lf); + if (err) + return err; + +cpt: + /* Init CPT LF's bitmap */ + block = &hw->block[BLKADDR_CPT0]; + if (!block->implemented) + goto init; + cfg = rvu_read64(rvu, BLKADDR_CPT0, CPT_AF_CONSTANTS0); + block->lf.max = cfg & 0xFF; + block->addr = BLKADDR_CPT0; + block->type = BLKTYPE_CPT; + block->multislot = true; + block->lfshift = 3; + block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG; + block->pf_lfcnt_reg = RVU_PRIV_PFX_CPT0_CFG; + block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPT0_CFG; + block->lfcfg_reg = CPT_PRIV_LFX_CFG; + block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG; + block->lfreset_reg = CPT_AF_LF_RST; + sprintf(block->name, "CPT"); + err = rvu_alloc_bitmap(&block->lf); + if (err) + return err; + +init: + /* Allocate memory for PFVF data */ + rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs, + sizeof(struct rvu_pfvf), GFP_KERNEL); + if (!rvu->pf) + return -ENOMEM; + + rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs, + sizeof(struct rvu_pfvf), GFP_KERNEL); + if (!rvu->hwvf) + return -ENOMEM; + + mutex_init(&rvu->rsrc_lock); + + rvu_fwdata_init(rvu); + + err = rvu_setup_msix_resources(rvu); + if (err) + return err; + + for (blkid = 0; blkid < BLK_COUNT; blkid++) { + block = &hw->block[blkid]; + if (!block->lf.bmap) + continue; + + /* Allocate memory for block LF/slot to pcifunc mapping info */ + block->fn_map = devm_kcalloc(rvu->dev, block->lf.max, + sizeof(u16), GFP_KERNEL); + if (!block->fn_map) { + err = -ENOMEM; + goto msix_err; + } + + /* Scan all blocks to check if low level firmware has + * already provisioned any of the resources to a PF/VF. 
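+ *
+ * rvu_scan_block() walks each LF of the block: any LFX_CFG entry
+ * with its valid bit (bit 63) set is marked as used in the LF
+ * bitmap, the owning pcifunc is recorded in block->fn_map via
+ * rvu_update_rsrc_map(), and the LF's MSIX vectors are reserved, so
+ * firmware-provisioned resources are never handed out again later
+ * by rvu_alloc_rsrc().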
+ */ + rvu_scan_block(rvu, block); + } + + err = rvu_npc_init(rvu); + if (err) + goto npc_err; + + err = rvu_cgx_init(rvu); + if (err) + goto cgx_err; + + /* Assign MACs for CGX mapped functions */ + rvu_setup_pfvf_macaddress(rvu); + + err = rvu_npa_init(rvu); + if (err) + goto npa_err; + + err = rvu_nix_init(rvu); + if (err) + goto nix_err; + + return 0; + +nix_err: + rvu_nix_freemem(rvu); +npa_err: + rvu_npa_freemem(rvu); +cgx_err: + rvu_cgx_exit(rvu); +npc_err: + rvu_npc_freemem(rvu); + rvu_fwdata_exit(rvu); +msix_err: + rvu_reset_msix(rvu); + return err; +} + +/* NPA and NIX admin queue APIs */ +void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq) +{ + if (!aq) + return; + + qmem_free(rvu->dev, aq->inst); + qmem_free(rvu->dev, aq->res); + devm_kfree(rvu->dev, aq); +} + +int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue, + int qsize, int inst_size, int res_size) +{ + struct admin_queue *aq; + int err; + + *ad_queue = devm_kzalloc(rvu->dev, sizeof(*aq), GFP_KERNEL); + if (!*ad_queue) + return -ENOMEM; + aq = *ad_queue; + + /* Alloc memory for instructions i.e AQ */ + err = qmem_alloc(rvu->dev, &aq->inst, qsize, inst_size); + if (err) { + devm_kfree(rvu->dev, aq); + return err; + } + + /* Alloc memory for results */ + err = qmem_alloc(rvu->dev, &aq->res, qsize, res_size); + if (err) { + rvu_aq_free(rvu, aq); + return err; + } + + spin_lock_init(&aq->lock); + return 0; +} + +int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req, + struct ready_msg_rsp *rsp) +{ + if (rvu->fwdata) { + rsp->rclk_freq = rvu->fwdata->rclk; + rsp->sclk_freq = rvu->fwdata->sclk; + } + return 0; +} + +/* Get current count of a RVU block's LF/slots + * provisioned to a given RVU func. + */ +u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr) +{ + switch (blkaddr) { + case BLKADDR_NPA: + return pfvf->npalf ? 1 : 0; + case BLKADDR_NIX0: + case BLKADDR_NIX1: + return pfvf->nixlf ? 1 : 0; + case BLKADDR_SSO: + return pfvf->sso; + case BLKADDR_SSOW: + return pfvf->ssow; + case BLKADDR_TIM: + return pfvf->timlfs; + case BLKADDR_CPT0: + return pfvf->cptlfs; + case BLKADDR_CPT1: + return pfvf->cpt1_lfs; + } + return 0; +} + +/* Return true if LFs of block type are attached to pcifunc */ +static bool is_blktype_attached(struct rvu_pfvf *pfvf, int blktype) +{ + switch (blktype) { + case BLKTYPE_NPA: + return pfvf->npalf ? 1 : 0; + case BLKTYPE_NIX: + return pfvf->nixlf ? 
1 : 0; + case BLKTYPE_SSO: + return !!pfvf->sso; + case BLKTYPE_SSOW: + return !!pfvf->ssow; + case BLKTYPE_TIM: + return !!pfvf->timlfs; + case BLKTYPE_CPT: + return pfvf->cptlfs || pfvf->cpt1_lfs; + } + + return false; +} + +bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype) +{ + struct rvu_pfvf *pfvf; + + if (!is_pf_func_valid(rvu, pcifunc)) + return false; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + + /* Check if this PFFUNC has a LF of type blktype attached */ + if (!is_blktype_attached(pfvf, blktype)) + return false; + + return true; +} + +static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block, + int pcifunc, int slot) +{ + u64 val; + + val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13); + rvu_write64(rvu, block->addr, block->lookup_reg, val); + /* Wait for the lookup to finish */ + /* TODO: put some timeout here */ + while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13)) + ; + + val = rvu_read64(rvu, block->addr, block->lookup_reg); + + /* Check LF valid bit */ + if (!(val & (1ULL << 12))) + return -1; + + return (val & 0xFFF); +} + +static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + int slot, lf, num_lfs; + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, blktype, pcifunc); + if (blkaddr < 0) + return; + + block = &hw->block[blkaddr]; + + num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr); + if (!num_lfs) + return; + + for (slot = 0; slot < num_lfs; slot++) { + lf = rvu_lookup_rsrc(rvu, block, pcifunc, slot); + if (lf < 0) /* This should never happen */ + continue; + + /* Disable the LF */ + rvu_write64(rvu, blkaddr, block->lfcfg_reg | + (lf << block->lfshift), 0x00ULL); + + /* Update SW maintained mapping info as well */ + rvu_update_rsrc_map(rvu, pfvf, block, + pcifunc, lf, false); + + /* Free the resource */ + rvu_free_rsrc(&block->lf, lf); + + /* Clear MSIX vector offset for this LF */ + rvu_clear_msix_offset(rvu, pfvf, block, lf); + } +} + +static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach, + u16 pcifunc) +{ + struct rvu_hwinfo *hw = rvu->hw; + bool detach_all = true; + struct rvu_block *block; + int blkid; + + mutex_lock(&rvu->rsrc_lock); + + /* Check for partial resource detach */ + if (detach && detach->partial) + detach_all = false; + + /* Check for RVU block's LFs attached to this func, + * if so, detach them. 
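+ *
+ * For a partial detach only the block types flagged in the request
+ * are torn down: e.g. a rsrc_detach with 'partial' set and only
+ * 'cptlfs' requested leaves the NPA/NIX/SSO/SSOW/TIM LFs of this
+ * function untouched and frees just its CPT0 slots (see the
+ * per-block checks in the loop below).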
+ */ + for (blkid = 0; blkid < BLK_COUNT; blkid++) { + block = &hw->block[blkid]; + if (!block->lf.bmap) + continue; + if (!detach_all && detach) { + if (blkid == BLKADDR_NPA && !detach->npalf) + continue; + else if ((blkid == BLKADDR_NIX0) && !detach->nixlf) + continue; + else if ((blkid == BLKADDR_SSO) && !detach->sso) + continue; + else if ((blkid == BLKADDR_SSOW) && !detach->ssow) + continue; + else if ((blkid == BLKADDR_TIM) && !detach->timlfs) + continue; + else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs) + continue; + } + rvu_detach_block(rvu, pcifunc, block->type); + } + + mutex_unlock(&rvu->rsrc_lock); + return 0; +} + +int rvu_mbox_handler_detach_resources(struct rvu *rvu, + struct rsrc_detach *detach, + struct msg_rsp *rsp) +{ + return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc); +} + +static int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + int blkaddr = BLKADDR_NIX0, vf; + struct rvu_pfvf *pf; + + /* All CGX mapped PFs are set with assigned NIX block during init */ + if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) { + pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); + blkaddr = pf->nix_blkaddr; + } else if (is_afvf(pcifunc)) { + vf = pcifunc - 1; + /* Assign NIX based on VF number. All even numbered VFs get + * NIX0 and odd numbered gets NIX1 + */ + blkaddr = (vf & 1) ? BLKADDR_NIX1 : BLKADDR_NIX0; + /* NIX1 is not present on all silicons */ + if (!is_block_implemented(rvu->hw, BLKADDR_NIX1)) + blkaddr = BLKADDR_NIX0; + } + + switch (blkaddr) { + case BLKADDR_NIX1: + pfvf->nix_blkaddr = BLKADDR_NIX1; + break; + case BLKADDR_NIX0: + default: + pfvf->nix_blkaddr = BLKADDR_NIX0; + break; + } + + return pfvf->nix_blkaddr; +} + +static int rvu_get_attach_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc) +{ + int blkaddr; + + switch (blktype) { + case BLKTYPE_NIX: + blkaddr = rvu_get_nix_blkaddr(rvu, pcifunc); + break; + default: + return rvu_get_blkaddr(rvu, blktype, 0); + }; + + if (is_block_implemented(rvu->hw, blkaddr)) + return blkaddr; + + return -ENODEV; +} + +static void rvu_attach_block(struct rvu *rvu, int pcifunc, + int blktype, int num_lfs) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + int slot, lf; + int blkaddr; + u64 cfg; + + if (!num_lfs) + return; + + blkaddr = rvu_get_attach_blkaddr(rvu, blktype, pcifunc); + if (blkaddr < 0) + return; + + block = &hw->block[blkaddr]; + if (!block->lf.bmap) + return; + + for (slot = 0; slot < num_lfs; slot++) { + /* Allocate the resource */ + lf = rvu_alloc_rsrc(&block->lf); + if (lf < 0) + return; + + cfg = (1ULL << 63) | (pcifunc << 8) | slot; + rvu_write64(rvu, blkaddr, block->lfcfg_reg | + (lf << block->lfshift), cfg); + rvu_update_rsrc_map(rvu, pfvf, block, + pcifunc, lf, true); + + /* Set start MSIX vector for this LF within this PF/VF */ + rvu_set_msix_offset(rvu, pfvf, block, lf); + } +} + +static int rvu_check_rsrc_availability(struct rvu *rvu, + struct rsrc_attach *req, u16 pcifunc) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + int free_lfs, mappedlfs, blkaddr; + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + + /* Only one NPA LF can be attached */ + if (req->npalf && !is_blktype_attached(pfvf, BLKTYPE_NPA)) { + block = &hw->block[BLKADDR_NPA]; + free_lfs = rvu_rsrc_free_count(&block->lf); + if (!free_lfs) + goto fail; + } else if (req->npalf) { + dev_err(&rvu->pdev->dev, + "Func 0x%x: Invalid req, already has NPA\n", + pcifunc); + return 
-EINVAL; + } + + /* Only one NIX LF can be attached */ + if (req->nixlf && !is_blktype_attached(pfvf, BLKTYPE_NIX)) { + blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (blkaddr < 0) + return blkaddr; + block = &hw->block[blkaddr]; + free_lfs = rvu_rsrc_free_count(&block->lf); + if (!free_lfs) + goto fail; + } else if (req->nixlf) { + dev_err(&rvu->pdev->dev, + "Func 0x%x: Invalid req, already has NIX\n", + pcifunc); + return -EINVAL; + } + + if (req->sso) { + block = &hw->block[BLKADDR_SSO]; + /* Is request within limits ? */ + if (req->sso > block->lf.max) { + dev_err(&rvu->pdev->dev, + "Func 0x%x: Invalid SSO req, %d > max %d\n", + pcifunc, req->sso, block->lf.max); + return -EINVAL; + } + mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr); + free_lfs = rvu_rsrc_free_count(&block->lf); + /* Check if additional resources are available */ + if (req->sso > mappedlfs && + ((req->sso - mappedlfs) > free_lfs)) + goto fail; + } + + if (req->ssow) { + block = &hw->block[BLKADDR_SSOW]; + if (req->ssow > block->lf.max) { + dev_err(&rvu->pdev->dev, + "Func 0x%x: Invalid SSOW req, %d > max %d\n", + pcifunc, req->sso, block->lf.max); + return -EINVAL; + } + mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr); + free_lfs = rvu_rsrc_free_count(&block->lf); + if (req->ssow > mappedlfs && + ((req->ssow - mappedlfs) > free_lfs)) + goto fail; + } + + if (req->timlfs) { + block = &hw->block[BLKADDR_TIM]; + if (req->timlfs > block->lf.max) { + dev_err(&rvu->pdev->dev, + "Func 0x%x: Invalid TIMLF req, %d > max %d\n", + pcifunc, req->timlfs, block->lf.max); + return -EINVAL; + } + mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr); + free_lfs = rvu_rsrc_free_count(&block->lf); + if (req->timlfs > mappedlfs && + ((req->timlfs - mappedlfs) > free_lfs)) + goto fail; + } + + if (req->cptlfs) { + block = &hw->block[BLKADDR_CPT0]; + if (req->cptlfs > block->lf.max) { + dev_err(&rvu->pdev->dev, + "Func 0x%x: Invalid CPTLF req, %d > max %d\n", + pcifunc, req->cptlfs, block->lf.max); + return -EINVAL; + } + mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr); + free_lfs = rvu_rsrc_free_count(&block->lf); + if (req->cptlfs > mappedlfs && + ((req->cptlfs - mappedlfs) > free_lfs)) + goto fail; + } + + return 0; + +fail: + dev_info(rvu->dev, "Request for %s failed\n", block->name); + return -ENOSPC; +} + +int rvu_mbox_handler_attach_resources(struct rvu *rvu, + struct rsrc_attach *attach, + struct msg_rsp *rsp) +{ + u16 pcifunc = attach->hdr.pcifunc; + int err; + + /* If first request, detach all existing attached resources */ + if (!attach->modify) + rvu_detach_rsrcs(rvu, NULL, pcifunc); + + mutex_lock(&rvu->rsrc_lock); + + /* Check if the request can be accommodated */ + err = rvu_check_rsrc_availability(rvu, attach, pcifunc); + if (err) + goto exit; + + /* Now attach the requested resources */ + if (attach->npalf) + rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1); + + if (attach->nixlf) + rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1); + + if (attach->sso) { + /* RVU func doesn't know which exact LF or slot is attached + * to it, it always sees as slot 0,1,2. So for a 'modify' + * request, simply detach all existing attached LFs/slots + * and attach a fresh. 
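+ *
+ * E.g. a function that currently owns two SSO LFs and sends a
+ * 'modify' request asking for sso = 4 gets both LFs detached and
+ * four LFs freshly allocated, which it then addresses as slots
+ * 0..3; rvu_check_rsrc_availability() has already verified that the
+ * extra LFs are actually free before this point.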
+ */ + if (attach->modify) + rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO); + rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO, attach->sso); + } + + if (attach->ssow) { + if (attach->modify) + rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW); + rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW, attach->ssow); + } + + if (attach->timlfs) { + if (attach->modify) + rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM); + rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM, attach->timlfs); + } + + if (attach->cptlfs) { + if (attach->modify) + rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT); + rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT, attach->cptlfs); + } + +exit: + mutex_unlock(&rvu->rsrc_lock); + return err; +} + +static u16 rvu_get_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, + int blkaddr, int lf) +{ + u16 vec; + + if (lf < 0) + return MSIX_VECTOR_INVALID; + + for (vec = 0; vec < pfvf->msix.max; vec++) { + if (pfvf->msix_lfmap[vec] == MSIX_BLKLF(blkaddr, lf)) + return vec; + } + return MSIX_VECTOR_INVALID; +} + +static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, + struct rvu_block *block, int lf) +{ + u16 nvecs, vec, offset; + u64 cfg; + + cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg | + (lf << block->lfshift)); + nvecs = (cfg >> 12) & 0xFF; + + /* Check and alloc MSIX vectors, must be contiguous */ + if (!rvu_rsrc_check_contig(&pfvf->msix, nvecs)) + return; + + offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs); + + /* Config MSIX offset in LF */ + rvu_write64(rvu, block->addr, block->msixcfg_reg | + (lf << block->lfshift), (cfg & ~0x7FFULL) | offset); + + /* Update the bitmap as well */ + for (vec = 0; vec < nvecs; vec++) + pfvf->msix_lfmap[offset + vec] = MSIX_BLKLF(block->addr, lf); +} + +static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, + struct rvu_block *block, int lf) +{ + u16 nvecs, vec, offset; + u64 cfg; + + cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg | + (lf << block->lfshift)); + nvecs = (cfg >> 12) & 0xFF; + + /* Clear MSIX offset in LF */ + rvu_write64(rvu, block->addr, block->msixcfg_reg | + (lf << block->lfshift), cfg & ~0x7FFULL); + + offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf); + + /* Update the mapping */ + for (vec = 0; vec < nvecs; vec++) + pfvf->msix_lfmap[offset + vec] = 0; + + /* Free the same in MSIX bitmap */ + rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset); +} + +int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req, + struct msix_offset_rsp *rsp) +{ + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc = req->hdr.pcifunc; + struct rvu_pfvf *pfvf; + int lf, slot; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + if (!pfvf->msix.bmap) + return 0; + + /* Set MSIX offsets for each block's LFs attached to this PF/VF */ + lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0); + rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf); + + lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NIX0], pcifunc, 0); + rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NIX0, lf); + + rsp->sso = pfvf->sso; + for (slot = 0; slot < rsp->sso; slot++) { + lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSO], pcifunc, slot); + rsp->sso_msixoff[slot] = + rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSO, lf); + } + + rsp->ssow = pfvf->ssow; + for (slot = 0; slot < rsp->ssow; slot++) { + lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSOW], pcifunc, slot); + rsp->ssow_msixoff[slot] = + rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSOW, lf); + } + + rsp->timlfs = pfvf->timlfs; + for (slot = 0; slot < rsp->timlfs; slot++) { + lf = rvu_get_lf(rvu, 
&hw->block[BLKADDR_TIM], pcifunc, slot); + rsp->timlf_msixoff[slot] = + rvu_get_msix_offset(rvu, pfvf, BLKADDR_TIM, lf); + } + + rsp->cptlfs = pfvf->cptlfs; + for (slot = 0; slot < rsp->cptlfs; slot++) { + lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT0], pcifunc, slot); + rsp->cptlf_msixoff[slot] = + rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf); + } + return 0; +} + +int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; + u16 vf, numvfs; + u64 cfg; + + vf = pcifunc & RVU_PFVF_FUNC_MASK; + cfg = rvu_read64(rvu, BLKADDR_RVUM, + RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc))); + numvfs = (cfg >> 12) & 0xFF; + + if (vf && vf <= numvfs) + __rvu_flr_handler(rvu, pcifunc); + else + return RVU_INVALID_VF_ID; + + return 0; +} + +int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req, + struct get_hw_cap_rsp *rsp) +{ + struct rvu_hwinfo *hw = rvu->hw; + + rsp->nix_fixed_txschq_mapping = hw->cap.nix_fixed_txschq_mapping; + rsp->nix_shaping = hw->cap.nix_shaping; + + return 0; +} + +static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid, + struct mbox_msghdr *req) +{ + struct rvu *rvu = pci_get_drvdata(mbox->pdev); + + /* Check if valid, if not reply with a invalid msg */ + if (req->sig != OTX2_MBOX_REQ_SIG) + goto bad_message; + + switch (req->id) { +#define M(_name, _id, _fn_name, _req_type, _rsp_type) \ + case _id: { \ + struct _rsp_type *rsp; \ + int err; \ + \ + rsp = (struct _rsp_type *)otx2_mbox_alloc_msg( \ + mbox, devid, \ + sizeof(struct _rsp_type)); \ + /* some handlers should complete even if reply */ \ + /* could not be allocated */ \ + if (!rsp && \ + _id != MBOX_MSG_DETACH_RESOURCES && \ + _id != MBOX_MSG_NIX_TXSCH_FREE && \ + _id != MBOX_MSG_VF_FLR) \ + return -ENOMEM; \ + if (rsp) { \ + rsp->hdr.id = _id; \ + rsp->hdr.sig = OTX2_MBOX_RSP_SIG; \ + rsp->hdr.pcifunc = req->pcifunc; \ + rsp->hdr.rc = 0; \ + } \ + \ + err = rvu_mbox_handler_ ## _fn_name(rvu, \ + (struct _req_type *)req, \ + rsp); \ + if (rsp && err) \ + rsp->hdr.rc = err; \ + \ + trace_otx2_msg_process(mbox->pdev, _id, err); \ + return rsp ? 
err : -ENOMEM; \ + } +MBOX_MESSAGES +#undef M + +bad_message: + default: + otx2_reply_invalid_msg(mbox, devid, req->pcifunc, req->id); + return -ENODEV; + } +} + +static void __rvu_mbox_handler(struct rvu_work *mwork, int type) +{ + struct rvu *rvu = mwork->rvu; + int offset, err, id, devid; + struct otx2_mbox_dev *mdev; + struct mbox_hdr *req_hdr; + struct mbox_msghdr *msg; + struct mbox_wq_info *mw; + struct otx2_mbox *mbox; + + switch (type) { + case TYPE_AFPF: + mw = &rvu->afpf_wq_info; + break; + case TYPE_AFVF: + mw = &rvu->afvf_wq_info; + break; + default: + return; + } + + devid = mwork - mw->mbox_wrk; + mbox = &mw->mbox; + mdev = &mbox->dev[devid]; + + /* Process received mbox messages */ + req_hdr = mdev->mbase + mbox->rx_start; + if (mw->mbox_wrk[devid].num_msgs == 0) + return; + + offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN); + + for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) { + msg = mdev->mbase + offset; + + /* Set which PF/VF sent this message based on mbox IRQ */ + switch (type) { + case TYPE_AFPF: + msg->pcifunc &= + ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT); + msg->pcifunc |= (devid << RVU_PFVF_PF_SHIFT); + break; + case TYPE_AFVF: + msg->pcifunc &= + ~(RVU_PFVF_FUNC_MASK << RVU_PFVF_FUNC_SHIFT); + msg->pcifunc |= (devid << RVU_PFVF_FUNC_SHIFT) + 1; + break; + } + + err = rvu_process_mbox_msg(mbox, devid, msg); + if (!err) { + offset = mbox->rx_start + msg->next_msgoff; + continue; + } + + if (msg->pcifunc & RVU_PFVF_FUNC_MASK) + dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n", + err, otx2_mbox_id2name(msg->id), + msg->id, rvu_get_pf(msg->pcifunc), + (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1); + else + dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n", + err, otx2_mbox_id2name(msg->id), + msg->id, devid); + } + mw->mbox_wrk[devid].num_msgs = 0; + + /* Send mbox responses to VF/PF */ + otx2_mbox_msg_send(mbox, devid); +} + +static inline void rvu_afpf_mbox_handler(struct work_struct *work) +{ + struct rvu_work *mwork = container_of(work, struct rvu_work, work); + + __rvu_mbox_handler(mwork, TYPE_AFPF); +} + +static inline void rvu_afvf_mbox_handler(struct work_struct *work) +{ + struct rvu_work *mwork = container_of(work, struct rvu_work, work); + + __rvu_mbox_handler(mwork, TYPE_AFVF); +} + +static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type) +{ + struct rvu *rvu = mwork->rvu; + struct otx2_mbox_dev *mdev; + struct mbox_hdr *rsp_hdr; + struct mbox_msghdr *msg; + struct mbox_wq_info *mw; + struct otx2_mbox *mbox; + int offset, id, devid; + + switch (type) { + case TYPE_AFPF: + mw = &rvu->afpf_wq_info; + break; + case TYPE_AFVF: + mw = &rvu->afvf_wq_info; + break; + default: + return; + } + + devid = mwork - mw->mbox_wrk_up; + mbox = &mw->mbox_up; + mdev = &mbox->dev[devid]; + + rsp_hdr = mdev->mbase + mbox->rx_start; + if (mw->mbox_wrk_up[devid].up_num_msgs == 0) { + dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n"); + return; + } + + offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); + + for (id = 0; id < mw->mbox_wrk_up[devid].up_num_msgs; id++) { + msg = mdev->mbase + offset; + + if (msg->id >= MBOX_MSG_MAX) { + dev_err(rvu->dev, + "Mbox msg with unknown ID 0x%x\n", msg->id); + goto end; + } + + if (msg->sig != OTX2_MBOX_RSP_SIG) { + dev_err(rvu->dev, + "Mbox msg with wrong signature %x, ID 0x%x\n", + msg->sig, msg->id); + goto end; + } + + switch (msg->id) { + case MBOX_MSG_CGX_LINK_EVENT: + break; + default: + if (msg->rc) + dev_err(rvu->dev, + 
"Mbox msg response has err %d, ID 0x%x\n", + msg->rc, msg->id); + break; + } +end: + offset = mbox->rx_start + msg->next_msgoff; + mdev->msgs_acked++; + } + mw->mbox_wrk_up[devid].up_num_msgs = 0; + + otx2_mbox_reset(mbox, devid); +} + +static inline void rvu_afpf_mbox_up_handler(struct work_struct *work) +{ + struct rvu_work *mwork = container_of(work, struct rvu_work, work); + + __rvu_mbox_up_handler(mwork, TYPE_AFPF); +} + +static inline void rvu_afvf_mbox_up_handler(struct work_struct *work) +{ + struct rvu_work *mwork = container_of(work, struct rvu_work, work); + + __rvu_mbox_up_handler(mwork, TYPE_AFVF); +} + +static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, + int type, int num, + void (mbox_handler)(struct work_struct *), + void (mbox_up_handler)(struct work_struct *)) +{ + void __iomem *hwbase = NULL, *reg_base; + int err, i, dir, dir_up; + struct rvu_work *mwork; + const char *name; + u64 bar4_addr; + + switch (type) { + case TYPE_AFPF: + name = "rvu_afpf_mailbox"; + bar4_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PF_BAR4_ADDR); + dir = MBOX_DIR_AFPF; + dir_up = MBOX_DIR_AFPF_UP; + reg_base = rvu->afreg_base; + break; + case TYPE_AFVF: + name = "rvu_afvf_mailbox"; + bar4_addr = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR); + dir = MBOX_DIR_PFVF; + dir_up = MBOX_DIR_PFVF_UP; + reg_base = rvu->pfreg_base; + break; + default: + return -EINVAL; + } + + mw->mbox_wq = alloc_workqueue(name, + WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM, + num); + if (!mw->mbox_wq) + return -ENOMEM; + + mw->mbox_wrk = devm_kcalloc(rvu->dev, num, + sizeof(struct rvu_work), GFP_KERNEL); + if (!mw->mbox_wrk) { + err = -ENOMEM; + goto exit; + } + + mw->mbox_wrk_up = devm_kcalloc(rvu->dev, num, + sizeof(struct rvu_work), GFP_KERNEL); + if (!mw->mbox_wrk_up) { + err = -ENOMEM; + goto exit; + } + + /* Mailbox is a reserved memory (in RAM) region shared between + * RVU devices, shouldn't be mapped as device memory to allow + * unaligned accesses. + */ + hwbase = ioremap_wc(bar4_addr, MBOX_SIZE * num); + if (!hwbase) { + dev_err(rvu->dev, "Unable to map mailbox region\n"); + err = -ENOMEM; + goto exit; + } + + err = otx2_mbox_init(&mw->mbox, hwbase, rvu->pdev, reg_base, dir, num); + if (err) + goto exit; + + err = otx2_mbox_init(&mw->mbox_up, hwbase, rvu->pdev, + reg_base, dir_up, num); + if (err) + goto exit; + + for (i = 0; i < num; i++) { + mwork = &mw->mbox_wrk[i]; + mwork->rvu = rvu; + INIT_WORK(&mwork->work, mbox_handler); + + mwork = &mw->mbox_wrk_up[i]; + mwork->rvu = rvu; + INIT_WORK(&mwork->work, mbox_up_handler); + } + + return 0; +exit: + if (hwbase) + iounmap((void __iomem *)hwbase); + destroy_workqueue(mw->mbox_wq); + return err; +} + +static void rvu_mbox_destroy(struct mbox_wq_info *mw) +{ + if (mw->mbox_wq) { + flush_workqueue(mw->mbox_wq); + destroy_workqueue(mw->mbox_wq); + mw->mbox_wq = NULL; + } + + if (mw->mbox.hwbase) + iounmap((void __iomem *)mw->mbox.hwbase); + + otx2_mbox_destroy(&mw->mbox); + otx2_mbox_destroy(&mw->mbox_up); +} + +static void rvu_queue_work(struct mbox_wq_info *mw, int first, + int mdevs, u64 intr) +{ + struct otx2_mbox_dev *mdev; + struct otx2_mbox *mbox; + struct mbox_hdr *hdr; + int i; + + for (i = first; i < mdevs; i++) { + /* start from 0 */ + if (!(intr & BIT_ULL(i - first))) + continue; + + mbox = &mw->mbox; + mdev = &mbox->dev[i]; + hdr = mdev->mbase + mbox->rx_start; + + /*The hdr->num_msgs is set to zero immediately in the interrupt + * handler to ensure that it holds a correct value next time + * when the interrupt handler is called. 
+ * pf->mbox.num_msgs holds the data for use in pfaf_mbox_handler + * pf>mbox.up_num_msgs holds the data for use in + * pfaf_mbox_up_handler. + */ + + if (hdr->num_msgs) { + mw->mbox_wrk[i].num_msgs = hdr->num_msgs; + hdr->num_msgs = 0; + queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work); + } + mbox = &mw->mbox_up; + mdev = &mbox->dev[i]; + hdr = mdev->mbase + mbox->rx_start; + if (hdr->num_msgs) { + mw->mbox_wrk_up[i].up_num_msgs = hdr->num_msgs; + hdr->num_msgs = 0; + queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work); + } + } +} + +static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq) +{ + struct rvu *rvu = (struct rvu *)rvu_irq; + int vfs = rvu->vfs; + u64 intr; + + intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT); + /* Clear interrupts */ + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr); + if (intr) + trace_otx2_msg_interrupt(rvu->pdev, "PF(s) to AF", intr); + + /* Sync with mbox memory region */ + rmb(); + + rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr); + + /* Handle VF interrupts */ + if (vfs > 64) { + intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1)); + rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr); + + rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr); + vfs -= 64; + } + + intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0)); + rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), intr); + if (intr) + trace_otx2_msg_interrupt(rvu->pdev, "VF(s) to AF", intr); + + rvu_queue_work(&rvu->afvf_wq_info, 0, vfs, intr); + + return IRQ_HANDLED; +} + +static void rvu_enable_mbox_intr(struct rvu *rvu) +{ + struct rvu_hwinfo *hw = rvu->hw; + + /* Clear spurious irqs, if any */ + rvu_write64(rvu, BLKADDR_RVUM, + RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs)); + + /* Enable mailbox interrupt for all PFs except PF0 i.e AF itself */ + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1S, + INTR_MASK(hw->total_pfs) & ~1ULL); +} + +static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr) +{ + struct rvu_block *block; + int slot, lf, num_lfs; + int err; + + block = &rvu->hw->block[blkaddr]; + num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc), + block->addr); + if (!num_lfs) + return; + for (slot = 0; slot < num_lfs; slot++) { + lf = rvu_get_lf(rvu, block, pcifunc, slot); + if (lf < 0) + continue; + + /* Cleanup LF and reset it */ + if (block->addr == BLKADDR_NIX0) + rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf); + else if (block->addr == BLKADDR_NPA) + rvu_npa_lf_teardown(rvu, pcifunc, lf); + + err = rvu_lf_reset(rvu, block, lf); + if (err) { + dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n", + block->addr, lf); + } + } +} + +static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc) +{ + mutex_lock(&rvu->flr_lock); + /* Reset order should reflect inter-block dependencies: + * 1. Reset any packet/work sources (NIX, CPT, TIM) + * 2. Flush and reset SSO/SSOW + * 3. 
Cleanup pools (NPA) + */ + rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0); + rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0); + rvu_blklf_teardown(rvu, pcifunc, BLKADDR_TIM); + rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW); + rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO); + rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA); + rvu_detach_rsrcs(rvu, NULL, pcifunc); + mutex_unlock(&rvu->flr_lock); +} + +static void rvu_afvf_flr_handler(struct rvu *rvu, int vf) +{ + int reg = 0; + + /* pcifunc = 0(PF0) | (vf + 1) */ + __rvu_flr_handler(rvu, vf + 1); + + if (vf >= 64) { + reg = 1; + vf = vf - 64; + } + + /* Signal FLR finish and enable IRQ */ + rvupf_write64(rvu, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf)); + rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf)); +} + +static void rvu_flr_handler(struct work_struct *work) +{ + struct rvu_work *flrwork = container_of(work, struct rvu_work, work); + struct rvu *rvu = flrwork->rvu; + u16 pcifunc, numvfs, vf; + u64 cfg; + int pf; + + pf = flrwork - rvu->flr_wrk; + if (pf >= rvu->hw->total_pfs) { + rvu_afvf_flr_handler(rvu, pf - rvu->hw->total_pfs); + return; + } + + cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); + numvfs = (cfg >> 12) & 0xFF; + pcifunc = pf << RVU_PFVF_PF_SHIFT; + + for (vf = 0; vf < numvfs; vf++) + __rvu_flr_handler(rvu, (pcifunc | (vf + 1))); + + __rvu_flr_handler(rvu, pcifunc); + + /* Signal FLR finish */ + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, BIT_ULL(pf)); + + /* Enable interrupt */ + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S, BIT_ULL(pf)); +} + +static void rvu_afvf_queue_flr_work(struct rvu *rvu, int start_vf, int numvfs) +{ + int dev, vf, reg = 0; + u64 intr; + + if (start_vf >= 64) + reg = 1; + + intr = rvupf_read64(rvu, RVU_PF_VFFLR_INTX(reg)); + if (!intr) + return; + + for (vf = 0; vf < numvfs; vf++) { + if (!(intr & BIT_ULL(vf))) + continue; + dev = vf + start_vf + rvu->hw->total_pfs; + queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work); + /* Clear and disable the interrupt */ + rvupf_write64(rvu, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf)); + rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(reg), BIT_ULL(vf)); + } +} + +static irqreturn_t rvu_flr_intr_handler(int irq, void *rvu_irq) +{ + struct rvu *rvu = (struct rvu *)rvu_irq; + u64 intr; + u8 pf; + + intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT); + if (!intr) + goto afvf_flr; + + for (pf = 0; pf < rvu->hw->total_pfs; pf++) { + if (intr & (1ULL << pf)) { + /* PF is already dead do only AF related operations */ + queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work); + /* clear interrupt */ + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT, + BIT_ULL(pf)); + /* Disable the interrupt */ + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C, + BIT_ULL(pf)); + } + } + +afvf_flr: + rvu_afvf_queue_flr_work(rvu, 0, 64); + if (rvu->vfs > 64) + rvu_afvf_queue_flr_work(rvu, 64, rvu->vfs - 64); + + return IRQ_HANDLED; +} + +static void rvu_me_handle_vfset(struct rvu *rvu, int idx, u64 intr) +{ + int vf; + + /* Nothing to be done here other than clearing the + * TRPEND bit. 
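+ * ME interrupts are raised when a VF changes its bus master enable
+ * setting; acknowledging the pending transaction is all the AF must do.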
+ */ + for (vf = 0; vf < 64; vf++) { + if (intr & (1ULL << vf)) { + /* clear the trpend due to ME(master enable) */ + rvupf_write64(rvu, RVU_PF_VFTRPENDX(idx), BIT_ULL(vf)); + /* clear interrupt */ + rvupf_write64(rvu, RVU_PF_VFME_INTX(idx), BIT_ULL(vf)); + } + } +} + +/* Handles ME interrupts from VFs of AF */ +static irqreturn_t rvu_me_vf_intr_handler(int irq, void *rvu_irq) +{ + struct rvu *rvu = (struct rvu *)rvu_irq; + int vfset; + u64 intr; + + intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT); + + for (vfset = 0; vfset <= 1; vfset++) { + intr = rvupf_read64(rvu, RVU_PF_VFME_INTX(vfset)); + if (intr) + rvu_me_handle_vfset(rvu, vfset, intr); + } + + return IRQ_HANDLED; +} + +/* Handles ME interrupts from PFs */ +static irqreturn_t rvu_me_pf_intr_handler(int irq, void *rvu_irq) +{ + struct rvu *rvu = (struct rvu *)rvu_irq; + u64 intr; + u8 pf; + + intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT); + + /* Nothing to be done here other than clearing the + * TRPEND bit. + */ + for (pf = 0; pf < rvu->hw->total_pfs; pf++) { + if (intr & (1ULL << pf)) { + /* clear the trpend due to ME(master enable) */ + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, + BIT_ULL(pf)); + /* clear interrupt */ + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT, + BIT_ULL(pf)); + } + } + + return IRQ_HANDLED; +} + +static void rvu_unregister_interrupts(struct rvu *rvu) +{ + int irq; + + /* Disable the Mbox interrupt */ + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C, + INTR_MASK(rvu->hw->total_pfs) & ~1ULL); + + /* Disable the PF FLR interrupt */ + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C, + INTR_MASK(rvu->hw->total_pfs) & ~1ULL); + + /* Disable the PF ME interrupt */ + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1C, + INTR_MASK(rvu->hw->total_pfs) & ~1ULL); + + for (irq = 0; irq < rvu->num_vec; irq++) { + if (rvu->irq_allocated[irq]) { + free_irq(pci_irq_vector(rvu->pdev, irq), rvu); + rvu->irq_allocated[irq] = false; + } + } + + pci_free_irq_vectors(rvu->pdev); + rvu->num_vec = 0; +} + +static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu) +{ + struct rvu_pfvf *pfvf = &rvu->pf[0]; + int offset; + + pfvf = &rvu->pf[0]; + offset = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff; + + /* Make sure there are enough MSIX vectors configured so that + * VF interrupts can be handled. Offset equal to zero means + * that PF vectors are not configured and overlapping AF vectors. 
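+ * In that case AF's VFs cannot be serviced, so SRIOV enablement and
+ * the AF-VF interrupt registration are skipped.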
+ */ + return (pfvf->msix.max >= RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT) && + offset; +} + +static int rvu_register_interrupts(struct rvu *rvu) +{ + int ret, offset, pf_vec_start; + + rvu->num_vec = pci_msix_vec_count(rvu->pdev); + + rvu->irq_name = devm_kmalloc_array(rvu->dev, rvu->num_vec, + NAME_SIZE, GFP_KERNEL); + if (!rvu->irq_name) + return -ENOMEM; + + rvu->irq_allocated = devm_kcalloc(rvu->dev, rvu->num_vec, + sizeof(bool), GFP_KERNEL); + if (!rvu->irq_allocated) + return -ENOMEM; + + /* Enable MSI-X */ + ret = pci_alloc_irq_vectors(rvu->pdev, rvu->num_vec, + rvu->num_vec, PCI_IRQ_MSIX); + if (ret < 0) { + dev_err(rvu->dev, + "RVUAF: Request for %d msix vectors failed, ret %d\n", + rvu->num_vec, ret); + return ret; + } + + /* Register mailbox interrupt handler */ + sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox"); + ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX), + rvu_mbox_intr_handler, 0, + &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu); + if (ret) { + dev_err(rvu->dev, + "RVUAF: IRQ registration failed for mbox irq\n"); + goto fail; + } + + rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true; + + /* Enable mailbox interrupts from all PFs */ + rvu_enable_mbox_intr(rvu); + + /* Register FLR interrupt handler */ + sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE], + "RVUAF FLR"); + ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFFLR), + rvu_flr_intr_handler, 0, + &rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE], + rvu); + if (ret) { + dev_err(rvu->dev, + "RVUAF: IRQ registration failed for FLR\n"); + goto fail; + } + rvu->irq_allocated[RVU_AF_INT_VEC_PFFLR] = true; + + /* Enable FLR interrupt for all PFs*/ + rvu_write64(rvu, BLKADDR_RVUM, + RVU_AF_PFFLR_INT, INTR_MASK(rvu->hw->total_pfs)); + + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S, + INTR_MASK(rvu->hw->total_pfs) & ~1ULL); + + /* Register ME interrupt handler */ + sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE], + "RVUAF ME"); + ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFME), + rvu_me_pf_intr_handler, 0, + &rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE], + rvu); + if (ret) { + dev_err(rvu->dev, + "RVUAF: IRQ registration failed for ME\n"); + } + rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true; + + /* Clear TRPEND bit for all PF */ + rvu_write64(rvu, BLKADDR_RVUM, + RVU_AF_PFTRPEND, INTR_MASK(rvu->hw->total_pfs)); + /* Enable ME interrupt for all PFs*/ + rvu_write64(rvu, BLKADDR_RVUM, + RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs)); + + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1S, + INTR_MASK(rvu->hw->total_pfs) & ~1ULL); + + if (!rvu_afvf_msix_vectors_num_ok(rvu)) + return 0; + + /* Get PF MSIX vectors offset. */ + pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM, + RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff; + + /* Register MBOX0 interrupt. */ + offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0; + sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0"); + ret = request_irq(pci_irq_vector(rvu->pdev, offset), + rvu_mbox_intr_handler, 0, + &rvu->irq_name[offset * NAME_SIZE], + rvu); + if (ret) + dev_err(rvu->dev, + "RVUAF: IRQ registration failed for Mbox0\n"); + + rvu->irq_allocated[offset] = true; + + /* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so + * simply increment current offset by 1. 
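+ * MBOX0 services AF's VFs 0-63, MBOX1 services VFs 64-127.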
+ */ + offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1; + sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1"); + ret = request_irq(pci_irq_vector(rvu->pdev, offset), + rvu_mbox_intr_handler, 0, + &rvu->irq_name[offset * NAME_SIZE], + rvu); + if (ret) + dev_err(rvu->dev, + "RVUAF: IRQ registration failed for Mbox1\n"); + + rvu->irq_allocated[offset] = true; + + /* Register FLR interrupt handler for AF's VFs */ + offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR0; + sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR0"); + ret = request_irq(pci_irq_vector(rvu->pdev, offset), + rvu_flr_intr_handler, 0, + &rvu->irq_name[offset * NAME_SIZE], rvu); + if (ret) { + dev_err(rvu->dev, + "RVUAF: IRQ registration failed for RVUAFVF FLR0\n"); + goto fail; + } + rvu->irq_allocated[offset] = true; + + offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR1; + sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR1"); + ret = request_irq(pci_irq_vector(rvu->pdev, offset), + rvu_flr_intr_handler, 0, + &rvu->irq_name[offset * NAME_SIZE], rvu); + if (ret) { + dev_err(rvu->dev, + "RVUAF: IRQ registration failed for RVUAFVF FLR1\n"); + goto fail; + } + rvu->irq_allocated[offset] = true; + + /* Register ME interrupt handler for AF's VFs */ + offset = pf_vec_start + RVU_PF_INT_VEC_VFME0; + sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME0"); + ret = request_irq(pci_irq_vector(rvu->pdev, offset), + rvu_me_vf_intr_handler, 0, + &rvu->irq_name[offset * NAME_SIZE], rvu); + if (ret) { + dev_err(rvu->dev, + "RVUAF: IRQ registration failed for RVUAFVF ME0\n"); + goto fail; + } + rvu->irq_allocated[offset] = true; + + offset = pf_vec_start + RVU_PF_INT_VEC_VFME1; + sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME1"); + ret = request_irq(pci_irq_vector(rvu->pdev, offset), + rvu_me_vf_intr_handler, 0, + &rvu->irq_name[offset * NAME_SIZE], rvu); + if (ret) { + dev_err(rvu->dev, + "RVUAF: IRQ registration failed for RVUAFVF ME1\n"); + goto fail; + } + rvu->irq_allocated[offset] = true; + return 0; + +fail: + rvu_unregister_interrupts(rvu); + return ret; +} + +static void rvu_flr_wq_destroy(struct rvu *rvu) +{ + if (rvu->flr_wq) { + flush_workqueue(rvu->flr_wq); + destroy_workqueue(rvu->flr_wq); + rvu->flr_wq = NULL; + } +} + +static int rvu_flr_init(struct rvu *rvu) +{ + int dev, num_devs; + u64 cfg; + int pf; + + /* Enable FLR for all PFs*/ + for (pf = 0; pf < rvu->hw->total_pfs; pf++) { + cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); + rvu_write64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf), + cfg | BIT_ULL(22)); + } + + rvu->flr_wq = alloc_workqueue("rvu_afpf_flr", + WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM, + 1); + if (!rvu->flr_wq) + return -ENOMEM; + + num_devs = rvu->hw->total_pfs + pci_sriov_get_totalvfs(rvu->pdev); + rvu->flr_wrk = devm_kcalloc(rvu->dev, num_devs, + sizeof(struct rvu_work), GFP_KERNEL); + if (!rvu->flr_wrk) { + destroy_workqueue(rvu->flr_wq); + return -ENOMEM; + } + + for (dev = 0; dev < num_devs; dev++) { + rvu->flr_wrk[dev].rvu = rvu; + INIT_WORK(&rvu->flr_wrk[dev].work, rvu_flr_handler); + } + + mutex_init(&rvu->flr_lock); + + return 0; +} + +static void rvu_disable_afvf_intr(struct rvu *rvu) +{ + int vfs = rvu->vfs; + + rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs)); + rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs)); + rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs)); + if (vfs <= 64) + return; + + rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), + INTR_MASK(vfs - 64)); + rvupf_write64(rvu, 
RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64)); + rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64)); +} + +static void rvu_enable_afvf_intr(struct rvu *rvu) +{ + int vfs = rvu->vfs; + + /* Clear any pending interrupts and enable AF VF interrupts for + * the first 64 VFs. + */ + /* Mbox */ + rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), INTR_MASK(vfs)); + rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(vfs)); + + /* FLR */ + rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs)); + rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs)); + rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(vfs)); + + /* Same for remaining VFs, if any. */ + if (vfs <= 64) + return; + + rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), INTR_MASK(vfs - 64)); + rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1), + INTR_MASK(vfs - 64)); + + rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64)); + rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64)); + rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64)); +} + +#define PCI_DEVID_OCTEONTX2_LBK 0xA061 + +static int lbk_get_num_chans(void) +{ + struct pci_dev *pdev; + void __iomem *base; + int ret = -EIO; + + pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_LBK, + NULL); + if (!pdev) + goto err; + + base = pci_ioremap_bar(pdev, 0); + if (!base) + goto err_put; + + /* Read number of available LBK channels from LBK(0)_CONST register. */ + ret = (readq(base + 0x10) >> 32) & 0xffff; + iounmap(base); +err_put: + pci_dev_put(pdev); +err: + return ret; +} + +static int rvu_enable_sriov(struct rvu *rvu) +{ + struct pci_dev *pdev = rvu->pdev; + int err, chans, vfs; + + if (!rvu_afvf_msix_vectors_num_ok(rvu)) { + dev_warn(&pdev->dev, + "Skipping SRIOV enablement since not enough IRQs are available\n"); + return 0; + } + + chans = lbk_get_num_chans(); + if (chans < 0) + return chans; + + vfs = pci_sriov_get_totalvfs(pdev); + + /* Limit VFs in case we have more VFs than LBK channels available. */ + if (vfs > chans) + vfs = chans; + + if (!vfs) + return 0; + + /* Save VFs number for reference in VF interrupts handlers. + * Since interrupts might start arriving during SRIOV enablement + * ordinary API cannot be used to get number of enabled VFs. + */ + rvu->vfs = vfs; + + err = rvu_mbox_init(rvu, &rvu->afvf_wq_info, TYPE_AFVF, vfs, + rvu_afvf_mbox_handler, rvu_afvf_mbox_up_handler); + if (err) + return err; + + rvu_enable_afvf_intr(rvu); + /* Make sure IRQs are enabled before SRIOV. */ + mb(); + + err = pci_enable_sriov(pdev, vfs); + if (err) { + rvu_disable_afvf_intr(rvu); + rvu_mbox_destroy(&rvu->afvf_wq_info); + return err; + } + + return 0; +} + +static void rvu_disable_sriov(struct rvu *rvu) +{ + rvu_disable_afvf_intr(rvu); + rvu_mbox_destroy(&rvu->afvf_wq_info); + pci_disable_sriov(rvu->pdev); +} + +static void rvu_update_module_params(struct rvu *rvu) +{ + const char *default_pfl_name = "default"; + + strscpy(rvu->mkex_pfl_name, + mkex_profile ? 
mkex_profile : default_pfl_name, MKEX_NAME_LEN); +} + +static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct device *dev = &pdev->dev; + struct rvu *rvu; + int err; + + rvu = devm_kzalloc(dev, sizeof(*rvu), GFP_KERNEL); + if (!rvu) + return -ENOMEM; + + rvu->hw = devm_kzalloc(dev, sizeof(struct rvu_hwinfo), GFP_KERNEL); + if (!rvu->hw) { + devm_kfree(dev, rvu); + return -ENOMEM; + } + + pci_set_drvdata(pdev, rvu); + rvu->pdev = pdev; + rvu->dev = &pdev->dev; + + err = pci_enable_device(pdev); + if (err) { + dev_err(dev, "Failed to enable PCI device\n"); + goto err_freemem; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + dev_err(dev, "PCI request regions failed 0x%x\n", err); + goto err_disable_device; + } + + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); + if (err) { + dev_err(dev, "DMA mask config failed, abort\n"); + goto err_release_regions; + } + + pci_set_master(pdev); + + rvu->ptp = ptp_get(); + if (IS_ERR(rvu->ptp)) { + err = PTR_ERR(rvu->ptp); + if (err == -EPROBE_DEFER) + goto err_release_regions; + rvu->ptp = NULL; + } + + /* Map Admin function CSRs */ + rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0); + rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0); + if (!rvu->afreg_base || !rvu->pfreg_base) { + dev_err(dev, "Unable to map admin function CSRs, aborting\n"); + err = -ENOMEM; + goto err_put_ptp; + } + + /* Store module params in rvu structure */ + rvu_update_module_params(rvu); + + /* Check which blocks the HW supports */ + rvu_check_block_implemented(rvu); + + rvu_reset_all_blocks(rvu); + + rvu_setup_hw_capabilities(rvu); + + err = rvu_setup_hw_resources(rvu); + if (err) + goto err_put_ptp; + + /* Init mailbox btw AF and PFs */ + err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF, + rvu->hw->total_pfs, rvu_afpf_mbox_handler, + rvu_afpf_mbox_up_handler); + if (err) + goto err_hwsetup; + + err = rvu_flr_init(rvu); + if (err) + goto err_mbox; + + err = rvu_register_interrupts(rvu); + if (err) + goto err_flr; + + rvu_setup_rvum_blk_revid(rvu); + + /* Enable AF's VFs (if any) */ + err = rvu_enable_sriov(rvu); + if (err) + goto err_irq; + + /* Initialize debugfs */ + rvu_dbg_init(rvu); + + return 0; +err_irq: + rvu_unregister_interrupts(rvu); +err_flr: + rvu_flr_wq_destroy(rvu); +err_mbox: + rvu_mbox_destroy(&rvu->afpf_wq_info); +err_hwsetup: + rvu_cgx_exit(rvu); + rvu_fwdata_exit(rvu); + rvu_reset_all_blocks(rvu); + rvu_free_hw_resources(rvu); + rvu_clear_rvum_blk_revid(rvu); +err_put_ptp: + ptp_put(rvu->ptp); +err_release_regions: + pci_release_regions(pdev); +err_disable_device: + pci_disable_device(pdev); +err_freemem: + pci_set_drvdata(pdev, NULL); + devm_kfree(&pdev->dev, rvu->hw); + devm_kfree(dev, rvu); + return err; +} + +static void rvu_remove(struct pci_dev *pdev) +{ + struct rvu *rvu = pci_get_drvdata(pdev); + + rvu_dbg_exit(rvu); + rvu_unregister_interrupts(rvu); + rvu_flr_wq_destroy(rvu); + rvu_cgx_exit(rvu); + rvu_fwdata_exit(rvu); + rvu_mbox_destroy(&rvu->afpf_wq_info); + rvu_disable_sriov(rvu); + rvu_reset_all_blocks(rvu); + rvu_free_hw_resources(rvu); + rvu_clear_rvum_blk_revid(rvu); + ptp_put(rvu->ptp); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + + devm_kfree(&pdev->dev, rvu->hw); + devm_kfree(&pdev->dev, rvu); +} + +static struct pci_driver rvu_driver = { + .name = DRV_NAME, + .id_table = rvu_id_table, + .probe = rvu_probe, + .remove = rvu_remove, +}; + +static int __init rvu_init_module(void) +{ + int err; + + pr_info("%s: %s\n", 
DRV_NAME, DRV_STRING); + + err = pci_register_driver(&cgx_driver); + if (err < 0) + return err; + + err = pci_register_driver(&ptp_driver); + if (err < 0) + goto ptp_err; + + err = pci_register_driver(&rvu_driver); + if (err < 0) + goto rvu_err; + + return 0; +rvu_err: + pci_unregister_driver(&ptp_driver); +ptp_err: + pci_unregister_driver(&cgx_driver); + + return err; +} + +static void __exit rvu_cleanup_module(void) +{ + pci_unregister_driver(&rvu_driver); + pci_unregister_driver(&ptp_driver); + pci_unregister_driver(&cgx_driver); +} + +module_init(rvu_init_module); +module_exit(rvu_cleanup_module); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h new file mode 100644 index 000000000..ec9a291e8 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -0,0 +1,539 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef RVU_H +#define RVU_H + +#include <linux/pci.h> +#include "rvu_struct.h" +#include "common.h" +#include "mbox.h" + +/* PCI device IDs */ +#define PCI_DEVID_OCTEONTX2_RVU_AF 0xA065 + +/* Subsystem Device ID */ +#define PCI_SUBSYS_DEVID_98XX 0xB100 +#define PCI_SUBSYS_DEVID_96XX 0xB200 + +/* PCI BAR nos */ +#define PCI_AF_REG_BAR_NUM 0 +#define PCI_PF_REG_BAR_NUM 2 +#define PCI_MBOX_BAR_NUM 4 + +#define NAME_SIZE 32 + +/* PF_FUNC */ +#define RVU_PFVF_PF_SHIFT 10 +#define RVU_PFVF_PF_MASK 0x3F +#define RVU_PFVF_FUNC_SHIFT 0 +#define RVU_PFVF_FUNC_MASK 0x3FF + +#ifdef CONFIG_DEBUG_FS +struct dump_ctx { + int lf; + int id; + bool all; +}; + +struct rvu_debugfs { + struct dentry *root; + struct dentry *cgx_root; + struct dentry *cgx; + struct dentry *lmac; + struct dentry *npa; + struct dentry *nix; + struct dentry *npc; + struct dump_ctx npa_aura_ctx; + struct dump_ctx npa_pool_ctx; + struct dump_ctx nix_cq_ctx; + struct dump_ctx nix_rq_ctx; + struct dump_ctx nix_sq_ctx; + int npa_qsize_id; + int nix_qsize_id; +}; +#endif + +struct rvu_work { + struct work_struct work; + struct rvu *rvu; + int num_msgs; + int up_num_msgs; +}; + +struct rsrc_bmap { + unsigned long *bmap; /* Pointer to resource bitmap */ + u16 max; /* Max resource id or count */ +}; + +struct rvu_block { + struct rsrc_bmap lf; + struct admin_queue *aq; /* NIX/NPA AQ */ + u16 *fn_map; /* LF to pcifunc mapping */ + bool multislot; + bool implemented; + u8 addr; /* RVU_BLOCK_ADDR_E */ + u8 type; /* RVU_BLOCK_TYPE_E */ + u8 lfshift; + u64 lookup_reg; + u64 pf_lfcnt_reg; + u64 vf_lfcnt_reg; + u64 lfcfg_reg; + u64 msixcfg_reg; + u64 lfreset_reg; + unsigned char name[NAME_SIZE]; +}; + +struct nix_mcast { + struct qmem *mce_ctx; + struct qmem *mcast_buf; + int replay_pkind; + int next_free_mce; + struct mutex mce_lock; /* Serialize MCE updates */ +}; + +struct nix_mce_list { + struct hlist_head head; + int count; + int max; +}; + +struct npc_mcam { + struct rsrc_bmap counters; + struct mutex lock; /* MCAM entries and counters update lock */ + unsigned long *bmap; /* bitmap, 0 => bmap_entries */ + unsigned long *bmap_reverse; /* Reverse bitmap, bmap_entries => 0 */ + u16 bmap_entries; /* Number of unreserved MCAM entries */ + u16 bmap_fcnt; /* MCAM entries free count */ + u16 *entry2pfvf_map; + u16 *entry2cntr_map; + u16 *cntr2pfvf_map; + u16 *cntr_refcnt; 
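+ /* MCAM bank/key geometry below is filled in during NPC init */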
+ u8 keysize; /* MCAM keysize 112/224/448 bits */ + u8 banks; /* Number of MCAM banks */ + u8 banks_per_entry;/* Number of keywords in key */ + u16 banksize; /* Number of MCAM entries in each bank */ + u16 total_entries; /* Total number of MCAM entries */ + u16 nixlf_offset; /* Offset of nixlf rsvd uncast entries */ + u16 pf_offset; /* Offset of PF's rsvd bcast, promisc entries */ + u16 lprio_count; + u16 lprio_start; + u16 hprio_count; + u16 hprio_end; + u16 rx_miss_act_cntr; /* Counter for RX MISS action */ +}; + +/* Structure for per RVU func info ie PF/VF */ +struct rvu_pfvf { + bool npalf; /* Only one NPALF per RVU_FUNC */ + bool nixlf; /* Only one NIXLF per RVU_FUNC */ + u16 sso; + u16 ssow; + u16 cptlfs; + u16 timlfs; + u16 cpt1_lfs; + u8 cgx_lmac; + + /* Block LF's MSIX vector info */ + struct rsrc_bmap msix; /* Bitmap for MSIX vector alloc */ +#define MSIX_BLKLF(blkaddr, lf) (((blkaddr) << 8) | ((lf) & 0xFF)) + u16 *msix_lfmap; /* Vector to block LF mapping */ + + /* NPA contexts */ + struct qmem *aura_ctx; + struct qmem *pool_ctx; + struct qmem *npa_qints_ctx; + unsigned long *aura_bmap; + unsigned long *pool_bmap; + + /* NIX contexts */ + struct qmem *rq_ctx; + struct qmem *sq_ctx; + struct qmem *cq_ctx; + struct qmem *rss_ctx; + struct qmem *cq_ints_ctx; + struct qmem *nix_qints_ctx; + unsigned long *sq_bmap; + unsigned long *rq_bmap; + unsigned long *cq_bmap; + + u16 rx_chan_base; + u16 tx_chan_base; + u8 rx_chan_cnt; /* total number of RX channels */ + u8 tx_chan_cnt; /* total number of TX channels */ + u16 maxlen; + u16 minlen; + + u8 mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */ + + /* Broadcast pkt replication info */ + u16 bcast_mce_idx; + struct nix_mce_list bcast_mce_list; + + /* VLAN offload */ + struct mcam_entry entry; + int rxvlan_index; + bool rxvlan; + + bool cgx_in_use; /* this PF/VF using CGX? */ + int cgx_users; /* number of cgx users - used only by PFs */ + + u8 nix_blkaddr; /* BLKADDR_NIX0/1 assigned to this PF */ +}; + +struct nix_txsch { + struct rsrc_bmap schq; + u8 lvl; +#define NIX_TXSCHQ_FREE BIT_ULL(1) +#define NIX_TXSCHQ_CFG_DONE BIT_ULL(0) +#define TXSCH_MAP_FUNC(__pfvf_map) ((__pfvf_map) & 0xFFFF) +#define TXSCH_MAP_FLAGS(__pfvf_map) ((__pfvf_map) >> 16) +#define TXSCH_MAP(__func, __flags) (((__func) & 0xFFFF) | ((__flags) << 16)) +#define TXSCH_SET_FLAG(__pfvf_map, flag) ((__pfvf_map) | ((flag) << 16)) + u32 *pfvf_map; +}; + +struct nix_mark_format { + u8 total; + u8 in_use; + u32 *cfg; +}; + +struct npc_pkind { + struct rsrc_bmap rsrc; + u32 *pfchan_map; +}; + +struct nix_flowkey { +#define NIX_FLOW_KEY_ALG_MAX 32 + u32 flowkey[NIX_FLOW_KEY_ALG_MAX]; + int in_use; +}; + +struct nix_lso { + u8 total; + u8 in_use; +}; + +struct nix_hw { + struct nix_txsch txsch[NIX_TXSCH_LVL_CNT]; /* Tx schedulers */ + struct nix_mcast mcast; + struct nix_flowkey flowkey; + struct nix_mark_format mark_format; + struct nix_lso lso; +}; + +/* RVU block's capabilities or functionality, + * which vary by silicon version/skew. + */ +struct hw_cap { + /* Transmit side supported functionality */ + u8 nix_tx_aggr_lvl; /* Tx link's traffic aggregation level */ + u16 nix_txsch_per_cgx_lmac; /* Max Q's transmitting to CGX LMAC */ + u16 nix_txsch_per_lbk_lmac; /* Max Q's transmitting to LBK LMAC */ + u16 nix_txsch_per_sdp_lmac; /* Max Q's transmitting to SDP LMAC */ + bool nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */ + bool nix_shaping; /* Is shaping and coloring supported */ + bool nix_tx_link_bp; /* Can link backpressure TL queues ? 
*/ + bool nix_rx_multicast; /* Rx packet replication support */ +}; + +struct rvu_hwinfo { + u8 total_pfs; /* MAX RVU PFs HW supports */ + u16 total_vfs; /* Max RVU VFs HW supports */ + u16 max_vfs_per_pf; /* Max VFs that can be attached to a PF */ + u8 cgx; + u8 lmac_per_cgx; + u8 cgx_links; + u8 lbk_links; + u8 sdp_links; + u8 npc_kpus; /* No of parser units */ + + struct hw_cap cap; + struct rvu_block block[BLK_COUNT]; /* Block info */ + struct nix_hw *nix0; + struct npc_pkind pkind; + struct npc_mcam mcam; +}; + +struct mbox_wq_info { + struct otx2_mbox mbox; + struct rvu_work *mbox_wrk; + + struct otx2_mbox mbox_up; + struct rvu_work *mbox_wrk_up; + + struct workqueue_struct *mbox_wq; +}; + +struct rvu_fwdata { +#define RVU_FWDATA_HEADER_MAGIC 0xCFDA /* Custom Firmware Data*/ +#define RVU_FWDATA_VERSION 0x0001 + u32 header_magic; + u32 version; /* version id */ + + /* MAC address */ +#define PF_MACNUM_MAX 32 +#define VF_MACNUM_MAX 256 + u64 pf_macs[PF_MACNUM_MAX]; + u64 vf_macs[VF_MACNUM_MAX]; + u64 sclk; + u64 rclk; + u64 mcam_addr; + u64 mcam_sz; + u64 msixtr_base; +#define FWDATA_RESERVED_MEM 1023 + u64 reserved[FWDATA_RESERVED_MEM]; +}; + +struct ptp; + +/* KPU profile adapter structure gathering all KPU configuration data and abstracting out the + * source where it came from. + */ +struct npc_kpu_profile_adapter { + const char *name; + u64 version; + const struct npc_lt_def_cfg *lt_def; + const struct npc_kpu_profile_action *ikpu; /* array[pkinds] */ + const struct npc_kpu_profile *kpu; /* array[kpus] */ + const struct npc_mcam_kex *mkex; + size_t pkinds; + size_t kpus; +}; + +struct rvu { + void __iomem *afreg_base; + void __iomem *pfreg_base; + struct pci_dev *pdev; + struct device *dev; + struct rvu_hwinfo *hw; + struct rvu_pfvf *pf; + struct rvu_pfvf *hwvf; + struct mutex rsrc_lock; /* Serialize resource alloc/free */ + int vfs; /* Number of VFs attached to RVU */ + + /* Mbox */ + struct mbox_wq_info afpf_wq_info; + struct mbox_wq_info afvf_wq_info; + + /* PF FLR */ + struct rvu_work *flr_wrk; + struct workqueue_struct *flr_wq; + struct mutex flr_lock; /* Serialize FLRs */ + + /* MSI-X */ + u16 num_vec; + char *irq_name; + bool *irq_allocated; + dma_addr_t msix_base_iova; + u64 msixtr_base_phy; /* Register reset value */ + + /* CGX */ +#define PF_CGXMAP_BASE 1 /* PF 0 is reserved for RVU PF */ + u8 cgx_mapped_pfs; + u8 cgx_cnt_max; /* CGX port count max */ + u8 *pf2cgxlmac_map; /* pf to cgx_lmac map */ + u16 *cgxlmac2pf_map; /* bitmap of mapped pfs for + * every cgx lmac port + */ + unsigned long pf_notify_bmap; /* Flags for PF notification */ + void **cgx_idmap; /* cgx id to cgx data map table */ + struct work_struct cgx_evh_work; + struct workqueue_struct *cgx_evh_wq; + spinlock_t cgx_evq_lock; /* cgx event queue lock */ + struct list_head cgx_evq_head; /* cgx event queue head */ + struct mutex cgx_cfg_lock; /* serialize cgx configuration */ + + char mkex_pfl_name[MKEX_NAME_LEN]; /* Configured MKEX profile name */ + + /* Firmware data */ + struct rvu_fwdata *fwdata; + + /* NPC KPU data */ + struct npc_kpu_profile_adapter kpu; + + struct ptp *ptp; + +#ifdef CONFIG_DEBUG_FS + struct rvu_debugfs rvu_dbg; +#endif +}; + +static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val) +{ + writeq(val, rvu->afreg_base + ((block << 28) | offset)); +} + +static inline u64 rvu_read64(struct rvu *rvu, u64 block, u64 offset) +{ + return readq(rvu->afreg_base + ((block << 28) | offset)); +} + +static inline void rvupf_write64(struct rvu *rvu, u64 offset, u64 val) +{ + 
writeq(val, rvu->pfreg_base + offset); +} + +static inline u64 rvupf_read64(struct rvu *rvu, u64 offset) +{ + return readq(rvu->pfreg_base + offset); +} + +/* Silicon revisions */ +static inline bool is_rvu_96xx_A0(struct rvu *rvu) +{ + struct pci_dev *pdev = rvu->pdev; + + return (pdev->revision == 0x00) && + (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX); +} + +static inline bool is_rvu_96xx_B0(struct rvu *rvu) +{ + struct pci_dev *pdev = rvu->pdev; + + return ((pdev->revision == 0x00) || (pdev->revision == 0x01)) && + (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX); +} + +static inline bool is_rvu_supports_nix1(struct rvu *rvu) +{ + struct pci_dev *pdev = rvu->pdev; + + if (pdev->subsystem_device == PCI_SUBSYS_DEVID_98XX) + return true; + + return false; +} + +/* Function Prototypes + * RVU + */ +static inline int is_afvf(u16 pcifunc) +{ + return !(pcifunc & ~RVU_PFVF_FUNC_MASK); +} + +static inline bool is_rvu_fwdata_valid(struct rvu *rvu) +{ + return (rvu->fwdata->header_magic == RVU_FWDATA_HEADER_MAGIC) && + (rvu->fwdata->version == RVU_FWDATA_VERSION); +} + +int rvu_alloc_bitmap(struct rsrc_bmap *rsrc); +int rvu_alloc_rsrc(struct rsrc_bmap *rsrc); +void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id); +int rvu_rsrc_free_count(struct rsrc_bmap *rsrc); +int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc); +bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc); +u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr); +int rvu_get_pf(u16 pcifunc); +struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc); +void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf); +bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr); +bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype); +int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot); +int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf); +int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc); +int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero); + +/* RVU HW reg validation */ +enum regmap_block { + TXSCHQ_HWREGMAP = 0, + MAX_HWREGMAP, +}; + +bool rvu_check_valid_reg(int regmap, int regblk, u64 reg); + +/* NPA/NIX AQ APIs */ +int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue, + int qsize, int inst_size, int res_size); +void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq); + +/* CGX APIs */ +static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf) +{ + return (pf >= PF_CGXMAP_BASE && pf <= rvu->cgx_mapped_pfs); +} + +static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id) +{ + *cgx_id = (map >> 4) & 0xF; + *lmac_id = (map & 0xF); +} + +#define M(_name, _id, fn_name, req, rsp) \ +int rvu_mbox_handler_ ## fn_name(struct rvu *, struct req *, struct rsp *); +MBOX_MESSAGES +#undef M + +int rvu_cgx_init(struct rvu *rvu); +int rvu_cgx_exit(struct rvu *rvu); +void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu); +int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start); +void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable); +int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start); +int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id, int index, + int rxtxflag, u64 *stat); +/* NPA APIs */ +int rvu_npa_init(struct rvu *rvu); +void rvu_npa_freemem(struct rvu *rvu); +void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf); +int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req, + struct npa_aq_enq_rsp *rsp); + +/* 
NIX APIs */ +bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc); +int rvu_nix_init(struct rvu *rvu); +int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw, + int blkaddr, u32 cfg); +void rvu_nix_freemem(struct rvu *rvu); +int rvu_get_nixlf_count(struct rvu *rvu); +void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int npalf); +int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr); +int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add); + +/* NPC APIs */ +int rvu_npc_init(struct rvu *rvu); +void rvu_npc_freemem(struct rvu *rvu); +int rvu_npc_get_pkind(struct rvu *rvu, u16 pf); +void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf); +int npc_config_ts_kpuaction(struct rvu *rvu, int pf, u16 pcifunc, bool en); +void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc, + int nixlf, u64 chan, u8 *mac_addr); +void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, + int nixlf, u64 chan, bool allmulti); +void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf); +void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf); +void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc, + int nixlf, u64 chan); +void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, bool enable); +int rvu_npc_update_rxvlan(struct rvu *rvu, u16 pcifunc, int nixlf); +void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf); +void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf); +void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf); +void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf, + int group, int alg_idx, int mcam_index); +void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc, + int blkaddr, int *alloc_cnt, + int *enable_cnt); +void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc, + int blkaddr, int *alloc_cnt, + int *enable_cnt); + +#ifdef CONFIG_DEBUG_FS +void rvu_dbg_init(struct rvu *rvu); +void rvu_dbg_exit(struct rvu *rvu); +#else +static inline void rvu_dbg_init(struct rvu *rvu) {} +static inline void rvu_dbg_exit(struct rvu *rvu) {} +#endif +#endif /* RVU_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c new file mode 100644 index 000000000..83743e153 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -0,0 +1,769 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/types.h> +#include <linux/module.h> +#include <linux/pci.h> + +#include "rvu.h" +#include "cgx.h" +#include "rvu_reg.h" +#include "rvu_trace.h" + +struct cgx_evq_entry { + struct list_head evq_node; + struct cgx_link_event link_event; +}; + +#define M(_name, _id, _fn_name, _req_type, _rsp_type) \ +static struct _req_type __maybe_unused \ +*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid) \ +{ \ + struct _req_type *req; \ + \ + req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \ + &rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \ + sizeof(struct _rsp_type)); \ + if (!req) \ + return NULL; \ + req->hdr.sig = OTX2_MBOX_REQ_SIG; \ + req->hdr.id = _id; \ + trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req)); \ + return req; \ +} + +MBOX_UP_CGX_MESSAGES +#undef M + +/* Returns bitmap of mapped PFs */ +static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id) +{ + return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id]; +} + +static int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id) +{ + unsigned long pfmap; + + pfmap = cgxlmac_to_pfmap(rvu, cgx_id, lmac_id); + + /* Assumes only one pf mapped to a cgx lmac port */ + if (!pfmap) + return -ENODEV; + else + return find_first_bit(&pfmap, 16); +} + +static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id) +{ + return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF); +} + +void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu) +{ + if (cgx_id >= rvu->cgx_cnt_max) + return NULL; + + return rvu->cgx_idmap[cgx_id]; +} + +/* Based on P2X connectivity find mapped NIX block for a PF */ +static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf, + int cgx_id, int lmac_id) +{ + struct rvu_pfvf *pfvf = &rvu->pf[pf]; + u8 p2x; + + p2x = cgx_lmac_get_p2x(cgx_id, lmac_id); + /* Firmware sets P2X_SELECT as either NIX0 or NIX1 */ + pfvf->nix_blkaddr = BLKADDR_NIX0; + if (is_rvu_supports_nix1(rvu) && p2x == CMR_P2X_SEL_NIX1) + pfvf->nix_blkaddr = BLKADDR_NIX1; +} + +static int rvu_map_cgx_lmac_pf(struct rvu *rvu) +{ + struct npc_pkind *pkind = &rvu->hw->pkind; + int cgx_cnt_max = rvu->cgx_cnt_max; + int cgx, lmac_cnt, lmac; + int pf = PF_CGXMAP_BASE; + int size, free_pkind; + + if (!cgx_cnt_max) + return 0; + + if (cgx_cnt_max > 0xF || MAX_LMAC_PER_CGX > 0xF) + return -EINVAL; + + /* Alloc map table + * An additional entry is required since PF id starts from 1 and + * hence entry at offset 0 is invalid. 
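+ * (PF0 is the AF itself and is never mapped to a CGX LMAC)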
+ */ + size = (cgx_cnt_max * MAX_LMAC_PER_CGX + 1) * sizeof(u8); + rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL); + if (!rvu->pf2cgxlmac_map) + return -ENOMEM; + + /* Initialize all entries with an invalid cgx and lmac id */ + memset(rvu->pf2cgxlmac_map, 0xFF, size); + + /* Reverse map table */ + rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev, + cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u16), + GFP_KERNEL); + if (!rvu->cgxlmac2pf_map) + return -ENOMEM; + + rvu->cgx_mapped_pfs = 0; + for (cgx = 0; cgx < cgx_cnt_max; cgx++) { + if (!rvu_cgx_pdata(cgx, rvu)) + continue; + lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); + for (lmac = 0; lmac < lmac_cnt; lmac++, pf++) { + rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac); + rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf; + free_pkind = rvu_alloc_rsrc(&pkind->rsrc); + pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16; + rvu_map_cgx_nix_block(rvu, pf, cgx, lmac); + rvu->cgx_mapped_pfs++; + } + } + return 0; +} + +static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu) +{ + struct cgx_evq_entry *qentry; + unsigned long flags; + int err; + + qentry = kmalloc(sizeof(*qentry), GFP_KERNEL); + if (!qentry) + return -ENOMEM; + + /* Lock the event queue before we read the local link status */ + spin_lock_irqsave(&rvu->cgx_evq_lock, flags); + err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id, + &qentry->link_event.link_uinfo); + qentry->link_event.cgx_id = cgx_id; + qentry->link_event.lmac_id = lmac_id; + if (err) + goto skip_add; + list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head); +skip_add: + spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags); + + /* start worker to process the events */ + queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work); + + return 0; +} + +/* This is called from interrupt context and is expected to be atomic */ +static int cgx_lmac_postevent(struct cgx_link_event *event, void *data) +{ + struct cgx_evq_entry *qentry; + struct rvu *rvu = data; + + /* post event to the event queue */ + qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC); + if (!qentry) + return -ENOMEM; + qentry->link_event = *event; + spin_lock(&rvu->cgx_evq_lock); + list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head); + spin_unlock(&rvu->cgx_evq_lock); + + /* start worker to process the events */ + queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work); + + return 0; +} + +static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu) +{ + struct cgx_link_user_info *linfo; + struct cgx_link_info_msg *msg; + unsigned long pfmap; + int err, pfid; + + linfo = &event->link_uinfo; + pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id); + + do { + pfid = find_first_bit(&pfmap, 16); + clear_bit(pfid, &pfmap); + + /* check if notification is enabled */ + if (!test_bit(pfid, &rvu->pf_notify_bmap)) { + dev_info(rvu->dev, "cgx %d: lmac %d Link status %s\n", + event->cgx_id, event->lmac_id, + linfo->link_up ? 
"UP" : "DOWN"); + continue; + } + + /* Send mbox message to PF */ + msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid); + if (!msg) + continue; + msg->link_info = *linfo; + otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid); + err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid); + if (err) + dev_warn(rvu->dev, "notification to pf %d failed\n", + pfid); + } while (pfmap); +} + +static void cgx_evhandler_task(struct work_struct *work) +{ + struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work); + struct cgx_evq_entry *qentry; + struct cgx_link_event *event; + unsigned long flags; + + do { + /* Dequeue an event */ + spin_lock_irqsave(&rvu->cgx_evq_lock, flags); + qentry = list_first_entry_or_null(&rvu->cgx_evq_head, + struct cgx_evq_entry, + evq_node); + if (qentry) + list_del(&qentry->evq_node); + spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags); + if (!qentry) + break; /* nothing more to process */ + + event = &qentry->link_event; + + /* process event */ + cgx_notify_pfs(event, rvu); + kfree(qentry); + } while (1); +} + +static int cgx_lmac_event_handler_init(struct rvu *rvu) +{ + struct cgx_event_cb cb; + int cgx, lmac, err; + void *cgxd; + + spin_lock_init(&rvu->cgx_evq_lock); + INIT_LIST_HEAD(&rvu->cgx_evq_head); + INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task); + rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0); + if (!rvu->cgx_evh_wq) { + dev_err(rvu->dev, "alloc workqueue failed"); + return -ENOMEM; + } + + cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */ + cb.data = rvu; + + for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) { + cgxd = rvu_cgx_pdata(cgx, rvu); + if (!cgxd) + continue; + for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) { + err = cgx_lmac_evh_register(&cb, cgxd, lmac); + if (err) + dev_err(rvu->dev, + "%d:%d handler register failed\n", + cgx, lmac); + } + } + + return 0; +} + +static void rvu_cgx_wq_destroy(struct rvu *rvu) +{ + if (rvu->cgx_evh_wq) { + flush_workqueue(rvu->cgx_evh_wq); + destroy_workqueue(rvu->cgx_evh_wq); + rvu->cgx_evh_wq = NULL; + } +} + +int rvu_cgx_init(struct rvu *rvu) +{ + int cgx, err; + void *cgxd; + + /* CGX port id starts from 0 and are not necessarily contiguous + * Hence we allocate resources based on the maximum port id value. 
+ */ + rvu->cgx_cnt_max = cgx_get_cgxcnt_max(); + if (!rvu->cgx_cnt_max) { + dev_info(rvu->dev, "No CGX devices found!\n"); + return -ENODEV; + } + + rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max * + sizeof(void *), GFP_KERNEL); + if (!rvu->cgx_idmap) + return -ENOMEM; + + /* Initialize the cgxdata table */ + for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++) + rvu->cgx_idmap[cgx] = cgx_get_pdata(cgx); + + /* Map CGX LMAC interfaces to RVU PFs */ + err = rvu_map_cgx_lmac_pf(rvu); + if (err) + return err; + + /* Register for CGX events */ + err = cgx_lmac_event_handler_init(rvu); + if (err) + return err; + + mutex_init(&rvu->cgx_cfg_lock); + + /* Ensure event handler registration is completed, before + * we turn on the links + */ + mb(); + + /* Do link up for all CGX ports */ + for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) { + cgxd = rvu_cgx_pdata(cgx, rvu); + if (!cgxd) + continue; + err = cgx_lmac_linkup_start(cgxd); + if (err) + dev_err(rvu->dev, + "Link up process failed to start on cgx %d\n", + cgx); + } + + return 0; +} + +int rvu_cgx_exit(struct rvu *rvu) +{ + int cgx, lmac; + void *cgxd; + + for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) { + cgxd = rvu_cgx_pdata(cgx, rvu); + if (!cgxd) + continue; + for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) + cgx_lmac_evh_unregister(cgxd, lmac); + } + + /* Ensure event handler unregister is completed */ + mb(); + + rvu_cgx_wq_destroy(rvu); + return 0; +} + +/* Most of the CGX configuration is restricted to the mapped PF only, + * VF's of mapped PF and other PFs are not allowed. This fn() checks + * whether a PFFUNC is permitted to do the config or not. + */ +static bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc) +{ + if ((pcifunc & RVU_PFVF_FUNC_MASK) || + !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) + return false; + return true; +} + +void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable) +{ + u8 cgx_id, lmac_id; + void *cgxd; + + if (!is_pf_cgxmapped(rvu, pf)) + return; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + cgxd = rvu_cgx_pdata(cgx_id, rvu); + + /* Set / clear CTL_BCK to control pause frame forwarding to NIX */ + if (enable) + cgx_lmac_enadis_rx_pause_fwding(cgxd, lmac_id, true); + else + cgx_lmac_enadis_rx_pause_fwding(cgxd, lmac_id, false); +} + +int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start) +{ + int pf = rvu_get_pf(pcifunc); + u8 cgx_id, lmac_id; + + if (!is_cgx_config_permitted(rvu, pcifunc)) + return -EPERM; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + + cgx_lmac_rx_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, start); + + return 0; +} + +int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true); + return 0; +} + +int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false); + return 0; +} + +int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req, + struct cgx_stats_rsp *rsp) +{ + int pf = rvu_get_pf(req->hdr.pcifunc); + int stat = 0, err = 0; + u64 tx_stat, rx_stat; + u8 cgx_idx, lmac; + void *cgxd; + + if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) + return -ENODEV; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac); + cgxd = rvu_cgx_pdata(cgx_idx, rvu); + + /* Rx stats */ + while (stat < CGX_RX_STATS_COUNT) { + err = cgx_get_rx_stats(cgxd, lmac, stat, &rx_stat); + if (err) + return err; + rsp->rx_stats[stat] = 
rx_stat; + stat++; + } + + /* Tx stats */ + stat = 0; + while (stat < CGX_TX_STATS_COUNT) { + err = cgx_get_tx_stats(cgxd, lmac, stat, &tx_stat); + if (err) + return err; + rsp->tx_stats[stat] = tx_stat; + stat++; + } + return 0; +} + +int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu, + struct cgx_mac_addr_set_or_get *req, + struct cgx_mac_addr_set_or_get *rsp) +{ + int pf = rvu_get_pf(req->hdr.pcifunc); + u8 cgx_id, lmac_id; + + if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) + return -EPERM; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + + cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr); + + return 0; +} + +int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu, + struct cgx_mac_addr_set_or_get *req, + struct cgx_mac_addr_set_or_get *rsp) +{ + int pf = rvu_get_pf(req->hdr.pcifunc); + u8 cgx_id, lmac_id; + int rc = 0, i; + u64 cfg; + + if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) + return -EPERM; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + + rsp->hdr.rc = rc; + cfg = cgx_lmac_addr_get(cgx_id, lmac_id); + /* copy 48 bit mac address to req->mac_addr */ + for (i = 0; i < ETH_ALEN; i++) + rsp->mac_addr[i] = cfg >> (ETH_ALEN - 1 - i) * 8; + return 0; +} + +int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; + int pf = rvu_get_pf(pcifunc); + u8 cgx_id, lmac_id; + + if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) + return -EPERM; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + + cgx_lmac_promisc_config(cgx_id, lmac_id, true); + return 0; +} + +int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + int pf = rvu_get_pf(req->hdr.pcifunc); + u8 cgx_id, lmac_id; + + if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) + return -EPERM; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + + cgx_lmac_promisc_config(cgx_id, lmac_id, false); + return 0; +} + +static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) +{ + int pf = rvu_get_pf(pcifunc); + u8 cgx_id, lmac_id; + void *cgxd; + + /* This msg is expected only from PFs that are mapped to CGX LMACs, + * if received from other PF/VF simply ACK, nothing to do. 
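+ * (the pcifunc check below rejects such senders with -ENODEV)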
+ */ + if ((pcifunc & RVU_PFVF_FUNC_MASK) || + !is_pf_cgxmapped(rvu, pf)) + return -ENODEV; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + cgxd = rvu_cgx_pdata(cgx_id, rvu); + + cgx_lmac_ptp_config(cgxd, lmac_id, enable); + /* If PTP is enabled then inform NPC that packets to be + * parsed by this PF will have their data shifted by 8 bytes + * and if PTP is disabled then no shift is required + */ + if (npc_config_ts_kpuaction(rvu, pf, pcifunc, enable)) + return -EINVAL; + + return 0; +} + +int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true); +} + +int rvu_mbox_handler_cgx_ptp_rx_disable(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, false); +} + +static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en) +{ + int pf = rvu_get_pf(pcifunc); + u8 cgx_id, lmac_id; + + if (!is_cgx_config_permitted(rvu, pcifunc)) + return -EPERM; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + + if (en) { + set_bit(pf, &rvu->pf_notify_bmap); + /* Send the current link status to PF */ + rvu_cgx_send_link_info(cgx_id, lmac_id, rvu); + } else { + clear_bit(pf, &rvu->pf_notify_bmap); + } + + return 0; +} + +int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true); + return 0; +} + +int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false); + return 0; +} + +int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req, + struct cgx_link_info_msg *rsp) +{ + u8 cgx_id, lmac_id; + int pf, err; + + pf = rvu_get_pf(req->hdr.pcifunc); + + if (!is_pf_cgxmapped(rvu, pf)) + return -ENODEV; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + + err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id, + &rsp->link_info); + return err; +} + +static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en) +{ + int pf = rvu_get_pf(pcifunc); + u8 cgx_id, lmac_id; + + if (!is_cgx_config_permitted(rvu, pcifunc)) + return -EPERM; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + + return cgx_lmac_internal_loopback(rvu_cgx_pdata(cgx_id, rvu), + lmac_id, en); +} + +int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true); + return 0; +} + +int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false); + return 0; +} + +int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu, + struct cgx_pause_frm_cfg *req, + struct cgx_pause_frm_cfg *rsp) +{ + int pf = rvu_get_pf(req->hdr.pcifunc); + u8 cgx_id, lmac_id; + + /* This msg is expected only from PF/VFs that are mapped to CGX LMACs, + * if received from other PF/VF simply ACK, nothing to do. 
+ */ + if (!is_pf_cgxmapped(rvu, pf)) + return -ENODEV; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + + if (req->set) + cgx_lmac_set_pause_frm(rvu_cgx_pdata(cgx_id, rvu), lmac_id, + req->tx_pause, req->rx_pause); + else + cgx_lmac_get_pause_frm(rvu_cgx_pdata(cgx_id, rvu), lmac_id, + &rsp->tx_pause, &rsp->rx_pause); + return 0; +} + +/* Finds cumulative status of NIX rx/tx counters from LF of a PF and those + * from its VFs as well. ie. NIX rx/tx counters at the CGX port level + */ +int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id, + int index, int rxtxflag, u64 *stat) +{ + struct rvu_block *block; + int blkaddr; + u16 pcifunc; + int pf, lf; + + *stat = 0; + + if (!cgxd || !rvu) + return -EINVAL; + + pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id); + if (pf < 0) + return pf; + + /* Assumes LF of a PF and all of its VF belongs to the same + * NIX block + */ + pcifunc = pf << RVU_PFVF_PF_SHIFT; + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (blkaddr < 0) + return 0; + block = &rvu->hw->block[blkaddr]; + + for (lf = 0; lf < block->lf.max; lf++) { + /* Check if a lf is attached to this PF or one of its VFs */ + if (!((block->fn_map[lf] & ~RVU_PFVF_FUNC_MASK) == (pcifunc & + ~RVU_PFVF_FUNC_MASK))) + continue; + if (rxtxflag == NIX_STATS_RX) + *stat += rvu_read64(rvu, blkaddr, + NIX_AF_LFX_RX_STATX(lf, index)); + else + *stat += rvu_read64(rvu, blkaddr, + NIX_AF_LFX_TX_STATX(lf, index)); + } + + return 0; +} + +int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start) +{ + struct rvu_pfvf *parent_pf, *pfvf; + int cgx_users, err = 0; + + if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) + return 0; + + parent_pf = &rvu->pf[rvu_get_pf(pcifunc)]; + pfvf = rvu_get_pfvf(rvu, pcifunc); + + mutex_lock(&rvu->cgx_cfg_lock); + + if (start && pfvf->cgx_in_use) + goto exit; /* CGX is already started hence nothing to do */ + if (!start && !pfvf->cgx_in_use) + goto exit; /* CGX is already stopped hence nothing to do */ + + if (start) { + cgx_users = parent_pf->cgx_users; + parent_pf->cgx_users++; + } else { + parent_pf->cgx_users--; + cgx_users = parent_pf->cgx_users; + } + + /* Start CGX when first of all NIXLFs is started. + * Stop CGX when last of all NIXLFs is stopped. + */ + if (!cgx_users) { + err = rvu_cgx_config_rxtx(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK, + start); + if (err) { + dev_err(rvu->dev, "Unable to %s CGX\n", + start ? "start" : "stop"); + /* Revert the usage count in case of error */ + parent_pf->cgx_users = start ? parent_pf->cgx_users - 1 + : parent_pf->cgx_users + 1; + goto exit; + } + } + pfvf->cgx_in_use = start; +exit: + mutex_unlock(&rvu->cgx_cfg_lock); + return err; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c new file mode 100644 index 000000000..520579685 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -0,0 +1,1799 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2019 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifdef CONFIG_DEBUG_FS + +#include <linux/fs.h> +#include <linux/debugfs.h> +#include <linux/module.h> +#include <linux/pci.h> + +#include "rvu_struct.h" +#include "rvu_reg.h" +#include "rvu.h" +#include "cgx.h" +#include "npc.h" + +#define DEBUGFS_DIR_NAME "octeontx2" + +enum { + CGX_STAT0, + CGX_STAT1, + CGX_STAT2, + CGX_STAT3, + CGX_STAT4, + CGX_STAT5, + CGX_STAT6, + CGX_STAT7, + CGX_STAT8, + CGX_STAT9, + CGX_STAT10, + CGX_STAT11, + CGX_STAT12, + CGX_STAT13, + CGX_STAT14, + CGX_STAT15, + CGX_STAT16, + CGX_STAT17, + CGX_STAT18, +}; + +/* NIX TX stats */ +enum nix_stat_lf_tx { + TX_UCAST = 0x0, + TX_BCAST = 0x1, + TX_MCAST = 0x2, + TX_DROP = 0x3, + TX_OCTS = 0x4, + TX_STATS_ENUM_LAST, +}; + +/* NIX RX stats */ +enum nix_stat_lf_rx { + RX_OCTS = 0x0, + RX_UCAST = 0x1, + RX_BCAST = 0x2, + RX_MCAST = 0x3, + RX_DROP = 0x4, + RX_DROP_OCTS = 0x5, + RX_FCS = 0x6, + RX_ERR = 0x7, + RX_DRP_BCAST = 0x8, + RX_DRP_MCAST = 0x9, + RX_DRP_L3BCAST = 0xa, + RX_DRP_L3MCAST = 0xb, + RX_STATS_ENUM_LAST, +}; + +static char *cgx_rx_stats_fields[] = { + [CGX_STAT0] = "Received packets", + [CGX_STAT1] = "Octets of received packets", + [CGX_STAT2] = "Received PAUSE packets", + [CGX_STAT3] = "Received PAUSE and control packets", + [CGX_STAT4] = "Filtered DMAC0 (NIX-bound) packets", + [CGX_STAT5] = "Filtered DMAC0 (NIX-bound) octets", + [CGX_STAT6] = "Packets dropped due to RX FIFO full", + [CGX_STAT7] = "Octets dropped due to RX FIFO full", + [CGX_STAT8] = "Error packets", + [CGX_STAT9] = "Filtered DMAC1 (NCSI-bound) packets", + [CGX_STAT10] = "Filtered DMAC1 (NCSI-bound) octets", + [CGX_STAT11] = "NCSI-bound packets dropped", + [CGX_STAT12] = "NCSI-bound octets dropped", +}; + +static char *cgx_tx_stats_fields[] = { + [CGX_STAT0] = "Packets dropped due to excessive collisions", + [CGX_STAT1] = "Packets dropped due to excessive deferral", + [CGX_STAT2] = "Multiple collisions before successful transmission", + [CGX_STAT3] = "Single collisions before successful transmission", + [CGX_STAT4] = "Total octets sent on the interface", + [CGX_STAT5] = "Total frames sent on the interface", + [CGX_STAT6] = "Packets sent with an octet count < 64", + [CGX_STAT7] = "Packets sent with an octet count == 64", + [CGX_STAT8] = "Packets sent with an octet count of 65–127", + [CGX_STAT9] = "Packets sent with an octet count of 128-255", + [CGX_STAT10] = "Packets sent with an octet count of 256-511", + [CGX_STAT11] = "Packets sent with an octet count of 512-1023", + [CGX_STAT12] = "Packets sent with an octet count of 1024-1518", + [CGX_STAT13] = "Packets sent with an octet count of > 1518", + [CGX_STAT14] = "Packets sent to a broadcast DMAC", + [CGX_STAT15] = "Packets sent to the multicast DMAC", + [CGX_STAT16] = "Transmit underflow and were truncated", + [CGX_STAT17] = "Control/PAUSE packets sent", +}; + +#define NDC_MAX_BANK(rvu, blk_addr) (rvu_read64(rvu, \ + blk_addr, NDC_AF_CONST) & 0xFF) + +#define rvu_dbg_NULL NULL +#define rvu_dbg_open_NULL NULL + +#define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op) \ +static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \ +{ \ + return single_open(file, rvu_dbg_##read_op, inode->i_private); \ +} \ +static const struct file_operations rvu_dbg_##name##_fops = { \ + .owner = THIS_MODULE, \ + .open = rvu_dbg_open_##name, \ + .read = seq_read, \ + .write = rvu_dbg_##write_op, \ + .llseek = seq_lseek, \ + .release = single_release, \ +} + +#define RVU_DEBUG_FOPS(name, read_op, write_op) \ +static const struct file_operations rvu_dbg_##name##_fops = { \ + .owner = 
THIS_MODULE, \ + .open = simple_open, \ + .read = rvu_dbg_##read_op, \ + .write = rvu_dbg_##write_op \ +} + +static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf); + +static void get_lf_str_list(struct rvu_block block, int pcifunc, + char *lfs) +{ + int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max; + + for_each_set_bit(lf, block.lf.bmap, block.lf.max) { + if (lf >= block.lf.max) + break; + + if (block.fn_map[lf] != pcifunc) + continue; + + if (lf == prev_lf + 1) { + prev_lf = lf; + seq = 1; + continue; + } + + if (seq) + len += sprintf(lfs + len, "-%d,%d", prev_lf, lf); + else + len += (len ? sprintf(lfs + len, ",%d", lf) : + sprintf(lfs + len, "%d", lf)); + + prev_lf = lf; + seq = 0; + } + + if (seq) + len += sprintf(lfs + len, "-%d", prev_lf); + + lfs[len] = '\0'; +} + +static int get_max_column_width(struct rvu *rvu) +{ + int index, pf, vf, lf_str_size = 12, buf_size = 256; + struct rvu_block block; + u16 pcifunc; + char *buf; + + buf = kzalloc(buf_size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + for (pf = 0; pf < rvu->hw->total_pfs; pf++) { + for (vf = 0; vf <= rvu->hw->total_vfs; vf++) { + pcifunc = pf << 10 | vf; + if (!pcifunc) + continue; + + for (index = 0; index < BLK_COUNT; index++) { + block = rvu->hw->block[index]; + if (!strlen(block.name)) + continue; + + get_lf_str_list(block, pcifunc, buf); + if (lf_str_size <= strlen(buf)) + lf_str_size = strlen(buf) + 1; + } + } + } + + kfree(buf); + return lf_str_size; +} + +/* Dumps current provisioning status of all RVU block LFs */ +static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp, + char __user *buffer, + size_t count, loff_t *ppos) +{ + int index, off = 0, flag = 0, len = 0, i = 0; + struct rvu *rvu = filp->private_data; + int bytes_not_copied = 0; + struct rvu_block block; + int pf, vf, pcifunc; + int buf_size = 2048; + int lf_str_size; + char *lfs; + char *buf; + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + + buf = kzalloc(buf_size, GFP_KERNEL); + if (!buf) + return -ENOSPC; + + /* Get the maximum width of a column */ + lf_str_size = get_max_column_width(rvu); + + lfs = kzalloc(lf_str_size, GFP_KERNEL); + if (!lfs) { + kfree(buf); + return -ENOMEM; + } + off += scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size, + "pcifunc"); + for (index = 0; index < BLK_COUNT; index++) + if (strlen(rvu->hw->block[index].name)) { + off += scnprintf(&buf[off], buf_size - 1 - off, + "%-*s", lf_str_size, + rvu->hw->block[index].name); + } + + off += scnprintf(&buf[off], buf_size - 1 - off, "\n"); + bytes_not_copied = copy_to_user(buffer + (i * off), buf, off); + if (bytes_not_copied) + goto out; + + i++; + *ppos += off; + for (pf = 0; pf < rvu->hw->total_pfs; pf++) { + for (vf = 0; vf <= rvu->hw->total_vfs; vf++) { + off = 0; + flag = 0; + pcifunc = pf << 10 | vf; + if (!pcifunc) + continue; + + if (vf) { + sprintf(lfs, "PF%d:VF%d", pf, vf - 1); + off = scnprintf(&buf[off], + buf_size - 1 - off, + "%-*s", lf_str_size, lfs); + } else { + sprintf(lfs, "PF%d", pf); + off = scnprintf(&buf[off], + buf_size - 1 - off, + "%-*s", lf_str_size, lfs); + } + + for (index = 0; index < BLK_COUNT; index++) { + block = rvu->hw->block[index]; + if (!strlen(block.name)) + continue; + len = 0; + lfs[len] = '\0'; + get_lf_str_list(block, pcifunc, lfs); + if (strlen(lfs)) + flag = 1; + + off += scnprintf(&buf[off], buf_size - 1 - off, + "%-*s", lf_str_size, lfs); + } + if (flag) { + off += scnprintf(&buf[off], + buf_size - 1 - off, "\n"); + bytes_not_copied = copy_to_user(buffer + + (i * off), + buf, 
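Editor's note: get_lf_str_list() above folds the LFs owned by one pcifunc into a compact "a-b,c" style string. A standalone user-space sketch of the same range-folding idea over a plain sorted array (buffer size and sample data are illustrative):

#include <stdio.h>
#include <string.h>

/* Fold a sorted list of LF indices into "0-3,7,9-10" style text. */
static void lf_range_str(const int *lf, int cnt, char *out, size_t len)
{
        int i = 0, start, end, off = 0;

        out[0] = '\0';
        while (i < cnt) {
                start = end = lf[i++];
                while (i < cnt && lf[i] == end + 1)
                        end = lf[i++];
                off += snprintf(out + off, len - off, "%s%d",
                                off ? "," : "", start);
                if (end != start)
                        off += snprintf(out + off, len - off, "-%d", end);
        }
}

int main(void)
{
        int lfs[] = { 0, 1, 2, 3, 7, 9, 10 };
        char buf[64];

        lf_range_str(lfs, 7, buf, sizeof(buf));
        printf("%s\n", buf);    /* prints 0-3,7,9-10 */
        return 0;
}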
off); + if (bytes_not_copied) + goto out; + + i++; + *ppos += off; + } + } + } + +out: + kfree(lfs); + kfree(buf); + if (bytes_not_copied) + return -EFAULT; + + return *ppos; +} + +RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL); + +static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blktype, int lf, + u16 *pcifunc) +{ + struct rvu_block *block; + struct rvu_hwinfo *hw; + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, blktype, 0); + if (blkaddr < 0) { + dev_warn(rvu->dev, "Invalid blktype\n"); + return false; + } + + hw = rvu->hw; + block = &hw->block[blkaddr]; + + if (lf < 0 || lf >= block->lf.max) { + dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n", + block->lf.max - 1); + return false; + } + + *pcifunc = block->fn_map[lf]; + if (!*pcifunc) { + dev_warn(rvu->dev, + "This LF is not attached to any RVU PFFUNC\n"); + return false; + } + return true; +} + +static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf) +{ + char *buf; + + buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!buf) + return; + + if (!pfvf->aura_ctx) { + seq_puts(m, "Aura context is not initialized\n"); + } else { + bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap, + pfvf->aura_ctx->qsize); + seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize); + seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf); + } + + if (!pfvf->pool_ctx) { + seq_puts(m, "Pool context is not initialized\n"); + } else { + bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap, + pfvf->pool_ctx->qsize); + seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize); + seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf); + } + kfree(buf); +} + +/* The 'qsize' entry dumps current Aura/Pool context Qsize + * and each context's current enable/disable status in a bitmap. + */ +static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused, + int blktype) +{ + void (*print_qsize)(struct seq_file *filp, + struct rvu_pfvf *pfvf) = NULL; + struct rvu_pfvf *pfvf; + struct rvu *rvu; + int qsize_id; + u16 pcifunc; + + rvu = filp->private; + switch (blktype) { + case BLKTYPE_NPA: + qsize_id = rvu->rvu_dbg.npa_qsize_id; + print_qsize = print_npa_qsize; + break; + + case BLKTYPE_NIX: + qsize_id = rvu->rvu_dbg.nix_qsize_id; + print_qsize = print_nix_qsize; + break; + + default: + return -EINVAL; + } + + if (!rvu_dbg_is_valid_lf(rvu, blktype, qsize_id, &pcifunc)) + return -EINVAL; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + print_qsize(filp, pfvf); + + return 0; +} + +static ssize_t rvu_dbg_qsize_write(struct file *filp, + const char __user *buffer, size_t count, + loff_t *ppos, int blktype) +{ + char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix"; + struct seq_file *seqfile = filp->private_data; + char *cmd_buf, *cmd_buf_tmp, *subtoken; + struct rvu *rvu = seqfile->private; + u16 pcifunc; + int ret, lf; + + cmd_buf = memdup_user(buffer, count + 1); + if (IS_ERR(cmd_buf)) + return -ENOMEM; + + cmd_buf[count] = '\0'; + + cmd_buf_tmp = strchr(cmd_buf, '\n'); + if (cmd_buf_tmp) { + *cmd_buf_tmp = '\0'; + count = cmd_buf_tmp - cmd_buf + 1; + } + + cmd_buf_tmp = cmd_buf; + subtoken = strsep(&cmd_buf, " "); + ret = subtoken ? 
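Editor's note: throughout this file a device function is addressed by a 16-bit "pcifunc", built as pf << 10 | vf in rvu_dbg_rsrc_attach_status() above, with the VF number stored off-by-one so that a zero function field means the PF itself. A small sketch of that encoding; the 10-bit function mask is an assumption inferred from the expressions used here:

#include <stdio.h>

#define PF_SHIFT   10
#define FUNC_MASK  0x3ff        /* assumed: low 10 bits, 0 = PF, N = VF(N-1) */

static unsigned short mk_pcifunc(int pf, int vf_plus1)
{
        return (unsigned short)(pf << PF_SHIFT | vf_plus1);
}

int main(void)
{
        unsigned short pcifunc = mk_pcifunc(2, 3);      /* PF2, VF2 */

        printf("pf=%d vf=%d\n", pcifunc >> PF_SHIFT,
               (pcifunc & FUNC_MASK) - 1);
        return 0;
}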
kstrtoint(subtoken, 10, &lf) : -EINVAL; + if (cmd_buf) + ret = -EINVAL; + + if (!strncmp(subtoken, "help", 4) || ret < 0) { + dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string); + goto qsize_write_done; + } + + if (!rvu_dbg_is_valid_lf(rvu, blktype, lf, &pcifunc)) { + ret = -EINVAL; + goto qsize_write_done; + } + if (blktype == BLKTYPE_NPA) + rvu->rvu_dbg.npa_qsize_id = lf; + else + rvu->rvu_dbg.nix_qsize_id = lf; + +qsize_write_done: + kfree(cmd_buf_tmp); + return ret ? ret : count; +} + +static ssize_t rvu_dbg_npa_qsize_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + return rvu_dbg_qsize_write(filp, buffer, count, ppos, + BLKTYPE_NPA); +} + +static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused) +{ + return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA); +} + +RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write); + +/* Dumps given NPA Aura's context */ +static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp) +{ + struct npa_aura_s *aura = &rsp->aura; + + seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr); + + seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n", + aura->ena, aura->pool_caching); + seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n", + aura->pool_way_mask, aura->avg_con); + seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n", + aura->pool_drop_ena, aura->aura_drop_ena); + seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n", + aura->bp_ena, aura->aura_drop); + seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n", + aura->shift, aura->avg_level); + + seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n", + (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid); + + seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n", + (u64)aura->limit, aura->bp, aura->fc_ena); + seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n", + aura->fc_up_crossing, aura->fc_stype); + seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits); + + seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr); + + seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n", + aura->pool_drop, aura->update_time); + seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n", + aura->err_int, aura->err_int_ena); + seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n", + aura->thresh_int, aura->thresh_int_ena); + seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n", + aura->thresh_up, aura->thresh_qint_idx); + seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx); + + seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh); +} + +/* Dumps given NPA Pool's context */ +static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp) +{ + struct npa_pool_s *pool = &rsp->pool; + + seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base); + + seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n", + pool->ena, pool->nat_align); + seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n", + pool->stack_caching, pool->stack_way_mask); + seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n", + pool->buf_offset, pool->buf_size); + + seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n", + pool->stack_max_pages, pool->stack_pages); + + seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc); + + seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n", + pool->stack_offset, pool->shift, pool->avg_level); + seq_printf(m, "W4: avg_con \t\t%d\nW4: 
fc_ena\t\t%d\nW4: fc_stype\t\t%d\n", + pool->avg_con, pool->fc_ena, pool->fc_stype); + seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n", + pool->fc_hyst_bits, pool->fc_up_crossing); + seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time); + + seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr); + + seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start); + + seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end); + + seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n", + pool->err_int, pool->err_int_ena); + seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int); + seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n", + pool->thresh_int_ena, pool->thresh_up); + seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t\t%d\n", + pool->thresh_qint_idx, pool->err_qint_idx); +} + +/* Reads aura/pool's ctx from admin queue */ +static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype) +{ + void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp); + struct npa_aq_enq_req aq_req; + struct npa_aq_enq_rsp rsp; + struct rvu_pfvf *pfvf; + int aura, rc, max_id; + int npalf, id, all; + struct rvu *rvu; + u16 pcifunc; + + rvu = m->private; + + switch (ctype) { + case NPA_AQ_CTYPE_AURA: + npalf = rvu->rvu_dbg.npa_aura_ctx.lf; + id = rvu->rvu_dbg.npa_aura_ctx.id; + all = rvu->rvu_dbg.npa_aura_ctx.all; + break; + + case NPA_AQ_CTYPE_POOL: + npalf = rvu->rvu_dbg.npa_pool_ctx.lf; + id = rvu->rvu_dbg.npa_pool_ctx.id; + all = rvu->rvu_dbg.npa_pool_ctx.all; + break; + default: + return -EINVAL; + } + + if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NPA, npalf, &pcifunc)) + return -EINVAL; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) { + seq_puts(m, "Aura context is not initialized\n"); + return -EINVAL; + } else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) { + seq_puts(m, "Pool context is not initialized\n"); + return -EINVAL; + } + + memset(&aq_req, 0, sizeof(struct npa_aq_enq_req)); + aq_req.hdr.pcifunc = pcifunc; + aq_req.ctype = ctype; + aq_req.op = NPA_AQ_INSTOP_READ; + if (ctype == NPA_AQ_CTYPE_AURA) { + max_id = pfvf->aura_ctx->qsize; + print_npa_ctx = print_npa_aura_ctx; + } else { + max_id = pfvf->pool_ctx->qsize; + print_npa_ctx = print_npa_pool_ctx; + } + + if (id < 0 || id >= max_id) { + seq_printf(m, "Invalid %s, valid range is 0-%d\n", + (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool", + max_id - 1); + return -EINVAL; + } + + if (all) + id = 0; + else + max_id = id + 1; + + for (aura = id; aura < max_id; aura++) { + aq_req.aura_id = aura; + seq_printf(m, "======%s : %d=======\n", + (ctype == NPA_AQ_CTYPE_AURA) ? 
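Editor's note: rvu_dbg_npa_ctx_display() above either dumps one aura/pool or, when "all" was requested, walks every index up to the queue size. The selection reduces to a clamp-and-iterate pattern; a minimal sketch with an illustrative callback:

#include <stdio.h>
#include <stdbool.h>

static void dump_one(int idx) { printf("ctx %d\n", idx); }

/* Mirror of the id/all handling above: reject out-of-range ids, then pick
 * the iteration window [id, max_id). */
static int dump_ctx(int id, bool all, int qsize)
{
        int i, max_id = qsize;

        if (id < 0 || id >= qsize)
                return -1;
        if (all)
                id = 0;
        else
                max_id = id + 1;
        for (i = id; i < max_id; i++)
                dump_one(i);
        return 0;
}

int main(void)
{
        dump_ctx(2, false, 8);  /* only index 2 */
        dump_ctx(0, true, 4);   /* indices 0..3 */
        return 0;
}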
"AURA" : "POOL", + aq_req.aura_id); + rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp); + if (rc) { + seq_puts(m, "Failed to read context\n"); + return -EINVAL; + } + print_npa_ctx(m, &rsp); + } + return 0; +} + +static int write_npa_ctx(struct rvu *rvu, bool all, + int npalf, int id, int ctype) +{ + struct rvu_pfvf *pfvf; + int max_id = 0; + u16 pcifunc; + + if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NPA, npalf, &pcifunc)) + return -EINVAL; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + + if (ctype == NPA_AQ_CTYPE_AURA) { + if (!pfvf->aura_ctx) { + dev_warn(rvu->dev, "Aura context is not initialized\n"); + return -EINVAL; + } + max_id = pfvf->aura_ctx->qsize; + } else if (ctype == NPA_AQ_CTYPE_POOL) { + if (!pfvf->pool_ctx) { + dev_warn(rvu->dev, "Pool context is not initialized\n"); + return -EINVAL; + } + max_id = pfvf->pool_ctx->qsize; + } + + if (id < 0 || id >= max_id) { + dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n", + (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool", + max_id - 1); + return -EINVAL; + } + + switch (ctype) { + case NPA_AQ_CTYPE_AURA: + rvu->rvu_dbg.npa_aura_ctx.lf = npalf; + rvu->rvu_dbg.npa_aura_ctx.id = id; + rvu->rvu_dbg.npa_aura_ctx.all = all; + break; + + case NPA_AQ_CTYPE_POOL: + rvu->rvu_dbg.npa_pool_ctx.lf = npalf; + rvu->rvu_dbg.npa_pool_ctx.id = id; + rvu->rvu_dbg.npa_pool_ctx.all = all; + break; + default: + return -EINVAL; + } + return 0; +} + +static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count, + const char __user *buffer, int *npalf, + int *id, bool *all) +{ + int bytes_not_copied; + char *cmd_buf_tmp; + char *subtoken; + int ret; + + bytes_not_copied = copy_from_user(cmd_buf, buffer, *count); + if (bytes_not_copied) + return -EFAULT; + + cmd_buf[*count] = '\0'; + cmd_buf_tmp = strchr(cmd_buf, '\n'); + + if (cmd_buf_tmp) { + *cmd_buf_tmp = '\0'; + *count = cmd_buf_tmp - cmd_buf + 1; + } + + subtoken = strsep(&cmd_buf, " "); + ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL; + if (ret < 0) + return ret; + subtoken = strsep(&cmd_buf, " "); + if (subtoken && strcmp(subtoken, "all") == 0) { + *all = true; + } else { + ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL; + if (ret < 0) + return ret; + } + if (cmd_buf) + return -EINVAL; + return ret; +} + +static ssize_t rvu_dbg_npa_ctx_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos, int ctype) +{ + char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ? + "aura" : "pool"; + struct seq_file *seqfp = filp->private_data; + struct rvu *rvu = seqfp->private; + int npalf, id = 0, ret; + bool all = false; + + if ((*ppos != 0) || !count) + return -EINVAL; + + cmd_buf = kzalloc(count + 1, GFP_KERNEL); + if (!cmd_buf) + return count; + ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer, + &npalf, &id, &all); + if (ret < 0) { + dev_info(rvu->dev, + "Usage: echo <npalf> [%s number/all] > %s_ctx\n", + ctype_string, ctype_string); + goto done; + } else { + ret = write_npa_ctx(rvu, all, npalf, id, ctype); + } +done: + kfree(cmd_buf); + return ret ? 
ret : count; +} + +static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos, + NPA_AQ_CTYPE_AURA); +} + +static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused) +{ + return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA); +} + +RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write); + +static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos, + NPA_AQ_CTYPE_POOL); +} + +static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused) +{ + return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL); +} + +RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write); + +static void ndc_cache_stats(struct seq_file *s, int blk_addr, + int ctype, int transaction) +{ + u64 req, out_req, lat, cant_alloc; + struct rvu *rvu = s->private; + int port; + + for (port = 0; port < NDC_MAX_PORT; port++) { + req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC + (port, ctype, transaction)); + lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC + (port, ctype, transaction)); + out_req = rvu_read64(rvu, blk_addr, + NDC_AF_PORTX_RTX_RWX_OSTDN_PC + (port, ctype, transaction)); + cant_alloc = rvu_read64(rvu, blk_addr, + NDC_AF_PORTX_RTX_CANT_ALLOC_PC + (port, transaction)); + seq_printf(s, "\nPort:%d\n", port); + seq_printf(s, "\tTotal Requests:\t\t%lld\n", req); + seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat); + seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n", lat / req); + seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req); + seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc); + } +} + +static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr) +{ + seq_puts(s, "\n***** CACHE mode read stats *****\n"); + ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS); + seq_puts(s, "\n***** CACHE mode write stats *****\n"); + ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS); + seq_puts(s, "\n***** BY-PASS mode read stats *****\n"); + ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS); + seq_puts(s, "\n***** BY-PASS mode write stats *****\n"); + ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS); + return 0; +} + +static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused) +{ + return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0); +} + +RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL); + +static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr) +{ + struct rvu *rvu = s->private; + int bank, max_bank; + + max_bank = NDC_MAX_BANK(rvu, blk_addr); + for (bank = 0; bank < max_bank; bank++) { + seq_printf(s, "BANK:%d\n", bank); + seq_printf(s, "\tHits:\t%lld\n", + (u64)rvu_read64(rvu, blk_addr, + NDC_AF_BANKX_HIT_PC(bank))); + seq_printf(s, "\tMiss:\t%lld\n", + (u64)rvu_read64(rvu, blk_addr, + NDC_AF_BANKX_MISS_PC(bank))); + } + return 0; +} + +static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused) +{ + return ndc_blk_cache_stats(filp, NIX0_RX, + BLKADDR_NDC_NIX0_RX); +} + +RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL); + +static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused) +{ + return ndc_blk_cache_stats(filp, NIX0_TX, + BLKADDR_NDC_NIX0_TX); +} + +RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, 
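Editor's note: ndc_cache_stats() and ndc_blk_hits_miss_stats() above print the raw NDC performance counters; the figures one usually wants are derived, such as average latency (cycles per request) and hit ratio. A hedged sketch of that arithmetic with zero guards (the counter values are illustrative):

#include <stdio.h>
#include <stdint.h>

/* Derived NDC figures; guard the divisions since counters may be zero. */
static void ndc_summary(uint64_t req, uint64_t lat_cycles,
                        uint64_t hits, uint64_t miss)
{
        uint64_t lookups = hits + miss;

        printf("avg latency : %llu cycles\n",
               req ? (unsigned long long)(lat_cycles / req) : 0ULL);
        printf("hit ratio   : %.1f%%\n",
               lookups ? 100.0 * (double)hits / (double)lookups : 0.0);
}

int main(void)
{
        ndc_summary(1000, 52000, 900, 100);
        return 0;
}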
nix_ndc_tx_cache_display, NULL); + +static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp, + void *unused) +{ + return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0); +} + +RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL); + +static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp, + void *unused) +{ + return ndc_blk_hits_miss_stats(filp, + NPA0_U, BLKADDR_NDC_NIX0_RX); +} + +RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL); + +static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp, + void *unused) +{ + return ndc_blk_hits_miss_stats(filp, + NPA0_U, BLKADDR_NDC_NIX0_TX); +} + +RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL); + +/* Dumps given nix_sq's context */ +static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp) +{ + struct nix_sq_ctx_s *sq_ctx = &rsp->sq; + + seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n", + sq_ctx->sqe_way_mask, sq_ctx->cq); + seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n", + sq_ctx->sdp_mcast, sq_ctx->substream); + seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n", + sq_ctx->qint_idx, sq_ctx->ena); + + seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n", + sq_ctx->sqb_count, sq_ctx->default_chan); + seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n", + sq_ctx->smq_rr_quantum, sq_ctx->sso_ena); + seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n", + sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq); + + seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n", + sq_ctx->sqe_stype, sq_ctx->sq_int_ena); + seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n", + sq_ctx->sq_int, sq_ctx->sqb_aura); + seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count); + + seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n", + sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend); + seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n", + sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset); + seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n", + sq_ctx->smenq_offset, sq_ctx->tail_offset); + seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n", + sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq); + seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n", + sq_ctx->mnq_dis, sq_ctx->lmt_dis); + seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n", + sq_ctx->cq_limit, sq_ctx->max_sqe_size); + + seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb); + seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb); + seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb); + seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n", + sq_ctx->smenq_next_sqb); + + seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb); + + seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n", + sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena); + seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n", + sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps); + seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n", + sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1); + seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total); + + seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n", + (u64)sq_ctx->scm_lso_rem); + seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs); + seq_printf(m, "W12: pkts \t\t\t%llu\n\n", 
(u64)sq_ctx->pkts); + seq_printf(m, "W14: dropped_octs \t\t%llu\n\n", + (u64)sq_ctx->dropped_octs); + seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n", + (u64)sq_ctx->dropped_pkts); +} + +/* Dumps given nix_rq's context */ +static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp) +{ + struct nix_rq_ctx_s *rq_ctx = &rsp->rq; + + seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n", + rq_ctx->wqe_aura, rq_ctx->substream); + seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n", + rq_ctx->cq, rq_ctx->ena_wqwd); + seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n", + rq_ctx->ipsech_ena, rq_ctx->sso_ena); + seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena); + + seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n", + rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena); + seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n", + rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching); + seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n", + rq_ctx->pb_caching, rq_ctx->sso_tt); + seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n", + rq_ctx->sso_grp, rq_ctx->lpb_aura); + seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura); + + seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n", + rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy); + seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n", + rq_ctx->xqe_imm_size, rq_ctx->later_skip); + seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n", + rq_ctx->first_skip, rq_ctx->lpb_sizem1); + seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n", + rq_ctx->spb_ena, rq_ctx->wqe_skip); + seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1); + + seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n", + rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop); + seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n", + rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop); + seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n", + rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop); + seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n", + rq_ctx->xqe_pass, rq_ctx->xqe_drop); + + seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n", + rq_ctx->qint_idx, rq_ctx->rq_int_ena); + seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n", + rq_ctx->rq_int, rq_ctx->lpb_pool_pass); + seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n", + rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass); + seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop); + + seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n", + rq_ctx->flow_tagw, rq_ctx->bad_utag); + seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n", + rq_ctx->good_utag, rq_ctx->ltag); + + seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs); + seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts); + seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs); + seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts); + seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts); +} + +/* Dumps given nix_cq's context */ +static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp) +{ + struct nix_cq_ctx_s *cq_ctx = &rsp->cq; + + seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base); + + seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr); + seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n", + cq_ctx->avg_con, 
cq_ctx->cint_idx); + seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n", + cq_ctx->cq_err, cq_ctx->qint_idx); + seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n", + cq_ctx->bpid, cq_ctx->bp_ena); + + seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n", + cq_ctx->update_time, cq_ctx->avg_level); + seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n", + cq_ctx->head, cq_ctx->tail); + + seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n", + cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int); + seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n", + cq_ctx->qsize, cq_ctx->caching); + seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n", + cq_ctx->substream, cq_ctx->ena); + seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n", + cq_ctx->drop_ena, cq_ctx->drop); + seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp); +} + +static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp, + void *unused, int ctype) +{ + void (*print_nix_ctx)(struct seq_file *filp, + struct nix_aq_enq_rsp *rsp) = NULL; + struct rvu *rvu = filp->private; + struct nix_aq_enq_req aq_req; + struct nix_aq_enq_rsp rsp; + char *ctype_string = NULL; + int qidx, rc, max_id = 0; + struct rvu_pfvf *pfvf; + int nixlf, id, all; + u16 pcifunc; + + switch (ctype) { + case NIX_AQ_CTYPE_CQ: + nixlf = rvu->rvu_dbg.nix_cq_ctx.lf; + id = rvu->rvu_dbg.nix_cq_ctx.id; + all = rvu->rvu_dbg.nix_cq_ctx.all; + break; + + case NIX_AQ_CTYPE_SQ: + nixlf = rvu->rvu_dbg.nix_sq_ctx.lf; + id = rvu->rvu_dbg.nix_sq_ctx.id; + all = rvu->rvu_dbg.nix_sq_ctx.all; + break; + + case NIX_AQ_CTYPE_RQ: + nixlf = rvu->rvu_dbg.nix_rq_ctx.lf; + id = rvu->rvu_dbg.nix_rq_ctx.id; + all = rvu->rvu_dbg.nix_rq_ctx.all; + break; + + default: + return -EINVAL; + } + + if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NIX, nixlf, &pcifunc)) + return -EINVAL; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) { + seq_puts(filp, "SQ context is not initialized\n"); + return -EINVAL; + } else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) { + seq_puts(filp, "RQ context is not initialized\n"); + return -EINVAL; + } else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) { + seq_puts(filp, "CQ context is not initialized\n"); + return -EINVAL; + } + + if (ctype == NIX_AQ_CTYPE_SQ) { + max_id = pfvf->sq_ctx->qsize; + ctype_string = "sq"; + print_nix_ctx = print_nix_sq_ctx; + } else if (ctype == NIX_AQ_CTYPE_RQ) { + max_id = pfvf->rq_ctx->qsize; + ctype_string = "rq"; + print_nix_ctx = print_nix_rq_ctx; + } else if (ctype == NIX_AQ_CTYPE_CQ) { + max_id = pfvf->cq_ctx->qsize; + ctype_string = "cq"; + print_nix_ctx = print_nix_cq_ctx; + } + + memset(&aq_req, 0, sizeof(struct nix_aq_enq_req)); + aq_req.hdr.pcifunc = pcifunc; + aq_req.ctype = ctype; + aq_req.op = NIX_AQ_INSTOP_READ; + if (all) + id = 0; + else + max_id = id + 1; + for (qidx = id; qidx < max_id; qidx++) { + aq_req.qidx = qidx; + seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n", + ctype_string, nixlf, aq_req.qidx); + rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp); + if (rc) { + seq_puts(filp, "Failed to read the context\n"); + return -EINVAL; + } + print_nix_ctx(filp, &rsp); + } + return 0; +} + +static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf, + int id, int ctype, char *ctype_string) +{ + struct rvu_pfvf *pfvf; + int max_id = 0; + u16 pcifunc; + + if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NIX, nixlf, &pcifunc)) + return -EINVAL; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + + if (ctype == 
NIX_AQ_CTYPE_SQ) { + if (!pfvf->sq_ctx) { + dev_warn(rvu->dev, "SQ context is not initialized\n"); + return -EINVAL; + } + max_id = pfvf->sq_ctx->qsize; + } else if (ctype == NIX_AQ_CTYPE_RQ) { + if (!pfvf->rq_ctx) { + dev_warn(rvu->dev, "RQ context is not initialized\n"); + return -EINVAL; + } + max_id = pfvf->rq_ctx->qsize; + } else if (ctype == NIX_AQ_CTYPE_CQ) { + if (!pfvf->cq_ctx) { + dev_warn(rvu->dev, "CQ context is not initialized\n"); + return -EINVAL; + } + max_id = pfvf->cq_ctx->qsize; + } + + if (id < 0 || id >= max_id) { + dev_warn(rvu->dev, "Invalid %s_ctx valid range 0-%d\n", + ctype_string, max_id - 1); + return -EINVAL; + } + switch (ctype) { + case NIX_AQ_CTYPE_CQ: + rvu->rvu_dbg.nix_cq_ctx.lf = nixlf; + rvu->rvu_dbg.nix_cq_ctx.id = id; + rvu->rvu_dbg.nix_cq_ctx.all = all; + break; + + case NIX_AQ_CTYPE_SQ: + rvu->rvu_dbg.nix_sq_ctx.lf = nixlf; + rvu->rvu_dbg.nix_sq_ctx.id = id; + rvu->rvu_dbg.nix_sq_ctx.all = all; + break; + + case NIX_AQ_CTYPE_RQ: + rvu->rvu_dbg.nix_rq_ctx.lf = nixlf; + rvu->rvu_dbg.nix_rq_ctx.id = id; + rvu->rvu_dbg.nix_rq_ctx.all = all; + break; + default: + return -EINVAL; + } + return 0; +} + +static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos, + int ctype) +{ + struct seq_file *m = filp->private_data; + struct rvu *rvu = m->private; + char *cmd_buf, *ctype_string; + int nixlf, id = 0, ret; + bool all = false; + + if ((*ppos != 0) || !count) + return -EINVAL; + + switch (ctype) { + case NIX_AQ_CTYPE_SQ: + ctype_string = "sq"; + break; + case NIX_AQ_CTYPE_RQ: + ctype_string = "rq"; + break; + case NIX_AQ_CTYPE_CQ: + ctype_string = "cq"; + break; + default: + return -EINVAL; + } + + cmd_buf = kzalloc(count + 1, GFP_KERNEL); + + if (!cmd_buf) + return count; + + ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer, + &nixlf, &id, &all); + if (ret < 0) { + dev_info(rvu->dev, + "Usage: echo <nixlf> [%s number/all] > %s_ctx\n", + ctype_string, ctype_string); + goto done; + } else { + ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype, + ctype_string); + } +done: + kfree(cmd_buf); + return ret ? 
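Editor's note: taken together, the *_ctx write and display handlers above implement an "echo a selector, then cat" interface. A user-space sketch of driving it from C; the debugfs mount point and the LF/queue numbers are assumptions for illustration:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        /* Assumed path: <debugfs>/octeontx2/nix/sq_ctx */
        const char *path = "/sys/kernel/debug/octeontx2/nix/sq_ctx";
        const char *sel = "0 1\n";      /* NIX LF 0, SQ 1 */
        char buf[4096];
        ssize_t n;
        int fd;

        fd = open(path, O_WRONLY);
        if (fd < 0 || write(fd, sel, strlen(sel)) < 0)
                return 1;               /* selector write failed */
        close(fd);

        fd = open(path, O_RDONLY);
        if (fd < 0)
                return 1;
        while ((n = read(fd, buf, sizeof(buf) - 1)) > 0) {
                buf[n] = '\0';
                fputs(buf, stdout);     /* dump the decoded SQ context */
        }
        close(fd);
        return 0;
}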
ret : count; +} + +static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos, + NIX_AQ_CTYPE_SQ); +} + +static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused) +{ + return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ); +} + +RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write); + +static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos, + NIX_AQ_CTYPE_RQ); +} + +static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void *unused) +{ + return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_RQ); +} + +RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write); + +static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos, + NIX_AQ_CTYPE_CQ); +} + +static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused) +{ + return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ); +} + +RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write); + +static void print_nix_qctx_qsize(struct seq_file *filp, int qsize, + unsigned long *bmap, char *qtype) +{ + char *buf; + + buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!buf) + return; + + bitmap_print_to_pagebuf(false, buf, bmap, qsize); + seq_printf(filp, "%s context count : %d\n", qtype, qsize); + seq_printf(filp, "%s context ena/dis bitmap : %s\n", + qtype, buf); + kfree(buf); +} + +static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf) +{ + if (!pfvf->cq_ctx) + seq_puts(filp, "cq context is not initialized\n"); + else + print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap, + "cq"); + + if (!pfvf->rq_ctx) + seq_puts(filp, "rq context is not initialized\n"); + else + print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap, + "rq"); + + if (!pfvf->sq_ctx) + seq_puts(filp, "sq context is not initialized\n"); + else + print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap, + "sq"); +} + +static ssize_t rvu_dbg_nix_qsize_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + return rvu_dbg_qsize_write(filp, buffer, count, ppos, + BLKTYPE_NIX); +} + +static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused) +{ + return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX); +} + +RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write); + +static void rvu_dbg_nix_init(struct rvu *rvu) +{ + const struct device *dev = &rvu->pdev->dev; + struct dentry *pfile; + + rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root); + if (!rvu->rvu_dbg.nix) { + dev_err(rvu->dev, "create debugfs dir failed for nix\n"); + return; + } + + pfile = debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, rvu, + &rvu_dbg_nix_sq_ctx_fops); + if (!pfile) + goto create_failed; + + pfile = debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, rvu, + &rvu_dbg_nix_rq_ctx_fops); + if (!pfile) + goto create_failed; + + pfile = debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, rvu, + &rvu_dbg_nix_cq_ctx_fops); + if (!pfile) + goto create_failed; + + pfile = debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, rvu, + &rvu_dbg_nix_ndc_tx_cache_fops); + if (!pfile) + goto create_failed; + + pfile = 
debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, rvu, + &rvu_dbg_nix_ndc_rx_cache_fops); + if (!pfile) + goto create_failed; + + pfile = debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, + rvu, &rvu_dbg_nix_ndc_tx_hits_miss_fops); + if (!pfile) + goto create_failed; + + pfile = debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, + rvu, &rvu_dbg_nix_ndc_rx_hits_miss_fops); + if (!pfile) + goto create_failed; + + pfile = debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu, + &rvu_dbg_nix_qsize_fops); + if (!pfile) + goto create_failed; + + return; +create_failed: + dev_err(dev, "Failed to create debugfs dir/file for NIX\n"); + debugfs_remove_recursive(rvu->rvu_dbg.nix); +} + +static void rvu_dbg_npa_init(struct rvu *rvu) +{ + const struct device *dev = &rvu->pdev->dev; + struct dentry *pfile; + + rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root); + if (!rvu->rvu_dbg.npa) + return; + + pfile = debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu, + &rvu_dbg_npa_qsize_fops); + if (!pfile) + goto create_failed; + + pfile = debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu, + &rvu_dbg_npa_aura_ctx_fops); + if (!pfile) + goto create_failed; + + pfile = debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu, + &rvu_dbg_npa_pool_ctx_fops); + if (!pfile) + goto create_failed; + + pfile = debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu, + &rvu_dbg_npa_ndc_cache_fops); + if (!pfile) + goto create_failed; + + pfile = debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, + rvu, &rvu_dbg_npa_ndc_hits_miss_fops); + if (!pfile) + goto create_failed; + + return; + +create_failed: + dev_err(dev, "Failed to create debugfs dir/file for NPA\n"); + debugfs_remove_recursive(rvu->rvu_dbg.npa); +} + +#define PRINT_CGX_CUML_NIXRX_STATUS(idx, name) \ + ({ \ + u64 cnt; \ + err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \ + NIX_STATS_RX, &(cnt)); \ + if (!err) \ + seq_printf(s, "%s: %llu\n", name, cnt); \ + cnt; \ + }) + +#define PRINT_CGX_CUML_NIXTX_STATUS(idx, name) \ + ({ \ + u64 cnt; \ + err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \ + NIX_STATS_TX, &(cnt)); \ + if (!err) \ + seq_printf(s, "%s: %llu\n", name, cnt); \ + cnt; \ + }) + +static int cgx_print_stats(struct seq_file *s, int lmac_id) +{ + struct cgx_link_user_info linfo; + void *cgxd = s->private; + u64 ucast, mcast, bcast; + int stat = 0, err = 0; + u64 tx_stat, rx_stat; + struct rvu *rvu; + + rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM, + PCI_DEVID_OCTEONTX2_RVU_AF, NULL)); + if (!rvu) + return -ENODEV; + + /* Link status */ + seq_puts(s, "\n=======Link Status======\n\n"); + err = cgx_get_link_info(cgxd, lmac_id, &linfo); + if (err) + seq_puts(s, "Failed to read link status\n"); + seq_printf(s, "\nLink is %s %d Mbps\n\n", + linfo.link_up ? 
"UP" : "DOWN", linfo.speed); + + /* Rx stats */ + seq_puts(s, "\n=======NIX RX_STATS(CGX port level)======\n\n"); + ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames"); + if (err) + return err; + mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames"); + if (err) + return err; + bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames"); + if (err) + return err; + seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast); + PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes"); + if (err) + return err; + PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops"); + if (err) + return err; + PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors"); + if (err) + return err; + + /* Tx stats */ + seq_puts(s, "\n=======NIX TX_STATS(CGX port level)======\n\n"); + ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames"); + if (err) + return err; + mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames"); + if (err) + return err; + bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames"); + if (err) + return err; + seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast); + PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes"); + if (err) + return err; + PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops"); + if (err) + return err; + + /* Rx stats */ + seq_puts(s, "\n=======CGX RX_STATS======\n\n"); + while (stat < CGX_RX_STATS_COUNT) { + err = cgx_get_rx_stats(cgxd, lmac_id, stat, &rx_stat); + if (err) + return err; + seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat], rx_stat); + stat++; + } + + /* Tx stats */ + stat = 0; + seq_puts(s, "\n=======CGX TX_STATS======\n\n"); + while (stat < CGX_TX_STATS_COUNT) { + err = cgx_get_tx_stats(cgxd, lmac_id, stat, &tx_stat); + if (err) + return err; + seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat], tx_stat); + stat++; + } + + return err; +} + +static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused) +{ + struct dentry *current_dir; + int err, lmac_id; + char *buf; + + current_dir = filp->file->f_path.dentry->d_parent; + buf = strrchr(current_dir->d_name.name, 'c'); + if (!buf) + return -EINVAL; + + err = kstrtoint(buf + 1, 10, &lmac_id); + if (!err) { + err = cgx_print_stats(filp, lmac_id); + if (err) + return err; + } + return err; +} + +RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL); + +static void rvu_dbg_cgx_init(struct rvu *rvu) +{ + const struct device *dev = &rvu->pdev->dev; + struct dentry *pfile; + int i, lmac_id; + char dname[20]; + void *cgx; + + rvu->rvu_dbg.cgx_root = debugfs_create_dir("cgx", rvu->rvu_dbg.root); + + for (i = 0; i < cgx_get_cgxcnt_max(); i++) { + cgx = rvu_cgx_pdata(i, rvu); + if (!cgx) + continue; + /* cgx debugfs dir */ + sprintf(dname, "cgx%d", i); + rvu->rvu_dbg.cgx = debugfs_create_dir(dname, + rvu->rvu_dbg.cgx_root); + for (lmac_id = 0; lmac_id < cgx_get_lmac_cnt(cgx); lmac_id++) { + /* lmac debugfs dir */ + sprintf(dname, "lmac%d", lmac_id); + rvu->rvu_dbg.lmac = + debugfs_create_dir(dname, rvu->rvu_dbg.cgx); + + pfile = debugfs_create_file("stats", 0600, + rvu->rvu_dbg.lmac, cgx, + &rvu_dbg_cgx_stat_fops); + if (!pfile) + goto create_failed; + } + } + return; + +create_failed: + dev_err(dev, "Failed to create debugfs dir/file for CGX\n"); + debugfs_remove_recursive(rvu->rvu_dbg.cgx_root); +} + +/* NPC debugfs APIs */ +static void rvu_print_npc_mcam_info(struct seq_file *s, + u16 pcifunc, int blkaddr) +{ + struct rvu *rvu = s->private; + int entry_acnt, entry_ecnt; + int cntr_acnt, cntr_ecnt; + + /* Skip PF0 */ + if (!pcifunc) + return; + 
rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr, + &entry_acnt, &entry_ecnt); + rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr, + &cntr_acnt, &cntr_ecnt); + if (!entry_acnt && !cntr_acnt) + return; + + if (!(pcifunc & RVU_PFVF_FUNC_MASK)) + seq_printf(s, "\n\t\t Device \t\t: PF%d\n", + rvu_get_pf(pcifunc)); + else + seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n", + rvu_get_pf(pcifunc), + (pcifunc & RVU_PFVF_FUNC_MASK) - 1); + + if (entry_acnt) { + seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt); + seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt); + } + if (cntr_acnt) { + seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt); + seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt); + } +} + +static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued) +{ + struct rvu *rvu = filp->private; + int pf, vf, numvfs, blkaddr; + struct npc_mcam *mcam; + u16 pcifunc; + u64 cfg; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return -ENODEV; + + mcam = &rvu->hw->mcam; + + seq_puts(filp, "\nNPC MCAM info:\n"); + /* MCAM keywidth on receive and transmit sides */ + cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX)); + cfg = (cfg >> 32) & 0x07; + seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ? + "112bits" : ((cfg == NPC_MCAM_KEY_X2) ? + "224bits" : "448bits")); + cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX)); + cfg = (cfg >> 32) & 0x07; + seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ? + "112bits" : ((cfg == NPC_MCAM_KEY_X2) ? + "224bits" : "448bits")); + + mutex_lock(&mcam->lock); + /* MCAM entries */ + seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries); + seq_printf(filp, "\t\t Reserved \t: %d\n", + mcam->total_entries - mcam->bmap_entries); + seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt); + + /* MCAM counters */ + cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST); + cfg = (cfg >> 48) & 0xFFFF; + seq_printf(filp, "\n\t\t MCAM counters \t: %lld\n", cfg); + seq_printf(filp, "\t\t Reserved \t: %lld\n", cfg - mcam->counters.max); + seq_printf(filp, "\t\t Available \t: %d\n", + rvu_rsrc_free_count(&mcam->counters)); + + if (mcam->bmap_entries == mcam->bmap_fcnt) { + mutex_unlock(&mcam->lock); + return 0; + } + + seq_puts(filp, "\n\t\t Current allocation\n"); + seq_puts(filp, "\t\t====================\n"); + for (pf = 0; pf < rvu->hw->total_pfs; pf++) { + pcifunc = (pf << RVU_PFVF_PF_SHIFT); + rvu_print_npc_mcam_info(filp, pcifunc, blkaddr); + + cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); + numvfs = (cfg >> 12) & 0xFF; + for (vf = 0; vf < numvfs; vf++) { + pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1); + rvu_print_npc_mcam_info(filp, pcifunc, blkaddr); + } + } + + mutex_unlock(&mcam->lock); + return 0; +} + +RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL); + +static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp, + void *unused) +{ + struct rvu *rvu = filp->private; + struct npc_mcam *mcam; + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return -ENODEV; + + mcam = &rvu->hw->mcam; + + seq_puts(filp, "\nNPC MCAM RX miss action stats\n"); + seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr, + rvu_read64(rvu, blkaddr, + NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr))); + + return 0; +} + +RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL); + +static void rvu_dbg_npc_init(struct rvu *rvu) +{ + 
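Editor's note: rvu_dbg_npc_mcam_info_display() above derives the RX/TX MCAM key width from bits <34:32> of NPC_AF_INTFX_KEX_CFG and maps X1/X2 to 112/224 bits, anything else to 448 bits. A minimal decode helper matching that mapping; the enum values are assumed to follow the usual 0/1/2 ordering:

#include <stdio.h>
#include <stdint.h>

enum { NPC_MCAM_KEY_X1, NPC_MCAM_KEY_X2, NPC_MCAM_KEY_X4 };     /* assumed */

/* Key width field lives at bits <34:32> of the KEX_CFG register value. */
static const char *mcam_keywidth(uint64_t kex_cfg)
{
        uint64_t w = (kex_cfg >> 32) & 0x07;

        if (w == NPC_MCAM_KEY_X1)
                return "112bits";
        return (w == NPC_MCAM_KEY_X2) ? "224bits" : "448bits";
}

int main(void)
{
        printf("%s\n", mcam_keywidth(1ULL << 32));      /* X2 -> 224bits */
        return 0;
}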
const struct device *dev = &rvu->pdev->dev; + struct dentry *pfile; + + rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root); + if (!rvu->rvu_dbg.npc) + return; + + pfile = debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, + rvu, &rvu_dbg_npc_mcam_info_fops); + if (!pfile) + goto create_failed; + + pfile = debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, + rvu, &rvu_dbg_npc_rx_miss_act_fops); + if (!pfile) + goto create_failed; + + return; + +create_failed: + dev_err(dev, "Failed to create debugfs dir/file for NPC\n"); + debugfs_remove_recursive(rvu->rvu_dbg.npc); +} + +void rvu_dbg_init(struct rvu *rvu) +{ + struct device *dev = &rvu->pdev->dev; + struct dentry *pfile; + + rvu->rvu_dbg.root = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL); + if (!rvu->rvu_dbg.root) { + dev_err(rvu->dev, "%s failed\n", __func__); + return; + } + pfile = debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu, + &rvu_dbg_rsrc_status_fops); + if (!pfile) + goto create_failed; + + rvu_dbg_npa_init(rvu); + rvu_dbg_nix_init(rvu); + rvu_dbg_cgx_init(rvu); + rvu_dbg_npc_init(rvu); + + return; + +create_failed: + dev_err(dev, "Failed to create debugfs dir\n"); + debugfs_remove_recursive(rvu->rvu_dbg.root); +} + +void rvu_dbg_exit(struct rvu *rvu) +{ + debugfs_remove_recursive(rvu->rvu_dbg.root); +} + +#endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c new file mode 100644 index 000000000..e549b09c3 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -0,0 +1,3450 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/pci.h> + +#include "rvu_struct.h" +#include "rvu_reg.h" +#include "rvu.h" +#include "npc.h" +#include "cgx.h" + +static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, + int type, int chan_id); + +enum mc_tbl_sz { + MC_TBL_SZ_256, + MC_TBL_SZ_512, + MC_TBL_SZ_1K, + MC_TBL_SZ_2K, + MC_TBL_SZ_4K, + MC_TBL_SZ_8K, + MC_TBL_SZ_16K, + MC_TBL_SZ_32K, + MC_TBL_SZ_64K, +}; + +enum mc_buf_cnt { + MC_BUF_CNT_8, + MC_BUF_CNT_16, + MC_BUF_CNT_32, + MC_BUF_CNT_64, + MC_BUF_CNT_128, + MC_BUF_CNT_256, + MC_BUF_CNT_512, + MC_BUF_CNT_1024, + MC_BUF_CNT_2048, +}; + +enum nix_makr_fmt_indexes { + NIX_MARK_CFG_IP_DSCP_RED, + NIX_MARK_CFG_IP_DSCP_YELLOW, + NIX_MARK_CFG_IP_DSCP_YELLOW_RED, + NIX_MARK_CFG_IP_ECN_RED, + NIX_MARK_CFG_IP_ECN_YELLOW, + NIX_MARK_CFG_IP_ECN_YELLOW_RED, + NIX_MARK_CFG_VLAN_DEI_RED, + NIX_MARK_CFG_VLAN_DEI_YELLOW, + NIX_MARK_CFG_VLAN_DEI_YELLOW_RED, + NIX_MARK_CFG_MAX, +}; + +/* For now considering MC resources needed for broadcast + * pkt replication only. i.e 256 HWVFs + 12 PFs. 
+ */ +#define MC_TBL_SIZE MC_TBL_SZ_512 +#define MC_BUF_CNT MC_BUF_CNT_128 + +struct mce { + struct hlist_node node; + u16 pcifunc; +}; + +bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (!pfvf->nixlf || blkaddr < 0) + return false; + return true; +} + +int rvu_get_nixlf_count(struct rvu *rvu) +{ + struct rvu_block *block; + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); + if (blkaddr < 0) + return 0; + block = &rvu->hw->block[blkaddr]; + return block->lf.max; +} + +int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + struct rvu_hwinfo *hw = rvu->hw; + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (!pfvf->nixlf || blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + *nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); + if (*nixlf < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + if (nix_blkaddr) + *nix_blkaddr = blkaddr; + + return 0; +} + +static void nix_mce_list_init(struct nix_mce_list *list, int max) +{ + INIT_HLIST_HEAD(&list->head); + list->count = 0; + list->max = max; +} + +static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count) +{ + int idx; + + if (!mcast) + return 0; + + idx = mcast->next_free_mce; + mcast->next_free_mce += count; + return idx; +} + +static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr) +{ + if (blkaddr == BLKADDR_NIX0 && hw->nix0) + return hw->nix0; + + return NULL; +} + +static void nix_rx_sync(struct rvu *rvu, int blkaddr) +{ + int err; + + /*Sync all in flight RX packets to LLC/DRAM */ + rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0)); + err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true); + if (err) + dev_err(rvu->dev, "NIX RX software sync failed\n"); +} + +static bool is_valid_txschq(struct rvu *rvu, int blkaddr, + int lvl, u16 pcifunc, u16 schq) +{ + struct rvu_hwinfo *hw = rvu->hw; + struct nix_txsch *txsch; + struct nix_hw *nix_hw; + u16 map_func; + + nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) + return false; + + txsch = &nix_hw->txsch[lvl]; + /* Check out of bounds */ + if (schq >= txsch->schq.max) + return false; + + mutex_lock(&rvu->rsrc_lock); + map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]); + mutex_unlock(&rvu->rsrc_lock); + + /* TLs aggegating traffic are shared across PF and VFs */ + if (lvl >= hw->cap.nix_tx_aggr_lvl) { + if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc)) + return false; + else + return true; + } + + if (map_func != pcifunc) + return false; + + return true; +} + +static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + int pkind, pf, vf, lbkid; + u8 cgx_id, lmac_id; + int err; + + pf = rvu_get_pf(pcifunc); + if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK) + return 0; + + switch (type) { + case NIX_INTF_TYPE_CGX: + pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf]; + rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id); + + pkind = rvu_npc_get_pkind(rvu, pf); + if (pkind < 0) { + dev_err(rvu->dev, + "PF_Func 0x%x: Invalid pkind\n", pcifunc); + return -EINVAL; + } + pfvf->rx_chan_base = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0); + pfvf->tx_chan_base = pfvf->rx_chan_base; + pfvf->rx_chan_cnt = 1; + pfvf->tx_chan_cnt = 1; + cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind); + rvu_npc_set_pkind(rvu, 
pkind, pfvf); + + /* By default we enable pause frames */ + if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0) + cgx_lmac_set_pause_frm(rvu_cgx_pdata(cgx_id, rvu), + lmac_id, true, true); + break; + case NIX_INTF_TYPE_LBK: + vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1; + + /* If NIX1 block is present on the silicon then NIXes are + * assigned alternatively for lbk interfaces. NIX0 should + * send packets on lbk link 1 channels and NIX1 should send + * on lbk link 0 channels for the communication between + * NIX0 and NIX1. + */ + lbkid = 0; + if (rvu->hw->lbk_links > 1) + lbkid = vf & 0x1 ? 0 : 1; + + /* Note that AF's VFs work in pairs and talk over consecutive + * loopback channels.Therefore if odd number of AF VFs are + * enabled then the last VF remains with no pair. + */ + pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(lbkid, vf); + pfvf->tx_chan_base = vf & 0x1 ? + NIX_CHAN_LBK_CHX(lbkid, vf - 1) : + NIX_CHAN_LBK_CHX(lbkid, vf + 1); + pfvf->rx_chan_cnt = 1; + pfvf->tx_chan_cnt = 1; + rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, + pfvf->rx_chan_base, false); + break; + } + + /* Add a UCAST forwarding rule in MCAM with this NIXLF attached + * RVU PF/VF's MAC address. + */ + rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf, + pfvf->rx_chan_base, pfvf->mac_addr); + + /* Add this PF_FUNC to bcast pkt replication list */ + err = nix_update_bcast_mce_list(rvu, pcifunc, true); + if (err) { + dev_err(rvu->dev, + "Bcast list, failed to enable PF_FUNC 0x%x\n", + pcifunc); + return err; + } + + rvu_npc_install_bcast_match_entry(rvu, pcifunc, + nixlf, pfvf->rx_chan_base); + pfvf->maxlen = NIC_HW_MIN_FRS; + pfvf->minlen = NIC_HW_MIN_FRS; + + return 0; +} + +static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + int err; + + pfvf->maxlen = 0; + pfvf->minlen = 0; + pfvf->rxvlan = false; + + /* Remove this PF_FUNC from bcast pkt replication list */ + err = nix_update_bcast_mce_list(rvu, pcifunc, false); + if (err) { + dev_err(rvu->dev, + "Bcast list, failed to disable PF_FUNC 0x%x\n", + pcifunc); + } + + /* Free and disable any MCAM entries used by this NIX LF */ + rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); +} + +int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, + struct nix_bp_cfg_req *req, + struct msg_rsp *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; + struct rvu_pfvf *pfvf; + int blkaddr, pf, type; + u16 chan_base, chan; + u64 cfg; + + pf = rvu_get_pf(pcifunc); + type = is_afvf(pcifunc) ? 
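Editor's note: nix_interface_init() above pairs AF VFs over the loopback (LBK) interface: VF N transmits towards the RX channel of its neighbour, so even/odd VFs form pairs and an odd total leaves the last VF unpaired. The peer selection reduces to flipping the low bit; a tiny sketch (channel numbering itself is hardware-specific and omitted):

#include <stdio.h>

/* AF VFs work in pairs over LBK: 0<->1, 2<->3, ... */
static int lbk_peer_vf(int vf)
{
        return vf ^ 1;
}

int main(void)
{
        int vf;

        for (vf = 0; vf < 4; vf++)
                printf("VF%d transmits towards VF%d\n", vf, lbk_peer_vf(vf));
        return 0;
}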
NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; + if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK) + return 0; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + + chan_base = pfvf->rx_chan_base + req->chan_base; + for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) { + cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan)); + rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan), + cfg & ~BIT_ULL(16)); + } + return 0; +} + +static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, + int type, int chan_id) +{ + int bpid, blkaddr, lmac_chan_cnt; + struct rvu_hwinfo *hw = rvu->hw; + u16 cgx_bpid_cnt, lbk_bpid_cnt; + struct rvu_pfvf *pfvf; + u8 cgx_id, lmac_id; + u64 cfg; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc); + cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST); + lmac_chan_cnt = cfg & 0xFF; + + cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt; + lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF); + + pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); + + /* Backpressure IDs range division + * CGX channles are mapped to (0 - 191) BPIDs + * LBK channles are mapped to (192 - 255) BPIDs + * SDP channles are mapped to (256 - 511) BPIDs + * + * Lmac channles and bpids mapped as follows + * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15) + * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) .... + * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) .... + */ + switch (type) { + case NIX_INTF_TYPE_CGX: + if ((req->chan_base + req->chan_cnt) > 15) + return -EINVAL; + rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id); + /* Assign bpid based on cgx, lmac and chan id */ + bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) + + (lmac_id * lmac_chan_cnt) + req->chan_base; + + if (req->bpid_per_chan) + bpid += chan_id; + if (bpid > cgx_bpid_cnt) + return -EINVAL; + break; + + case NIX_INTF_TYPE_LBK: + if ((req->chan_base + req->chan_cnt) > 63) + return -EINVAL; + bpid = cgx_bpid_cnt + req->chan_base; + if (req->bpid_per_chan) + bpid += chan_id; + if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt)) + return -EINVAL; + break; + default: + return -EINVAL; + } + return bpid; +} + +int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, + struct nix_bp_cfg_req *req, + struct nix_bp_cfg_rsp *rsp) +{ + int blkaddr, pf, type, chan_id = 0; + u16 pcifunc = req->hdr.pcifunc; + struct rvu_pfvf *pfvf; + u16 chan_base, chan; + s16 bpid, bpid_base; + u64 cfg; + + pf = rvu_get_pf(pcifunc); + type = is_afvf(pcifunc) ? 
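Editor's note: rvu_nix_get_bpid() above lays backpressure IDs out linearly: each CGX gets lmac_per_cgx * lmac_chan_cnt IDs, each LMAC a contiguous block of lmac_chan_cnt, and LBK IDs follow the CGX range. A sketch of that index arithmetic; the per-silicon counts below are illustrative stand-ins for what the driver reads from NIX_AF_CONST:

#include <stdio.h>

/* Illustrative counts; the real values come from NIX_AF_CONST and hw caps. */
#define LMAC_PER_CGX   4
#define LMAC_CHAN_CNT  16

static int cgx_chan_to_bpid(int cgx_id, int lmac_id, int chan)
{
        return cgx_id * LMAC_PER_CGX * LMAC_CHAN_CNT +
               lmac_id * LMAC_CHAN_CNT + chan;
}

int main(void)
{
        /* cgx(1)_lmac(0)_chan(0) -> bpid 64, matching the comment above */
        printf("bpid = %d\n", cgx_chan_to_bpid(1, 0, 0));
        return 0;
}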
NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; + + /* Enable backpressure only for CGX mapped PFs and LBK interface */ + if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK) + return 0; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + + bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id); + chan_base = pfvf->rx_chan_base + req->chan_base; + bpid = bpid_base; + + for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) { + if (bpid < 0) { + dev_warn(rvu->dev, "Fail to enable backpressure\n"); + return -EINVAL; + } + + cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan)); + rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan), + cfg | (bpid & 0xFF) | BIT_ULL(16)); + chan_id++; + bpid = rvu_nix_get_bpid(rvu, req, type, chan_id); + } + + for (chan = 0; chan < req->chan_cnt; chan++) { + /* Map channel and bpid assign to it */ + rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 | + (bpid_base & 0x3FF); + if (req->bpid_per_chan) + bpid_base++; + } + rsp->chan_cnt = req->chan_cnt; + + return 0; +} + +static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr, + u64 format, bool v4, u64 *fidx) +{ + struct nix_lso_format field = {0}; + + /* IP's Length field */ + field.layer = NIX_TXLAYER_OL3; + /* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */ + field.offset = v4 ? 2 : 4; + field.sizem1 = 1; /* i.e 2 bytes */ + field.alg = NIX_LSOALG_ADD_PAYLEN; + rvu_write64(rvu, blkaddr, + NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), + *(u64 *)&field); + + /* No ID field in IPv6 header */ + if (!v4) + return; + + /* IP's ID field */ + field.layer = NIX_TXLAYER_OL3; + field.offset = 4; + field.sizem1 = 1; /* i.e 2 bytes */ + field.alg = NIX_LSOALG_ADD_SEGNUM; + rvu_write64(rvu, blkaddr, + NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), + *(u64 *)&field); +} + +static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr, + u64 format, u64 *fidx) +{ + struct nix_lso_format field = {0}; + + /* TCP's sequence number field */ + field.layer = NIX_TXLAYER_OL4; + field.offset = 4; + field.sizem1 = 3; /* i.e 4 bytes */ + field.alg = NIX_LSOALG_ADD_OFFSET; + rvu_write64(rvu, blkaddr, + NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), + *(u64 *)&field); + + /* TCP's flags field */ + field.layer = NIX_TXLAYER_OL4; + field.offset = 12; + field.sizem1 = 1; /* 2 bytes */ + field.alg = NIX_LSOALG_TCP_FLAGS; + rvu_write64(rvu, blkaddr, + NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), + *(u64 *)&field); +} + +static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) +{ + u64 cfg, idx, fidx = 0; + + /* Get max HW supported format indices */ + cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF; + nix_hw->lso.total = cfg; + + /* Enable LSO */ + cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG); + /* For TSO, set first and middle segment flags to + * mask out PSH, RST & FIN flags in TCP packet + */ + cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16)); + cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16); + rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63)); + + /* Setup default static LSO formats + * + * Configure format fields for TCPv4 segmentation offload + */ + idx = NIX_LSO_FORMAT_IDX_TSOV4; + nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx); + nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx); + + /* Set rest of the fields to NOP */ + for (; fidx < 8; fidx++) { + rvu_write64(rvu, blkaddr, + NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL); + } + nix_hw->lso.in_use++; + + /* Configure format fields for TCPv6 
segmentation offload */ + idx = NIX_LSO_FORMAT_IDX_TSOV6; + fidx = 0; + nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx); + nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx); + + /* Set rest of the fields to NOP */ + for (; fidx < 8; fidx++) { + rvu_write64(rvu, blkaddr, + NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL); + } + nix_hw->lso.in_use++; +} + +static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf) +{ + kfree(pfvf->rq_bmap); + kfree(pfvf->sq_bmap); + kfree(pfvf->cq_bmap); + if (pfvf->rq_ctx) + qmem_free(rvu->dev, pfvf->rq_ctx); + if (pfvf->sq_ctx) + qmem_free(rvu->dev, pfvf->sq_ctx); + if (pfvf->cq_ctx) + qmem_free(rvu->dev, pfvf->cq_ctx); + if (pfvf->rss_ctx) + qmem_free(rvu->dev, pfvf->rss_ctx); + if (pfvf->nix_qints_ctx) + qmem_free(rvu->dev, pfvf->nix_qints_ctx); + if (pfvf->cq_ints_ctx) + qmem_free(rvu->dev, pfvf->cq_ints_ctx); + + pfvf->rq_bmap = NULL; + pfvf->cq_bmap = NULL; + pfvf->sq_bmap = NULL; + pfvf->rq_ctx = NULL; + pfvf->sq_ctx = NULL; + pfvf->cq_ctx = NULL; + pfvf->rss_ctx = NULL; + pfvf->nix_qints_ctx = NULL; + pfvf->cq_ints_ctx = NULL; +} + +static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr, + struct rvu_pfvf *pfvf, int nixlf, + int rss_sz, int rss_grps, int hwctx_size, + u64 way_mask) +{ + int err, grp, num_indices; + + /* RSS is not requested for this NIXLF */ + if (!rss_sz) + return 0; + num_indices = rss_sz * rss_grps; + + /* Alloc NIX RSS HW context memory and config the base */ + err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size); + if (err) + return err; + + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf), + (u64)pfvf->rss_ctx->iova); + + /* Config full RSS table size, enable RSS and caching */ + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), + BIT_ULL(36) | BIT_ULL(4) | + ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) | + way_mask << 20); + /* Config RSS group offset and sizes */ + for (grp = 0; grp < rss_grps; grp++) + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp), + ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp)); + return 0; +} + +static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block, + struct nix_aq_inst_s *inst) +{ + struct admin_queue *aq = block->aq; + struct nix_aq_res_s *result; + int timeout = 1000; + u64 reg, head; + + result = (struct nix_aq_res_s *)aq->res->base; + + /* Get current head pointer where to append this instruction */ + reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS); + head = (reg >> 4) & AQ_PTR_MASK; + + memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)), + (void *)inst, aq->inst->entry_sz); + memset(result, 0, sizeof(*result)); + /* sync into memory */ + wmb(); + + /* Ring the doorbell and wait for result */ + rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1); + while (result->compcode == NIX_AQ_COMP_NOTDONE) { + cpu_relax(); + udelay(1); + timeout--; + if (!timeout) + return -EBUSY; + } + + if (result->compcode != NIX_AQ_COMP_GOOD) + /* TODO: Replace this with some error code */ + return -EBUSY; + + return 0; +} + +static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, + struct nix_aq_enq_rsp *rsp) +{ + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc = req->hdr.pcifunc; + int nixlf, blkaddr, rc = 0; + struct nix_aq_inst_s inst; + struct rvu_block *block; + struct admin_queue *aq; + struct rvu_pfvf *pfvf; + void *ctx, *mask; + bool ena; + u64 cfg; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + block = &hw->block[blkaddr]; + aq = block->aq; + if (!aq) { + 
dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__); + return NIX_AF_ERR_AQ_ENQUEUE; + } + + pfvf = rvu_get_pfvf(rvu, pcifunc); + nixlf = rvu_get_lf(rvu, block, pcifunc, 0); + + /* Skip NIXLF check for broadcast MCE entry init */ + if (!(!rsp && req->ctype == NIX_AQ_CTYPE_MCE)) { + if (!pfvf->nixlf || nixlf < 0) + return NIX_AF_ERR_AF_LF_INVALID; + } + + switch (req->ctype) { + case NIX_AQ_CTYPE_RQ: + /* Check if index exceeds max no of queues */ + if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize) + rc = NIX_AF_ERR_AQ_ENQUEUE; + break; + case NIX_AQ_CTYPE_SQ: + if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize) + rc = NIX_AF_ERR_AQ_ENQUEUE; + break; + case NIX_AQ_CTYPE_CQ: + if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize) + rc = NIX_AF_ERR_AQ_ENQUEUE; + break; + case NIX_AQ_CTYPE_RSS: + /* Check if RSS is enabled and qidx is within range */ + cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf)); + if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx || + (req->qidx >= (256UL << (cfg & 0xF)))) + rc = NIX_AF_ERR_AQ_ENQUEUE; + break; + case NIX_AQ_CTYPE_MCE: + cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG); + /* Check if index exceeds MCE list length */ + if (!hw->nix0->mcast.mce_ctx || + (req->qidx >= (256UL << (cfg & 0xF)))) + rc = NIX_AF_ERR_AQ_ENQUEUE; + + /* Adding multicast lists for requests from PF/VFs is not + * yet supported, so ignore this. + */ + if (rsp) + rc = NIX_AF_ERR_AQ_ENQUEUE; + break; + default: + rc = NIX_AF_ERR_AQ_ENQUEUE; + } + + if (rc) + return rc; + + /* Check if SQ pointed SMQ belongs to this PF/VF or not */ + if (req->ctype == NIX_AQ_CTYPE_SQ && + ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) || + (req->op == NIX_AQ_INSTOP_WRITE && + req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) { + if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ, + pcifunc, req->sq.smq)) + return NIX_AF_ERR_AQ_ENQUEUE; + } + + memset(&inst, 0, sizeof(struct nix_aq_inst_s)); + inst.lf = nixlf; + inst.cindex = req->qidx; + inst.ctype = req->ctype; + inst.op = req->op; + /* Currently we are not supporting enqueuing multiple instructions, + * so always choose first entry in result memory. + */ + inst.res_addr = (u64)aq->res->iova; + + /* Hardware uses same aq->res->base for updating result of + * previous instruction hence wait here till it is done. 
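+ * aq->lock below serializes all users of this shared result/context/mask area.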
+ */ + spin_lock(&aq->lock); + + /* Clean result + context memory */ + memset(aq->res->base, 0, aq->res->entry_sz); + /* Context needs to be written at RES_ADDR + 128 */ + ctx = aq->res->base + 128; + /* Mask needs to be written at RES_ADDR + 256 */ + mask = aq->res->base + 256; + + switch (req->op) { + case NIX_AQ_INSTOP_WRITE: + if (req->ctype == NIX_AQ_CTYPE_RQ) + memcpy(mask, &req->rq_mask, + sizeof(struct nix_rq_ctx_s)); + else if (req->ctype == NIX_AQ_CTYPE_SQ) + memcpy(mask, &req->sq_mask, + sizeof(struct nix_sq_ctx_s)); + else if (req->ctype == NIX_AQ_CTYPE_CQ) + memcpy(mask, &req->cq_mask, + sizeof(struct nix_cq_ctx_s)); + else if (req->ctype == NIX_AQ_CTYPE_RSS) + memcpy(mask, &req->rss_mask, + sizeof(struct nix_rsse_s)); + else if (req->ctype == NIX_AQ_CTYPE_MCE) + memcpy(mask, &req->mce_mask, + sizeof(struct nix_rx_mce_s)); + fallthrough; + case NIX_AQ_INSTOP_INIT: + if (req->ctype == NIX_AQ_CTYPE_RQ) + memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s)); + else if (req->ctype == NIX_AQ_CTYPE_SQ) + memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s)); + else if (req->ctype == NIX_AQ_CTYPE_CQ) + memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s)); + else if (req->ctype == NIX_AQ_CTYPE_RSS) + memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s)); + else if (req->ctype == NIX_AQ_CTYPE_MCE) + memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s)); + break; + case NIX_AQ_INSTOP_NOP: + case NIX_AQ_INSTOP_READ: + case NIX_AQ_INSTOP_LOCK: + case NIX_AQ_INSTOP_UNLOCK: + break; + default: + rc = NIX_AF_ERR_AQ_ENQUEUE; + spin_unlock(&aq->lock); + return rc; + } + + /* Submit the instruction to AQ */ + rc = nix_aq_enqueue_wait(rvu, block, &inst); + if (rc) { + spin_unlock(&aq->lock); + return rc; + } + + /* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */ + if (req->op == NIX_AQ_INSTOP_INIT) { + if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena) + __set_bit(req->qidx, pfvf->rq_bmap); + if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena) + __set_bit(req->qidx, pfvf->sq_bmap); + if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena) + __set_bit(req->qidx, pfvf->cq_bmap); + } + + if (req->op == NIX_AQ_INSTOP_WRITE) { + if (req->ctype == NIX_AQ_CTYPE_RQ) { + ena = (req->rq.ena & req->rq_mask.ena) | + (test_bit(req->qidx, pfvf->rq_bmap) & + ~req->rq_mask.ena); + if (ena) + __set_bit(req->qidx, pfvf->rq_bmap); + else + __clear_bit(req->qidx, pfvf->rq_bmap); + } + if (req->ctype == NIX_AQ_CTYPE_SQ) { + ena = (req->rq.ena & req->sq_mask.ena) | + (test_bit(req->qidx, pfvf->sq_bmap) & + ~req->sq_mask.ena); + if (ena) + __set_bit(req->qidx, pfvf->sq_bmap); + else + __clear_bit(req->qidx, pfvf->sq_bmap); + } + if (req->ctype == NIX_AQ_CTYPE_CQ) { + ena = (req->rq.ena & req->cq_mask.ena) | + (test_bit(req->qidx, pfvf->cq_bmap) & + ~req->cq_mask.ena); + if (ena) + __set_bit(req->qidx, pfvf->cq_bmap); + else + __clear_bit(req->qidx, pfvf->cq_bmap); + } + } + + if (rsp) { + /* Copy read context into mailbox */ + if (req->op == NIX_AQ_INSTOP_READ) { + if (req->ctype == NIX_AQ_CTYPE_RQ) + memcpy(&rsp->rq, ctx, + sizeof(struct nix_rq_ctx_s)); + else if (req->ctype == NIX_AQ_CTYPE_SQ) + memcpy(&rsp->sq, ctx, + sizeof(struct nix_sq_ctx_s)); + else if (req->ctype == NIX_AQ_CTYPE_CQ) + memcpy(&rsp->cq, ctx, + sizeof(struct nix_cq_ctx_s)); + else if (req->ctype == NIX_AQ_CTYPE_RSS) + memcpy(&rsp->rss, ctx, + sizeof(struct nix_rsse_s)); + else if (req->ctype == NIX_AQ_CTYPE_MCE) + memcpy(&rsp->mce, ctx, + sizeof(struct nix_rx_mce_s)); + } + } + + spin_unlock(&aq->lock); + return 0; +} + +static const char 
*nix_get_ctx_name(int ctype) +{ + switch (ctype) { + case NIX_AQ_CTYPE_CQ: + return "CQ"; + case NIX_AQ_CTYPE_SQ: + return "SQ"; + case NIX_AQ_CTYPE_RQ: + return "RQ"; + case NIX_AQ_CTYPE_RSS: + return "RSS"; + } + return ""; +} + +static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); + struct nix_aq_enq_req aq_req; + unsigned long *bmap; + int qidx, q_cnt = 0; + int err = 0, rc; + + if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx) + return NIX_AF_ERR_AQ_ENQUEUE; + + memset(&aq_req, 0, sizeof(struct nix_aq_enq_req)); + aq_req.hdr.pcifunc = req->hdr.pcifunc; + + if (req->ctype == NIX_AQ_CTYPE_CQ) { + aq_req.cq.ena = 0; + aq_req.cq_mask.ena = 1; + aq_req.cq.bp_ena = 0; + aq_req.cq_mask.bp_ena = 1; + q_cnt = pfvf->cq_ctx->qsize; + bmap = pfvf->cq_bmap; + } + if (req->ctype == NIX_AQ_CTYPE_SQ) { + aq_req.sq.ena = 0; + aq_req.sq_mask.ena = 1; + q_cnt = pfvf->sq_ctx->qsize; + bmap = pfvf->sq_bmap; + } + if (req->ctype == NIX_AQ_CTYPE_RQ) { + aq_req.rq.ena = 0; + aq_req.rq_mask.ena = 1; + q_cnt = pfvf->rq_ctx->qsize; + bmap = pfvf->rq_bmap; + } + + aq_req.ctype = req->ctype; + aq_req.op = NIX_AQ_INSTOP_WRITE; + + for (qidx = 0; qidx < q_cnt; qidx++) { + if (!test_bit(qidx, bmap)) + continue; + aq_req.qidx = qidx; + rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL); + if (rc) { + err = rc; + dev_err(rvu->dev, "Failed to disable %s:%d context\n", + nix_get_ctx_name(req->ctype), qidx); + } + } + + return err; +} + +#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING +static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req) +{ + struct nix_aq_enq_req lock_ctx_req; + int err; + + if (req->op != NIX_AQ_INSTOP_INIT) + return 0; + + if (req->ctype == NIX_AQ_CTYPE_MCE || + req->ctype == NIX_AQ_CTYPE_DYNO) + return 0; + + memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req)); + lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc; + lock_ctx_req.ctype = req->ctype; + lock_ctx_req.op = NIX_AQ_INSTOP_LOCK; + lock_ctx_req.qidx = req->qidx; + err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL); + if (err) + dev_err(rvu->dev, + "PFUNC 0x%x: Failed to lock NIX %s:%d context\n", + req->hdr.pcifunc, + nix_get_ctx_name(req->ctype), req->qidx); + return err; +} + +int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu, + struct nix_aq_enq_req *req, + struct nix_aq_enq_rsp *rsp) +{ + int err; + + err = rvu_nix_aq_enq_inst(rvu, req, rsp); + if (!err) + err = nix_lf_hwctx_lockdown(rvu, req); + return err; +} +#else + +int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu, + struct nix_aq_enq_req *req, + struct nix_aq_enq_rsp *rsp) +{ + return rvu_nix_aq_enq_inst(rvu, req, rsp); +} +#endif + +int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu, + struct hwctx_disable_req *req, + struct msg_rsp *rsp) +{ + return nix_lf_hwctx_disable(rvu, req); +} + +int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, + struct nix_lf_alloc_req *req, + struct nix_lf_alloc_rsp *rsp) +{ + int nixlf, qints, hwctx_size, intf, err, rc = 0; + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc = req->hdr.pcifunc; + struct rvu_block *block; + struct rvu_pfvf *pfvf; + u64 cfg, ctx_cfg; + int blkaddr; + + if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt) + return NIX_AF_ERR_PARAM; + + if (req->way_mask) + req->way_mask &= 0xFFFF; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (!pfvf->nixlf || blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + block = &hw->block[blkaddr]; + nixlf = rvu_get_lf(rvu, block, pcifunc, 0); + if 
(nixlf < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + /* Check if requested 'NIXLF <=> NPALF' mapping is valid */ + if (req->npa_func) { + /* If default, use 'this' NIXLF's PFFUNC */ + if (req->npa_func == RVU_DEFAULT_PF_FUNC) + req->npa_func = pcifunc; + if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA)) + return NIX_AF_INVAL_NPA_PF_FUNC; + } + + /* Check if requested 'NIXLF <=> SSOLF' mapping is valid */ + if (req->sso_func) { + /* If default, use 'this' NIXLF's PFFUNC */ + if (req->sso_func == RVU_DEFAULT_PF_FUNC) + req->sso_func = pcifunc; + if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO)) + return NIX_AF_INVAL_SSO_PF_FUNC; + } + + /* If RSS is being enabled, check if requested config is valid. + * RSS table size should be power of two, otherwise + * RSS_GRP::OFFSET + adder might go beyond that group or + * won't be able to use entire table. + */ + if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE || + !is_power_of_2(req->rss_sz))) + return NIX_AF_ERR_RSS_SIZE_INVALID; + + if (req->rss_sz && + (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS)) + return NIX_AF_ERR_RSS_GRPS_INVALID; + + /* Reset this NIX LF */ + err = rvu_lf_reset(rvu, block, nixlf); + if (err) { + dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n", + block->addr - BLKADDR_NIX0, nixlf); + return NIX_AF_ERR_LF_RESET; + } + + ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3); + + /* Alloc NIX RQ HW context memory and config the base */ + hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF); + err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size); + if (err) + goto free_mem; + + pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL); + if (!pfvf->rq_bmap) + goto free_mem; + + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf), + (u64)pfvf->rq_ctx->iova); + + /* Set caching and queue count in HW */ + cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20; + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg); + + /* Alloc NIX SQ HW context memory and config the base */ + hwctx_size = 1UL << (ctx_cfg & 0xF); + err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size); + if (err) + goto free_mem; + + pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL); + if (!pfvf->sq_bmap) + goto free_mem; + + rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf), + (u64)pfvf->sq_ctx->iova); + + cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20; + rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg); + + /* Alloc NIX CQ HW context memory and config the base */ + hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF); + err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size); + if (err) + goto free_mem; + + pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL); + if (!pfvf->cq_bmap) + goto free_mem; + + rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf), + (u64)pfvf->cq_ctx->iova); + + cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20; + rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg); + + /* Initialize receive side scaling (RSS) */ + hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF); + err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz, + req->rss_grps, hwctx_size, req->way_mask); + if (err) + goto free_mem; + + /* Alloc memory for CQINT's HW contexts */ + cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); + qints = (cfg >> 24) & 0xFFF; + hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF); + err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size); + if (err) + goto free_mem; + + rvu_write64(rvu, blkaddr, 
NIX_AF_LFX_CINTS_BASE(nixlf), + (u64)pfvf->cq_ints_ctx->iova); + + rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), + BIT_ULL(36) | req->way_mask << 20); + + /* Alloc memory for QINT's HW contexts */ + cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); + qints = (cfg >> 12) & 0xFFF; + hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF); + err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size); + if (err) + goto free_mem; + + rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf), + (u64)pfvf->nix_qints_ctx->iova); + rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), + BIT_ULL(36) | req->way_mask << 20); + + /* Setup VLANX TPID's. + * Use VLAN1 for 802.1Q + * and VLAN0 for 802.1AD. + */ + cfg = (0x8100ULL << 16) | 0x88A8ULL; + rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg); + + /* Enable LMTST for this NIX LF */ + rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0)); + + /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */ + if (req->npa_func) + cfg = req->npa_func; + if (req->sso_func) + cfg |= (u64)req->sso_func << 16; + + cfg |= (u64)req->xqe_sz << 33; + rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg); + + /* Config Rx pkt length, csum checks and apad enable / disable */ + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg); + + /* Configure pkind for TX parse config */ + cfg = NPC_TX_DEF_PKIND; + rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg); + + intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; + err = nix_interface_init(rvu, pcifunc, intf, nixlf); + if (err) + goto free_mem; + + /* Disable NPC entries as NIXLF's contexts are not initialized yet */ + rvu_npc_disable_default_entries(rvu, pcifunc, nixlf); + + goto exit; + +free_mem: + nix_ctx_free(rvu, pfvf); + rc = -ENOMEM; + +exit: + /* Set macaddr of this PF/VF */ + ether_addr_copy(rsp->mac_addr, pfvf->mac_addr); + + /* set SQB size info */ + cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST); + rsp->sqb_size = (cfg >> 34) & 0xFFFF; + rsp->rx_chan_base = pfvf->rx_chan_base; + rsp->tx_chan_base = pfvf->tx_chan_base; + rsp->rx_chan_cnt = pfvf->rx_chan_cnt; + rsp->tx_chan_cnt = pfvf->tx_chan_cnt; + rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4; + rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6; + /* Get HW supported stat count */ + cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); + rsp->lf_rx_stats = ((cfg >> 32) & 0xFF); + rsp->lf_tx_stats = ((cfg >> 24) & 0xFF); + /* Get count of CQ IRQs and error IRQs supported per LF */ + cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); + rsp->qints = ((cfg >> 12) & 0xFFF); + rsp->cints = ((cfg >> 24) & 0xFFF); + return rc; +} + +int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc = req->hdr.pcifunc; + struct rvu_block *block; + int blkaddr, nixlf, err; + struct rvu_pfvf *pfvf; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (!pfvf->nixlf || blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + block = &hw->block[blkaddr]; + nixlf = rvu_get_lf(rvu, block, pcifunc, 0); + if (nixlf < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + nix_interface_deinit(rvu, pcifunc, nixlf); + + /* Reset this NIX LF */ + err = rvu_lf_reset(rvu, block, nixlf); + if (err) { + dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n", + block->addr - BLKADDR_NIX0, nixlf); + return NIX_AF_ERR_LF_RESET; + } + + nix_ctx_free(rvu, pfvf); + + return 0; +} + +int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu, 
+ struct nix_mark_format_cfg *req, + struct nix_mark_format_cfg_rsp *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; + struct nix_hw *nix_hw; + struct rvu_pfvf *pfvf; + int blkaddr, rc; + u32 cfg; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (!pfvf->nixlf || blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) + return -EINVAL; + + cfg = (((u32)req->offset & 0x7) << 16) | + (((u32)req->y_mask & 0xF) << 12) | + (((u32)req->y_val & 0xF) << 8) | + (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF); + + rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg); + if (rc < 0) { + dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)", + rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK); + return NIX_AF_ERR_MARK_CFG_FAIL; + } + + rsp->mark_format_idx = rc; + return 0; +} + +/* Disable shaping of pkts by a scheduler queue + * at a given scheduler level. + */ +static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr, + int lvl, int schq) +{ + u64 cir_reg = 0, pir_reg = 0; + u64 cfg; + + switch (lvl) { + case NIX_TXSCH_LVL_TL1: + cir_reg = NIX_AF_TL1X_CIR(schq); + pir_reg = 0; /* PIR not available at TL1 */ + break; + case NIX_TXSCH_LVL_TL2: + cir_reg = NIX_AF_TL2X_CIR(schq); + pir_reg = NIX_AF_TL2X_PIR(schq); + break; + case NIX_TXSCH_LVL_TL3: + cir_reg = NIX_AF_TL3X_CIR(schq); + pir_reg = NIX_AF_TL3X_PIR(schq); + break; + case NIX_TXSCH_LVL_TL4: + cir_reg = NIX_AF_TL4X_CIR(schq); + pir_reg = NIX_AF_TL4X_PIR(schq); + break; + } + + if (!cir_reg) + return; + cfg = rvu_read64(rvu, blkaddr, cir_reg); + rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0)); + + if (!pir_reg) + return; + cfg = rvu_read64(rvu, blkaddr, pir_reg); + rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0)); +} + +static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr, + int lvl, int schq) +{ + struct rvu_hwinfo *hw = rvu->hw; + int link; + + if (lvl >= hw->cap.nix_tx_aggr_lvl) + return; + + /* Reset TL4's SDP link config */ + if (lvl == NIX_TXSCH_LVL_TL4) + rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00); + + if (lvl != NIX_TXSCH_LVL_TL2) + return; + + /* Reset TL2's CGX or LBK link config */ + for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) + rvu_write64(rvu, blkaddr, + NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00); +} + +static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc) +{ + struct rvu_hwinfo *hw = rvu->hw; + int pf = rvu_get_pf(pcifunc); + u8 cgx_id = 0, lmac_id = 0; + + if (is_afvf(pcifunc)) {/* LBK links */ + return hw->cgx_links; + } else if (is_pf_cgxmapped(rvu, pf)) { + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + return (cgx_id * hw->lmac_per_cgx) + lmac_id; + } + + /* SDP link */ + return hw->cgx_links + hw->lbk_links; +} + +static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc, + int link, int *start, int *end) +{ + struct rvu_hwinfo *hw = rvu->hw; + int pf = rvu_get_pf(pcifunc); + + if (is_afvf(pcifunc)) { /* LBK links */ + *start = hw->cap.nix_txsch_per_cgx_lmac * link; + *end = *start + hw->cap.nix_txsch_per_lbk_lmac; + } else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */ + *start = hw->cap.nix_txsch_per_cgx_lmac * link; + *end = *start + hw->cap.nix_txsch_per_cgx_lmac; + } else { /* SDP link */ + *start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) + + (hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links); + *end = *start + hw->cap.nix_txsch_per_sdp_lmac; + } +} + +static int nix_check_txschq_alloc_req(struct rvu *rvu, 
int lvl, u16 pcifunc, + struct nix_hw *nix_hw, + struct nix_txsch_alloc_req *req) +{ + struct rvu_hwinfo *hw = rvu->hw; + int schq, req_schq, free_cnt; + struct nix_txsch *txsch; + int link, start, end; + + txsch = &nix_hw->txsch[lvl]; + req_schq = req->schq_contig[lvl] + req->schq[lvl]; + + if (!req_schq) + return 0; + + link = nix_get_tx_link(rvu, pcifunc); + + /* For traffic aggregating scheduler level, one queue is enough */ + if (lvl >= hw->cap.nix_tx_aggr_lvl) { + if (req_schq != 1) + return NIX_AF_ERR_TLX_ALLOC_FAIL; + return 0; + } + + /* Get free SCHQ count and check if request can be accomodated */ + if (hw->cap.nix_fixed_txschq_mapping) { + nix_get_txschq_range(rvu, pcifunc, link, &start, &end); + schq = start + (pcifunc & RVU_PFVF_FUNC_MASK); + if (end <= txsch->schq.max && schq < end && + !test_bit(schq, txsch->schq.bmap)) + free_cnt = 1; + else + free_cnt = 0; + } else { + free_cnt = rvu_rsrc_free_count(&txsch->schq); + } + + if (free_cnt < req_schq || req->schq[lvl] > MAX_TXSCHQ_PER_FUNC || + req->schq_contig[lvl] > MAX_TXSCHQ_PER_FUNC) + return NIX_AF_ERR_TLX_ALLOC_FAIL; + + /* If contiguous queues are needed, check for availability */ + if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] && + !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl])) + return NIX_AF_ERR_TLX_ALLOC_FAIL; + + return 0; +} + +static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch, + struct nix_txsch_alloc_rsp *rsp, + int lvl, int start, int end) +{ + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc = rsp->hdr.pcifunc; + int idx, schq; + + /* For traffic aggregating levels, queue alloc is based + * on transmit link to which PF_FUNC is mapped to. + */ + if (lvl >= hw->cap.nix_tx_aggr_lvl) { + /* A single TL queue is allocated */ + if (rsp->schq_contig[lvl]) { + rsp->schq_contig[lvl] = 1; + rsp->schq_contig_list[lvl][0] = start; + } + + /* Both contig and non-contig reqs doesn't make sense here */ + if (rsp->schq_contig[lvl]) + rsp->schq[lvl] = 0; + + if (rsp->schq[lvl]) { + rsp->schq[lvl] = 1; + rsp->schq_list[lvl][0] = start; + } + return; + } + + /* Adjust the queue request count if HW supports + * only one queue per level configuration. 
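+ * With a fixed mapping the queue index comes straight from the function
+ * number (schq = start + (pcifunc & RVU_PFVF_FUNC_MASK)), so at most one
+ * queue, contiguous or not, is handed out per level.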
+ */ + if (hw->cap.nix_fixed_txschq_mapping) { + idx = pcifunc & RVU_PFVF_FUNC_MASK; + schq = start + idx; + if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) { + rsp->schq_contig[lvl] = 0; + rsp->schq[lvl] = 0; + return; + } + + if (rsp->schq_contig[lvl]) { + rsp->schq_contig[lvl] = 1; + set_bit(schq, txsch->schq.bmap); + rsp->schq_contig_list[lvl][0] = schq; + rsp->schq[lvl] = 0; + } else if (rsp->schq[lvl]) { + rsp->schq[lvl] = 1; + set_bit(schq, txsch->schq.bmap); + rsp->schq_list[lvl][0] = schq; + } + return; + } + + /* Allocate contiguous queue indices requesty first */ + if (rsp->schq_contig[lvl]) { + schq = bitmap_find_next_zero_area(txsch->schq.bmap, + txsch->schq.max, start, + rsp->schq_contig[lvl], 0); + if (schq >= end) + rsp->schq_contig[lvl] = 0; + for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) { + set_bit(schq, txsch->schq.bmap); + rsp->schq_contig_list[lvl][idx] = schq; + schq++; + } + } + + /* Allocate non-contiguous queue indices */ + if (rsp->schq[lvl]) { + idx = 0; + for (schq = start; schq < end; schq++) { + if (!test_bit(schq, txsch->schq.bmap)) { + set_bit(schq, txsch->schq.bmap); + rsp->schq_list[lvl][idx++] = schq; + } + if (idx == rsp->schq[lvl]) + break; + } + /* Update how many were allocated */ + rsp->schq[lvl] = idx; + } +} + +int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu, + struct nix_txsch_alloc_req *req, + struct nix_txsch_alloc_rsp *rsp) +{ + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc = req->hdr.pcifunc; + int link, blkaddr, rc = 0; + int lvl, idx, start, end; + struct nix_txsch *txsch; + struct rvu_pfvf *pfvf; + struct nix_hw *nix_hw; + u32 *pfvf_map; + u16 schq; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (!pfvf->nixlf || blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) + return -EINVAL; + + mutex_lock(&rvu->rsrc_lock); + + /* Check if request is valid as per HW capabilities + * and can be accomodated. 
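+ * All levels are checked first so that nothing is allocated when any
+ * level of the request cannot be satisfied.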
+ */ + for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { + rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req); + if (rc) + goto err; + } + + /* Allocate requested Tx scheduler queues */ + for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { + txsch = &nix_hw->txsch[lvl]; + pfvf_map = txsch->pfvf_map; + + if (!req->schq[lvl] && !req->schq_contig[lvl]) + continue; + + rsp->schq[lvl] = req->schq[lvl]; + rsp->schq_contig[lvl] = req->schq_contig[lvl]; + + link = nix_get_tx_link(rvu, pcifunc); + + if (lvl >= hw->cap.nix_tx_aggr_lvl) { + start = link; + end = link; + } else if (hw->cap.nix_fixed_txschq_mapping) { + nix_get_txschq_range(rvu, pcifunc, link, &start, &end); + } else { + start = 0; + end = txsch->schq.max; + } + + nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end); + + /* Reset queue config */ + for (idx = 0; idx < req->schq_contig[lvl]; idx++) { + schq = rsp->schq_contig_list[lvl][idx]; + if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) & + NIX_TXSCHQ_CFG_DONE)) + pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); + nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); + nix_reset_tx_shaping(rvu, blkaddr, lvl, schq); + } + + for (idx = 0; idx < req->schq[lvl]; idx++) { + schq = rsp->schq_list[lvl][idx]; + if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) & + NIX_TXSCHQ_CFG_DONE)) + pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); + nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); + nix_reset_tx_shaping(rvu, blkaddr, lvl, schq); + } + } + + rsp->aggr_level = hw->cap.nix_tx_aggr_lvl; + rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO; + rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr, + NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ? + NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2; + goto exit; +err: + rc = NIX_AF_ERR_TLX_ALLOC_FAIL; +exit: + mutex_unlock(&rvu->rsrc_lock); + return rc; +} + +static void nix_smq_flush(struct rvu *rvu, int blkaddr, + int smq, u16 pcifunc, int nixlf) +{ + int pf = rvu_get_pf(pcifunc); + u8 cgx_id = 0, lmac_id = 0; + int err, restore_tx_en = 0; + u64 cfg; + + /* enable cgx tx if disabled */ + if (is_pf_cgxmapped(rvu, pf)) { + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), + lmac_id, true); + } + + cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq)); + /* Do SMQ flush and set enqueue xoff */ + cfg |= BIT_ULL(50) | BIT_ULL(49); + rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg); + + /* Disable backpressure from physical link, + * otherwise SMQ flush may stall. 
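+ * It is re-enabled via rvu_cgx_enadis_rx_bp() once the flush-completion
+ * poll below finishes.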
+ */ + rvu_cgx_enadis_rx_bp(rvu, pf, false); + + /* Wait for flush to complete */ + err = rvu_poll_reg(rvu, blkaddr, + NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true); + if (err) + dev_err(rvu->dev, + "NIXLF%d: SMQ%d flush failed\n", nixlf, smq); + + rvu_cgx_enadis_rx_bp(rvu, pf, true); + /* restore cgx tx state */ + if (restore_tx_en) + cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false); +} + +static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) +{ + int blkaddr, nixlf, lvl, schq, err; + struct rvu_hwinfo *hw = rvu->hw; + struct nix_txsch *txsch; + struct nix_hw *nix_hw; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) + return -EINVAL; + + nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); + if (nixlf < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + /* Disable TL2/3 queue links before SMQ flush*/ + mutex_lock(&rvu->rsrc_lock); + for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) { + if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4) + continue; + + txsch = &nix_hw->txsch[lvl]; + for (schq = 0; schq < txsch->schq.max; schq++) { + if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) + continue; + nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); + } + } + + /* Flush SMQs */ + txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; + for (schq = 0; schq < txsch->schq.max; schq++) { + if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) + continue; + nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); + } + + /* Now free scheduler queues to free pool */ + for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { + /* TLs above aggregation level are shared across all PF + * and it's VFs, hence skip freeing them. + */ + if (lvl >= hw->cap.nix_tx_aggr_lvl) + continue; + + txsch = &nix_hw->txsch[lvl]; + for (schq = 0; schq < txsch->schq.max; schq++) { + if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) + continue; + rvu_free_rsrc(&txsch->schq, schq); + txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); + } + } + mutex_unlock(&rvu->rsrc_lock); + + /* Sync cached info for this LF in NDC-TX to LLC/DRAM */ + rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf); + err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true); + if (err) + dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf); + + return 0; +} + +static int nix_txschq_free_one(struct rvu *rvu, + struct nix_txsch_free_req *req) +{ + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc = req->hdr.pcifunc; + int lvl, schq, nixlf, blkaddr; + struct nix_txsch *txsch; + struct nix_hw *nix_hw; + u32 *pfvf_map; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) + return -EINVAL; + + nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); + if (nixlf < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + lvl = req->schq_lvl; + schq = req->schq; + txsch = &nix_hw->txsch[lvl]; + + if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max) + return 0; + + pfvf_map = txsch->pfvf_map; + mutex_lock(&rvu->rsrc_lock); + + if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) { + mutex_unlock(&rvu->rsrc_lock); + goto err; + } + + /* Flush if it is a SMQ. 
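(nix_smq_flush() also toggles CGX Tx and link backpressure around the flush so it can drain.)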
Onus of disabling + * TL2/3 queue links before SMQ flush is on user + */ + if (lvl == NIX_TXSCH_LVL_SMQ) + nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); + + /* Free the resource */ + rvu_free_rsrc(&txsch->schq, schq); + txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); + mutex_unlock(&rvu->rsrc_lock); + return 0; +err: + return NIX_AF_ERR_TLX_INVALID; +} + +int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu, + struct nix_txsch_free_req *req, + struct msg_rsp *rsp) +{ + if (req->flags & TXSCHQ_FREE_ALL) + return nix_txschq_free(rvu, req->hdr.pcifunc); + else + return nix_txschq_free_one(rvu, req); +} + +static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr, + int lvl, u64 reg, u64 regval) +{ + u64 regbase = reg & 0xFFFF; + u16 schq, parent; + + if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg)) + return false; + + schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); + /* Check if this schq belongs to this PF/VF or not */ + if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq)) + return false; + + parent = (regval >> 16) & 0x1FF; + /* Validate MDQ's TL4 parent */ + if (regbase == NIX_AF_MDQX_PARENT(0) && + !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent)) + return false; + + /* Validate TL4's TL3 parent */ + if (regbase == NIX_AF_TL4X_PARENT(0) && + !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent)) + return false; + + /* Validate TL3's TL2 parent */ + if (regbase == NIX_AF_TL3X_PARENT(0) && + !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent)) + return false; + + /* Validate TL2's TL1 parent */ + if (regbase == NIX_AF_TL2X_PARENT(0) && + !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent)) + return false; + + return true; +} + +static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg) +{ + u64 regbase; + + if (hw->cap.nix_shaping) + return true; + + /* If shaping and coloring is not supported, then + * *_CIR and *_PIR registers should not be configured. 
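+ * Only the register offset (reg & 0xFFFF) is compared, and the caller
+ * simply skips such writes when this helper returns false.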
+ */ + regbase = reg & 0xFFFF; + + switch (lvl) { + case NIX_TXSCH_LVL_TL1: + if (regbase == NIX_AF_TL1X_CIR(0)) + return false; + break; + case NIX_TXSCH_LVL_TL2: + if (regbase == NIX_AF_TL2X_CIR(0) || + regbase == NIX_AF_TL2X_PIR(0)) + return false; + break; + case NIX_TXSCH_LVL_TL3: + if (regbase == NIX_AF_TL3X_CIR(0) || + regbase == NIX_AF_TL3X_PIR(0)) + return false; + break; + case NIX_TXSCH_LVL_TL4: + if (regbase == NIX_AF_TL4X_CIR(0) || + regbase == NIX_AF_TL4X_PIR(0)) + return false; + break; + } + return true; +} + +static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw, + u16 pcifunc, int blkaddr) +{ + u32 *pfvf_map; + int schq; + + schq = nix_get_tx_link(rvu, pcifunc); + pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map; + /* Skip if PF has already done the config */ + if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE) + return; + rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq), + (TXSCH_TL1_DFLT_RR_PRIO << 1)); + rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq), + TXSCH_TL1_DFLT_RR_QTM); + rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00); + pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE); +} + +int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, + struct nix_txschq_config *req, + struct msg_rsp *rsp) +{ + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc = req->hdr.pcifunc; + u64 reg, regval, schq_regbase; + struct nix_txsch *txsch; + struct nix_hw *nix_hw; + int blkaddr, idx, err; + int nixlf, schq; + u32 *pfvf_map; + + if (req->lvl >= NIX_TXSCH_LVL_CNT || + req->num_regs > MAX_REGS_PER_MBOX_MSG) + return NIX_AF_INVAL_TXSCHQ_CFG; + + err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); + if (err) + return err; + + nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) + return -EINVAL; + + txsch = &nix_hw->txsch[req->lvl]; + pfvf_map = txsch->pfvf_map; + + if (req->lvl >= hw->cap.nix_tx_aggr_lvl && + pcifunc & RVU_PFVF_FUNC_MASK) { + mutex_lock(&rvu->rsrc_lock); + if (req->lvl == NIX_TXSCH_LVL_TL1) + nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr); + mutex_unlock(&rvu->rsrc_lock); + return 0; + } + + for (idx = 0; idx < req->num_regs; idx++) { + reg = req->reg[idx]; + regval = req->regval[idx]; + schq_regbase = reg & 0xFFFF; + + if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr, + txsch->lvl, reg, regval)) + return NIX_AF_INVAL_TXSCHQ_CFG; + + /* Check if shaping and coloring is supported */ + if (!is_txschq_shaping_valid(hw, req->lvl, reg)) + continue; + + /* Replace PF/VF visible NIXLF slot with HW NIXLF id */ + if (schq_regbase == NIX_AF_SMQX_CFG(0)) { + nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], + pcifunc, 0); + regval &= ~(0x7FULL << 24); + regval |= ((u64)nixlf << 24); + } + + /* Clear 'BP_ENA' config, if it's not allowed */ + if (!hw->cap.nix_tx_link_bp) { + if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) || + (schq_regbase & 0xFF00) == + NIX_AF_TL3_TL2X_LINKX_CFG(0, 0)) + regval &= ~BIT_ULL(13); + } + + /* Mark config as done for TL1 by PF */ + if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) && + schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) { + schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); + mutex_lock(&rvu->rsrc_lock); + pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], + NIX_TXSCHQ_CFG_DONE); + mutex_unlock(&rvu->rsrc_lock); + } + + /* SMQ flush is special hence split register writes such + * that flush first and write rest of the bits later. 
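+ * Bit 49 of NIX_AF_SMQX_CFG carries the flush request; it is serviced via
+ * nix_smq_flush() and cleared before the remaining bits are written.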
+ */ + if (schq_regbase == NIX_AF_SMQX_CFG(0) && + (regval & BIT_ULL(49))) { + schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); + nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); + regval &= ~BIT_ULL(49); + } + rvu_write64(rvu, blkaddr, reg, regval); + } + + return 0; +} + +static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr, + struct nix_vtag_config *req) +{ + u64 regval = req->vtag_size; + + if (req->rx.vtag_type > 7 || req->vtag_size > VTAGSIZE_T8) + return -EINVAL; + + if (req->rx.capture_vtag) + regval |= BIT_ULL(5); + if (req->rx.strip_vtag) + regval |= BIT_ULL(4); + + rvu_write64(rvu, blkaddr, + NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval); + return 0; +} + +int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu, + struct nix_vtag_config *req, + struct msg_rsp *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; + int blkaddr, nixlf, err; + + err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); + if (err) + return err; + + if (req->cfg_type) { + err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req); + if (err) + return NIX_AF_ERR_PARAM; + } else { + /* TODO: handle tx vtag configuration */ + return 0; + } + + return 0; +} + +static int nix_setup_mce(struct rvu *rvu, int mce, u8 op, + u16 pcifunc, int next, bool eol) +{ + struct nix_aq_enq_req aq_req; + int err; + + aq_req.hdr.pcifunc = 0; + aq_req.ctype = NIX_AQ_CTYPE_MCE; + aq_req.op = op; + aq_req.qidx = mce; + + /* Forward bcast pkts to RQ0, RSS not needed */ + aq_req.mce.op = 0; + aq_req.mce.index = 0; + aq_req.mce.eol = eol; + aq_req.mce.pf_func = pcifunc; + aq_req.mce.next = next; + + /* All fields valid */ + *(u64 *)(&aq_req.mce_mask) = ~0ULL; + + err = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL); + if (err) { + dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n", + rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK); + return err; + } + return 0; +} + +static int nix_update_mce_list(struct nix_mce_list *mce_list, + u16 pcifunc, bool add) +{ + struct mce *mce, *tail = NULL; + bool delete = false; + + /* Scan through the current list */ + hlist_for_each_entry(mce, &mce_list->head, node) { + /* If already exists, then delete */ + if (mce->pcifunc == pcifunc && !add) { + delete = true; + break; + } + tail = mce; + } + + if (delete) { + hlist_del(&mce->node); + kfree(mce); + mce_list->count--; + return 0; + } + + if (!add) + return 0; + + /* Add a new one to the list, at the tail */ + mce = kzalloc(sizeof(*mce), GFP_KERNEL); + if (!mce) + return -ENOMEM; + mce->pcifunc = pcifunc; + if (!tail) + hlist_add_head(&mce->node, &mce_list->head); + else + hlist_add_behind(&mce->node, &tail->node); + mce_list->count++; + return 0; +} + +int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add) +{ + int err = 0, idx, next_idx, last_idx; + struct nix_mce_list *mce_list; + struct nix_mcast *mcast; + struct nix_hw *nix_hw; + struct rvu_pfvf *pfvf; + struct mce *mce; + int blkaddr; + + /* Broadcast pkt replication is not needed for AF's VFs, hence skip */ + if (is_afvf(pcifunc)) + return 0; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (blkaddr < 0) + return 0; + + nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) + return 0; + + mcast = &nix_hw->mcast; + + /* Get this PF/VF func's MCE index */ + pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); + idx = pfvf->bcast_mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK); + + mce_list = &pfvf->bcast_mce_list; + if (idx > (pfvf->bcast_mce_idx + mce_list->max)) { + dev_err(rvu->dev, + "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n", + __func__, idx, 
mce_list->max, + pcifunc >> RVU_PFVF_PF_SHIFT); + return -EINVAL; + } + + mutex_lock(&mcast->mce_lock); + + err = nix_update_mce_list(mce_list, pcifunc, add); + if (err) + goto end; + + /* Disable MCAM entry in NPC */ + if (!mce_list->count) { + rvu_npc_enable_bcast_entry(rvu, pcifunc, false); + goto end; + } + + /* Dump the updated list to HW */ + idx = pfvf->bcast_mce_idx; + last_idx = idx + mce_list->count - 1; + hlist_for_each_entry(mce, &mce_list->head, node) { + if (idx > last_idx) + break; + + next_idx = idx + 1; + /* EOL should be set in last MCE */ + err = nix_setup_mce(rvu, idx, NIX_AQ_INSTOP_WRITE, + mce->pcifunc, next_idx, + (next_idx > last_idx) ? true : false); + if (err) + goto end; + idx++; + } + +end: + mutex_unlock(&mcast->mce_lock); + return err; +} + +static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw) +{ + struct nix_mcast *mcast = &nix_hw->mcast; + int err, pf, numvfs, idx; + struct rvu_pfvf *pfvf; + u16 pcifunc; + u64 cfg; + + /* Skip PF0 (i.e AF) */ + for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) { + cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); + /* If PF is not enabled, nothing to do */ + if (!((cfg >> 20) & 0x01)) + continue; + /* Get numVFs attached to this PF */ + numvfs = (cfg >> 12) & 0xFF; + + pfvf = &rvu->pf[pf]; + /* Save the start MCE */ + pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1); + + nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1); + + for (idx = 0; idx < (numvfs + 1); idx++) { + /* idx-0 is for PF, followed by VFs */ + pcifunc = (pf << RVU_PFVF_PF_SHIFT); + pcifunc |= idx; + /* Add dummy entries now, so that we don't have to check + * for whether AQ_OP should be INIT/WRITE later on. + * Will be updated when a NIXLF is attached/detached to + * these PF/VFs. 
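+ * Each dummy entry is written with next index 0 and EOL set, so the
+ * replication list stays well formed until real entries replace it.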
+ */ + err = nix_setup_mce(rvu, pfvf->bcast_mce_idx + idx, + NIX_AQ_INSTOP_INIT, + pcifunc, 0, true); + if (err) + return err; + } + } + return 0; +} + +static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) +{ + struct nix_mcast *mcast = &nix_hw->mcast; + struct rvu_hwinfo *hw = rvu->hw; + int err, size; + + size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F; + size = (1ULL << size); + + /* Alloc memory for multicast/mirror replication entries */ + err = qmem_alloc(rvu->dev, &mcast->mce_ctx, + (256UL << MC_TBL_SIZE), size); + if (err) + return -ENOMEM; + + rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE, + (u64)mcast->mce_ctx->iova); + + /* Set max list length equal to max no of VFs per PF + PF itself */ + rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG, + BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE); + + /* Alloc memory for multicast replication buffers */ + size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF; + err = qmem_alloc(rvu->dev, &mcast->mcast_buf, + (8UL << MC_BUF_CNT), size); + if (err) + return -ENOMEM; + + rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE, + (u64)mcast->mcast_buf->iova); + + /* Alloc pkind for NIX internal RX multicast/mirror replay */ + mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc); + + rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG, + BIT_ULL(63) | (mcast->replay_pkind << 24) | + BIT_ULL(20) | MC_BUF_CNT); + + mutex_init(&mcast->mce_lock); + + return nix_setup_bcast_tables(rvu, nix_hw); +} + +static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) +{ + struct nix_txsch *txsch; + int err, lvl, schq; + u64 cfg, reg; + + /* Get scheduler queue count of each type and alloc + * bitmap for each for alloc/free/attach operations. + */ + for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { + txsch = &nix_hw->txsch[lvl]; + txsch->lvl = lvl; + switch (lvl) { + case NIX_TXSCH_LVL_SMQ: + reg = NIX_AF_MDQ_CONST; + break; + case NIX_TXSCH_LVL_TL4: + reg = NIX_AF_TL4_CONST; + break; + case NIX_TXSCH_LVL_TL3: + reg = NIX_AF_TL3_CONST; + break; + case NIX_TXSCH_LVL_TL2: + reg = NIX_AF_TL2_CONST; + break; + case NIX_TXSCH_LVL_TL1: + reg = NIX_AF_TL1_CONST; + break; + } + cfg = rvu_read64(rvu, blkaddr, reg); + txsch->schq.max = cfg & 0xFFFF; + err = rvu_alloc_bitmap(&txsch->schq); + if (err) + return err; + + /* Allocate memory for scheduler queues to + * PF/VF pcifunc mapping info. 
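+ * Each 32-bit map entry packs the owning PF_FUNC and queue state flags;
+ * see the TXSCH_MAP()/TXSCH_MAP_FUNC()/TXSCH_MAP_FLAGS() helpers used
+ * elsewhere in this file.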
+ */ + txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max, + sizeof(u32), GFP_KERNEL); + if (!txsch->pfvf_map) + return -ENOMEM; + for (schq = 0; schq < txsch->schq.max; schq++) + txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); + } + return 0; +} + +int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw, + int blkaddr, u32 cfg) +{ + int fmt_idx; + + for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) { + if (nix_hw->mark_format.cfg[fmt_idx] == cfg) + return fmt_idx; + } + if (fmt_idx >= nix_hw->mark_format.total) + return -ERANGE; + + rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg); + nix_hw->mark_format.cfg[fmt_idx] = cfg; + nix_hw->mark_format.in_use++; + return fmt_idx; +} + +static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw, + int blkaddr) +{ + u64 cfgs[] = { + [NIX_MARK_CFG_IP_DSCP_RED] = 0x10003, + [NIX_MARK_CFG_IP_DSCP_YELLOW] = 0x11200, + [NIX_MARK_CFG_IP_DSCP_YELLOW_RED] = 0x11203, + [NIX_MARK_CFG_IP_ECN_RED] = 0x6000c, + [NIX_MARK_CFG_IP_ECN_YELLOW] = 0x60c00, + [NIX_MARK_CFG_IP_ECN_YELLOW_RED] = 0x60c0c, + [NIX_MARK_CFG_VLAN_DEI_RED] = 0x30008, + [NIX_MARK_CFG_VLAN_DEI_YELLOW] = 0x30800, + [NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808, + }; + int i, rc; + u64 total; + + total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8; + nix_hw->mark_format.total = (u8)total; + nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32), + GFP_KERNEL); + if (!nix_hw->mark_format.cfg) + return -ENOMEM; + for (i = 0; i < NIX_MARK_CFG_MAX; i++) { + rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]); + if (rc < 0) + dev_err(rvu->dev, "Err %d in setup mark format %d\n", + i, rc); + } + + return 0; +} + +int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; + int i, nixlf, blkaddr, err; + u64 stats; + + err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); + if (err) + return err; + + /* Get stats count supported by HW */ + stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); + + /* Reset tx stats */ + for (i = 0; i < ((stats >> 24) & 0xFF); i++) + rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0); + + /* Reset rx stats */ + for (i = 0; i < ((stats >> 32) & 0xFF); i++) + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0); + + return 0; +} + +/* Returns the ALG index to be set into NPC_RX_ACTION */ +static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg) +{ + int i; + + /* Scan over exiting algo entries to find a match */ + for (i = 0; i < nix_hw->flowkey.in_use; i++) + if (nix_hw->flowkey.flowkey[i] == flow_cfg) + return i; + + return -ERANGE; +} + +static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) +{ + int idx, nr_field, key_off, field_marker, keyoff_marker; + int max_key_off, max_bit_pos, group_member; + struct nix_rx_flowkey_alg *field; + struct nix_rx_flowkey_alg tmp; + u32 key_type, valid_key; + + if (!alg) + return -EINVAL; + +#define FIELDS_PER_ALG 5 +#define MAX_KEY_OFF 40 + /* Clear all fields */ + memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG); + + /* Each of the 32 possible flow key algorithm definitions should + * fall into above incremental config (except ALG0). Otherwise a + * single NPC MCAM entry is not sufficient for supporting RSS. + * + * If a different definition or combination needed then NPC MCAM + * has to be programmed to filter such pkts and it's action should + * point to this definition to calculate flowtag or hash. 
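+ * Each algorithm is limited to FIELDS_PER_ALG (5) field entries and the
+ * extracted bytes must fit within MAX_KEY_OFF (40) bytes of key offset.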
+ * + * The `for loop` goes over _all_ protocol field and the following + * variables depicts the state machine forward progress logic. + * + * keyoff_marker - Enabled when hash byte length needs to be accounted + * in field->key_offset update. + * field_marker - Enabled when a new field needs to be selected. + * group_member - Enabled when protocol is part of a group. + */ + + keyoff_marker = 0; max_key_off = 0; group_member = 0; + nr_field = 0; key_off = 0; field_marker = 1; + field = &tmp; max_bit_pos = fls(flow_cfg); + for (idx = 0; + idx < max_bit_pos && nr_field < FIELDS_PER_ALG && + key_off < MAX_KEY_OFF; idx++) { + key_type = BIT(idx); + valid_key = flow_cfg & key_type; + /* Found a field marker, reset the field values */ + if (field_marker) + memset(&tmp, 0, sizeof(tmp)); + + field_marker = true; + keyoff_marker = true; + switch (key_type) { + case NIX_FLOW_KEY_TYPE_PORT: + field->sel_chan = true; + /* This should be set to 1, when SEL_CHAN is set */ + field->bytesm1 = 1; + break; + case NIX_FLOW_KEY_TYPE_IPV4: + case NIX_FLOW_KEY_TYPE_INNR_IPV4: + field->lid = NPC_LID_LC; + field->ltype_match = NPC_LT_LC_IP; + if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) { + field->lid = NPC_LID_LG; + field->ltype_match = NPC_LT_LG_TU_IP; + } + field->hdr_offset = 12; /* SIP offset */ + field->bytesm1 = 7; /* SIP + DIP, 8 bytes */ + field->ltype_mask = 0xF; /* Match only IPv4 */ + keyoff_marker = false; + break; + case NIX_FLOW_KEY_TYPE_IPV6: + case NIX_FLOW_KEY_TYPE_INNR_IPV6: + field->lid = NPC_LID_LC; + field->ltype_match = NPC_LT_LC_IP6; + if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) { + field->lid = NPC_LID_LG; + field->ltype_match = NPC_LT_LG_TU_IP6; + } + field->hdr_offset = 8; /* SIP offset */ + field->bytesm1 = 31; /* SIP + DIP, 32 bytes */ + field->ltype_mask = 0xF; /* Match only IPv6 */ + break; + case NIX_FLOW_KEY_TYPE_TCP: + case NIX_FLOW_KEY_TYPE_UDP: + case NIX_FLOW_KEY_TYPE_SCTP: + case NIX_FLOW_KEY_TYPE_INNR_TCP: + case NIX_FLOW_KEY_TYPE_INNR_UDP: + case NIX_FLOW_KEY_TYPE_INNR_SCTP: + field->lid = NPC_LID_LD; + if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP || + key_type == NIX_FLOW_KEY_TYPE_INNR_UDP || + key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) + field->lid = NPC_LID_LH; + field->bytesm1 = 3; /* Sport + Dport, 4 bytes */ + + /* Enum values for NPC_LID_LD and NPC_LID_LG are same, + * so no need to change the ltype_match, just change + * the lid for inner protocols + */ + BUILD_BUG_ON((int)NPC_LT_LD_TCP != + (int)NPC_LT_LH_TU_TCP); + BUILD_BUG_ON((int)NPC_LT_LD_UDP != + (int)NPC_LT_LH_TU_UDP); + BUILD_BUG_ON((int)NPC_LT_LD_SCTP != + (int)NPC_LT_LH_TU_SCTP); + + if ((key_type == NIX_FLOW_KEY_TYPE_TCP || + key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) && + valid_key) { + field->ltype_match |= NPC_LT_LD_TCP; + group_member = true; + } else if ((key_type == NIX_FLOW_KEY_TYPE_UDP || + key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) && + valid_key) { + field->ltype_match |= NPC_LT_LD_UDP; + group_member = true; + } else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP || + key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) && + valid_key) { + field->ltype_match |= NPC_LT_LD_SCTP; + group_member = true; + } + field->ltype_mask = ~field->ltype_match; + if (key_type == NIX_FLOW_KEY_TYPE_SCTP || + key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) { + /* Handle the case where any of the group item + * is enabled in the group but not the final one + */ + if (group_member) { + valid_key = true; + group_member = false; + } + } else { + field_marker = false; + keyoff_marker = false; + } + break; + case NIX_FLOW_KEY_TYPE_NVGRE: + field->lid 
= NPC_LID_LD; + field->hdr_offset = 4; /* VSID offset */ + field->bytesm1 = 2; + field->ltype_match = NPC_LT_LD_NVGRE; + field->ltype_mask = 0xF; + break; + case NIX_FLOW_KEY_TYPE_VXLAN: + case NIX_FLOW_KEY_TYPE_GENEVE: + field->lid = NPC_LID_LE; + field->bytesm1 = 2; + field->hdr_offset = 4; + field->ltype_mask = 0xF; + field_marker = false; + keyoff_marker = false; + + if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) { + field->ltype_match |= NPC_LT_LE_VXLAN; + group_member = true; + } + + if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) { + field->ltype_match |= NPC_LT_LE_GENEVE; + group_member = true; + } + + if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) { + if (group_member) { + field->ltype_mask = ~field->ltype_match; + field_marker = true; + keyoff_marker = true; + valid_key = true; + group_member = false; + } + } + break; + case NIX_FLOW_KEY_TYPE_ETH_DMAC: + case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC: + field->lid = NPC_LID_LA; + field->ltype_match = NPC_LT_LA_ETHER; + if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) { + field->lid = NPC_LID_LF; + field->ltype_match = NPC_LT_LF_TU_ETHER; + } + field->hdr_offset = 0; + field->bytesm1 = 5; /* DMAC 6 Byte */ + field->ltype_mask = 0xF; + break; + case NIX_FLOW_KEY_TYPE_IPV6_EXT: + field->lid = NPC_LID_LC; + field->hdr_offset = 40; /* IPV6 hdr */ + field->bytesm1 = 0; /* 1 Byte ext hdr*/ + field->ltype_match = NPC_LT_LC_IP6_EXT; + field->ltype_mask = 0xF; + break; + case NIX_FLOW_KEY_TYPE_GTPU: + field->lid = NPC_LID_LE; + field->hdr_offset = 4; + field->bytesm1 = 3; /* 4 bytes TID*/ + field->ltype_match = NPC_LT_LE_GTPU; + field->ltype_mask = 0xF; + break; + case NIX_FLOW_KEY_TYPE_VLAN: + field->lid = NPC_LID_LB; + field->hdr_offset = 2; /* Skip TPID (2-bytes) */ + field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */ + field->ltype_match = NPC_LT_LB_CTAG; + field->ltype_mask = 0xF; + field->fn_mask = 1; /* Mask out the first nibble */ + break; + } + field->ena = 1; + + /* Found a valid flow key type */ + if (valid_key) { + field->key_offset = key_off; + memcpy(&alg[nr_field], field, sizeof(*field)); + max_key_off = max(max_key_off, field->bytesm1 + 1); + + /* Found a field marker, get the next field */ + if (field_marker) + nr_field++; + } + + /* Found a keyoff marker, update the new key_off */ + if (keyoff_marker) { + key_off += max_key_off; + max_key_off = 0; + } + } + /* Processed all the flow key types */ + if (idx == max_bit_pos && key_off <= MAX_KEY_OFF) + return 0; + else + return NIX_AF_ERR_RSS_NOSPC_FIELD; +} + +static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg) +{ + u64 field[FIELDS_PER_ALG]; + struct nix_hw *hw; + int fid, rc; + + hw = get_nix_hw(rvu->hw, blkaddr); + if (!hw) + return -EINVAL; + + /* No room to add new flow hash algoritham */ + if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX) + return NIX_AF_ERR_RSS_NOSPC_ALGO; + + /* Generate algo fields for the given flow_cfg */ + rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg); + if (rc) + return rc; + + /* Update ALGX_FIELDX register with generated fields */ + for (fid = 0; fid < FIELDS_PER_ALG; fid++) + rvu_write64(rvu, blkaddr, + NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use, + fid), field[fid]); + + /* Store the flow_cfg for futher lookup */ + rc = hw->flowkey.in_use; + hw->flowkey.flowkey[rc] = flow_cfg; + hw->flowkey.in_use++; + + return rc; +} + +int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu, + struct nix_rss_flowkey_cfg *req, + struct nix_rss_flowkey_cfg_rsp *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; 
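+	/* Resolve this PF/VF's NIX LF, reuse an already reserved flowkey
+	 * algorithm index if one matches the requested flowkey_cfg,
+	 * otherwise reserve a new one, then point the LF's RSS MCAM
+	 * action at that index.
+	 */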
+ int alg_idx, nixlf, blkaddr; + struct nix_hw *nix_hw; + int err; + + err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); + if (err) + return err; + + nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) + return -EINVAL; + + alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg); + /* Failed to get algo index from the exiting list, reserve new */ + if (alg_idx < 0) { + alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr, + req->flowkey_cfg); + if (alg_idx < 0) + return alg_idx; + } + rsp->alg_idx = alg_idx; + rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group, + alg_idx, req->mcam_index); + return 0; +} + +static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr) +{ + u32 flowkey_cfg, minkey_cfg; + int alg, fid, rc; + + /* Disable all flow key algx fieldx */ + for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) { + for (fid = 0; fid < FIELDS_PER_ALG; fid++) + rvu_write64(rvu, blkaddr, + NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid), + 0); + } + + /* IPv4/IPv6 SIP/DIPs */ + flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6; + rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); + if (rc < 0) + return rc; + + /* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ + minkey_cfg = flowkey_cfg; + flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP; + rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); + if (rc < 0) + return rc; + + /* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ + flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP; + rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); + if (rc < 0) + return rc; + + /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ + flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP; + rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); + if (rc < 0) + return rc; + + /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */ + flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | + NIX_FLOW_KEY_TYPE_UDP; + rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); + if (rc < 0) + return rc; + + /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ + flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | + NIX_FLOW_KEY_TYPE_SCTP; + rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); + if (rc < 0) + return rc; + + /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ + flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP | + NIX_FLOW_KEY_TYPE_SCTP; + rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); + if (rc < 0) + return rc; + + /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ + flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | + NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP; + rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); + if (rc < 0) + return rc; + + return 0; +} + +int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu, + struct nix_set_mac_addr *req, + struct msg_rsp *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; + int blkaddr, nixlf, err; + struct rvu_pfvf *pfvf; + + err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); + if (err) + return err; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + + ether_addr_copy(pfvf->mac_addr, req->mac_addr); + + rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf, + pfvf->rx_chan_base, req->mac_addr); + + rvu_npc_update_rxvlan(rvu, pcifunc, nixlf); + + return 0; +} + +int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu, + struct msg_req *req, + struct nix_get_mac_addr_rsp *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; + struct rvu_pfvf *pfvf; + + if (!is_nixlf_attached(rvu, pcifunc)) + return NIX_AF_ERR_AF_LF_INVALID; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + + ether_addr_copy(rsp->mac_addr, pfvf->mac_addr); + + 
return 0; +} + +int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req, + struct msg_rsp *rsp) +{ + bool allmulti = false, disable_promisc = false; + u16 pcifunc = req->hdr.pcifunc; + int blkaddr, nixlf, err; + struct rvu_pfvf *pfvf; + + err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); + if (err) + return err; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + + if (req->mode & NIX_RX_MODE_PROMISC) + allmulti = false; + else if (req->mode & NIX_RX_MODE_ALLMULTI) + allmulti = true; + else + disable_promisc = true; + + if (disable_promisc) + rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf); + else + rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, + pfvf->rx_chan_base, allmulti); + + rvu_npc_update_rxvlan(rvu, pcifunc, nixlf); + + return 0; +} + +static void nix_find_link_frs(struct rvu *rvu, + struct nix_frs_cfg *req, u16 pcifunc) +{ + int pf = rvu_get_pf(pcifunc); + struct rvu_pfvf *pfvf; + int maxlen, minlen; + int numvfs, hwvf; + int vf; + + /* Update with requester's min/max lengths */ + pfvf = rvu_get_pfvf(rvu, pcifunc); + pfvf->maxlen = req->maxlen; + if (req->update_minlen) + pfvf->minlen = req->minlen; + + maxlen = req->maxlen; + minlen = req->update_minlen ? req->minlen : 0; + + /* Get this PF's numVFs and starting hwvf */ + rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf); + + /* For each VF, compare requested max/minlen */ + for (vf = 0; vf < numvfs; vf++) { + pfvf = &rvu->hwvf[hwvf + vf]; + if (pfvf->maxlen > maxlen) + maxlen = pfvf->maxlen; + if (req->update_minlen && + pfvf->minlen && pfvf->minlen < minlen) + minlen = pfvf->minlen; + } + + /* Compare requested max/minlen with PF's max/minlen */ + pfvf = &rvu->pf[pf]; + if (pfvf->maxlen > maxlen) + maxlen = pfvf->maxlen; + if (req->update_minlen && + pfvf->minlen && pfvf->minlen < minlen) + minlen = pfvf->minlen; + + /* Update the request with max/min PF's and it's VF's max/min */ + req->maxlen = maxlen; + if (req->update_minlen) + req->minlen = minlen; +} + +int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, + struct msg_rsp *rsp) +{ + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc = req->hdr.pcifunc; + int pf = rvu_get_pf(pcifunc); + int blkaddr, schq, link = -1; + struct nix_txsch *txsch; + u64 cfg, lmac_fifo_len; + struct nix_hw *nix_hw; + u8 cgx = 0, lmac = 0; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) + return -EINVAL; + + if (!req->sdp_link && req->maxlen > NIC_HW_MAX_FRS) + return NIX_AF_ERR_FRS_INVALID; + + if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS) + return NIX_AF_ERR_FRS_INVALID; + + /* Check if requester wants to update SMQ's */ + if (!req->update_smq) + goto rx_frscfg; + + /* Update min/maxlen in each of the SMQ attached to this PF/VF */ + txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; + mutex_lock(&rvu->rsrc_lock); + for (schq = 0; schq < txsch->schq.max; schq++) { + if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) + continue; + cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq)); + cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8); + if (req->update_minlen) + cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F); + rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg); + } + mutex_unlock(&rvu->rsrc_lock); + +rx_frscfg: + /* Check if config is for SDP link */ + if (req->sdp_link) { + if (!hw->sdp_links) + return NIX_AF_ERR_RX_LINK_INVALID; + link = hw->cgx_links + hw->lbk_links; + goto linkcfg; + } + + /* Check if the request 
is from CGX mapped RVU PF */ + if (is_pf_cgxmapped(rvu, pf)) { + /* Get CGX and LMAC to which this PF is mapped and find link */ + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac); + link = (cgx * hw->lmac_per_cgx) + lmac; + } else if (pf == 0) { + /* For VFs of PF0 ingress is LBK port, so config LBK link */ + link = hw->cgx_links; + } + + if (link < 0) + return NIX_AF_ERR_RX_LINK_INVALID; + + +linkcfg: + nix_find_link_frs(rvu, req, pcifunc); + + cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link)); + cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16); + if (req->update_minlen) + cfg = (cfg & ~0xFFFFULL) | req->minlen; + rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg); + + if (req->sdp_link || pf == 0) + return 0; + + /* Update transmit credits for CGX links */ + lmac_fifo_len = + CGX_FIFO_LEN / cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); + cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link)); + cfg &= ~(0xFFFFFULL << 12); + cfg |= ((lmac_fifo_len - req->maxlen) / 16) << 12; + rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg); + return 0; +} + +int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + struct npc_mcam_alloc_entry_req alloc_req = { }; + struct npc_mcam_alloc_entry_rsp alloc_rsp = { }; + struct npc_mcam_free_entry_req free_req = { }; + u16 pcifunc = req->hdr.pcifunc; + int blkaddr, nixlf, err; + struct rvu_pfvf *pfvf; + + /* LBK VFs do not have separate MCAM UCAST entry hence + * skip allocating rxvlan for them + */ + if (is_afvf(pcifunc)) + return 0; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + if (pfvf->rxvlan) + return 0; + + /* alloc new mcam entry */ + alloc_req.hdr.pcifunc = pcifunc; + alloc_req.count = 1; + + err = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req, + &alloc_rsp); + if (err) + return err; + + /* update entry to enable rxvlan offload */ + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (blkaddr < 0) { + err = NIX_AF_ERR_AF_LF_INVALID; + goto free_entry; + } + + nixlf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, 0); + if (nixlf < 0) { + err = NIX_AF_ERR_AF_LF_INVALID; + goto free_entry; + } + + pfvf->rxvlan_index = alloc_rsp.entry_list[0]; + /* all it means is that rxvlan_index is valid */ + pfvf->rxvlan = true; + + err = rvu_npc_update_rxvlan(rvu, pcifunc, nixlf); + if (err) + goto free_entry; + + return 0; +free_entry: + free_req.hdr.pcifunc = pcifunc; + free_req.entry = alloc_rsp.entry_list[0]; + rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, rsp); + pfvf->rxvlan = false; + return err; +} + +int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req, + struct msg_rsp *rsp) +{ + int nixlf, blkaddr, err; + u64 cfg; + + err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr); + if (err) + return err; + + cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf)); + /* Set the interface configuration */ + if (req->len_verify & BIT(0)) + cfg |= BIT_ULL(41); + else + cfg &= ~BIT_ULL(41); + + if (req->len_verify & BIT(1)) + cfg |= BIT_ULL(40); + else + cfg &= ~BIT_ULL(40); + + if (req->csum_verify & BIT(0)) + cfg |= BIT_ULL(37); + else + cfg &= ~BIT_ULL(37); + + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg); + + return 0; +} + +static void nix_link_config(struct rvu *rvu, int blkaddr) +{ + struct rvu_hwinfo *hw = rvu->hw; + int cgx, lmac_cnt, slink, link; + u64 tx_credits; + + /* Set default min/max packet lengths allowed on NIX Rx links. 
+ * + * With HW reset minlen value of 60byte, HW will treat ARP pkts + * as undersize and report them to SW as error pkts, hence + * setting it to 40 bytes. + */ + for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) { + rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), + NIC_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS); + } + + if (hw->sdp_links) { + link = hw->cgx_links + hw->lbk_links; + rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), + SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS); + } + + /* Set credits for Tx links assuming max packet length allowed. + * This will be reconfigured based on MTU set for PF/VF. + */ + for (cgx = 0; cgx < hw->cgx; cgx++) { + lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); + tx_credits = ((CGX_FIFO_LEN / lmac_cnt) - NIC_HW_MAX_FRS) / 16; + /* Enable credits and set credit pkt count to max allowed */ + tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); + slink = cgx * hw->lmac_per_cgx; + for (link = slink; link < (slink + lmac_cnt); link++) { + rvu_write64(rvu, blkaddr, + NIX_AF_TX_LINKX_NORM_CREDIT(link), + tx_credits); + } + } + + /* Set Tx credits for LBK link */ + slink = hw->cgx_links; + for (link = slink; link < (slink + hw->lbk_links); link++) { + tx_credits = 1000; /* 10 * max LBK datarate = 10 * 100Gbps */ + /* Enable credits and set credit pkt count to max allowed */ + tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); + rvu_write64(rvu, blkaddr, + NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits); + } +} + +static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr) +{ + int idx, err; + u64 status; + + /* Start X2P bus calibration */ + rvu_write64(rvu, blkaddr, NIX_AF_CFG, + rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9)); + /* Wait for calibration to complete */ + err = rvu_poll_reg(rvu, blkaddr, + NIX_AF_STATUS, BIT_ULL(10), false); + if (err) { + dev_err(rvu->dev, "NIX X2P bus calibration failed\n"); + return err; + } + + status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS); + /* Check if CGX devices are ready */ + for (idx = 0; idx < rvu->cgx_cnt_max; idx++) { + /* Skip when cgx port is not available */ + if (!rvu_cgx_pdata(idx, rvu) || + (status & (BIT_ULL(16 + idx)))) + continue; + dev_err(rvu->dev, + "CGX%d didn't respond to NIX X2P calibration\n", idx); + err = -EBUSY; + } + + /* Check if LBK is ready */ + if (!(status & BIT_ULL(19))) { + dev_err(rvu->dev, + "LBK didn't respond to NIX X2P calibration\n"); + err = -EBUSY; + } + + /* Clear 'calibrate_x2p' bit */ + rvu_write64(rvu, blkaddr, NIX_AF_CFG, + rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9)); + if (err || (status & 0x3FFULL)) + dev_err(rvu->dev, + "NIX X2P calibration failed, status 0x%llx\n", status); + if (err) + return err; + return 0; +} + +static int nix_aq_init(struct rvu *rvu, struct rvu_block *block) +{ + u64 cfg; + int err; + + /* Set admin queue endianness */ + cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG); +#ifdef __BIG_ENDIAN + cfg |= BIT_ULL(8); + rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg); +#else + cfg &= ~BIT_ULL(8); + rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg); +#endif + + /* Do not bypass NDC cache */ + cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG); + cfg &= ~0x3FFEULL; +#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING + /* Disable caching of SQB aka SQEs */ + cfg |= 0x04ULL; +#endif + rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg); + + /* Result structure can be followed by RQ/SQ/CQ context at + * RES + 128bytes and a write mask at RES + 256 bytes, depending on + * operation type. Alloc sufficient result memory for all operations. 
+ */ + err = rvu_aq_alloc(rvu, &block->aq, + Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s), + ALIGN(sizeof(struct nix_aq_res_s), 128) + 256); + if (err) + return err; + + rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE); + rvu_write64(rvu, block->addr, + NIX_AF_AQ_BASE, (u64)block->aq->inst->iova); + return 0; +} + +int rvu_nix_init(struct rvu *rvu) +{ + const struct npc_lt_def_cfg *ltdefs; + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + int blkaddr, err; + u64 cfg; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); + if (blkaddr < 0) + return 0; + block = &hw->block[blkaddr]; + + if (is_rvu_96xx_B0(rvu)) { + /* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt + * internal state when conditional clocks are turned off. + * Hence enable them. + */ + rvu_write64(rvu, blkaddr, NIX_AF_CFG, + rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL); + + /* Set chan/link to backpressure TL3 instead of TL2 */ + rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01); + + /* Disable SQ manager's sticky mode operation (set TM6 = 0) + * This sticky mode is known to cause SQ stalls when multiple + * SQs are mapped to same SMQ and transmitting pkts at a time. + */ + cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS); + cfg &= ~BIT_ULL(15); + rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg); + } + + ltdefs = rvu->kpu.lt_def; + /* Calibrate X2P bus to check if CGX/LBK links are fine */ + err = nix_calibrate_x2p(rvu, blkaddr); + if (err) + return err; + + /* Set num of links of each type */ + cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST); + hw->cgx = (cfg >> 12) & 0xF; + hw->lmac_per_cgx = (cfg >> 8) & 0xF; + hw->cgx_links = hw->cgx * hw->lmac_per_cgx; + hw->lbk_links = (cfg >> 24) & 0xF; + hw->sdp_links = 1; + + /* Initialize admin queue */ + err = nix_aq_init(rvu, block); + if (err) + return err; + + /* Restore CINT timer delay to HW reset values */ + rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL); + + if (blkaddr == BLKADDR_NIX0) { + hw->nix0 = devm_kzalloc(rvu->dev, + sizeof(struct nix_hw), GFP_KERNEL); + if (!hw->nix0) + return -ENOMEM; + + err = nix_setup_txschq(rvu, hw->nix0, blkaddr); + if (err) + return err; + + err = nix_af_mark_format_setup(rvu, hw->nix0, blkaddr); + if (err) + return err; + + err = nix_setup_mcast(rvu, hw->nix0, blkaddr); + if (err) + return err; + + /* Configure segmentation offload formats */ + nix_setup_lso(rvu, hw->nix0, blkaddr); + + /* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info. + * This helps HW protocol checker to identify headers + * and validate length and checksums. 
+ */ + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2, + (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) | + ltdefs->rx_ol2.ltype_mask); + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4, + (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) | + ltdefs->rx_oip4.ltype_mask); + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4, + (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) | + ltdefs->rx_iip4.ltype_mask); + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6, + (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) | + ltdefs->rx_oip6.ltype_mask); + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6, + (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) | + ltdefs->rx_iip6.ltype_mask); + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP, + (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) | + ltdefs->rx_otcp.ltype_mask); + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP, + (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) | + ltdefs->rx_itcp.ltype_mask); + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP, + (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) | + ltdefs->rx_oudp.ltype_mask); + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP, + (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) | + ltdefs->rx_iudp.ltype_mask); + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP, + (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) | + ltdefs->rx_osctp.ltype_mask); + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP, + (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) | + ltdefs->rx_isctp.ltype_mask); + + err = nix_rx_flowkey_alg_cfg(rvu, blkaddr); + if (err) + return err; + + /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */ + nix_link_config(rvu, blkaddr); + + /* Enable Channel backpressure */ + rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0)); + } + return 0; +} + +void rvu_nix_freemem(struct rvu *rvu) +{ + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + struct nix_txsch *txsch; + struct nix_mcast *mcast; + struct nix_hw *nix_hw; + int blkaddr, lvl; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); + if (blkaddr < 0) + return; + + block = &hw->block[blkaddr]; + rvu_aq_free(rvu, block->aq); + + if (blkaddr == BLKADDR_NIX0) { + nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) + return; + + for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { + txsch = &nix_hw->txsch[lvl]; + kfree(txsch->schq.bmap); + } + + mcast = &nix_hw->mcast; + qmem_free(rvu->dev, mcast->mce_ctx); + qmem_free(rvu->dev, mcast->mcast_buf); + mutex_destroy(&mcast->mce_lock); + } +} + +int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; + int nixlf, err; + + err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); + if (err) + return err; + + rvu_npc_enable_default_entries(rvu, pcifunc, nixlf); + + return rvu_cgx_start_stop_io(rvu, pcifunc, true); +} + +int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; + int nixlf, err; + + err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); + if (err) + return err; + + rvu_npc_disable_default_entries(rvu, pcifunc, nixlf); + + return rvu_cgx_start_stop_io(rvu, pcifunc, false); +} + +void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + struct hwctx_disable_req ctx_req; + int err; + + ctx_req.hdr.pcifunc = pcifunc; + + /* 
Cleanup NPC MCAM entries, free Tx scheduler queues being used */ + nix_interface_deinit(rvu, pcifunc, nixlf); + nix_rx_sync(rvu, blkaddr); + nix_txschq_free(rvu, pcifunc); + + rvu_cgx_start_stop_io(rvu, pcifunc, false); + + if (pfvf->sq_ctx) { + ctx_req.ctype = NIX_AQ_CTYPE_SQ; + err = nix_lf_hwctx_disable(rvu, &ctx_req); + if (err) + dev_err(rvu->dev, "SQ ctx disable failed\n"); + } + + if (pfvf->rq_ctx) { + ctx_req.ctype = NIX_AQ_CTYPE_RQ; + err = nix_lf_hwctx_disable(rvu, &ctx_req); + if (err) + dev_err(rvu->dev, "RQ ctx disable failed\n"); + } + + if (pfvf->cq_ctx) { + ctx_req.ctype = NIX_AQ_CTYPE_CQ; + err = nix_lf_hwctx_disable(rvu, &ctx_req); + if (err) + dev_err(rvu->dev, "CQ ctx disable failed\n"); + } + + nix_ctx_free(rvu, pfvf); +} + +#define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32) + +static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) +{ + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + int blkaddr; + int nixlf; + u64 cfg; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + block = &hw->block[blkaddr]; + nixlf = rvu_get_lf(rvu, block, pcifunc, 0); + if (nixlf < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf)); + + if (enable) + cfg |= NIX_AF_LFX_TX_CFG_PTP_EN; + else + cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN; + + rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg); + + return 0; +} + +int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true); +} + +int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false); +} + +int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu, + struct nix_lso_format_cfg *req, + struct nix_lso_format_cfg_rsp *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; + struct nix_hw *nix_hw; + struct rvu_pfvf *pfvf; + int blkaddr, idx, f; + u64 reg; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (!pfvf->nixlf || blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) + return -EINVAL; + + /* Find existing matching LSO format, if any */ + for (idx = 0; idx < nix_hw->lso.in_use; idx++) { + for (f = 0; f < NIX_LSO_FIELD_MAX; f++) { + reg = rvu_read64(rvu, blkaddr, + NIX_AF_LSO_FORMATX_FIELDX(idx, f)); + if (req->fields[f] != (reg & req->field_mask)) + break; + } + + if (f == NIX_LSO_FIELD_MAX) + break; + } + + if (idx < nix_hw->lso.in_use) { + /* Match found */ + rsp->lso_format_idx = idx; + return 0; + } + + if (nix_hw->lso.in_use == nix_hw->lso.total) + return NIX_AF_ERR_LSO_CFG_FAIL; + + rsp->lso_format_idx = nix_hw->lso.in_use++; + + for (f = 0; f < NIX_LSO_FIELD_MAX; f++) + rvu_write64(rvu, blkaddr, + NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f), + req->fields[f]); + + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c new file mode 100644 index 000000000..67471cb2b --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c @@ -0,0 +1,543 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2018 Marvell International Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/pci.h> + +#include "rvu_struct.h" +#include "rvu_reg.h" +#include "rvu.h" + +static int npa_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block, + struct npa_aq_inst_s *inst) +{ + struct admin_queue *aq = block->aq; + struct npa_aq_res_s *result; + int timeout = 1000; + u64 reg, head; + + result = (struct npa_aq_res_s *)aq->res->base; + + /* Get current head pointer where to append this instruction */ + reg = rvu_read64(rvu, block->addr, NPA_AF_AQ_STATUS); + head = (reg >> 4) & AQ_PTR_MASK; + + memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)), + (void *)inst, aq->inst->entry_sz); + memset(result, 0, sizeof(*result)); + /* sync into memory */ + wmb(); + + /* Ring the doorbell and wait for result */ + rvu_write64(rvu, block->addr, NPA_AF_AQ_DOOR, 1); + while (result->compcode == NPA_AQ_COMP_NOTDONE) { + cpu_relax(); + udelay(1); + timeout--; + if (!timeout) + return -EBUSY; + } + + if (result->compcode != NPA_AQ_COMP_GOOD) + /* TODO: Replace this with some error code */ + return -EBUSY; + + return 0; +} + +int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req, + struct npa_aq_enq_rsp *rsp) +{ + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc = req->hdr.pcifunc; + int blkaddr, npalf, rc = 0; + struct npa_aq_inst_s inst; + struct rvu_block *block; + struct admin_queue *aq; + struct rvu_pfvf *pfvf; + void *ctx, *mask; + bool ena; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + if (!pfvf->aura_ctx || req->aura_id >= pfvf->aura_ctx->qsize) + return NPA_AF_ERR_AQ_ENQUEUE; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc); + if (!pfvf->npalf || blkaddr < 0) + return NPA_AF_ERR_AF_LF_INVALID; + + block = &hw->block[blkaddr]; + aq = block->aq; + if (!aq) { + dev_warn(rvu->dev, "%s: NPA AQ not initialized\n", __func__); + return NPA_AF_ERR_AQ_ENQUEUE; + } + + npalf = rvu_get_lf(rvu, block, pcifunc, 0); + if (npalf < 0) + return NPA_AF_ERR_AF_LF_INVALID; + + memset(&inst, 0, sizeof(struct npa_aq_inst_s)); + inst.cindex = req->aura_id; + inst.lf = npalf; + inst.ctype = req->ctype; + inst.op = req->op; + /* Currently we are not supporting enqueuing multiple instructions, + * so always choose first entry in result memory. + */ + inst.res_addr = (u64)aq->res->iova; + + /* Hardware uses same aq->res->base for updating result of + * previous instruction hence wait here till it is done. 
+ */ + spin_lock(&aq->lock); + + /* Clean result + context memory */ + memset(aq->res->base, 0, aq->res->entry_sz); + /* Context needs to be written at RES_ADDR + 128 */ + ctx = aq->res->base + 128; + /* Mask needs to be written at RES_ADDR + 256 */ + mask = aq->res->base + 256; + + switch (req->op) { + case NPA_AQ_INSTOP_WRITE: + /* Copy context and write mask */ + if (req->ctype == NPA_AQ_CTYPE_AURA) { + memcpy(mask, &req->aura_mask, + sizeof(struct npa_aura_s)); + memcpy(ctx, &req->aura, sizeof(struct npa_aura_s)); + } else { + memcpy(mask, &req->pool_mask, + sizeof(struct npa_pool_s)); + memcpy(ctx, &req->pool, sizeof(struct npa_pool_s)); + } + break; + case NPA_AQ_INSTOP_INIT: + if (req->ctype == NPA_AQ_CTYPE_AURA) { + if (req->aura.pool_addr >= pfvf->pool_ctx->qsize) { + rc = NPA_AF_ERR_AQ_FULL; + break; + } + /* Set pool's context address */ + req->aura.pool_addr = pfvf->pool_ctx->iova + + (req->aura.pool_addr * pfvf->pool_ctx->entry_sz); + memcpy(ctx, &req->aura, sizeof(struct npa_aura_s)); + } else { /* POOL's context */ + memcpy(ctx, &req->pool, sizeof(struct npa_pool_s)); + } + break; + case NPA_AQ_INSTOP_NOP: + case NPA_AQ_INSTOP_READ: + case NPA_AQ_INSTOP_LOCK: + case NPA_AQ_INSTOP_UNLOCK: + break; + default: + rc = NPA_AF_ERR_AQ_FULL; + break; + } + + if (rc) { + spin_unlock(&aq->lock); + return rc; + } + + /* Submit the instruction to AQ */ + rc = npa_aq_enqueue_wait(rvu, block, &inst); + if (rc) { + spin_unlock(&aq->lock); + return rc; + } + + /* Set aura bitmap if aura hw context is enabled */ + if (req->ctype == NPA_AQ_CTYPE_AURA) { + if (req->op == NPA_AQ_INSTOP_INIT && req->aura.ena) + __set_bit(req->aura_id, pfvf->aura_bmap); + if (req->op == NPA_AQ_INSTOP_WRITE) { + ena = (req->aura.ena & req->aura_mask.ena) | + (test_bit(req->aura_id, pfvf->aura_bmap) & + ~req->aura_mask.ena); + if (ena) + __set_bit(req->aura_id, pfvf->aura_bmap); + else + __clear_bit(req->aura_id, pfvf->aura_bmap); + } + } + + /* Set pool bitmap if pool hw context is enabled */ + if (req->ctype == NPA_AQ_CTYPE_POOL) { + if (req->op == NPA_AQ_INSTOP_INIT && req->pool.ena) + __set_bit(req->aura_id, pfvf->pool_bmap); + if (req->op == NPA_AQ_INSTOP_WRITE) { + ena = (req->pool.ena & req->pool_mask.ena) | + (test_bit(req->aura_id, pfvf->pool_bmap) & + ~req->pool_mask.ena); + if (ena) + __set_bit(req->aura_id, pfvf->pool_bmap); + else + __clear_bit(req->aura_id, pfvf->pool_bmap); + } + } + spin_unlock(&aq->lock); + + if (rsp) { + /* Copy read context into mailbox */ + if (req->op == NPA_AQ_INSTOP_READ) { + if (req->ctype == NPA_AQ_CTYPE_AURA) + memcpy(&rsp->aura, ctx, + sizeof(struct npa_aura_s)); + else + memcpy(&rsp->pool, ctx, + sizeof(struct npa_pool_s)); + } + } + + return 0; +} + +static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); + struct npa_aq_enq_req aq_req; + unsigned long *bmap; + int id, cnt = 0; + int err = 0, rc; + + if (!pfvf->pool_ctx || !pfvf->aura_ctx) + return NPA_AF_ERR_AQ_ENQUEUE; + + memset(&aq_req, 0, sizeof(struct npa_aq_enq_req)); + aq_req.hdr.pcifunc = req->hdr.pcifunc; + + if (req->ctype == NPA_AQ_CTYPE_POOL) { + aq_req.pool.ena = 0; + aq_req.pool_mask.ena = 1; + cnt = pfvf->pool_ctx->qsize; + bmap = pfvf->pool_bmap; + } else if (req->ctype == NPA_AQ_CTYPE_AURA) { + aq_req.aura.ena = 0; + aq_req.aura_mask.ena = 1; + aq_req.aura.bp_ena = 0; + aq_req.aura_mask.bp_ena = 1; + cnt = pfvf->aura_ctx->qsize; + bmap = pfvf->aura_bmap; + } + + aq_req.ctype = req->ctype; + aq_req.op = 
NPA_AQ_INSTOP_WRITE; + + for (id = 0; id < cnt; id++) { + if (!test_bit(id, bmap)) + continue; + aq_req.aura_id = id; + rc = rvu_npa_aq_enq_inst(rvu, &aq_req, NULL); + if (rc) { + err = rc; + dev_err(rvu->dev, "Failed to disable %s:%d context\n", + (req->ctype == NPA_AQ_CTYPE_AURA) ? + "Aura" : "Pool", id); + } + } + + return err; +} + +#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING +static int npa_lf_hwctx_lockdown(struct rvu *rvu, struct npa_aq_enq_req *req) +{ + struct npa_aq_enq_req lock_ctx_req; + int err; + + if (req->op != NPA_AQ_INSTOP_INIT) + return 0; + + memset(&lock_ctx_req, 0, sizeof(struct npa_aq_enq_req)); + lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc; + lock_ctx_req.ctype = req->ctype; + lock_ctx_req.op = NPA_AQ_INSTOP_LOCK; + lock_ctx_req.aura_id = req->aura_id; + err = rvu_npa_aq_enq_inst(rvu, &lock_ctx_req, NULL); + if (err) + dev_err(rvu->dev, + "PFUNC 0x%x: Failed to lock NPA context %s:%d\n", + req->hdr.pcifunc, + (req->ctype == NPA_AQ_CTYPE_AURA) ? + "Aura" : "Pool", req->aura_id); + return err; +} + +int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu, + struct npa_aq_enq_req *req, + struct npa_aq_enq_rsp *rsp) +{ + int err; + + err = rvu_npa_aq_enq_inst(rvu, req, rsp); + if (!err) + err = npa_lf_hwctx_lockdown(rvu, req); + return err; +} +#else + +int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu, + struct npa_aq_enq_req *req, + struct npa_aq_enq_rsp *rsp) +{ + return rvu_npa_aq_enq_inst(rvu, req, rsp); +} +#endif + +int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu, + struct hwctx_disable_req *req, + struct msg_rsp *rsp) +{ + return npa_lf_hwctx_disable(rvu, req); +} + +static void npa_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf) +{ + kfree(pfvf->aura_bmap); + pfvf->aura_bmap = NULL; + + qmem_free(rvu->dev, pfvf->aura_ctx); + pfvf->aura_ctx = NULL; + + kfree(pfvf->pool_bmap); + pfvf->pool_bmap = NULL; + + qmem_free(rvu->dev, pfvf->pool_ctx); + pfvf->pool_ctx = NULL; + + qmem_free(rvu->dev, pfvf->npa_qints_ctx); + pfvf->npa_qints_ctx = NULL; +} + +int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu, + struct npa_lf_alloc_req *req, + struct npa_lf_alloc_rsp *rsp) +{ + int npalf, qints, hwctx_size, err, rc = 0; + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc = req->hdr.pcifunc; + struct rvu_block *block; + struct rvu_pfvf *pfvf; + u64 cfg, ctx_cfg; + int blkaddr; + + if (req->aura_sz > NPA_AURA_SZ_MAX || + req->aura_sz == NPA_AURA_SZ_0 || !req->nr_pools) + return NPA_AF_ERR_PARAM; + + if (req->way_mask) + req->way_mask &= 0xFFFF; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc); + if (!pfvf->npalf || blkaddr < 0) + return NPA_AF_ERR_AF_LF_INVALID; + + block = &hw->block[blkaddr]; + npalf = rvu_get_lf(rvu, block, pcifunc, 0); + if (npalf < 0) + return NPA_AF_ERR_AF_LF_INVALID; + + /* Reset this NPA LF */ + err = rvu_lf_reset(rvu, block, npalf); + if (err) { + dev_err(rvu->dev, "Failed to reset NPALF%d\n", npalf); + return NPA_AF_ERR_LF_RESET; + } + + ctx_cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST1); + + /* Alloc memory for aura HW contexts */ + hwctx_size = 1UL << (ctx_cfg & 0xF); + err = qmem_alloc(rvu->dev, &pfvf->aura_ctx, + NPA_AURA_COUNT(req->aura_sz), hwctx_size); + if (err) + goto free_mem; + + pfvf->aura_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long), + GFP_KERNEL); + if (!pfvf->aura_bmap) + goto free_mem; + + /* Alloc memory for pool HW contexts */ + hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF); + err = qmem_alloc(rvu->dev, &pfvf->pool_ctx, req->nr_pools, hwctx_size); + if (err) + goto free_mem; + + 
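+	/* Bitmap tracking which pool contexts get enabled via AQ
+	 * INIT/WRITE ops; walked later to bulk-disable them on LF teardown.
+	 */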
pfvf->pool_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long), + GFP_KERNEL); + if (!pfvf->pool_bmap) + goto free_mem; + + /* Get no of queue interrupts supported */ + cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST); + qints = (cfg >> 28) & 0xFFF; + + /* Alloc memory for Qints HW contexts */ + hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF); + err = qmem_alloc(rvu->dev, &pfvf->npa_qints_ctx, qints, hwctx_size); + if (err) + goto free_mem; + + cfg = rvu_read64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf)); + /* Clear way partition mask and set aura offset to '0' */ + cfg &= ~(BIT_ULL(34) - 1); + /* Set aura size & enable caching of contexts */ + cfg |= (req->aura_sz << 16) | BIT_ULL(34) | req->way_mask; + + rvu_write64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf), cfg); + + /* Configure aura HW context's base */ + rvu_write64(rvu, blkaddr, NPA_AF_LFX_LOC_AURAS_BASE(npalf), + (u64)pfvf->aura_ctx->iova); + + /* Enable caching of qints hw context */ + rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_CFG(npalf), + BIT_ULL(36) | req->way_mask << 20); + rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_BASE(npalf), + (u64)pfvf->npa_qints_ctx->iova); + + goto exit; + +free_mem: + npa_ctx_free(rvu, pfvf); + rc = -ENOMEM; + +exit: + /* set stack page info */ + cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST); + rsp->stack_pg_ptrs = (cfg >> 8) & 0xFF; + rsp->stack_pg_bytes = cfg & 0xFF; + rsp->qints = (cfg >> 28) & 0xFFF; + return rc; +} + +int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc = req->hdr.pcifunc; + struct rvu_block *block; + struct rvu_pfvf *pfvf; + int npalf, err; + int blkaddr; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc); + if (!pfvf->npalf || blkaddr < 0) + return NPA_AF_ERR_AF_LF_INVALID; + + block = &hw->block[blkaddr]; + npalf = rvu_get_lf(rvu, block, pcifunc, 0); + if (npalf < 0) + return NPA_AF_ERR_AF_LF_INVALID; + + /* Reset this NPA LF */ + err = rvu_lf_reset(rvu, block, npalf); + if (err) { + dev_err(rvu->dev, "Failed to reset NPALF%d\n", npalf); + return NPA_AF_ERR_LF_RESET; + } + + npa_ctx_free(rvu, pfvf); + + return 0; +} + +static int npa_aq_init(struct rvu *rvu, struct rvu_block *block) +{ + u64 cfg; + int err; + + /* Set admin queue endianness */ + cfg = rvu_read64(rvu, block->addr, NPA_AF_GEN_CFG); +#ifdef __BIG_ENDIAN + cfg |= BIT_ULL(1); + rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg); +#else + cfg &= ~BIT_ULL(1); + rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg); +#endif + + /* Do not bypass NDC cache */ + cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG); + cfg &= ~0x03DULL; +#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING + /* Disable caching of stack pages */ + cfg |= 0x10ULL; +#endif + rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg); + + /* Result structure can be followed by Aura/Pool context at + * RES + 128bytes and a write mask at RES + 256 bytes, depending on + * operation type. Alloc sufficient result memory for all operations. 
+ */ + err = rvu_aq_alloc(rvu, &block->aq, + Q_COUNT(AQ_SIZE), sizeof(struct npa_aq_inst_s), + ALIGN(sizeof(struct npa_aq_res_s), 128) + 256); + if (err) + return err; + + rvu_write64(rvu, block->addr, NPA_AF_AQ_CFG, AQ_SIZE); + rvu_write64(rvu, block->addr, + NPA_AF_AQ_BASE, (u64)block->aq->inst->iova); + return 0; +} + +int rvu_npa_init(struct rvu *rvu) +{ + struct rvu_hwinfo *hw = rvu->hw; + int blkaddr, err; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0); + if (blkaddr < 0) + return 0; + + /* Initialize admin queue */ + err = npa_aq_init(rvu, &hw->block[blkaddr]); + if (err) + return err; + + return 0; +} + +void rvu_npa_freemem(struct rvu *rvu) +{ + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0); + if (blkaddr < 0) + return; + + block = &hw->block[blkaddr]; + rvu_aq_free(rvu, block->aq); +} + +void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + struct hwctx_disable_req ctx_req; + + /* Disable all pools */ + ctx_req.hdr.pcifunc = pcifunc; + ctx_req.ctype = NPA_AQ_CTYPE_POOL; + npa_lf_hwctx_disable(rvu, &ctx_req); + + /* Disable all auras */ + ctx_req.ctype = NPA_AQ_CTYPE_AURA; + npa_lf_hwctx_disable(rvu, &ctx_req); + + npa_ctx_free(rvu, pfvf); +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c new file mode 100644 index 000000000..6fa9358e6 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -0,0 +1,2282 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/bitfield.h> +#include <linux/module.h> +#include <linux/pci.h> + +#include "rvu_struct.h" +#include "rvu_reg.h" +#include "rvu.h" +#include "npc.h" +#include "cgx.h" +#include "npc_profile.h" + +#define RSVD_MCAM_ENTRIES_PER_PF 2 /* Bcast & Promisc */ +#define RSVD_MCAM_ENTRIES_PER_NIXLF 1 /* Ucast for LFs */ + +#define NIXLF_UCAST_ENTRY 0 +#define NIXLF_BCAST_ENTRY 1 +#define NIXLF_PROMISC_ENTRY 2 + +#define NPC_PARSE_RESULT_DMAC_OFFSET 8 +#define NPC_HW_TSTAMP_OFFSET 8ULL + +static const char def_pfl_name[] = "default"; + +static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam, + int blkaddr, u16 pcifunc); +static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam, + u16 pcifunc); + +void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf) +{ + int blkaddr; + u64 val = 0; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return; + + /* Config CPI base for the PKIND */ + val = pkind | 1ULL << 62; + rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_CPI_DEFX(pkind, 0), val); +} + +int rvu_npc_get_pkind(struct rvu *rvu, u16 pf) +{ + struct npc_pkind *pkind = &rvu->hw->pkind; + u32 map; + int i; + + for (i = 0; i < pkind->rsrc.max; i++) { + map = pkind->pfchan_map[i]; + if (((map >> 16) & 0x3F) == pf) + return i; + } + return -1; +} + +#define NPC_AF_ACTION0_PTR_ADVANCE GENMASK_ULL(27, 20) + +int npc_config_ts_kpuaction(struct rvu *rvu, int pf, u16 pcifunc, bool enable) +{ + int pkind, blkaddr; + u64 val; + + pkind = rvu_npc_get_pkind(rvu, pf); + if (pkind < 0) { + dev_err(rvu->dev, "%s: pkind not mapped\n", __func__); + return -EINVAL; + } + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, pcifunc); + if (blkaddr < 0) { + dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__); + return -EINVAL; + } + + val = rvu_read64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind)); + val &= ~NPC_AF_ACTION0_PTR_ADVANCE; + /* If timestamp is enabled then configure NPC to shift 8 bytes */ + if (enable) + val |= FIELD_PREP(NPC_AF_ACTION0_PTR_ADVANCE, + NPC_HW_TSTAMP_OFFSET); + rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind), val); + + return 0; +} + +static int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, + u16 pcifunc, int nixlf, int type) +{ + int pf = rvu_get_pf(pcifunc); + int index; + + /* Check if this is for a PF */ + if (pf && !(pcifunc & RVU_PFVF_FUNC_MASK)) { + /* Reserved entries exclude PF0 */ + pf--; + index = mcam->pf_offset + (pf * RSVD_MCAM_ENTRIES_PER_PF); + /* Broadcast address matching entry should be first so + * that the packet can be replicated to all VFs. + */ + if (type == NIXLF_BCAST_ENTRY) + return index; + else if (type == NIXLF_PROMISC_ENTRY) + return index + 1; + } + + return (mcam->nixlf_offset + (nixlf * RSVD_MCAM_ENTRIES_PER_NIXLF)); +} + +static int npc_get_bank(struct npc_mcam *mcam, int index) +{ + int bank = index / mcam->banksize; + + /* 0,1 & 2,3 banks are combined for this keysize */ + if (mcam->keysize == NPC_MCAM_KEY_X2) + return bank ? 
2 : 0; + + return bank; +} + +static bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam, + int blkaddr, int index) +{ + int bank = npc_get_bank(mcam, index); + u64 cfg; + + index &= (mcam->banksize - 1); + cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(index, bank)); + return (cfg & 1); +} + +static void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, + int blkaddr, int index, bool enable) +{ + int bank = npc_get_bank(mcam, index); + int actbank = bank; + + index &= (mcam->banksize - 1); + for (; bank < (actbank + mcam->banks_per_entry); bank++) { + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_CFG(index, bank), + enable ? 1 : 0); + } +} + +static void npc_clear_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, + int blkaddr, int index) +{ + int bank = npc_get_bank(mcam, index); + int actbank = bank; + + index &= (mcam->banksize - 1); + for (; bank < (actbank + mcam->banks_per_entry); bank++) { + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1), 0); + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0), 0); + + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 1), 0); + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 0), 0); + + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 1), 0); + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), 0); + } +} + +static void npc_get_keyword(struct mcam_entry *entry, int idx, + u64 *cam0, u64 *cam1) +{ + u64 kw_mask = 0x00; + +#define CAM_MASK(n) (BIT_ULL(n) - 1) + + /* 0, 2, 4, 6 indices refer to BANKX_CAMX_W0 and + * 1, 3, 5, 7 indices refer to BANKX_CAMX_W1. + * + * Also, only 48 bits of BANKX_CAMX_W1 are valid. + */ + switch (idx) { + case 0: + /* BANK(X)_CAM_W0<63:0> = MCAM_KEY[KW0]<63:0> */ + *cam1 = entry->kw[0]; + kw_mask = entry->kw_mask[0]; + break; + case 1: + /* BANK(X)_CAM_W1<47:0> = MCAM_KEY[KW1]<47:0> */ + *cam1 = entry->kw[1] & CAM_MASK(48); + kw_mask = entry->kw_mask[1] & CAM_MASK(48); + break; + case 2: + /* BANK(X + 1)_CAM_W0<15:0> = MCAM_KEY[KW1]<63:48> + * BANK(X + 1)_CAM_W0<63:16> = MCAM_KEY[KW2]<47:0> + */ + *cam1 = (entry->kw[1] >> 48) & CAM_MASK(16); + *cam1 |= ((entry->kw[2] & CAM_MASK(48)) << 16); + kw_mask = (entry->kw_mask[1] >> 48) & CAM_MASK(16); + kw_mask |= ((entry->kw_mask[2] & CAM_MASK(48)) << 16); + break; + case 3: + /* BANK(X + 1)_CAM_W1<15:0> = MCAM_KEY[KW2]<63:48> + * BANK(X + 1)_CAM_W1<47:16> = MCAM_KEY[KW3]<31:0> + */ + *cam1 = (entry->kw[2] >> 48) & CAM_MASK(16); + *cam1 |= ((entry->kw[3] & CAM_MASK(32)) << 16); + kw_mask = (entry->kw_mask[2] >> 48) & CAM_MASK(16); + kw_mask |= ((entry->kw_mask[3] & CAM_MASK(32)) << 16); + break; + case 4: + /* BANK(X + 2)_CAM_W0<31:0> = MCAM_KEY[KW3]<63:32> + * BANK(X + 2)_CAM_W0<63:32> = MCAM_KEY[KW4]<31:0> + */ + *cam1 = (entry->kw[3] >> 32) & CAM_MASK(32); + *cam1 |= ((entry->kw[4] & CAM_MASK(32)) << 32); + kw_mask = (entry->kw_mask[3] >> 32) & CAM_MASK(32); + kw_mask |= ((entry->kw_mask[4] & CAM_MASK(32)) << 32); + break; + case 5: + /* BANK(X + 2)_CAM_W1<31:0> = MCAM_KEY[KW4]<63:32> + * BANK(X + 2)_CAM_W1<47:32> = MCAM_KEY[KW5]<15:0> + */ + *cam1 = (entry->kw[4] >> 32) & CAM_MASK(32); + *cam1 |= ((entry->kw[5] & CAM_MASK(16)) << 32); + kw_mask = (entry->kw_mask[4] >> 32) & CAM_MASK(32); + kw_mask |= ((entry->kw_mask[5] & CAM_MASK(16)) << 32); + break; + case 6: + /* BANK(X + 3)_CAM_W0<47:0> = MCAM_KEY[KW5]<63:16> + * BANK(X + 3)_CAM_W0<63:48> = MCAM_KEY[KW6]<15:0> + */ + *cam1 = (entry->kw[5] >> 16) & 
CAM_MASK(48); + *cam1 |= ((entry->kw[6] & CAM_MASK(16)) << 48); + kw_mask = (entry->kw_mask[5] >> 16) & CAM_MASK(48); + kw_mask |= ((entry->kw_mask[6] & CAM_MASK(16)) << 48); + break; + case 7: + /* BANK(X + 3)_CAM_W1<47:0> = MCAM_KEY[KW6]<63:16> */ + *cam1 = (entry->kw[6] >> 16) & CAM_MASK(48); + kw_mask = (entry->kw_mask[6] >> 16) & CAM_MASK(48); + break; + } + + *cam1 &= kw_mask; + *cam0 = ~*cam1 & kw_mask; +} + +static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, + int blkaddr, int index, u8 intf, + struct mcam_entry *entry, bool enable) +{ + int bank = npc_get_bank(mcam, index); + int kw = 0, actbank, actindex; + u64 cam0, cam1; + + actbank = bank; /* Save bank id, to set action later on */ + actindex = index; + index &= (mcam->banksize - 1); + + /* Disable before mcam entry update */ + npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, false); + + /* Clear mcam entry to avoid writes being suppressed by NPC */ + npc_clear_mcam_entry(rvu, mcam, blkaddr, actindex); + + /* CAM1 takes the comparison value and + * CAM0 specifies match for a bit in key being '0' or '1' or 'dontcare'. + * CAM1<n> = 0 & CAM0<n> = 1 => match if key<n> = 0 + * CAM1<n> = 1 & CAM0<n> = 0 => match if key<n> = 1 + * CAM1<n> = 0 & CAM0<n> = 0 => always match i.e dontcare. + */ + for (; bank < (actbank + mcam->banks_per_entry); bank++, kw = kw + 2) { + /* Interface should be set in all banks */ + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1), + intf); + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0), + ~intf & 0x3); + + /* Set the match key */ + npc_get_keyword(entry, kw, &cam0, &cam1); + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 1), cam1); + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 0), cam0); + + npc_get_keyword(entry, kw + 1, &cam0, &cam1); + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 1), cam1); + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), cam0); + } + + /* Set 'action' */ + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_ACTION(index, actbank), entry->action); + + /* Set TAG 'action' */ + rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_TAG_ACT(index, actbank), + entry->vtag_action); + + /* Enable the entry */ + if (enable) + npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, true); +} + +static void npc_copy_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, + int blkaddr, u16 src, u16 dest) +{ + int dbank = npc_get_bank(mcam, dest); + int sbank = npc_get_bank(mcam, src); + u64 cfg, sreg, dreg; + int bank, i; + + src &= (mcam->banksize - 1); + dest &= (mcam->banksize - 1); + + /* Copy INTF's, W0's, W1's CAM0 and CAM1 configuration */ + for (bank = 0; bank < mcam->banks_per_entry; bank++) { + sreg = NPC_AF_MCAMEX_BANKX_CAMX_INTF(src, sbank + bank, 0); + dreg = NPC_AF_MCAMEX_BANKX_CAMX_INTF(dest, dbank + bank, 0); + for (i = 0; i < 6; i++) { + cfg = rvu_read64(rvu, blkaddr, sreg + (i * 8)); + rvu_write64(rvu, blkaddr, dreg + (i * 8), cfg); + } + } + + /* Copy action */ + cfg = rvu_read64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_ACTION(src, sbank)); + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_ACTION(dest, dbank), cfg); + + /* Copy TAG action */ + cfg = rvu_read64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_TAG_ACT(src, sbank)); + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_TAG_ACT(dest, dbank), cfg); + + /* Enable or disable */ + cfg = rvu_read64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_CFG(src, sbank)); + rvu_write64(rvu, blkaddr, + 
NPC_AF_MCAMEX_BANKX_CFG(dest, dbank), cfg); +} + +static u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam, + int blkaddr, int index) +{ + int bank = npc_get_bank(mcam, index); + + index &= (mcam->banksize - 1); + return rvu_read64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_ACTION(index, bank)); +} + +void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc, + int nixlf, u64 chan, u8 *mac_addr) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + struct npc_mcam *mcam = &rvu->hw->mcam; + struct mcam_entry entry = { {0} }; + struct nix_rx_action action; + int blkaddr, index, kwi; + u64 mac = 0; + + /* AF's VFs work in promiscuous mode */ + if (is_afvf(pcifunc)) + return; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return; + + for (index = ETH_ALEN - 1; index >= 0; index--) + mac |= ((u64)*mac_addr++) << (8 * index); + + index = npc_get_nixlf_mcam_index(mcam, pcifunc, + nixlf, NIXLF_UCAST_ENTRY); + + /* Match ingress channel and DMAC */ + entry.kw[0] = chan; + entry.kw_mask[0] = 0xFFFULL; + + kwi = NPC_PARSE_RESULT_DMAC_OFFSET / sizeof(u64); + entry.kw[kwi] = mac; + entry.kw_mask[kwi] = BIT_ULL(48) - 1; + + /* Don't change the action if entry is already enabled + * Otherwise RSS action may get overwritten. + */ + if (is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) { + *(u64 *)&action = npc_get_mcam_action(rvu, mcam, + blkaddr, index); + } else { + *(u64 *)&action = 0x00; + action.op = NIX_RX_ACTIONOP_UCAST; + action.pf_func = pcifunc; + } + + entry.action = *(u64 *)&action; + npc_config_mcam_entry(rvu, mcam, blkaddr, index, + NIX_INTF_RX, &entry, true); + + /* add VLAN matching, setup action and save entry back for later */ + entry.kw[0] |= (NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG) << 20; + entry.kw_mask[0] |= (NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG) << 20; + + entry.vtag_action = VTAG0_VALID_BIT | + FIELD_PREP(VTAG0_TYPE_MASK, 0) | + FIELD_PREP(VTAG0_LID_MASK, NPC_LID_LA) | + FIELD_PREP(VTAG0_RELPTR_MASK, 12); + + memcpy(&pfvf->entry, &entry, sizeof(entry)); +} + +void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, + int nixlf, u64 chan, bool allmulti) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + int blkaddr, ucast_idx, index, kwi; + struct mcam_entry entry = { {0} }; + struct nix_rx_action action = { }; + + /* Only PF or AF VF can add a promiscuous entry */ + if ((pcifunc & RVU_PFVF_FUNC_MASK) && !is_afvf(pcifunc)) + return; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return; + + index = npc_get_nixlf_mcam_index(mcam, pcifunc, + nixlf, NIXLF_PROMISC_ENTRY); + + entry.kw[0] = chan; + entry.kw_mask[0] = 0xFFFULL; + + if (allmulti) { + kwi = NPC_KEXOF_DMAC / sizeof(u64); + entry.kw[kwi] = BIT_ULL(40); /* LSB bit of 1st byte in DMAC */ + entry.kw_mask[kwi] = BIT_ULL(40); + } + + ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc, + nixlf, NIXLF_UCAST_ENTRY); + + /* If the corresponding PF's ucast action is RSS, + * use the same action for promisc also + */ + if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx)) + *(u64 *)&action = npc_get_mcam_action(rvu, mcam, + blkaddr, ucast_idx); + + if (action.op != NIX_RX_ACTIONOP_RSS) { + *(u64 *)&action = 0x00; + action.op = NIX_RX_ACTIONOP_UCAST; + action.pf_func = pcifunc; + } + + entry.action = *(u64 *)&action; + npc_config_mcam_entry(rvu, mcam, blkaddr, index, + NIX_INTF_RX, &entry, true); +} + +static void npc_enadis_promisc_entry(struct rvu *rvu, u16 pcifunc, + int nixlf, bool enable) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + int blkaddr, index; + + 
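+	/* Toggle the reserved promisc MCAM entry of this PF; VFs do not
+	 * own a promisc entry and are filtered out below.
+	 */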
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return; + + /* Only PF's have a promiscuous entry */ + if (pcifunc & RVU_PFVF_FUNC_MASK) + return; + + index = npc_get_nixlf_mcam_index(mcam, pcifunc, + nixlf, NIXLF_PROMISC_ENTRY); + npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable); +} + +void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf) +{ + npc_enadis_promisc_entry(rvu, pcifunc, nixlf, false); +} + +void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf) +{ + npc_enadis_promisc_entry(rvu, pcifunc, nixlf, true); +} + +void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc, + int nixlf, u64 chan) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + struct mcam_entry entry = { {0} }; + struct rvu_hwinfo *hw = rvu->hw; + struct nix_rx_action action; + struct rvu_pfvf *pfvf; + int blkaddr, index; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return; + + /* Skip LBK VFs */ + if (is_afvf(pcifunc)) + return; + + /* If pkt replication is not supported, + * then only PF is allowed to add a bcast match entry. + */ + if (!hw->cap.nix_rx_multicast && pcifunc & RVU_PFVF_FUNC_MASK) + return; + + /* Get 'pcifunc' of PF device */ + pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK; + index = npc_get_nixlf_mcam_index(mcam, pcifunc, + nixlf, NIXLF_BCAST_ENTRY); + + /* Match ingress channel */ + entry.kw[0] = chan; + entry.kw_mask[0] = 0xfffull; + + /* Match broadcast MAC address. + * DMAC is extracted at 0th bit of PARSE_KEX::KW1 + */ + entry.kw[1] = 0xffffffffffffull; + entry.kw_mask[1] = 0xffffffffffffull; + + *(u64 *)&action = 0x00; + if (!hw->cap.nix_rx_multicast) { + /* Early silicon doesn't support pkt replication, + * so install entry with UCAST action, so that PF + * receives all broadcast packets. 
+ */ + action.op = NIX_RX_ACTIONOP_UCAST; + action.pf_func = pcifunc; + } else { + pfvf = rvu_get_pfvf(rvu, pcifunc); + action.index = pfvf->bcast_mce_idx; + action.op = NIX_RX_ACTIONOP_MCAST; + } + + entry.action = *(u64 *)&action; + npc_config_mcam_entry(rvu, mcam, blkaddr, index, + NIX_INTF_RX, &entry, true); +} + +void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, bool enable) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + int blkaddr, index; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return; + + /* Get 'pcifunc' of PF device */ + pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK; + + index = npc_get_nixlf_mcam_index(mcam, pcifunc, 0, NIXLF_BCAST_ENTRY); + npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable); +} + +void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf, + int group, int alg_idx, int mcam_index) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + struct nix_rx_action action; + int blkaddr, index, bank; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return; + + /* Check if this is for reserved default entry */ + if (mcam_index < 0) { + if (group != DEFAULT_RSS_CONTEXT_GROUP) + return; + index = npc_get_nixlf_mcam_index(mcam, pcifunc, + nixlf, NIXLF_UCAST_ENTRY); + } else { + /* TODO: validate this mcam index */ + index = mcam_index; + } + + if (index >= mcam->total_entries) + return; + + bank = npc_get_bank(mcam, index); + index &= (mcam->banksize - 1); + + *(u64 *)&action = rvu_read64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_ACTION(index, bank)); + /* Ignore if no action was set earlier */ + if (!*(u64 *)&action) + return; + + action.op = NIX_RX_ACTIONOP_RSS; + action.pf_func = pcifunc; + action.index = group; + action.flow_key_alg = alg_idx; + + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_ACTION(index, bank), *(u64 *)&action); + + index = npc_get_nixlf_mcam_index(mcam, pcifunc, + nixlf, NIXLF_PROMISC_ENTRY); + + /* If PF's promiscuous entry is enabled, + * Set RSS action for that entry as well + */ + if (is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) { + bank = npc_get_bank(mcam, index); + index &= (mcam->banksize - 1); + + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_ACTION(index, bank), + *(u64 *)&action); + } + + rvu_npc_update_rxvlan(rvu, pcifunc, nixlf); +} + +static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc, + int nixlf, bool enable) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + struct nix_rx_action action; + int index, bank, blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return; + + /* Ucast MCAM match entry of this PF/VF */ + index = npc_get_nixlf_mcam_index(mcam, pcifunc, + nixlf, NIXLF_UCAST_ENTRY); + npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable); + + /* For PF, ena/dis promisc and bcast MCAM match entries. + * For VFs add/delete from bcast list when RX multicast + * feature is present. + */ + if (pcifunc & RVU_PFVF_FUNC_MASK && !rvu->hw->cap.nix_rx_multicast) + return; + + /* For bcast, enable/disable only if it's action is not + * packet replication, incase if action is replication + * then this PF/VF's nixlf is removed from bcast replication + * list. 
+ */ + index = npc_get_nixlf_mcam_index(mcam, pcifunc & ~RVU_PFVF_FUNC_MASK, + nixlf, NIXLF_BCAST_ENTRY); + bank = npc_get_bank(mcam, index); + *(u64 *)&action = rvu_read64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_ACTION(index & (mcam->banksize - 1), bank)); + + /* VFs will not have BCAST entry */ + if (action.op != NIX_RX_ACTIONOP_MCAST && + !(pcifunc & RVU_PFVF_FUNC_MASK)) { + npc_enable_mcam_entry(rvu, mcam, + blkaddr, index, enable); + } else { + nix_update_bcast_mce_list(rvu, pcifunc, enable); + /* Enable PF's BCAST entry for packet replication */ + rvu_npc_enable_bcast_entry(rvu, pcifunc, enable); + } + + if (enable) + rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf); + else + rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf); + + rvu_npc_update_rxvlan(rvu, pcifunc, nixlf); +} + +void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf) +{ + npc_enadis_default_entries(rvu, pcifunc, nixlf, false); +} + +void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf) +{ + npc_enadis_default_entries(rvu, pcifunc, nixlf, true); +} + +void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return; + + mutex_lock(&mcam->lock); + + /* Disable and free all MCAM entries mapped to this 'pcifunc' */ + npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc); + + /* Free all MCAM counters mapped to this 'pcifunc' */ + npc_mcam_free_all_counters(rvu, mcam, pcifunc); + + mutex_unlock(&mcam->lock); + + rvu_npc_disable_default_entries(rvu, pcifunc, nixlf); +} + +#define SET_KEX_LD(intf, lid, ltype, ld, cfg) \ + rvu_write64(rvu, blkaddr, \ + NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, ltype, ld), cfg) + +#define SET_KEX_LDFLAGS(intf, ld, flags, cfg) \ + rvu_write64(rvu, blkaddr, \ + NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, flags), cfg) + +static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr, + const struct npc_mcam_kex *mkex) +{ + int lid, lt, ld, fl; + + rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX), + mkex->keyx_cfg[NIX_INTF_RX]); + rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX), + mkex->keyx_cfg[NIX_INTF_TX]); + + for (ld = 0; ld < NPC_MAX_LD; ld++) + rvu_write64(rvu, blkaddr, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld), + mkex->kex_ld_flags[ld]); + + for (lid = 0; lid < NPC_MAX_LID; lid++) { + for (lt = 0; lt < NPC_MAX_LT; lt++) { + for (ld = 0; ld < NPC_MAX_LD; ld++) { + SET_KEX_LD(NIX_INTF_RX, lid, lt, ld, + mkex->intf_lid_lt_ld[NIX_INTF_RX] + [lid][lt][ld]); + + SET_KEX_LD(NIX_INTF_TX, lid, lt, ld, + mkex->intf_lid_lt_ld[NIX_INTF_TX] + [lid][lt][ld]); + } + } + } + + for (ld = 0; ld < NPC_MAX_LD; ld++) { + for (fl = 0; fl < NPC_MAX_LFL; fl++) { + SET_KEX_LDFLAGS(NIX_INTF_RX, ld, fl, + mkex->intf_ld_flags[NIX_INTF_RX] + [ld][fl]); + + SET_KEX_LDFLAGS(NIX_INTF_TX, ld, fl, + mkex->intf_ld_flags[NIX_INTF_TX] + [ld][fl]); + } + } +} + +#define MKEX_END_SIGN 0xdeadbeef + +static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr, + const char *mkex_profile) +{ + struct device *dev = &rvu->pdev->dev; + struct npc_mcam_kex *mcam_kex; + void *mkex_prfl_addr = NULL; + u64 prfl_addr, prfl_sz; + + /* If user not selected mkex profile */ + if (!strncmp(mkex_profile, def_pfl_name, MKEX_NAME_LEN)) + goto program_mkex; + + if (!rvu->fwdata) + goto program_mkex; + prfl_addr = rvu->fwdata->mcam_addr; + prfl_sz = rvu->fwdata->mcam_sz; + + if (!prfl_addr || !prfl_sz) + goto program_mkex; + + mkex_prfl_addr 
= memremap(prfl_addr, prfl_sz, MEMREMAP_WC); + if (!mkex_prfl_addr) + goto program_mkex; + + mcam_kex = (struct npc_mcam_kex *)mkex_prfl_addr; + + while (((s64)prfl_sz > 0) && (mcam_kex->mkex_sign != MKEX_END_SIGN)) { + /* Compare with mkex mod_param name string */ + if (mcam_kex->mkex_sign == MKEX_SIGN && + !strncmp(mcam_kex->name, mkex_profile, MKEX_NAME_LEN)) { + /* Due to an errata (35786) in A0/B0 pass silicon, + * parse nibble enable configuration has to be + * identical for both Rx and Tx interfaces. + */ + if (!is_rvu_96xx_B0(rvu) || + mcam_kex->keyx_cfg[NIX_INTF_RX] == mcam_kex->keyx_cfg[NIX_INTF_TX]) + rvu->kpu.mkex = mcam_kex; + goto program_mkex; + } + + mcam_kex++; + prfl_sz -= sizeof(struct npc_mcam_kex); + } + dev_warn(dev, "Failed to load requested profile: %s\n", mkex_profile); + +program_mkex: + dev_info(rvu->dev, "Using %s mkex profile\n", rvu->kpu.mkex->name); + /* Program selected mkex profile */ + npc_program_mkex_profile(rvu, blkaddr, rvu->kpu.mkex); + if (mkex_prfl_addr) + memunmap(mkex_prfl_addr); +} + +static void npc_config_kpuaction(struct rvu *rvu, int blkaddr, + const struct npc_kpu_profile_action *kpuaction, + int kpu, int entry, bool pkind) +{ + struct npc_kpu_action0 action0 = {0}; + struct npc_kpu_action1 action1 = {0}; + u64 reg; + + action1.errlev = kpuaction->errlev; + action1.errcode = kpuaction->errcode; + action1.dp0_offset = kpuaction->dp0_offset; + action1.dp1_offset = kpuaction->dp1_offset; + action1.dp2_offset = kpuaction->dp2_offset; + + if (pkind) + reg = NPC_AF_PKINDX_ACTION1(entry); + else + reg = NPC_AF_KPUX_ENTRYX_ACTION1(kpu, entry); + + rvu_write64(rvu, blkaddr, reg, *(u64 *)&action1); + + action0.byp_count = kpuaction->bypass_count; + action0.capture_ena = kpuaction->cap_ena; + action0.parse_done = kpuaction->parse_done; + action0.next_state = kpuaction->next_state; + action0.capture_lid = kpuaction->lid; + action0.capture_ltype = kpuaction->ltype; + action0.capture_flags = kpuaction->flags; + action0.ptr_advance = kpuaction->ptr_advance; + action0.var_len_offset = kpuaction->offset; + action0.var_len_mask = kpuaction->mask; + action0.var_len_right = kpuaction->right; + action0.var_len_shift = kpuaction->shift; + + if (pkind) + reg = NPC_AF_PKINDX_ACTION0(entry); + else + reg = NPC_AF_KPUX_ENTRYX_ACTION0(kpu, entry); + + rvu_write64(rvu, blkaddr, reg, *(u64 *)&action0); +} + +static void npc_config_kpucam(struct rvu *rvu, int blkaddr, + const struct npc_kpu_profile_cam *kpucam, + int kpu, int entry) +{ + struct npc_kpu_cam cam0 = {0}; + struct npc_kpu_cam cam1 = {0}; + + cam1.state = kpucam->state & kpucam->state_mask; + cam1.dp0_data = kpucam->dp0 & kpucam->dp0_mask; + cam1.dp1_data = kpucam->dp1 & kpucam->dp1_mask; + cam1.dp2_data = kpucam->dp2 & kpucam->dp2_mask; + + cam0.state = ~kpucam->state & kpucam->state_mask; + cam0.dp0_data = ~kpucam->dp0 & kpucam->dp0_mask; + cam0.dp1_data = ~kpucam->dp1 & kpucam->dp1_mask; + cam0.dp2_data = ~kpucam->dp2 & kpucam->dp2_mask; + + rvu_write64(rvu, blkaddr, + NPC_AF_KPUX_ENTRYX_CAMX(kpu, entry, 0), *(u64 *)&cam0); + rvu_write64(rvu, blkaddr, + NPC_AF_KPUX_ENTRYX_CAMX(kpu, entry, 1), *(u64 *)&cam1); +} + +static inline u64 enable_mask(int count) +{ + return (((count) < 64) ? 
~(BIT_ULL(count) - 1) : (0x00ULL)); +} + +static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu, + const struct npc_kpu_profile *profile) +{ + int entry, num_entries, max_entries; + + if (profile->cam_entries != profile->action_entries) { + dev_err(rvu->dev, + "KPU%d: CAM and action entries [%d != %d] not equal\n", + kpu, profile->cam_entries, profile->action_entries); + } + + max_entries = rvu_read64(rvu, blkaddr, NPC_AF_CONST1) & 0xFFF; + + /* Program CAM match entries for previous KPU extracted data */ + num_entries = min_t(int, profile->cam_entries, max_entries); + for (entry = 0; entry < num_entries; entry++) + npc_config_kpucam(rvu, blkaddr, + &profile->cam[entry], kpu, entry); + + /* Program this KPU's actions */ + num_entries = min_t(int, profile->action_entries, max_entries); + for (entry = 0; entry < num_entries; entry++) + npc_config_kpuaction(rvu, blkaddr, &profile->action[entry], + kpu, entry, false); + + /* Enable all programmed entries */ + num_entries = min_t(int, profile->action_entries, profile->cam_entries); + rvu_write64(rvu, blkaddr, + NPC_AF_KPUX_ENTRY_DISX(kpu, 0), enable_mask(num_entries)); + if (num_entries > 64) { + rvu_write64(rvu, blkaddr, + NPC_AF_KPUX_ENTRY_DISX(kpu, 1), + enable_mask(num_entries - 64)); + } + + /* Enable this KPU */ + rvu_write64(rvu, blkaddr, NPC_AF_KPUX_CFG(kpu), 0x01); +} + +static int npc_prepare_default_kpu(struct npc_kpu_profile_adapter *profile) +{ + profile->name = def_pfl_name; + profile->version = NPC_KPU_PROFILE_VER; + profile->ikpu = ikpu_action_entries; + profile->pkinds = ARRAY_SIZE(ikpu_action_entries); + profile->kpu = npc_kpu_profiles; + profile->kpus = ARRAY_SIZE(npc_kpu_profiles); + profile->lt_def = &npc_lt_defaults; + profile->mkex = &npc_mkex_default; + + return 0; +} + +static void npc_load_kpu_profile(struct rvu *rvu) +{ + struct npc_kpu_profile_adapter *profile = &rvu->kpu; + + npc_prepare_default_kpu(profile); +} + +static void npc_parser_profile_init(struct rvu *rvu, int blkaddr) +{ + struct rvu_hwinfo *hw = rvu->hw; + int num_pkinds, num_kpus, idx; + struct npc_pkind *pkind; + + /* Get HW limits */ + hw->npc_kpus = (rvu_read64(rvu, blkaddr, NPC_AF_CONST) >> 8) & 0x1F; + + /* Disable all KPUs and their entries */ + for (idx = 0; idx < hw->npc_kpus; idx++) { + rvu_write64(rvu, blkaddr, + NPC_AF_KPUX_ENTRY_DISX(idx, 0), ~0ULL); + rvu_write64(rvu, blkaddr, + NPC_AF_KPUX_ENTRY_DISX(idx, 1), ~0ULL); + rvu_write64(rvu, blkaddr, NPC_AF_KPUX_CFG(idx), 0x00); + } + + /* Load and customize KPU profile. */ + npc_load_kpu_profile(rvu); + + /* First program IKPU profile i.e PKIND configs. + * Check HW max count to avoid configuring junk or + * writing to unsupported CSR addresses. 
+ */ + pkind = &hw->pkind; + num_pkinds = rvu->kpu.pkinds; + num_pkinds = min_t(int, pkind->rsrc.max, num_pkinds); + + for (idx = 0; idx < num_pkinds; idx++) + npc_config_kpuaction(rvu, blkaddr, &rvu->kpu.ikpu[idx], 0, idx, true); + + /* Program KPU CAM and Action profiles */ + num_kpus = rvu->kpu.kpus; + num_kpus = min_t(int, hw->npc_kpus, num_kpus); + + for (idx = 0; idx < num_kpus; idx++) + npc_program_kpu_profile(rvu, blkaddr, idx, &rvu->kpu.kpu[idx]); +} + +static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr) +{ + int nixlf_count = rvu_get_nixlf_count(rvu); + struct npc_mcam *mcam = &rvu->hw->mcam; + int rsvd, err; + u64 cfg; + + /* Get HW limits */ + cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST); + mcam->banks = (cfg >> 44) & 0xF; + mcam->banksize = (cfg >> 28) & 0xFFFF; + mcam->counters.max = (cfg >> 48) & 0xFFFF; + + /* Actual number of MCAM entries vary by entry size */ + cfg = (rvu_read64(rvu, blkaddr, + NPC_AF_INTFX_KEX_CFG(0)) >> 32) & 0x07; + mcam->total_entries = (mcam->banks / BIT_ULL(cfg)) * mcam->banksize; + mcam->keysize = cfg; + + /* Number of banks combined per MCAM entry */ + if (cfg == NPC_MCAM_KEY_X4) + mcam->banks_per_entry = 4; + else if (cfg == NPC_MCAM_KEY_X2) + mcam->banks_per_entry = 2; + else + mcam->banks_per_entry = 1; + + /* Reserve one MCAM entry for each of the NIX LF to + * guarantee space to install default matching DMAC rule. + * Also reserve 2 MCAM entries for each PF for default + * channel based matching or 'bcast & promisc' matching to + * support BCAST and PROMISC modes of operation for PFs. + * PF0 is excluded. + */ + rsvd = (nixlf_count * RSVD_MCAM_ENTRIES_PER_NIXLF) + + ((rvu->hw->total_pfs - 1) * RSVD_MCAM_ENTRIES_PER_PF); + if (mcam->total_entries <= rsvd) { + dev_warn(rvu->dev, + "Insufficient NPC MCAM size %d for pkt I/O, exiting\n", + mcam->total_entries); + return -ENOMEM; + } + + mcam->bmap_entries = mcam->total_entries - rsvd; + mcam->nixlf_offset = mcam->bmap_entries; + mcam->pf_offset = mcam->nixlf_offset + nixlf_count; + + /* Allocate bitmaps for managing MCAM entries */ + mcam->bmap = devm_kcalloc(rvu->dev, BITS_TO_LONGS(mcam->bmap_entries), + sizeof(long), GFP_KERNEL); + if (!mcam->bmap) + return -ENOMEM; + + mcam->bmap_reverse = devm_kcalloc(rvu->dev, + BITS_TO_LONGS(mcam->bmap_entries), + sizeof(long), GFP_KERNEL); + if (!mcam->bmap_reverse) + return -ENOMEM; + + mcam->bmap_fcnt = mcam->bmap_entries; + + /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */ + mcam->entry2pfvf_map = devm_kcalloc(rvu->dev, mcam->bmap_entries, + sizeof(u16), GFP_KERNEL); + if (!mcam->entry2pfvf_map) + return -ENOMEM; + + /* Reserve 1/8th of MCAM entries at the bottom for low priority + * allocations and another 1/8th at the top for high priority + * allocations. + */ + mcam->lprio_count = mcam->bmap_entries / 8; + if (mcam->lprio_count > BITS_PER_LONG) + mcam->lprio_count = round_down(mcam->lprio_count, + BITS_PER_LONG); + mcam->lprio_start = mcam->bmap_entries - mcam->lprio_count; + mcam->hprio_count = mcam->lprio_count; + mcam->hprio_end = mcam->hprio_count; + + /* Reserve last counter for MCAM RX miss action which is set to + * drop pkt. This way we will know how many pkts didn't match + * any MCAM entry. + */ + mcam->counters.max--; + mcam->rx_miss_act_cntr = mcam->counters.max; + + /* Allocate bitmap for managing MCAM counters and memory + * for saving counter to RVU PFFUNC allocation mapping. 
+ */ + err = rvu_alloc_bitmap(&mcam->counters); + if (err) + return err; + + mcam->cntr2pfvf_map = devm_kcalloc(rvu->dev, mcam->counters.max, + sizeof(u16), GFP_KERNEL); + if (!mcam->cntr2pfvf_map) + goto free_mem; + + /* Alloc memory for MCAM entry to counter mapping and for tracking + * counter's reference count. + */ + mcam->entry2cntr_map = devm_kcalloc(rvu->dev, mcam->bmap_entries, + sizeof(u16), GFP_KERNEL); + if (!mcam->entry2cntr_map) + goto free_mem; + + mcam->cntr_refcnt = devm_kcalloc(rvu->dev, mcam->counters.max, + sizeof(u16), GFP_KERNEL); + if (!mcam->cntr_refcnt) + goto free_mem; + + mutex_init(&mcam->lock); + + return 0; + +free_mem: + kfree(mcam->counters.bmap); + return -ENOMEM; +} + +int rvu_npc_init(struct rvu *rvu) +{ + struct npc_kpu_profile_adapter *kpu = &rvu->kpu; + struct npc_pkind *pkind = &rvu->hw->pkind; + struct npc_mcam *mcam = &rvu->hw->mcam; + u64 cfg, nibble_ena, rx_kex, tx_kex; + int blkaddr, entry, bank, err; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) { + dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__); + return -ENODEV; + } + + /* First disable all MCAM entries, to stop traffic towards NIXLFs */ + cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST); + for (bank = 0; bank < ((cfg >> 44) & 0xF); bank++) { + for (entry = 0; entry < ((cfg >> 28) & 0xFFFF); entry++) + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_CFG(entry, bank), 0); + } + + /* Allocate resource bimap for pkind*/ + pkind->rsrc.max = (rvu_read64(rvu, blkaddr, + NPC_AF_CONST1) >> 12) & 0xFF; + err = rvu_alloc_bitmap(&pkind->rsrc); + if (err) + return err; + + /* Allocate mem for pkind to PF and channel mapping info */ + pkind->pfchan_map = devm_kcalloc(rvu->dev, pkind->rsrc.max, + sizeof(u32), GFP_KERNEL); + if (!pkind->pfchan_map) + return -ENOMEM; + + /* Configure KPU profile */ + npc_parser_profile_init(rvu, blkaddr); + + /* Config Outer L2, IPv4's NPC layer info */ + rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OL2, + (kpu->lt_def->pck_ol2.lid << 8) | (kpu->lt_def->pck_ol2.ltype_match << 4) | + kpu->lt_def->pck_ol2.ltype_mask); + rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OIP4, + (kpu->lt_def->pck_oip4.lid << 8) | (kpu->lt_def->pck_oip4.ltype_match << 4) | + kpu->lt_def->pck_oip4.ltype_mask); + + /* Config Inner IPV4 NPC layer info */ + rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_IIP4, + (kpu->lt_def->pck_iip4.lid << 8) | (kpu->lt_def->pck_iip4.ltype_match << 4) | + kpu->lt_def->pck_iip4.ltype_mask); + + /* Enable below for Rx pkts. + * - Outer IPv4 header checksum validation. + * - Detect outer L2 broadcast address and set NPC_RESULT_S[L2B]. + * - Detect outer L2 multicast address and set NPC_RESULT_S[L2M]. + * - Inner IPv4 header checksum validation. + * - Set non zero checksum error code value + */ + rvu_write64(rvu, blkaddr, NPC_AF_PCK_CFG, + rvu_read64(rvu, blkaddr, NPC_AF_PCK_CFG) | + ((u64)NPC_EC_OIP4_CSUM << 32) | (NPC_EC_IIP4_CSUM << 24) | + BIT_ULL(7) | BIT_ULL(6) | BIT_ULL(2) | BIT_ULL(1)); + + /* Set RX and TX side MCAM search key size. + * LA..LD (ltype only) + Channel + */ + rx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_RX]; + tx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_TX]; + nibble_ena = FIELD_GET(NPC_PARSE_NIBBLE, rx_kex); + rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX), rx_kex); + /* Due to an errata (35786) in A0 pass silicon, parse nibble enable + * configuration has to be identical for both Rx and Tx interfaces. 
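+ * The workaround below simply mirrors the Rx setting: NPC_PARSE_NIBBLE
+ * is cleared in tx_kex and replaced with the nibble_ena value that was
+ * extracted from rx_kex above.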
+ */ + if (is_rvu_96xx_B0(rvu)) { + tx_kex &= ~NPC_PARSE_NIBBLE; + tx_kex |= FIELD_PREP(NPC_PARSE_NIBBLE, nibble_ena); + } + rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX), tx_kex); + + err = npc_mcam_rsrcs_init(rvu, blkaddr); + if (err) + return err; + + /* Configure MKEX profile */ + npc_load_mkex_profile(rvu, blkaddr, rvu->mkex_pfl_name); + + /* Set TX miss action to UCAST_DEFAULT i.e + * transmit the packet on NIX LF SQ's default channel. + */ + rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_ACT(NIX_INTF_TX), + NIX_TX_ACTIONOP_UCAST_DEFAULT); + + /* If MCAM lookup doesn't result in a match, drop the received packet. + * And map this action to a counter to count dropped pkts. + */ + rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_ACT(NIX_INTF_RX), + NIX_RX_ACTIONOP_DROP); + rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_STAT_ACT(NIX_INTF_RX), + BIT_ULL(9) | mcam->rx_miss_act_cntr); + + return 0; +} + +void rvu_npc_freemem(struct rvu *rvu) +{ + struct npc_pkind *pkind = &rvu->hw->pkind; + struct npc_mcam *mcam = &rvu->hw->mcam; + + kfree(pkind->rsrc.bmap); + kfree(mcam->counters.bmap); + mutex_destroy(&mcam->lock); +} + +void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc, + int blkaddr, int *alloc_cnt, + int *enable_cnt) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + int entry; + + *alloc_cnt = 0; + *enable_cnt = 0; + + for (entry = 0; entry < mcam->bmap_entries; entry++) { + if (mcam->entry2pfvf_map[entry] == pcifunc) { + (*alloc_cnt)++; + if (is_mcam_entry_enabled(rvu, mcam, blkaddr, entry)) + (*enable_cnt)++; + } + } +} + +void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc, + int blkaddr, int *alloc_cnt, + int *enable_cnt) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + int cntr; + + *alloc_cnt = 0; + *enable_cnt = 0; + + for (cntr = 0; cntr < mcam->counters.max; cntr++) { + if (mcam->cntr2pfvf_map[cntr] == pcifunc) { + (*alloc_cnt)++; + if (mcam->cntr_refcnt[cntr]) + (*enable_cnt)++; + } + } +} + +static int npc_mcam_verify_entry(struct npc_mcam *mcam, + u16 pcifunc, int entry) +{ + /* Verify if entry is valid and if it is indeed + * allocated to the requesting PFFUNC. + */ + if (entry >= mcam->bmap_entries) + return NPC_MCAM_INVALID_REQ; + + if (pcifunc != mcam->entry2pfvf_map[entry]) + return NPC_MCAM_PERM_DENIED; + + return 0; +} + +static int npc_mcam_verify_counter(struct npc_mcam *mcam, + u16 pcifunc, int cntr) +{ + /* Verify if counter is valid and if it is indeed + * allocated to the requesting PFFUNC. 
+ */ + if (cntr >= mcam->counters.max) + return NPC_MCAM_INVALID_REQ; + + if (pcifunc != mcam->cntr2pfvf_map[cntr]) + return NPC_MCAM_PERM_DENIED; + + return 0; +} + +static void npc_map_mcam_entry_and_cntr(struct rvu *rvu, struct npc_mcam *mcam, + int blkaddr, u16 entry, u16 cntr) +{ + u16 index = entry & (mcam->banksize - 1); + u16 bank = npc_get_bank(mcam, entry); + + /* Set mapping and increment counter's refcnt */ + mcam->entry2cntr_map[entry] = cntr; + mcam->cntr_refcnt[cntr]++; + /* Enable stats */ + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank), + BIT_ULL(9) | cntr); +} + +static void npc_unmap_mcam_entry_and_cntr(struct rvu *rvu, + struct npc_mcam *mcam, + int blkaddr, u16 entry, u16 cntr) +{ + u16 index = entry & (mcam->banksize - 1); + u32 bank = npc_get_bank(mcam, entry); + + /* Remove mapping and reduce counter's refcnt */ + mcam->entry2cntr_map[entry] = NPC_MCAM_INVALID_MAP; + mcam->cntr_refcnt[cntr]--; + /* Disable stats */ + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank), 0x00); +} + +/* Sets MCAM entry in bitmap as used. Update + * reverse bitmap too. Should be called with + * 'mcam->lock' held. + */ +static void npc_mcam_set_bit(struct npc_mcam *mcam, u16 index) +{ + u16 entry, rentry; + + entry = index; + rentry = mcam->bmap_entries - index - 1; + + __set_bit(entry, mcam->bmap); + __set_bit(rentry, mcam->bmap_reverse); + mcam->bmap_fcnt--; +} + +/* Sets MCAM entry in bitmap as free. Update + * reverse bitmap too. Should be called with + * 'mcam->lock' held. + */ +static void npc_mcam_clear_bit(struct npc_mcam *mcam, u16 index) +{ + u16 entry, rentry; + + entry = index; + rentry = mcam->bmap_entries - index - 1; + + __clear_bit(entry, mcam->bmap); + __clear_bit(rentry, mcam->bmap_reverse); + mcam->bmap_fcnt++; +} + +static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam, + int blkaddr, u16 pcifunc) +{ + u16 index, cntr; + + /* Scan all MCAM entries and free the ones mapped to 'pcifunc' */ + for (index = 0; index < mcam->bmap_entries; index++) { + if (mcam->entry2pfvf_map[index] == pcifunc) { + mcam->entry2pfvf_map[index] = NPC_MCAM_INVALID_MAP; + /* Free the entry in bitmap */ + npc_mcam_clear_bit(mcam, index); + /* Disable the entry */ + npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false); + + /* Update entry2counter mapping */ + cntr = mcam->entry2cntr_map[index]; + if (cntr != NPC_MCAM_INVALID_MAP) + npc_unmap_mcam_entry_and_cntr(rvu, mcam, + blkaddr, index, + cntr); + } + } +} + +static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam, + u16 pcifunc) +{ + u16 cntr; + + /* Scan all MCAM counters and free the ones mapped to 'pcifunc' */ + for (cntr = 0; cntr < mcam->counters.max; cntr++) { + if (mcam->cntr2pfvf_map[cntr] == pcifunc) { + mcam->cntr2pfvf_map[cntr] = NPC_MCAM_INVALID_MAP; + mcam->cntr_refcnt[cntr] = 0; + rvu_free_rsrc(&mcam->counters, cntr); + /* This API is expected to be called after freeing + * MCAM entries, which inturn will remove + * 'entry to counter' mapping. + * No need to do it again. + */ + } + } +} + +/* Find area of contiguous free entries of size 'nr'. + * If not found return max contiguous free entries available. + */ +static u16 npc_mcam_find_zero_area(unsigned long *map, u16 size, u16 start, + u16 nr, u16 *max_area) +{ + u16 max_area_start = 0; + u16 index, next, end; + + *max_area = 0; + +again: + index = find_next_zero_bit(map, size, start); + if (index >= size) + return max_area_start; + + end = ((index + nr) >= size) ? 
size : index + nr; + next = find_next_bit(map, end, index); + if (*max_area < (next - index)) { + *max_area = next - index; + max_area_start = index; + } + + if (next < end) { + start = next + 1; + goto again; + } + + return max_area_start; +} + +/* Find number of free MCAM entries available + * within range i.e in between 'start' and 'end'. + */ +static u16 npc_mcam_get_free_count(unsigned long *map, u16 start, u16 end) +{ + u16 index, next; + u16 fcnt = 0; + +again: + if (start >= end) + return fcnt; + + index = find_next_zero_bit(map, end, start); + if (index >= end) + return fcnt; + + next = find_next_bit(map, end, index); + if (next <= end) { + fcnt += next - index; + start = next + 1; + goto again; + } + + fcnt += end - index; + return fcnt; +} + +static void +npc_get_mcam_search_range_priority(struct npc_mcam *mcam, + struct npc_mcam_alloc_entry_req *req, + u16 *start, u16 *end, bool *reverse) +{ + u16 fcnt; + + if (req->priority == NPC_MCAM_HIGHER_PRIO) + goto hprio; + + /* For a low priority entry allocation + * - If reference entry is not in hprio zone then + * search range: ref_entry to end. + * - If reference entry is in hprio zone and if + * request can be accomodated in non-hprio zone then + * search range: 'start of middle zone' to 'end' + * - else search in reverse, so that less number of hprio + * zone entries are allocated. + */ + + *reverse = false; + *start = req->ref_entry + 1; + *end = mcam->bmap_entries; + + if (req->ref_entry >= mcam->hprio_end) + return; + + fcnt = npc_mcam_get_free_count(mcam->bmap, + mcam->hprio_end, mcam->bmap_entries); + if (fcnt > req->count) + *start = mcam->hprio_end; + else + *reverse = true; + return; + +hprio: + /* For a high priority entry allocation, search is always + * in reverse to preserve hprio zone entries. + * - If reference entry is not in lprio zone then + * search range: 0 to ref_entry. + * - If reference entry is in lprio zone and if + * request can be accomodated in middle zone then + * search range: 'hprio_end' to 'lprio_start' + */ + + *reverse = true; + *start = 0; + *end = req->ref_entry; + + if (req->ref_entry <= mcam->lprio_start) + return; + + fcnt = npc_mcam_get_free_count(mcam->bmap, + mcam->hprio_end, mcam->lprio_start); + if (fcnt < req->count) + return; + *start = mcam->hprio_end; + *end = mcam->lprio_start; +} + +static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc, + struct npc_mcam_alloc_entry_req *req, + struct npc_mcam_alloc_entry_rsp *rsp) +{ + u16 entry_list[NPC_MAX_NONCONTIG_ENTRIES]; + u16 fcnt, hp_fcnt, lp_fcnt; + u16 start, end, index; + int entry, next_start; + bool reverse = false; + unsigned long *bmap; + u16 max_contig; + + mutex_lock(&mcam->lock); + + /* Check if there are any free entries */ + if (!mcam->bmap_fcnt) { + mutex_unlock(&mcam->lock); + return NPC_MCAM_ALLOC_FAILED; + } + + /* MCAM entries are divided into high priority, middle and + * low priority zones. Idea is to not allocate top and lower + * most entries as much as possible, this is to increase + * probability of honouring priority allocation requests. + * + * Two bitmaps are used for mcam entry management, + * mcam->bmap for forward search i.e '0 to mcam->bmap_entries'. + * mcam->bmap_reverse for reverse search i.e 'mcam->bmap_entries to 0'. + * + * Reverse bitmap is used to allocate entries + * - when a higher priority entry is requested + * - when available free entries are less. + * Lower priority ones out of avaialble free entries are always + * chosen when 'high vs low' question arises. 
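+ *
+ * As a purely illustrative example (numbers are not hardware values):
+ * with bmap_entries = 8192, lprio_count = hprio_count = 8192/8 = 1024,
+ * hprio_end = 1024 and lprio_start = 7168, a normal request is first
+ * tried in the middle zone, entries 1024..7167, while a
+ * NPC_MCAM_HIGHER_PRIO request searches below its ref_entry through
+ * bmap_reverse, where forward index i corresponds to reverse bit
+ * (bmap_entries - 1 - i).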
+ */ + + /* Get the search range for priority allocation request */ + if (req->priority) { + npc_get_mcam_search_range_priority(mcam, req, + &start, &end, &reverse); + goto alloc; + } + + /* Find out the search range for non-priority allocation request + * + * Get MCAM free entry count in middle zone. + */ + lp_fcnt = npc_mcam_get_free_count(mcam->bmap, + mcam->lprio_start, + mcam->bmap_entries); + hp_fcnt = npc_mcam_get_free_count(mcam->bmap, 0, mcam->hprio_end); + fcnt = mcam->bmap_fcnt - lp_fcnt - hp_fcnt; + + /* Check if request can be accomodated in the middle zone */ + if (fcnt > req->count) { + start = mcam->hprio_end; + end = mcam->lprio_start; + } else if ((fcnt + (hp_fcnt / 2) + (lp_fcnt / 2)) > req->count) { + /* Expand search zone from half of hprio zone to + * half of lprio zone. + */ + start = mcam->hprio_end / 2; + end = mcam->bmap_entries - (mcam->lprio_count / 2); + reverse = true; + } else { + /* Not enough free entries, search all entries in reverse, + * so that low priority ones will get used up. + */ + reverse = true; + start = 0; + end = mcam->bmap_entries; + } + +alloc: + if (reverse) { + bmap = mcam->bmap_reverse; + start = mcam->bmap_entries - start; + end = mcam->bmap_entries - end; + index = start; + start = end; + end = index; + } else { + bmap = mcam->bmap; + } + + if (req->contig) { + /* Allocate requested number of contiguous entries, if + * unsuccessful find max contiguous entries available. + */ + index = npc_mcam_find_zero_area(bmap, end, start, + req->count, &max_contig); + rsp->count = max_contig; + if (reverse) + rsp->entry = mcam->bmap_entries - index - max_contig; + else + rsp->entry = index; + } else { + /* Allocate requested number of non-contiguous entries, + * if unsuccessful allocate as many as possible. + */ + rsp->count = 0; + next_start = start; + for (entry = 0; entry < req->count; entry++) { + index = find_next_zero_bit(bmap, end, next_start); + if (index >= end) + break; + + next_start = start + (index - start) + 1; + + /* Save the entry's index */ + if (reverse) + index = mcam->bmap_entries - index - 1; + entry_list[entry] = index; + rsp->count++; + } + } + + /* If allocating requested no of entries is unsucessful, + * expand the search range to full bitmap length and retry. + */ + if (!req->priority && (rsp->count < req->count) && + ((end - start) != mcam->bmap_entries)) { + reverse = true; + start = 0; + end = mcam->bmap_entries; + goto alloc; + } + + /* For priority entry allocation requests, if allocation is + * failed then expand search to max possible range and retry. + */ + if (req->priority && rsp->count < req->count) { + if (req->priority == NPC_MCAM_LOWER_PRIO && + (start != (req->ref_entry + 1))) { + start = req->ref_entry + 1; + end = mcam->bmap_entries; + reverse = false; + goto alloc; + } else if ((req->priority == NPC_MCAM_HIGHER_PRIO) && + ((end - start) != req->ref_entry)) { + start = 0; + end = req->ref_entry; + reverse = true; + goto alloc; + } + } + + /* Copy MCAM entry indices into mbox response entry_list. + * Requester always expects indices in ascending order, so + * so reverse the list if reverse bitmap is used for allocation. + */ + if (!req->contig && rsp->count) { + index = 0; + for (entry = rsp->count - 1; entry >= 0; entry--) { + if (reverse) + rsp->entry_list[index++] = entry_list[entry]; + else + rsp->entry_list[entry] = entry_list[entry]; + } + } + + /* Mark the allocated entries as used and set nixlf mapping */ + for (entry = 0; entry < rsp->count; entry++) { + index = req->contig ? 
+ (rsp->entry + entry) : rsp->entry_list[entry]; + npc_mcam_set_bit(mcam, index); + mcam->entry2pfvf_map[index] = pcifunc; + mcam->entry2cntr_map[index] = NPC_MCAM_INVALID_MAP; + } + + /* Update available free count in mbox response */ + rsp->free_count = mcam->bmap_fcnt; + + mutex_unlock(&mcam->lock); + return 0; +} + +int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu, + struct npc_mcam_alloc_entry_req *req, + struct npc_mcam_alloc_entry_rsp *rsp) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + u16 pcifunc = req->hdr.pcifunc; + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return NPC_MCAM_INVALID_REQ; + + rsp->entry = NPC_MCAM_ENTRY_INVALID; + rsp->free_count = 0; + + /* Check if ref_entry is within range */ + if (req->priority && req->ref_entry >= mcam->bmap_entries) + return NPC_MCAM_INVALID_REQ; + + /* ref_entry can't be '0' if requested priority is high. + * Can't be last entry if requested priority is low. + */ + if ((!req->ref_entry && req->priority == NPC_MCAM_HIGHER_PRIO) || + ((req->ref_entry == (mcam->bmap_entries - 1)) && + req->priority == NPC_MCAM_LOWER_PRIO)) + return NPC_MCAM_INVALID_REQ; + + /* Since list of allocated indices needs to be sent to requester, + * max number of non-contiguous entries per mbox msg is limited. + */ + if (!req->contig && req->count > NPC_MAX_NONCONTIG_ENTRIES) + return NPC_MCAM_INVALID_REQ; + + /* Alloc request from PFFUNC with no NIXLF attached should be denied */ + if (!is_nixlf_attached(rvu, pcifunc)) + return NPC_MCAM_ALLOC_DENIED; + + return npc_mcam_alloc_entries(mcam, pcifunc, req, rsp); +} + +int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu, + struct npc_mcam_free_entry_req *req, + struct msg_rsp *rsp) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + u16 pcifunc = req->hdr.pcifunc; + int blkaddr, rc = 0; + u16 cntr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return NPC_MCAM_INVALID_REQ; + + /* Free request from PFFUNC with no NIXLF attached, ignore */ + if (!is_nixlf_attached(rvu, pcifunc)) + return NPC_MCAM_INVALID_REQ; + + mutex_lock(&mcam->lock); + + if (req->all) + goto free_all; + + rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry); + if (rc) + goto exit; + + mcam->entry2pfvf_map[req->entry] = 0; + npc_mcam_clear_bit(mcam, req->entry); + npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false); + + /* Update entry2counter mapping */ + cntr = mcam->entry2cntr_map[req->entry]; + if (cntr != NPC_MCAM_INVALID_MAP) + npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr, + req->entry, cntr); + + goto exit; + +free_all: + /* Free up all entries allocated to requesting PFFUNC */ + npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc); +exit: + mutex_unlock(&mcam->lock); + return rc; +} + +int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu, + struct npc_mcam_write_entry_req *req, + struct msg_rsp *rsp) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + u16 pcifunc = req->hdr.pcifunc; + int blkaddr, rc; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return NPC_MCAM_INVALID_REQ; + + mutex_lock(&mcam->lock); + rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry); + if (rc) + goto exit; + + if (req->set_cntr && + npc_mcam_verify_counter(mcam, pcifunc, req->cntr)) { + rc = NPC_MCAM_INVALID_REQ; + goto exit; + } + + if (req->intf != NIX_INTF_RX && req->intf != NIX_INTF_TX) { + rc = NPC_MCAM_INVALID_REQ; + goto exit; + } + + npc_config_mcam_entry(rvu, mcam, blkaddr, req->entry, req->intf, + &req->entry_data, req->enable_entry); + + 
if (req->set_cntr) + npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr, + req->entry, req->cntr); + + rc = 0; +exit: + mutex_unlock(&mcam->lock); + return rc; +} + +int rvu_mbox_handler_npc_mcam_ena_entry(struct rvu *rvu, + struct npc_mcam_ena_dis_entry_req *req, + struct msg_rsp *rsp) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + u16 pcifunc = req->hdr.pcifunc; + int blkaddr, rc; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return NPC_MCAM_INVALID_REQ; + + mutex_lock(&mcam->lock); + rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry); + mutex_unlock(&mcam->lock); + if (rc) + return rc; + + npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, true); + + return 0; +} + +int rvu_mbox_handler_npc_mcam_dis_entry(struct rvu *rvu, + struct npc_mcam_ena_dis_entry_req *req, + struct msg_rsp *rsp) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + u16 pcifunc = req->hdr.pcifunc; + int blkaddr, rc; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return NPC_MCAM_INVALID_REQ; + + mutex_lock(&mcam->lock); + rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry); + mutex_unlock(&mcam->lock); + if (rc) + return rc; + + npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false); + + return 0; +} + +int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu, + struct npc_mcam_shift_entry_req *req, + struct npc_mcam_shift_entry_rsp *rsp) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + u16 pcifunc = req->hdr.pcifunc; + u16 old_entry, new_entry; + int blkaddr, rc = 0; + u16 index, cntr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return NPC_MCAM_INVALID_REQ; + + if (req->shift_count > NPC_MCAM_MAX_SHIFTS) + return NPC_MCAM_INVALID_REQ; + + mutex_lock(&mcam->lock); + for (index = 0; index < req->shift_count; index++) { + old_entry = req->curr_entry[index]; + new_entry = req->new_entry[index]; + + /* Check if both old and new entries are valid and + * does belong to this PFFUNC or not. 
+ */ + rc = npc_mcam_verify_entry(mcam, pcifunc, old_entry); + if (rc) + break; + + rc = npc_mcam_verify_entry(mcam, pcifunc, new_entry); + if (rc) + break; + + /* new_entry should not have a counter mapped */ + if (mcam->entry2cntr_map[new_entry] != NPC_MCAM_INVALID_MAP) { + rc = NPC_MCAM_PERM_DENIED; + break; + } + + /* Disable the new_entry */ + npc_enable_mcam_entry(rvu, mcam, blkaddr, new_entry, false); + + /* Copy rule from old entry to new entry */ + npc_copy_mcam_entry(rvu, mcam, blkaddr, old_entry, new_entry); + + /* Copy counter mapping, if any */ + cntr = mcam->entry2cntr_map[old_entry]; + if (cntr != NPC_MCAM_INVALID_MAP) { + npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr, + old_entry, cntr); + npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr, + new_entry, cntr); + } + + /* Enable new_entry and disable old_entry */ + npc_enable_mcam_entry(rvu, mcam, blkaddr, new_entry, true); + npc_enable_mcam_entry(rvu, mcam, blkaddr, old_entry, false); + } + + /* If shift has failed then report the failed index */ + if (index != req->shift_count) { + rc = NPC_MCAM_PERM_DENIED; + rsp->failed_entry_idx = index; + } + + mutex_unlock(&mcam->lock); + return rc; +} + +int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu, + struct npc_mcam_alloc_counter_req *req, + struct npc_mcam_alloc_counter_rsp *rsp) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + u16 pcifunc = req->hdr.pcifunc; + u16 max_contig, cntr; + int blkaddr, index; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return NPC_MCAM_INVALID_REQ; + + /* If the request is from a PFFUNC with no NIXLF attached, ignore */ + if (!is_nixlf_attached(rvu, pcifunc)) + return NPC_MCAM_INVALID_REQ; + + /* Since list of allocated counter IDs needs to be sent to requester, + * max number of non-contiguous counters per mbox msg is limited. + */ + if (!req->contig && req->count > NPC_MAX_NONCONTIG_COUNTERS) + return NPC_MCAM_INVALID_REQ; + + mutex_lock(&mcam->lock); + + /* Check if unused counters are available or not */ + if (!rvu_rsrc_free_count(&mcam->counters)) { + mutex_unlock(&mcam->lock); + return NPC_MCAM_ALLOC_FAILED; + } + + rsp->count = 0; + + if (req->contig) { + /* Allocate requested number of contiguous counters, if + * unsuccessful find max contiguous entries available. + */ + index = npc_mcam_find_zero_area(mcam->counters.bmap, + mcam->counters.max, 0, + req->count, &max_contig); + rsp->count = max_contig; + rsp->cntr = index; + for (cntr = index; cntr < (index + max_contig); cntr++) { + __set_bit(cntr, mcam->counters.bmap); + mcam->cntr2pfvf_map[cntr] = pcifunc; + } + } else { + /* Allocate requested number of non-contiguous counters, + * if unsuccessful allocate as many as possible. 
+ */ + for (cntr = 0; cntr < req->count; cntr++) { + index = rvu_alloc_rsrc(&mcam->counters); + if (index < 0) + break; + rsp->cntr_list[cntr] = index; + rsp->count++; + mcam->cntr2pfvf_map[index] = pcifunc; + } + } + + mutex_unlock(&mcam->lock); + return 0; +} + +int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu, + struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + u16 index, entry = 0; + int blkaddr, err; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return NPC_MCAM_INVALID_REQ; + + mutex_lock(&mcam->lock); + err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr); + if (err) { + mutex_unlock(&mcam->lock); + return err; + } + + /* Mark counter as free/unused */ + mcam->cntr2pfvf_map[req->cntr] = NPC_MCAM_INVALID_MAP; + rvu_free_rsrc(&mcam->counters, req->cntr); + + /* Disable all MCAM entry's stats which are using this counter */ + while (entry < mcam->bmap_entries) { + if (!mcam->cntr_refcnt[req->cntr]) + break; + + index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry); + if (index >= mcam->bmap_entries) + break; + entry = index + 1; + if (mcam->entry2cntr_map[index] != req->cntr) + continue; + + npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr, + index, req->cntr); + } + + mutex_unlock(&mcam->lock); + return 0; +} + +int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu, + struct npc_mcam_unmap_counter_req *req, struct msg_rsp *rsp) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + u16 index, entry = 0; + int blkaddr, rc; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return NPC_MCAM_INVALID_REQ; + + mutex_lock(&mcam->lock); + rc = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr); + if (rc) + goto exit; + + /* Unmap the MCAM entry and counter */ + if (!req->all) { + rc = npc_mcam_verify_entry(mcam, req->hdr.pcifunc, req->entry); + if (rc) + goto exit; + npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr, + req->entry, req->cntr); + goto exit; + } + + /* Disable all MCAM entry's stats which are using this counter */ + while (entry < mcam->bmap_entries) { + if (!mcam->cntr_refcnt[req->cntr]) + break; + + index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry); + if (index >= mcam->bmap_entries) + break; + entry = index + 1; + + if (mcam->entry2cntr_map[index] != req->cntr) + continue; + + npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr, + index, req->cntr); + } +exit: + mutex_unlock(&mcam->lock); + return rc; +} + +int rvu_mbox_handler_npc_mcam_clear_counter(struct rvu *rvu, + struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + int blkaddr, err; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return NPC_MCAM_INVALID_REQ; + + mutex_lock(&mcam->lock); + err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr); + mutex_unlock(&mcam->lock); + if (err) + return err; + + rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(req->cntr), 0x00); + + return 0; +} + +int rvu_mbox_handler_npc_mcam_counter_stats(struct rvu *rvu, + struct npc_mcam_oper_counter_req *req, + struct npc_mcam_oper_counter_rsp *rsp) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + int blkaddr, err; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return NPC_MCAM_INVALID_REQ; + + mutex_lock(&mcam->lock); + err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr); + mutex_unlock(&mcam->lock); + if (err) + return err; + + rsp->stat = rvu_read64(rvu, blkaddr, 
NPC_AF_MATCH_STATX(req->cntr)); + rsp->stat &= BIT_ULL(48) - 1; + + return 0; +} + +int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu, + struct npc_mcam_alloc_and_write_entry_req *req, + struct npc_mcam_alloc_and_write_entry_rsp *rsp) +{ + struct npc_mcam_alloc_counter_req cntr_req; + struct npc_mcam_alloc_counter_rsp cntr_rsp; + struct npc_mcam_alloc_entry_req entry_req; + struct npc_mcam_alloc_entry_rsp entry_rsp; + struct npc_mcam *mcam = &rvu->hw->mcam; + u16 entry = NPC_MCAM_ENTRY_INVALID; + u16 cntr = NPC_MCAM_ENTRY_INVALID; + int blkaddr, rc; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return NPC_MCAM_INVALID_REQ; + + if (req->intf != NIX_INTF_RX && req->intf != NIX_INTF_TX) + return NPC_MCAM_INVALID_REQ; + + /* Try to allocate a MCAM entry */ + entry_req.hdr.pcifunc = req->hdr.pcifunc; + entry_req.contig = true; + entry_req.priority = req->priority; + entry_req.ref_entry = req->ref_entry; + entry_req.count = 1; + + rc = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, + &entry_req, &entry_rsp); + if (rc) + return rc; + + if (!entry_rsp.count) + return NPC_MCAM_ALLOC_FAILED; + + entry = entry_rsp.entry; + + if (!req->alloc_cntr) + goto write_entry; + + /* Now allocate counter */ + cntr_req.hdr.pcifunc = req->hdr.pcifunc; + cntr_req.contig = true; + cntr_req.count = 1; + + rc = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req, &cntr_rsp); + if (rc) { + /* Free allocated MCAM entry */ + mutex_lock(&mcam->lock); + mcam->entry2pfvf_map[entry] = 0; + npc_mcam_clear_bit(mcam, entry); + mutex_unlock(&mcam->lock); + return rc; + } + + cntr = cntr_rsp.cntr; + +write_entry: + mutex_lock(&mcam->lock); + npc_config_mcam_entry(rvu, mcam, blkaddr, entry, req->intf, + &req->entry_data, req->enable_entry); + + if (req->alloc_cntr) + npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr, entry, cntr); + mutex_unlock(&mcam->lock); + + rsp->entry = entry; + rsp->cntr = cntr; + + return 0; +} + +#define GET_KEX_CFG(intf) \ + rvu_read64(rvu, BLKADDR_NPC, NPC_AF_INTFX_KEX_CFG(intf)) + +#define GET_KEX_FLAGS(ld) \ + rvu_read64(rvu, BLKADDR_NPC, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld)) + +#define GET_KEX_LD(intf, lid, lt, ld) \ + rvu_read64(rvu, BLKADDR_NPC, \ + NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, lt, ld)) + +#define GET_KEX_LDFLAGS(intf, ld, fl) \ + rvu_read64(rvu, BLKADDR_NPC, \ + NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, fl)) + +int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req, + struct npc_get_kex_cfg_rsp *rsp) +{ + int lid, lt, ld, fl; + + rsp->rx_keyx_cfg = GET_KEX_CFG(NIX_INTF_RX); + rsp->tx_keyx_cfg = GET_KEX_CFG(NIX_INTF_TX); + for (lid = 0; lid < NPC_MAX_LID; lid++) { + for (lt = 0; lt < NPC_MAX_LT; lt++) { + for (ld = 0; ld < NPC_MAX_LD; ld++) { + rsp->intf_lid_lt_ld[NIX_INTF_RX][lid][lt][ld] = + GET_KEX_LD(NIX_INTF_RX, lid, lt, ld); + rsp->intf_lid_lt_ld[NIX_INTF_TX][lid][lt][ld] = + GET_KEX_LD(NIX_INTF_TX, lid, lt, ld); + } + } + } + for (ld = 0; ld < NPC_MAX_LD; ld++) + rsp->kex_ld_flags[ld] = GET_KEX_FLAGS(ld); + + for (ld = 0; ld < NPC_MAX_LD; ld++) { + for (fl = 0; fl < NPC_MAX_LFL; fl++) { + rsp->intf_ld_flags[NIX_INTF_RX][ld][fl] = + GET_KEX_LDFLAGS(NIX_INTF_RX, ld, fl); + rsp->intf_ld_flags[NIX_INTF_TX][ld][fl] = + GET_KEX_LDFLAGS(NIX_INTF_TX, ld, fl); + } + } + memcpy(rsp->mkex_pfl_name, rvu->mkex_pfl_name, MKEX_NAME_LEN); + return 0; +} + +int rvu_npc_update_rxvlan(struct rvu *rvu, u16 pcifunc, int nixlf) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + struct npc_mcam *mcam = &rvu->hw->mcam; + int blkaddr, index; 
+ bool enable; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + if (!pfvf->rxvlan) + return 0; + + index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, + NIXLF_UCAST_ENTRY); + pfvf->entry.action = npc_get_mcam_action(rvu, mcam, blkaddr, index); + enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, index); + npc_config_mcam_entry(rvu, mcam, blkaddr, pfvf->rxvlan_index, + NIX_INTF_RX, &pfvf->entry, enable); + + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c new file mode 100644 index 000000000..9d7c135c7 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/pci.h> + +#include "rvu_struct.h" +#include "common.h" +#include "mbox.h" +#include "rvu.h" + +struct reg_range { + u64 start; + u64 end; +}; + +struct hw_reg_map { + u8 regblk; + u8 num_ranges; + u64 mask; +#define MAX_REG_RANGES 8 + struct reg_range range[MAX_REG_RANGES]; +}; + +static struct hw_reg_map txsch_reg_map[NIX_TXSCH_LVL_CNT] = { + {NIX_TXSCH_LVL_SMQ, 2, 0xFFFF, {{0x0700, 0x0708}, {0x1400, 0x14C8} } }, + {NIX_TXSCH_LVL_TL4, 3, 0xFFFF, {{0x0B00, 0x0B08}, {0x0B10, 0x0B18}, + {0x1200, 0x12E0} } }, + {NIX_TXSCH_LVL_TL3, 3, 0xFFFF, {{0x1000, 0x10E0}, {0x1600, 0x1608}, + {0x1610, 0x1618} } }, + {NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x1768} } }, + {NIX_TXSCH_LVL_TL1, 1, 0xFFFF, {{0x0C00, 0x0D98} } }, +}; + +bool rvu_check_valid_reg(int regmap, int regblk, u64 reg) +{ + int idx; + struct hw_reg_map *map; + + /* Only 64bit offsets */ + if (reg & 0x07) + return false; + + if (regmap == TXSCHQ_HWREGMAP) { + if (regblk >= NIX_TXSCH_LVL_CNT) + return false; + map = &txsch_reg_map[regblk]; + } else { + return false; + } + + /* Should never happen */ + if (map->regblk != regblk) + return false; + + reg &= map->mask; + + for (idx = 0; idx < map->num_ranges; idx++) { + if (reg >= map->range[idx].start && + reg < map->range[idx].end) + return true; + } + return false; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h new file mode 100644 index 000000000..7ca599b97 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h @@ -0,0 +1,528 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef RVU_REG_H +#define RVU_REG_H + +/* Admin function registers */ +#define RVU_AF_MSIXTR_BASE (0x10) +#define RVU_AF_ECO (0x20) +#define RVU_AF_BLK_RST (0x30) +#define RVU_AF_PF_BAR4_ADDR (0x40) +#define RVU_AF_RAS (0x100) +#define RVU_AF_RAS_W1S (0x108) +#define RVU_AF_RAS_ENA_W1S (0x110) +#define RVU_AF_RAS_ENA_W1C (0x118) +#define RVU_AF_GEN_INT (0x120) +#define RVU_AF_GEN_INT_W1S (0x128) +#define RVU_AF_GEN_INT_ENA_W1S (0x130) +#define RVU_AF_GEN_INT_ENA_W1C (0x138) +#define RVU_AF_AFPF_MBOX0 (0x02000) +#define RVU_AF_AFPF_MBOX1 (0x02008) +#define RVU_AF_AFPFX_MBOXX(a, b) (0x2000 | (a) << 4 | (b) << 3) +#define RVU_AF_PFME_STATUS (0x2800) +#define RVU_AF_PFTRPEND (0x2810) +#define RVU_AF_PFTRPEND_W1S (0x2820) +#define RVU_AF_PF_RST (0x2840) +#define RVU_AF_HWVF_RST (0x2850) +#define RVU_AF_PFAF_MBOX_INT (0x2880) +#define RVU_AF_PFAF_MBOX_INT_W1S (0x2888) +#define RVU_AF_PFAF_MBOX_INT_ENA_W1S (0x2890) +#define RVU_AF_PFAF_MBOX_INT_ENA_W1C (0x2898) +#define RVU_AF_PFFLR_INT (0x28a0) +#define RVU_AF_PFFLR_INT_W1S (0x28a8) +#define RVU_AF_PFFLR_INT_ENA_W1S (0x28b0) +#define RVU_AF_PFFLR_INT_ENA_W1C (0x28b8) +#define RVU_AF_PFME_INT (0x28c0) +#define RVU_AF_PFME_INT_W1S (0x28c8) +#define RVU_AF_PFME_INT_ENA_W1S (0x28d0) +#define RVU_AF_PFME_INT_ENA_W1C (0x28d8) + +/* Admin function's privileged PF/VF registers */ +#define RVU_PRIV_CONST (0x8000000) +#define RVU_PRIV_GEN_CFG (0x8000010) +#define RVU_PRIV_CLK_CFG (0x8000020) +#define RVU_PRIV_ACTIVE_PC (0x8000030) +#define RVU_PRIV_PFX_CFG(a) (0x8000100 | (a) << 16) +#define RVU_PRIV_PFX_MSIX_CFG(a) (0x8000110 | (a) << 16) +#define RVU_PRIV_PFX_ID_CFG(a) (0x8000120 | (a) << 16) +#define RVU_PRIV_PFX_INT_CFG(a) (0x8000200 | (a) << 16) +#define RVU_PRIV_PFX_NIX0_CFG (0x8000300) +#define RVU_PRIV_PFX_NPA_CFG (0x8000310) +#define RVU_PRIV_PFX_SSO_CFG (0x8000320) +#define RVU_PRIV_PFX_SSOW_CFG (0x8000330) +#define RVU_PRIV_PFX_TIM_CFG (0x8000340) +#define RVU_PRIV_PFX_CPT0_CFG (0x8000350) +#define RVU_PRIV_BLOCK_TYPEX_REV(a) (0x8000400 | (a) << 3) +#define RVU_PRIV_HWVFX_INT_CFG(a) (0x8001280 | (a) << 16) +#define RVU_PRIV_HWVFX_NIX0_CFG (0x8001300) +#define RVU_PRIV_HWVFX_NPA_CFG (0x8001310) +#define RVU_PRIV_HWVFX_SSO_CFG (0x8001320) +#define RVU_PRIV_HWVFX_SSOW_CFG (0x8001330) +#define RVU_PRIV_HWVFX_TIM_CFG (0x8001340) +#define RVU_PRIV_HWVFX_CPT0_CFG (0x8001350) + +/* RVU PF registers */ +#define RVU_PF_VFX_PFVF_MBOX0 (0x00000) +#define RVU_PF_VFX_PFVF_MBOX1 (0x00008) +#define RVU_PF_VFX_PFVF_MBOXX(a, b) (0x0 | (a) << 12 | (b) << 3) +#define RVU_PF_VF_BAR4_ADDR (0x10) +#define RVU_PF_BLOCK_ADDRX_DISC(a) (0x200 | (a) << 3) +#define RVU_PF_VFME_STATUSX(a) (0x800 | (a) << 3) +#define RVU_PF_VFTRPENDX(a) (0x820 | (a) << 3) +#define RVU_PF_VFTRPEND_W1SX(a) (0x840 | (a) << 3) +#define RVU_PF_VFPF_MBOX_INTX(a) (0x880 | (a) << 3) +#define RVU_PF_VFPF_MBOX_INT_W1SX(a) (0x8A0 | (a) << 3) +#define RVU_PF_VFPF_MBOX_INT_ENA_W1SX(a) (0x8C0 | (a) << 3) +#define RVU_PF_VFPF_MBOX_INT_ENA_W1CX(a) (0x8E0 | (a) << 3) +#define RVU_PF_VFFLR_INTX(a) (0x900 | (a) << 3) +#define RVU_PF_VFFLR_INT_W1SX(a) (0x920 | (a) << 3) +#define RVU_PF_VFFLR_INT_ENA_W1SX(a) (0x940 | (a) << 3) +#define RVU_PF_VFFLR_INT_ENA_W1CX(a) (0x960 | (a) << 3) +#define RVU_PF_VFME_INTX(a) (0x980 | (a) << 3) +#define RVU_PF_VFME_INT_W1SX(a) (0x9A0 | (a) << 3) +#define RVU_PF_VFME_INT_ENA_W1SX(a) (0x9C0 | (a) << 3) +#define RVU_PF_VFME_INT_ENA_W1CX(a) (0x9E0 | (a) << 3) +#define RVU_PF_PFAF_MBOX0 (0xC00) +#define RVU_PF_PFAF_MBOX1 (0xC08) +#define RVU_PF_PFAF_MBOXX(a) (0xC00 | (a) 
<< 3) +#define RVU_PF_INT (0xc20) +#define RVU_PF_INT_W1S (0xc28) +#define RVU_PF_INT_ENA_W1S (0xc30) +#define RVU_PF_INT_ENA_W1C (0xc38) +#define RVU_PF_MSIX_VECX_ADDR(a) (0x000 | (a) << 4) +#define RVU_PF_MSIX_VECX_CTL(a) (0x008 | (a) << 4) +#define RVU_PF_MSIX_PBAX(a) (0xF0000 | (a) << 3) + +/* RVU VF registers */ +#define RVU_VF_VFPF_MBOX0 (0x00000) +#define RVU_VF_VFPF_MBOX1 (0x00008) + +/* NPA block's admin function registers */ +#define NPA_AF_BLK_RST (0x0000) +#define NPA_AF_CONST (0x0010) +#define NPA_AF_CONST1 (0x0018) +#define NPA_AF_LF_RST (0x0020) +#define NPA_AF_GEN_CFG (0x0030) +#define NPA_AF_NDC_CFG (0x0040) +#define NPA_AF_INP_CTL (0x00D0) +#define NPA_AF_ACTIVE_CYCLES_PC (0x00F0) +#define NPA_AF_AVG_DELAY (0x0100) +#define NPA_AF_GEN_INT (0x0140) +#define NPA_AF_GEN_INT_W1S (0x0148) +#define NPA_AF_GEN_INT_ENA_W1S (0x0150) +#define NPA_AF_GEN_INT_ENA_W1C (0x0158) +#define NPA_AF_RVU_INT (0x0160) +#define NPA_AF_RVU_INT_W1S (0x0168) +#define NPA_AF_RVU_INT_ENA_W1S (0x0170) +#define NPA_AF_RVU_INT_ENA_W1C (0x0178) +#define NPA_AF_ERR_INT (0x0180) +#define NPA_AF_ERR_INT_W1S (0x0188) +#define NPA_AF_ERR_INT_ENA_W1S (0x0190) +#define NPA_AF_ERR_INT_ENA_W1C (0x0198) +#define NPA_AF_RAS (0x01A0) +#define NPA_AF_RAS_W1S (0x01A8) +#define NPA_AF_RAS_ENA_W1S (0x01B0) +#define NPA_AF_RAS_ENA_W1C (0x01B8) +#define NPA_AF_BP_TEST (0x0200) +#define NPA_AF_ECO (0x0300) +#define NPA_AF_AQ_CFG (0x0600) +#define NPA_AF_AQ_BASE (0x0610) +#define NPA_AF_AQ_STATUS (0x0620) +#define NPA_AF_AQ_DOOR (0x0630) +#define NPA_AF_AQ_DONE_WAIT (0x0640) +#define NPA_AF_AQ_DONE (0x0650) +#define NPA_AF_AQ_DONE_ACK (0x0660) +#define NPA_AF_AQ_DONE_INT (0x0680) +#define NPA_AF_AQ_DONE_INT_W1S (0x0688) +#define NPA_AF_AQ_DONE_ENA_W1S (0x0690) +#define NPA_AF_AQ_DONE_ENA_W1C (0x0698) +#define NPA_AF_LFX_AURAS_CFG(a) (0x4000 | (a) << 18) +#define NPA_AF_LFX_LOC_AURAS_BASE(a) (0x4010 | (a) << 18) +#define NPA_AF_LFX_QINTS_CFG(a) (0x4100 | (a) << 18) +#define NPA_AF_LFX_QINTS_BASE(a) (0x4110 | (a) << 18) +#define NPA_PRIV_AF_INT_CFG (0x10000) +#define NPA_PRIV_LFX_CFG (0x10010) +#define NPA_PRIV_LFX_INT_CFG (0x10020) +#define NPA_AF_RVU_LF_CFG_DEBUG (0x10030) + +/* NIX block's admin function registers */ +#define NIX_AF_CFG (0x0000) +#define NIX_AF_STATUS (0x0010) +#define NIX_AF_NDC_CFG (0x0018) +#define NIX_AF_CONST (0x0020) +#define NIX_AF_CONST1 (0x0028) +#define NIX_AF_CONST2 (0x0030) +#define NIX_AF_CONST3 (0x0038) +#define NIX_AF_SQ_CONST (0x0040) +#define NIX_AF_CQ_CONST (0x0048) +#define NIX_AF_RQ_CONST (0x0050) +#define NIX_AF_PSE_CONST (0x0060) +#define NIX_AF_TL1_CONST (0x0070) +#define NIX_AF_TL2_CONST (0x0078) +#define NIX_AF_TL3_CONST (0x0080) +#define NIX_AF_TL4_CONST (0x0088) +#define NIX_AF_MDQ_CONST (0x0090) +#define NIX_AF_MC_MIRROR_CONST (0x0098) +#define NIX_AF_LSO_CFG (0x00A8) +#define NIX_AF_BLK_RST (0x00B0) +#define NIX_AF_TX_TSTMP_CFG (0x00C0) +#define NIX_AF_RX_CFG (0x00D0) +#define NIX_AF_AVG_DELAY (0x00E0) +#define NIX_AF_CINT_DELAY (0x00F0) +#define NIX_AF_RX_MCAST_BASE (0x0100) +#define NIX_AF_RX_MCAST_CFG (0x0110) +#define NIX_AF_RX_MCAST_BUF_BASE (0x0120) +#define NIX_AF_RX_MCAST_BUF_CFG (0x0130) +#define NIX_AF_RX_MIRROR_BUF_BASE (0x0140) +#define NIX_AF_RX_MIRROR_BUF_CFG (0x0148) +#define NIX_AF_LF_RST (0x0150) +#define NIX_AF_GEN_INT (0x0160) +#define NIX_AF_GEN_INT_W1S (0x0168) +#define NIX_AF_GEN_INT_ENA_W1S (0x0170) +#define NIX_AF_GEN_INT_ENA_W1C (0x0178) +#define NIX_AF_ERR_INT (0x0180) +#define NIX_AF_ERR_INT_W1S (0x0188) +#define NIX_AF_ERR_INT_ENA_W1S (0x0190) 
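Most of the indexed CSR macros in this header follow one pattern: a fixed base offset OR'ed with the PF/LF/queue number shifted into the higher address bits, so every index gets its own aligned register window (for example NPA_AF_LFX_AURAS_CFG(3) above expands to 0x4000 | (3 << 18) = 0xC4000). A small stand-alone sketch of that arithmetic, with an ad-hoc helper name that is not taken from the driver:

#include <stdint.h>

/* Build a per-index CSR offset the way the indexed macros above do:
 * base | (index << shift).  The shift sets the per-index stride, e.g.
 * a shift of 18 means one 256 KB window per NPA LF.
 */
static inline uint64_t indexed_csr(uint64_t base, uint64_t index,
				   unsigned int shift)
{
	return base | (index << shift);
}

/* indexed_csr(0x4000, 3, 18) == 0xC4000, i.e. NPA_AF_LFX_AURAS_CFG(3). */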
+#define NIX_AF_ERR_INT_ENA_W1C (0x0198) +#define NIX_AF_RAS (0x01A0) +#define NIX_AF_RAS_W1S (0x01A8) +#define NIX_AF_RAS_ENA_W1S (0x01B0) +#define NIX_AF_RAS_ENA_W1C (0x01B8) +#define NIX_AF_RVU_INT (0x01C0) +#define NIX_AF_RVU_INT_W1S (0x01C8) +#define NIX_AF_RVU_INT_ENA_W1S (0x01D0) +#define NIX_AF_RVU_INT_ENA_W1C (0x01D8) +#define NIX_AF_TCP_TIMER (0x01E0) +#define NIX_AF_RX_WQE_TAG_CTL (0x01F0) +#define NIX_AF_RX_DEF_OL2 (0x0200) +#define NIX_AF_RX_DEF_OIP4 (0x0210) +#define NIX_AF_RX_DEF_IIP4 (0x0220) +#define NIX_AF_RX_DEF_OIP6 (0x0230) +#define NIX_AF_RX_DEF_IIP6 (0x0240) +#define NIX_AF_RX_DEF_OTCP (0x0250) +#define NIX_AF_RX_DEF_ITCP (0x0260) +#define NIX_AF_RX_DEF_OUDP (0x0270) +#define NIX_AF_RX_DEF_IUDP (0x0280) +#define NIX_AF_RX_DEF_OSCTP (0x0290) +#define NIX_AF_RX_DEF_ISCTP (0x02A0) +#define NIX_AF_RX_DEF_IPSECX (0x02B0) +#define NIX_AF_RX_IPSEC_GEN_CFG (0x0300) +#define NIX_AF_RX_CPTX_INST_ADDR (0x0310) +#define NIX_AF_NDC_TX_SYNC (0x03F0) +#define NIX_AF_AQ_CFG (0x0400) +#define NIX_AF_AQ_BASE (0x0410) +#define NIX_AF_AQ_STATUS (0x0420) +#define NIX_AF_AQ_DOOR (0x0430) +#define NIX_AF_AQ_DONE_WAIT (0x0440) +#define NIX_AF_AQ_DONE (0x0450) +#define NIX_AF_AQ_DONE_ACK (0x0460) +#define NIX_AF_AQ_DONE_TIMER (0x0470) +#define NIX_AF_AQ_DONE_INT (0x0480) +#define NIX_AF_AQ_DONE_INT_W1S (0x0488) +#define NIX_AF_AQ_DONE_ENA_W1S (0x0490) +#define NIX_AF_AQ_DONE_ENA_W1C (0x0498) +#define NIX_AF_RX_LINKX_SLX_SPKT_CNT (0x0500) +#define NIX_AF_RX_LINKX_SLX_SXQE_CNT (0x0510) +#define NIX_AF_RX_MCAST_JOBSX_SW_CNT (0x0520) +#define NIX_AF_RX_MIRROR_JOBSX_SW_CNT (0x0530) +#define NIX_AF_RX_LINKX_CFG(a) (0x0540 | (a) << 16) +#define NIX_AF_RX_SW_SYNC (0x0550) +#define NIX_AF_RX_SW_SYNC_DONE (0x0560) +#define NIX_AF_SEB_ECO (0x0600) +#define NIX_AF_SEB_TEST_BP (0x0610) +#define NIX_AF_NORM_TX_FIFO_STATUS (0x0620) +#define NIX_AF_EXPR_TX_FIFO_STATUS (0x0630) +#define NIX_AF_SDP_TX_FIFO_STATUS (0x0640) +#define NIX_AF_TX_NPC_CAPTURE_CONFIG (0x0660) +#define NIX_AF_TX_NPC_CAPTURE_INFO (0x0670) + +#define NIX_AF_DEBUG_NPC_RESP_DATAX(a) (0x680 | (a) << 3) +#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16) +#define NIX_AF_SQM_DBG_CTL_STATUS (0x750) +#define NIX_AF_PSE_CHANNEL_LEVEL (0x800) +#define NIX_AF_PSE_SHAPER_CFG (0x810) +#define NIX_AF_TX_EXPR_CREDIT (0x830) +#define NIX_AF_MARK_FORMATX_CTL(a) (0x900 | (a) << 18) +#define NIX_AF_TX_LINKX_NORM_CREDIT(a) (0xA00 | (a) << 16) +#define NIX_AF_TX_LINKX_EXPR_CREDIT(a) (0xA10 | (a) << 16) +#define NIX_AF_TX_LINKX_SW_XOFF(a) (0xA20 | (a) << 16) +#define NIX_AF_TX_LINKX_HW_XOFF(a) (0xA30 | (a) << 16) +#define NIX_AF_SDP_LINK_CREDIT (0xa40) +#define NIX_AF_SDP_SW_XOFFX(a) (0xA60 | (a) << 3) +#define NIX_AF_SDP_HW_XOFFX(a) (0xAC0 | (a) << 3) +#define NIX_AF_TL4X_BP_STATUS(a) (0xB00 | (a) << 16) +#define NIX_AF_TL4X_SDP_LINK_CFG(a) (0xB10 | (a) << 16) +#define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (a) << 16) +#define NIX_AF_TL1X_SHAPE(a) (0xC10 | (a) << 16) +#define NIX_AF_TL1X_CIR(a) (0xC20 | (a) << 16) +#define NIX_AF_TL1X_SHAPE_STATE(a) (0xC50 | (a) << 16) +#define NIX_AF_TL1X_SW_XOFF(a) (0xC70 | (a) << 16) +#define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (a) << 16) +#define NIX_AF_TL1X_GREEN(a) (0xC90 | (a) << 16) +#define NIX_AF_TL1X_YELLOW(a) (0xCA0 | (a) << 16) +#define NIX_AF_TL1X_RED(a) (0xCB0 | (a) << 16) +#define NIX_AF_TL1X_MD_DEBUG0(a) (0xCC0 | (a) << 16) +#define NIX_AF_TL1X_MD_DEBUG1(a) (0xCC8 | (a) << 16) +#define NIX_AF_TL1X_MD_DEBUG2(a) (0xCD0 | (a) << 16) +#define NIX_AF_TL1X_MD_DEBUG3(a) (0xCD8 | (a) << 16) +#define NIX_AF_TL1A_DEBUG (0xce0) 
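/*
 * Illustrative note: the "(a) << 16" term in the NIX_AF_TL1X_* macros above
 * places each TL1 scheduler queue's registers at a 64 KB stride, e.g.
 * NIX_AF_TL1X_SCHEDULE(0) == 0x00C00, NIX_AF_TL1X_SCHEDULE(1) == 0x10C00 and
 * NIX_AF_TL1X_SCHEDULE(3) == 0x30C00. The same base-OR-shifted-index scheme
 * is used by the other per-queue and per-LF macros in this file, with the
 * shift width matching that block's register spacing.
 */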
+#define NIX_AF_TL1B_DEBUG (0xcf0) +#define NIX_AF_TL1_DEBUG_GREEN (0xd00) +#define NIX_AF_TL1_DEBUG_NODE (0xd10) +#define NIX_AF_TL1X_DROPPED_PACKETS(a) (0xD20 | (a) << 16) +#define NIX_AF_TL1X_DROPPED_BYTES(a) (0xD30 | (a) << 16) +#define NIX_AF_TL1X_RED_PACKETS(a) (0xD40 | (a) << 16) +#define NIX_AF_TL1X_RED_BYTES(a) (0xD50 | (a) << 16) +#define NIX_AF_TL1X_YELLOW_PACKETS(a) (0xD60 | (a) << 16) +#define NIX_AF_TL1X_YELLOW_BYTES(a) (0xD70 | (a) << 16) +#define NIX_AF_TL1X_GREEN_PACKETS(a) (0xD80 | (a) << 16) +#define NIX_AF_TL1X_GREEN_BYTES(a) (0xD90 | (a) << 16) +#define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (a) << 16) +#define NIX_AF_TL2X_SHAPE(a) (0xE10 | (a) << 16) +#define NIX_AF_TL2X_CIR(a) (0xE20 | (a) << 16) +#define NIX_AF_TL2X_PIR(a) (0xE30 | (a) << 16) +#define NIX_AF_TL2X_SCHED_STATE(a) (0xE40 | (a) << 16) +#define NIX_AF_TL2X_SHAPE_STATE(a) (0xE50 | (a) << 16) +#define NIX_AF_TL2X_POINTERS(a) (0xE60 | (a) << 16) +#define NIX_AF_TL2X_SW_XOFF(a) (0xE70 | (a) << 16) +#define NIX_AF_TL2X_TOPOLOGY(a) (0xE80 | (a) << 16) +#define NIX_AF_TL2X_PARENT(a) (0xE88 | (a) << 16) +#define NIX_AF_TL2X_GREEN(a) (0xE90 | (a) << 16) +#define NIX_AF_TL2X_YELLOW(a) (0xEA0 | (a) << 16) +#define NIX_AF_TL2X_RED(a) (0xEB0 | (a) << 16) +#define NIX_AF_TL2X_MD_DEBUG0(a) (0xEC0 | (a) << 16) +#define NIX_AF_TL2X_MD_DEBUG1(a) (0xEC8 | (a) << 16) +#define NIX_AF_TL2X_MD_DEBUG2(a) (0xED0 | (a) << 16) +#define NIX_AF_TL2X_MD_DEBUG3(a) (0xED8 | (a) << 16) +#define NIX_AF_TL2A_DEBUG (0xee0) +#define NIX_AF_TL2B_DEBUG (0xef0) +#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (a) << 16) +#define NIX_AF_TL3X_SHAPE(a) (0x1010 | (a) << 16) +#define NIX_AF_TL3X_CIR(a) (0x1020 | (a) << 16) +#define NIX_AF_TL3X_PIR(a) (0x1030 | (a) << 16) +#define NIX_AF_TL3X_SCHED_STATE(a) (0x1040 | (a) << 16) +#define NIX_AF_TL3X_SHAPE_STATE(a) (0x1050 | (a) << 16) +#define NIX_AF_TL3X_POINTERS(a) (0x1060 | (a) << 16) +#define NIX_AF_TL3X_SW_XOFF(a) (0x1070 | (a) << 16) +#define NIX_AF_TL3X_TOPOLOGY(a) (0x1080 | (a) << 16) +#define NIX_AF_TL3X_PARENT(a) (0x1088 | (a) << 16) +#define NIX_AF_TL3X_GREEN(a) (0x1090 | (a) << 16) +#define NIX_AF_TL3X_YELLOW(a) (0x10A0 | (a) << 16) +#define NIX_AF_TL3X_RED(a) (0x10B0 | (a) << 16) +#define NIX_AF_TL3X_MD_DEBUG0(a) (0x10C0 | (a) << 16) +#define NIX_AF_TL3X_MD_DEBUG1(a) (0x10C8 | (a) << 16) +#define NIX_AF_TL3X_MD_DEBUG2(a) (0x10D0 | (a) << 16) +#define NIX_AF_TL3X_MD_DEBUG3(a) (0x10D8 | (a) << 16) +#define NIX_AF_TL3A_DEBUG (0x10e0) +#define NIX_AF_TL3B_DEBUG (0x10f0) +#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (a) << 16) +#define NIX_AF_TL4X_SHAPE(a) (0x1210 | (a) << 16) +#define NIX_AF_TL4X_CIR(a) (0x1220 | (a) << 16) +#define NIX_AF_TL4X_PIR(a) (0x1230 | (a) << 16) +#define NIX_AF_TL4X_SCHED_STATE(a) (0x1240 | (a) << 16) +#define NIX_AF_TL4X_SHAPE_STATE(a) (0x1250 | (a) << 16) +#define NIX_AF_TL4X_POINTERS(a) (0x1260 | (a) << 16) +#define NIX_AF_TL4X_SW_XOFF(a) (0x1270 | (a) << 16) +#define NIX_AF_TL4X_TOPOLOGY(a) (0x1280 | (a) << 16) +#define NIX_AF_TL4X_PARENT(a) (0x1288 | (a) << 16) +#define NIX_AF_TL4X_GREEN(a) (0x1290 | (a) << 16) +#define NIX_AF_TL4X_YELLOW(a) (0x12A0 | (a) << 16) +#define NIX_AF_TL4X_RED(a) (0x12B0 | (a) << 16) +#define NIX_AF_TL4X_MD_DEBUG0(a) (0x12C0 | (a) << 16) +#define NIX_AF_TL4X_MD_DEBUG1(a) (0x12C8 | (a) << 16) +#define NIX_AF_TL4X_MD_DEBUG2(a) (0x12D0 | (a) << 16) +#define NIX_AF_TL4X_MD_DEBUG3(a) (0x12D8 | (a) << 16) +#define NIX_AF_TL4A_DEBUG (0x12e0) +#define NIX_AF_TL4B_DEBUG (0x12f0) +#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (a) << 16) +#define 
NIX_AF_MDQX_SHAPE(a) (0x1410 | (a) << 16) +#define NIX_AF_MDQX_CIR(a) (0x1420 | (a) << 16) +#define NIX_AF_MDQX_PIR(a) (0x1430 | (a) << 16) +#define NIX_AF_MDQX_SCHED_STATE(a) (0x1440 | (a) << 16) +#define NIX_AF_MDQX_SHAPE_STATE(a) (0x1450 | (a) << 16) +#define NIX_AF_MDQX_POINTERS(a) (0x1460 | (a) << 16) +#define NIX_AF_MDQX_SW_XOFF(a) (0x1470 | (a) << 16) +#define NIX_AF_MDQX_PARENT(a) (0x1480 | (a) << 16) +#define NIX_AF_MDQX_MD_DEBUG(a) (0x14C0 | (a) << 16) +#define NIX_AF_MDQX_PTR_FIFO(a) (0x14D0 | (a) << 16) +#define NIX_AF_MDQA_DEBUG (0x14e0) +#define NIX_AF_MDQB_DEBUG (0x14f0) +#define NIX_AF_TL3_TL2X_CFG(a) (0x1600 | (a) << 18) +#define NIX_AF_TL3_TL2X_BP_STATUS(a) (0x1610 | (a) << 16) +#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (a) << 16 | (b) << 3) +#define NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(a, b) (0x1800 | (a) << 18 | (b) << 3) +#define NIX_AF_TX_MCASTX(a) (0x1900 | (a) << 15) +#define NIX_AF_TX_VTAG_DEFX_CTL(a) (0x1A00 | (a) << 16) +#define NIX_AF_TX_VTAG_DEFX_DATA(a) (0x1A10 | (a) << 16) +#define NIX_AF_RX_BPIDX_STATUS(a) (0x1A20 | (a) << 17) +#define NIX_AF_RX_CHANX_CFG(a) (0x1A30 | (a) << 15) +#define NIX_AF_CINT_TIMERX(a) (0x1A40 | (a) << 18) +#define NIX_AF_LSO_FORMATX_FIELDX(a, b) (0x1B00 | (a) << 16 | (b) << 3) +#define NIX_AF_LFX_CFG(a) (0x4000 | (a) << 17) +#define NIX_AF_LFX_SQS_CFG(a) (0x4020 | (a) << 17) +#define NIX_AF_LFX_TX_CFG2(a) (0x4028 | (a) << 17) +#define NIX_AF_LFX_SQS_BASE(a) (0x4030 | (a) << 17) +#define NIX_AF_LFX_RQS_CFG(a) (0x4040 | (a) << 17) +#define NIX_AF_LFX_RQS_BASE(a) (0x4050 | (a) << 17) +#define NIX_AF_LFX_CQS_CFG(a) (0x4060 | (a) << 17) +#define NIX_AF_LFX_CQS_BASE(a) (0x4070 | (a) << 17) +#define NIX_AF_LFX_TX_CFG(a) (0x4080 | (a) << 17) +#define NIX_AF_LFX_TX_PARSE_CFG(a) (0x4090 | (a) << 17) +#define NIX_AF_LFX_RX_CFG(a) (0x40A0 | (a) << 17) +#define NIX_AF_LFX_RSS_CFG(a) (0x40C0 | (a) << 17) +#define NIX_AF_LFX_RSS_BASE(a) (0x40D0 | (a) << 17) +#define NIX_AF_LFX_QINTS_CFG(a) (0x4100 | (a) << 17) +#define NIX_AF_LFX_QINTS_BASE(a) (0x4110 | (a) << 17) +#define NIX_AF_LFX_CINTS_CFG(a) (0x4120 | (a) << 17) +#define NIX_AF_LFX_CINTS_BASE(a) (0x4130 | (a) << 17) +#define NIX_AF_LFX_RX_IPSEC_CFG0(a) (0x4140 | (a) << 17) +#define NIX_AF_LFX_RX_IPSEC_CFG1(a) (0x4148 | (a) << 17) +#define NIX_AF_LFX_RX_IPSEC_DYNO_CFG(a) (0x4150 | (a) << 17) +#define NIX_AF_LFX_RX_IPSEC_DYNO_BASE(a) (0x4158 | (a) << 17) +#define NIX_AF_LFX_RX_IPSEC_SA_BASE(a) (0x4170 | (a) << 17) +#define NIX_AF_LFX_TX_STATUS(a) (0x4180 | (a) << 17) +#define NIX_AF_LFX_RX_VTAG_TYPEX(a, b) (0x4200 | (a) << 17 | (b) << 3) +#define NIX_AF_LFX_LOCKX(a, b) (0x4300 | (a) << 17 | (b) << 3) +#define NIX_AF_LFX_TX_STATX(a, b) (0x4400 | (a) << 17 | (b) << 3) +#define NIX_AF_LFX_RX_STATX(a, b) (0x4500 | (a) << 17 | (b) << 3) +#define NIX_AF_LFX_RSS_GRPX(a, b) (0x4600 | (a) << 17 | (b) << 3) +#define NIX_AF_RX_NPC_MC_RCV (0x4700) +#define NIX_AF_RX_NPC_MC_DROP (0x4710) +#define NIX_AF_RX_NPC_MIRROR_RCV (0x4720) +#define NIX_AF_RX_NPC_MIRROR_DROP (0x4730) +#define NIX_AF_RX_ACTIVE_CYCLES_PCX(a) (0x4800 | (a) << 16) + +#define NIX_PRIV_AF_INT_CFG (0x8000000) +#define NIX_PRIV_LFX_CFG (0x8000010) +#define NIX_PRIV_LFX_INT_CFG (0x8000020) +#define NIX_AF_RVU_LF_CFG_DEBUG (0x8000030) + +/* SSO */ +#define SSO_AF_CONST (0x1000) +#define SSO_AF_CONST1 (0x1008) +#define SSO_AF_BLK_RST (0x10f8) +#define SSO_AF_LF_HWGRP_RST (0x10e0) +#define SSO_AF_RVU_LF_CFG_DEBUG (0x3800) +#define SSO_PRIV_LFX_HWGRP_CFG (0x10000) +#define SSO_PRIV_LFX_HWGRP_INT_CFG (0x20000) + +/* SSOW */ +#define 
SSOW_AF_RVU_LF_HWS_CFG_DEBUG (0x0010) +#define SSOW_AF_LF_HWS_RST (0x0030) +#define SSOW_PRIV_LFX_HWS_CFG (0x1000) +#define SSOW_PRIV_LFX_HWS_INT_CFG (0x2000) + +/* TIM */ +#define TIM_AF_CONST (0x90) +#define TIM_PRIV_LFX_CFG (0x20000) +#define TIM_PRIV_LFX_INT_CFG (0x24000) +#define TIM_AF_RVU_LF_CFG_DEBUG (0x30000) +#define TIM_AF_BLK_RST (0x10) +#define TIM_AF_LF_RST (0x20) + +/* CPT */ +#define CPT_AF_CONSTANTS0 (0x0000) +#define CPT_PRIV_LFX_CFG (0x41000) +#define CPT_PRIV_LFX_INT_CFG (0x43000) +#define CPT_AF_RVU_LF_CFG_DEBUG (0x45000) +#define CPT_AF_LF_RST (0x44000) +#define CPT_AF_BLK_RST (0x46000) + +#define NPC_AF_BLK_RST (0x00040) + +/* NPC */ +#define NPC_AF_CFG (0x00000) +#define NPC_AF_ACTIVE_PC (0x00010) +#define NPC_AF_CONST (0x00020) +#define NPC_AF_CONST1 (0x00030) +#define NPC_AF_BLK_RST (0x00040) +#define NPC_AF_MCAM_SCRUB_CTL (0x000a0) +#define NPC_AF_KCAM_SCRUB_CTL (0x000b0) +#define NPC_AF_KPUX_CFG(a) (0x00500 | (a) << 3) +#define NPC_AF_PCK_CFG (0x00600) +#define NPC_AF_PCK_DEF_OL2 (0x00610) +#define NPC_AF_PCK_DEF_OIP4 (0x00620) +#define NPC_AF_PCK_DEF_OIP6 (0x00630) +#define NPC_AF_PCK_DEF_IIP4 (0x00640) +#define NPC_AF_KEX_LDATAX_FLAGS_CFG(a) (0x00800 | (a) << 3) +#define NPC_AF_INTFX_KEX_CFG(a) (0x01010 | (a) << 8) +#define NPC_AF_PKINDX_ACTION0(a) (0x80000ull | (a) << 6) +#define NPC_AF_PKINDX_ACTION1(a) (0x80008ull | (a) << 6) +#define NPC_AF_PKINDX_CPI_DEFX(a, b) (0x80020ull | (a) << 6 | (b) << 3) +#define NPC_AF_KPUX_ENTRYX_CAMX(a, b, c) \ + (0x100000 | (a) << 14 | (b) << 6 | (c) << 3) +#define NPC_AF_KPUX_ENTRYX_ACTION0(a, b) \ + (0x100020 | (a) << 14 | (b) << 6) +#define NPC_AF_KPUX_ENTRYX_ACTION1(a, b) \ + (0x100028 | (a) << 14 | (b) << 6) +#define NPC_AF_KPUX_ENTRY_DISX(a, b) (0x180000 | (a) << 6 | (b) << 3) +#define NPC_AF_CPIX_CFG(a) (0x200000 | (a) << 3) +#define NPC_AF_INTFX_LIDX_LTX_LDX_CFG(a, b, c, d) \ + (0x900000 | (a) << 16 | (b) << 12 | (c) << 5 | (d) << 3) +#define NPC_AF_INTFX_LDATAX_FLAGSX_CFG(a, b, c) \ + (0x980000 | (a) << 16 | (b) << 12 | (c) << 3) +#define NPC_AF_MCAMEX_BANKX_CAMX_INTF(a, b, c) \ + (0x1000000ull | (a) << 10 | (b) << 6 | (c) << 3) +#define NPC_AF_MCAMEX_BANKX_CAMX_W0(a, b, c) \ + (0x1000010ull | (a) << 10 | (b) << 6 | (c) << 3) +#define NPC_AF_MCAMEX_BANKX_CAMX_W1(a, b, c) \ + (0x1000020ull | (a) << 10 | (b) << 6 | (c) << 3) +#define NPC_AF_MCAMEX_BANKX_CFG(a, b) (0x1800000ull | (a) << 8 | (b) << 4) +#define NPC_AF_MCAMEX_BANKX_STAT_ACT(a, b) \ + (0x1880000 | (a) << 8 | (b) << 4) +#define NPC_AF_MATCH_STATX(a) (0x1880008 | (a) << 8) +#define NPC_AF_INTFX_MISS_STAT_ACT(a) (0x1880040 + (a) * 0x8) +#define NPC_AF_MCAMEX_BANKX_ACTION(a, b) (0x1900000ull | (a) << 8 | (b) << 4) +#define NPC_AF_MCAMEX_BANKX_TAG_ACT(a, b) \ + (0x1900008 | (a) << 8 | (b) << 4) +#define NPC_AF_INTFX_MISS_ACT(a) (0x1a00000 | (a) << 4) +#define NPC_AF_INTFX_MISS_TAG_ACT(a) (0x1b00008 | (a) << 4) +#define NPC_AF_MCAM_BANKX_HITX(a, b) (0x1c80000 | (a) << 8 | (b) << 4) +#define NPC_AF_LKUP_CTL (0x2000000) +#define NPC_AF_LKUP_DATAX(a) (0x2000200 | (a) << 4) +#define NPC_AF_LKUP_RESULTX(a) (0x2000400 | (a) << 4) +#define NPC_AF_INTFX_STAT(a) (0x2000800 | (a) << 4) +#define NPC_AF_DBG_CTL (0x3000000) +#define NPC_AF_DBG_STATUS (0x3000010) +#define NPC_AF_KPUX_DBG(a) (0x3000020 | (a) << 8) +#define NPC_AF_IKPU_ERR_CTL (0x3000080) +#define NPC_AF_KPUX_ERR_CTL(a) (0x30000a0 | (a) << 8) +#define NPC_AF_MCAM_DBG (0x3001000) +#define NPC_AF_DBG_DATAX(a) (0x3001400 | (a) << 4) +#define NPC_AF_DBG_RESULTX(a) (0x3001800 | (a) << 4) + +/* NDC */ +#define 
NDC_AF_CONST (0x00000) +#define NDC_AF_CLK_EN (0x00020) +#define NDC_AF_CTL (0x00030) +#define NDC_AF_BANK_CTL (0x00040) +#define NDC_AF_BANK_CTL_DONE (0x00048) +#define NDC_AF_INTR (0x00058) +#define NDC_AF_INTR_W1S (0x00060) +#define NDC_AF_INTR_ENA_W1S (0x00068) +#define NDC_AF_INTR_ENA_W1C (0x00070) +#define NDC_AF_ACTIVE_PC (0x00078) +#define NDC_AF_BP_TEST_ENABLE (0x001F8) +#define NDC_AF_BP_TEST(a) (0x00200 | (a) << 3) +#define NDC_AF_BLK_RST (0x002F0) +#define NDC_PRIV_AF_INT_CFG (0x002F8) +#define NDC_AF_HASHX(a) (0x00300 | (a) << 3) +#define NDC_AF_PORTX_RTX_RWX_REQ_PC(a, b, c) \ + (0x00C00 | (a) << 5 | (b) << 4 | (c) << 3) +#define NDC_AF_PORTX_RTX_RWX_OSTDN_PC(a, b, c) \ + (0x00D00 | (a) << 5 | (b) << 4 | (c) << 3) +#define NDC_AF_PORTX_RTX_RWX_LAT_PC(a, b, c) \ + (0x00E00 | (a) << 5 | (b) << 4 | (c) << 3) +#define NDC_AF_PORTX_RTX_CANT_ALLOC_PC(a, b) \ + (0x00F00 | (a) << 5 | (b) << 4) +#define NDC_AF_BANKX_HIT_PC(a) (0x01000 | (a) << 3) +#define NDC_AF_BANKX_MISS_PC(a) (0x01100 | (a) << 3) +#endif /* RVU_REG_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h new file mode 100644 index 000000000..a3ecb5de9 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h @@ -0,0 +1,920 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef RVU_STRUCT_H +#define RVU_STRUCT_H + +/* RVU Block revision IDs */ +#define RVU_BLK_RVUM_REVID 0x01 + +/* RVU Block Address Enumeration */ +enum rvu_block_addr_e { + BLKADDR_RVUM = 0x0ULL, + BLKADDR_LMT = 0x1ULL, + BLKADDR_MSIX = 0x2ULL, + BLKADDR_NPA = 0x3ULL, + BLKADDR_NIX0 = 0x4ULL, + BLKADDR_NIX1 = 0x5ULL, + BLKADDR_NPC = 0x6ULL, + BLKADDR_SSO = 0x7ULL, + BLKADDR_SSOW = 0x8ULL, + BLKADDR_TIM = 0x9ULL, + BLKADDR_CPT0 = 0xaULL, + BLKADDR_CPT1 = 0xbULL, + BLKADDR_NDC_NIX0_RX = 0xcULL, + BLKADDR_NDC_NIX0_TX = 0xdULL, + BLKADDR_NDC_NPA0 = 0xeULL, + BLK_COUNT = 0xfULL, +}; + +/* RVU Block Type Enumeration */ +enum rvu_block_type_e { + BLKTYPE_RVUM = 0x0, + BLKTYPE_MSIX = 0x1, + BLKTYPE_LMT = 0x2, + BLKTYPE_NIX = 0x3, + BLKTYPE_NPA = 0x4, + BLKTYPE_NPC = 0x5, + BLKTYPE_SSO = 0x6, + BLKTYPE_SSOW = 0x7, + BLKTYPE_TIM = 0x8, + BLKTYPE_CPT = 0x9, + BLKTYPE_NDC = 0xa, + BLKTYPE_MAX = 0xa, +}; + +/* RVU Admin function Interrupt Vector Enumeration */ +enum rvu_af_int_vec_e { + RVU_AF_INT_VEC_POISON = 0x0, + RVU_AF_INT_VEC_PFFLR = 0x1, + RVU_AF_INT_VEC_PFME = 0x2, + RVU_AF_INT_VEC_GEN = 0x3, + RVU_AF_INT_VEC_MBOX = 0x4, + RVU_AF_INT_VEC_CNT = 0x5, +}; + +/** + * RVU PF Interrupt Vector Enumeration + */ +enum rvu_pf_int_vec_e { + RVU_PF_INT_VEC_VFFLR0 = 0x0, + RVU_PF_INT_VEC_VFFLR1 = 0x1, + RVU_PF_INT_VEC_VFME0 = 0x2, + RVU_PF_INT_VEC_VFME1 = 0x3, + RVU_PF_INT_VEC_VFPF_MBOX0 = 0x4, + RVU_PF_INT_VEC_VFPF_MBOX1 = 0x5, + RVU_PF_INT_VEC_AFPF_MBOX = 0x6, + RVU_PF_INT_VEC_CNT = 0x7, +}; + +/* NPA admin queue completion enumeration */ +enum npa_aq_comp { + NPA_AQ_COMP_NOTDONE = 0x0, + NPA_AQ_COMP_GOOD = 0x1, + NPA_AQ_COMP_SWERR = 0x2, + NPA_AQ_COMP_CTX_POISON = 0x3, + NPA_AQ_COMP_CTX_FAULT = 0x4, + NPA_AQ_COMP_LOCKERR = 0x5, +}; + +/* NPA admin queue context types */ +enum npa_aq_ctype { + NPA_AQ_CTYPE_AURA = 0x0, + NPA_AQ_CTYPE_POOL = 0x1, +}; + +/* NPA admin queue instruction 
opcodes */ +enum npa_aq_instop { + NPA_AQ_INSTOP_NOP = 0x0, + NPA_AQ_INSTOP_INIT = 0x1, + NPA_AQ_INSTOP_WRITE = 0x2, + NPA_AQ_INSTOP_READ = 0x3, + NPA_AQ_INSTOP_LOCK = 0x4, + NPA_AQ_INSTOP_UNLOCK = 0x5, +}; + +/* NPA admin queue instruction structure */ +struct npa_aq_inst_s { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 doneint : 1; /* W0 */ + u64 reserved_44_62 : 19; + u64 cindex : 20; + u64 reserved_17_23 : 7; + u64 lf : 9; + u64 ctype : 4; + u64 op : 4; +#else + u64 op : 4; + u64 ctype : 4; + u64 lf : 9; + u64 reserved_17_23 : 7; + u64 cindex : 20; + u64 reserved_44_62 : 19; + u64 doneint : 1; +#endif + u64 res_addr; /* W1 */ +}; + +/* NPA admin queue result structure */ +struct npa_aq_res_s { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 reserved_17_63 : 47; /* W0 */ + u64 doneint : 1; + u64 compcode : 8; + u64 ctype : 4; + u64 op : 4; +#else + u64 op : 4; + u64 ctype : 4; + u64 compcode : 8; + u64 doneint : 1; + u64 reserved_17_63 : 47; +#endif + u64 reserved_64_127; /* W1 */ +}; + +struct npa_aura_s { + u64 pool_addr; /* W0 */ +#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */ + u64 avg_level : 8; + u64 reserved_118_119 : 2; + u64 shift : 6; + u64 aura_drop : 8; + u64 reserved_98_103 : 6; + u64 bp_ena : 2; + u64 aura_drop_ena : 1; + u64 pool_drop_ena : 1; + u64 reserved_93 : 1; + u64 avg_con : 9; + u64 pool_way_mask : 16; + u64 pool_caching : 1; + u64 reserved_65 : 2; + u64 ena : 1; +#else + u64 ena : 1; + u64 reserved_65 : 2; + u64 pool_caching : 1; + u64 pool_way_mask : 16; + u64 avg_con : 9; + u64 reserved_93 : 1; + u64 pool_drop_ena : 1; + u64 aura_drop_ena : 1; + u64 bp_ena : 2; + u64 reserved_98_103 : 6; + u64 aura_drop : 8; + u64 shift : 6; + u64 reserved_118_119 : 2; + u64 avg_level : 8; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */ + u64 reserved_189_191 : 3; + u64 nix1_bpid : 9; + u64 reserved_177_179 : 3; + u64 nix0_bpid : 9; + u64 reserved_164_167 : 4; + u64 count : 36; +#else + u64 count : 36; + u64 reserved_164_167 : 4; + u64 nix0_bpid : 9; + u64 reserved_177_179 : 3; + u64 nix1_bpid : 9; + u64 reserved_189_191 : 3; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */ + u64 reserved_252_255 : 4; + u64 fc_hyst_bits : 4; + u64 fc_stype : 2; + u64 fc_up_crossing : 1; + u64 fc_ena : 1; + u64 reserved_240_243 : 4; + u64 bp : 8; + u64 reserved_228_231 : 4; + u64 limit : 36; +#else + u64 limit : 36; + u64 reserved_228_231 : 4; + u64 bp : 8; + u64 reserved_240_243 : 4; + u64 fc_ena : 1; + u64 fc_up_crossing : 1; + u64 fc_stype : 2; + u64 fc_hyst_bits : 4; + u64 reserved_252_255 : 4; +#endif + u64 fc_addr; /* W4 */ +#if defined(__BIG_ENDIAN_BITFIELD) /* W5 */ + u64 reserved_379_383 : 5; + u64 err_qint_idx : 7; + u64 reserved_371 : 1; + u64 thresh_qint_idx : 7; + u64 reserved_363 : 1; + u64 thresh_up : 1; + u64 thresh_int_ena : 1; + u64 thresh_int : 1; + u64 err_int_ena : 8; + u64 err_int : 8; + u64 update_time : 16; + u64 pool_drop : 8; +#else + u64 pool_drop : 8; + u64 update_time : 16; + u64 err_int : 8; + u64 err_int_ena : 8; + u64 thresh_int : 1; + u64 thresh_int_ena : 1; + u64 thresh_up : 1; + u64 reserved_363 : 1; + u64 thresh_qint_idx : 7; + u64 reserved_371 : 1; + u64 err_qint_idx : 7; + u64 reserved_379_383 : 5; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W6 */ + u64 reserved_420_447 : 28; + u64 thresh : 36; +#else + u64 thresh : 36; + u64 reserved_420_447 : 28; +#endif + u64 reserved_448_511; /* W7 */ +}; + +struct npa_pool_s { + u64 stack_base; /* W0 */ +#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */ + u64 reserved_115_127 : 13; + u64 buf_size : 11; + u64 reserved_100_103 
: 4; + u64 buf_offset : 12; + u64 stack_way_mask : 16; + u64 reserved_70_71 : 3; + u64 stack_caching : 1; + u64 reserved_66_67 : 2; + u64 nat_align : 1; + u64 ena : 1; +#else + u64 ena : 1; + u64 nat_align : 1; + u64 reserved_66_67 : 2; + u64 stack_caching : 1; + u64 reserved_70_71 : 3; + u64 stack_way_mask : 16; + u64 buf_offset : 12; + u64 reserved_100_103 : 4; + u64 buf_size : 11; + u64 reserved_115_127 : 13; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */ + u64 stack_pages : 32; + u64 stack_max_pages : 32; +#else + u64 stack_max_pages : 32; + u64 stack_pages : 32; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */ + u64 reserved_240_255 : 16; + u64 op_pc : 48; +#else + u64 op_pc : 48; + u64 reserved_240_255 : 16; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W4 */ + u64 reserved_316_319 : 4; + u64 update_time : 16; + u64 reserved_297_299 : 3; + u64 fc_up_crossing : 1; + u64 fc_hyst_bits : 4; + u64 fc_stype : 2; + u64 fc_ena : 1; + u64 avg_con : 9; + u64 avg_level : 8; + u64 reserved_270_271 : 2; + u64 shift : 6; + u64 reserved_260_263 : 4; + u64 stack_offset : 4; +#else + u64 stack_offset : 4; + u64 reserved_260_263 : 4; + u64 shift : 6; + u64 reserved_270_271 : 2; + u64 avg_level : 8; + u64 avg_con : 9; + u64 fc_ena : 1; + u64 fc_stype : 2; + u64 fc_hyst_bits : 4; + u64 fc_up_crossing : 1; + u64 reserved_297_299 : 3; + u64 update_time : 16; + u64 reserved_316_319 : 4; +#endif + u64 fc_addr; /* W5 */ + u64 ptr_start; /* W6 */ + u64 ptr_end; /* W7 */ +#if defined(__BIG_ENDIAN_BITFIELD) /* W8 */ + u64 reserved_571_575 : 5; + u64 err_qint_idx : 7; + u64 reserved_563 : 1; + u64 thresh_qint_idx : 7; + u64 reserved_555 : 1; + u64 thresh_up : 1; + u64 thresh_int_ena : 1; + u64 thresh_int : 1; + u64 err_int_ena : 8; + u64 err_int : 8; + u64 reserved_512_535 : 24; +#else + u64 reserved_512_535 : 24; + u64 err_int : 8; + u64 err_int_ena : 8; + u64 thresh_int : 1; + u64 thresh_int_ena : 1; + u64 thresh_up : 1; + u64 reserved_555 : 1; + u64 thresh_qint_idx : 7; + u64 reserved_563 : 1; + u64 err_qint_idx : 7; + u64 reserved_571_575 : 5; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W9 */ + u64 reserved_612_639 : 28; + u64 thresh : 36; +#else + u64 thresh : 36; + u64 reserved_612_639 : 28; +#endif + u64 reserved_640_703; /* W10 */ + u64 reserved_704_767; /* W11 */ + u64 reserved_768_831; /* W12 */ + u64 reserved_832_895; /* W13 */ + u64 reserved_896_959; /* W14 */ + u64 reserved_960_1023; /* W15 */ +}; + +/* NIX admin queue completion status */ +enum nix_aq_comp { + NIX_AQ_COMP_NOTDONE = 0x0, + NIX_AQ_COMP_GOOD = 0x1, + NIX_AQ_COMP_SWERR = 0x2, + NIX_AQ_COMP_CTX_POISON = 0x3, + NIX_AQ_COMP_CTX_FAULT = 0x4, + NIX_AQ_COMP_LOCKERR = 0x5, + NIX_AQ_COMP_SQB_ALLOC_FAIL = 0x6, +}; + +/* NIX admin queue context types */ +enum nix_aq_ctype { + NIX_AQ_CTYPE_RQ = 0x0, + NIX_AQ_CTYPE_SQ = 0x1, + NIX_AQ_CTYPE_CQ = 0x2, + NIX_AQ_CTYPE_MCE = 0x3, + NIX_AQ_CTYPE_RSS = 0x4, + NIX_AQ_CTYPE_DYNO = 0x5, +}; + +/* NIX admin queue instruction opcodes */ +enum nix_aq_instop { + NIX_AQ_INSTOP_NOP = 0x0, + NIX_AQ_INSTOP_INIT = 0x1, + NIX_AQ_INSTOP_WRITE = 0x2, + NIX_AQ_INSTOP_READ = 0x3, + NIX_AQ_INSTOP_LOCK = 0x4, + NIX_AQ_INSTOP_UNLOCK = 0x5, +}; + +/* NIX admin queue instruction structure */ +struct nix_aq_inst_s { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 doneint : 1; /* W0 */ + u64 reserved_44_62 : 19; + u64 cindex : 20; + u64 reserved_15_23 : 9; + u64 lf : 7; + u64 ctype : 4; + u64 op : 4; +#else + u64 op : 4; + u64 ctype : 4; + u64 lf : 7; + u64 reserved_15_23 : 9; + u64 cindex : 20; + u64 reserved_44_62 : 
19; + u64 doneint : 1; +#endif + u64 res_addr; /* W1 */ +}; + +/* NIX admin queue result structure */ +struct nix_aq_res_s { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 reserved_17_63 : 47; /* W0 */ + u64 doneint : 1; + u64 compcode : 8; + u64 ctype : 4; + u64 op : 4; +#else + u64 op : 4; + u64 ctype : 4; + u64 compcode : 8; + u64 doneint : 1; + u64 reserved_17_63 : 47; +#endif + u64 reserved_64_127; /* W1 */ +}; + +/* NIX Completion queue context structure */ +struct nix_cq_ctx_s { + u64 base; +#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */ + u64 wrptr : 20; + u64 avg_con : 9; + u64 cint_idx : 7; + u64 cq_err : 1; + u64 qint_idx : 7; + u64 rsvd_81_83 : 3; + u64 bpid : 9; + u64 rsvd_69_71 : 3; + u64 bp_ena : 1; + u64 rsvd_64_67 : 4; +#else + u64 rsvd_64_67 : 4; + u64 bp_ena : 1; + u64 rsvd_69_71 : 3; + u64 bpid : 9; + u64 rsvd_81_83 : 3; + u64 qint_idx : 7; + u64 cq_err : 1; + u64 cint_idx : 7; + u64 avg_con : 9; + u64 wrptr : 20; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */ + u64 update_time : 16; + u64 avg_level : 8; + u64 head : 20; + u64 tail : 20; +#else + u64 tail : 20; + u64 head : 20; + u64 avg_level : 8; + u64 update_time : 16; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */ + u64 cq_err_int_ena : 8; + u64 cq_err_int : 8; + u64 qsize : 4; + u64 rsvd_233_235 : 3; + u64 caching : 1; + u64 substream : 20; + u64 rsvd_210_211 : 2; + u64 ena : 1; + u64 drop_ena : 1; + u64 drop : 8; + u64 bp : 8; +#else + u64 bp : 8; + u64 drop : 8; + u64 drop_ena : 1; + u64 ena : 1; + u64 rsvd_210_211 : 2; + u64 substream : 20; + u64 caching : 1; + u64 rsvd_233_235 : 3; + u64 qsize : 4; + u64 cq_err_int : 8; + u64 cq_err_int_ena : 8; +#endif +}; + +/* NIX Receive queue context structure */ +struct nix_rq_ctx_s { +#if defined(__BIG_ENDIAN_BITFIELD) /* W0 */ + u64 wqe_aura : 20; + u64 substream : 20; + u64 cq : 20; + u64 ena_wqwd : 1; + u64 ipsech_ena : 1; + u64 sso_ena : 1; + u64 ena : 1; +#else + u64 ena : 1; + u64 sso_ena : 1; + u64 ipsech_ena : 1; + u64 ena_wqwd : 1; + u64 cq : 20; + u64 substream : 20; + u64 wqe_aura : 20; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */ + u64 rsvd_127_122 : 6; + u64 lpb_drop_ena : 1; + u64 spb_drop_ena : 1; + u64 xqe_drop_ena : 1; + u64 wqe_caching : 1; + u64 pb_caching : 2; + u64 sso_tt : 2; + u64 sso_grp : 10; + u64 lpb_aura : 20; + u64 spb_aura : 20; +#else + u64 spb_aura : 20; + u64 lpb_aura : 20; + u64 sso_grp : 10; + u64 sso_tt : 2; + u64 pb_caching : 2; + u64 wqe_caching : 1; + u64 xqe_drop_ena : 1; + u64 spb_drop_ena : 1; + u64 lpb_drop_ena : 1; + u64 rsvd_127_122 : 6; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */ + u64 xqe_hdr_split : 1; + u64 xqe_imm_copy : 1; + u64 rsvd_189_184 : 6; + u64 xqe_imm_size : 6; + u64 later_skip : 6; + u64 rsvd_171 : 1; + u64 first_skip : 7; + u64 lpb_sizem1 : 12; + u64 spb_ena : 1; + u64 rsvd_150_148 : 3; + u64 wqe_skip : 2; + u64 spb_sizem1 : 6; + u64 rsvd_139_128 : 12; +#else + u64 rsvd_139_128 : 12; + u64 spb_sizem1 : 6; + u64 wqe_skip : 2; + u64 rsvd_150_148 : 3; + u64 spb_ena : 1; + u64 lpb_sizem1 : 12; + u64 first_skip : 7; + u64 rsvd_171 : 1; + u64 later_skip : 6; + u64 xqe_imm_size : 6; + u64 rsvd_189_184 : 6; + u64 xqe_imm_copy : 1; + u64 xqe_hdr_split : 1; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */ + u64 spb_pool_pass : 8; + u64 spb_pool_drop : 8; + u64 spb_aura_pass : 8; + u64 spb_aura_drop : 8; + u64 wqe_pool_pass : 8; + u64 wqe_pool_drop : 8; + u64 xqe_pass : 8; + u64 xqe_drop : 8; +#else + u64 xqe_drop : 8; + u64 xqe_pass : 8; + u64 wqe_pool_drop : 8; + u64 wqe_pool_pass : 8; + u64 
spb_aura_drop : 8; + u64 spb_aura_pass : 8; + u64 spb_pool_drop : 8; + u64 spb_pool_pass : 8; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W4 */ + u64 rsvd_319_315 : 5; + u64 qint_idx : 7; + u64 rq_int_ena : 8; + u64 rq_int : 8; + u64 rsvd_291_288 : 4; + u64 lpb_pool_pass : 8; + u64 lpb_pool_drop : 8; + u64 lpb_aura_pass : 8; + u64 lpb_aura_drop : 8; +#else + u64 lpb_aura_drop : 8; + u64 lpb_aura_pass : 8; + u64 lpb_pool_drop : 8; + u64 lpb_pool_pass : 8; + u64 rsvd_291_288 : 4; + u64 rq_int : 8; + u64 rq_int_ena : 8; + u64 qint_idx : 7; + u64 rsvd_319_315 : 5; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W5 */ + u64 rsvd_383_366 : 18; + u64 flow_tagw : 6; + u64 bad_utag : 8; + u64 good_utag : 8; + u64 ltag : 24; +#else + u64 ltag : 24; + u64 good_utag : 8; + u64 bad_utag : 8; + u64 flow_tagw : 6; + u64 rsvd_383_366 : 18; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W6 */ + u64 rsvd_447_432 : 16; + u64 octs : 48; +#else + u64 octs : 48; + u64 rsvd_447_432 : 16; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W7 */ + u64 rsvd_511_496 : 16; + u64 pkts : 48; +#else + u64 pkts : 48; + u64 rsvd_511_496 : 16; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W8 */ + u64 rsvd_575_560 : 16; + u64 drop_octs : 48; +#else + u64 drop_octs : 48; + u64 rsvd_575_560 : 16; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W9 */ + u64 rsvd_639_624 : 16; + u64 drop_pkts : 48; +#else + u64 drop_pkts : 48; + u64 rsvd_639_624 : 16; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W10 */ + u64 rsvd_703_688 : 16; + u64 re_pkts : 48; +#else + u64 re_pkts : 48; + u64 rsvd_703_688 : 16; +#endif + u64 rsvd_767_704; /* W11 */ + u64 rsvd_831_768; /* W12 */ + u64 rsvd_895_832; /* W13 */ + u64 rsvd_959_896; /* W14 */ + u64 rsvd_1023_960; /* W15 */ +}; + +/* NIX sqe sizes */ +enum nix_maxsqesz { + NIX_MAXSQESZ_W16 = 0x0, + NIX_MAXSQESZ_W8 = 0x1, +}; + +/* NIX SQB caching type */ +enum nix_stype { + NIX_STYPE_STF = 0x0, + NIX_STYPE_STT = 0x1, + NIX_STYPE_STP = 0x2, +}; + +/* NIX Send queue context structure */ +struct nix_sq_ctx_s { +#if defined(__BIG_ENDIAN_BITFIELD) /* W0 */ + u64 sqe_way_mask : 16; + u64 cq : 20; + u64 sdp_mcast : 1; + u64 substream : 20; + u64 qint_idx : 6; + u64 ena : 1; +#else + u64 ena : 1; + u64 qint_idx : 6; + u64 substream : 20; + u64 sdp_mcast : 1; + u64 cq : 20; + u64 sqe_way_mask : 16; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */ + u64 sqb_count : 16; + u64 default_chan : 12; + u64 smq_rr_quantum : 24; + u64 sso_ena : 1; + u64 xoff : 1; + u64 cq_ena : 1; + u64 smq : 9; +#else + u64 smq : 9; + u64 cq_ena : 1; + u64 xoff : 1; + u64 sso_ena : 1; + u64 smq_rr_quantum : 24; + u64 default_chan : 12; + u64 sqb_count : 16; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */ + u64 rsvd_191 : 1; + u64 sqe_stype : 2; + u64 sq_int_ena : 8; + u64 sq_int : 8; + u64 sqb_aura : 20; + u64 smq_rr_count : 25; +#else + u64 smq_rr_count : 25; + u64 sqb_aura : 20; + u64 sq_int : 8; + u64 sq_int_ena : 8; + u64 sqe_stype : 2; + u64 rsvd_191 : 1; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */ + u64 rsvd_255_253 : 3; + u64 smq_next_sq_vld : 1; + u64 smq_pend : 1; + u64 smenq_next_sqb_vld : 1; + u64 head_offset : 6; + u64 smenq_offset : 6; + u64 tail_offset : 6; + u64 smq_lso_segnum : 8; + u64 smq_next_sq : 20; + u64 mnq_dis : 1; + u64 lmt_dis : 1; + u64 cq_limit : 8; + u64 max_sqe_size : 2; +#else + u64 max_sqe_size : 2; + u64 cq_limit : 8; + u64 lmt_dis : 1; + u64 mnq_dis : 1; + u64 smq_next_sq : 20; + u64 smq_lso_segnum : 8; + u64 tail_offset : 6; + u64 smenq_offset : 6; + u64 head_offset : 6; + u64 
smenq_next_sqb_vld : 1; + u64 smq_pend : 1; + u64 smq_next_sq_vld : 1; + u64 rsvd_255_253 : 3; +#endif + u64 next_sqb : 64;/* W4 */ + u64 tail_sqb : 64;/* W5 */ + u64 smenq_sqb : 64;/* W6 */ + u64 smenq_next_sqb : 64;/* W7 */ + u64 head_sqb : 64;/* W8 */ +#if defined(__BIG_ENDIAN_BITFIELD) /* W9 */ + u64 rsvd_639_630 : 10; + u64 vfi_lso_vld : 1; + u64 vfi_lso_vlan1_ins_ena : 1; + u64 vfi_lso_vlan0_ins_ena : 1; + u64 vfi_lso_mps : 14; + u64 vfi_lso_sb : 8; + u64 vfi_lso_sizem1 : 3; + u64 vfi_lso_total : 18; + u64 rsvd_583_576 : 8; +#else + u64 rsvd_583_576 : 8; + u64 vfi_lso_total : 18; + u64 vfi_lso_sizem1 : 3; + u64 vfi_lso_sb : 8; + u64 vfi_lso_mps : 14; + u64 vfi_lso_vlan0_ins_ena : 1; + u64 vfi_lso_vlan1_ins_ena : 1; + u64 vfi_lso_vld : 1; + u64 rsvd_639_630 : 10; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W10 */ + u64 rsvd_703_658 : 46; + u64 scm_lso_rem : 18; +#else + u64 scm_lso_rem : 18; + u64 rsvd_703_658 : 46; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W11 */ + u64 rsvd_767_752 : 16; + u64 octs : 48; +#else + u64 octs : 48; + u64 rsvd_767_752 : 16; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W12 */ + u64 rsvd_831_816 : 16; + u64 pkts : 48; +#else + u64 pkts : 48; + u64 rsvd_831_816 : 16; +#endif + u64 rsvd_895_832 : 64;/* W13 */ +#if defined(__BIG_ENDIAN_BITFIELD) /* W14 */ + u64 rsvd_959_944 : 16; + u64 dropped_octs : 48; +#else + u64 dropped_octs : 48; + u64 rsvd_959_944 : 16; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W15 */ + u64 rsvd_1023_1008 : 16; + u64 dropped_pkts : 48; +#else + u64 dropped_pkts : 48; + u64 rsvd_1023_1008 : 16; +#endif +}; + +/* NIX Receive side scaling entry structure*/ +struct nix_rsse_s { +#if defined(__BIG_ENDIAN_BITFIELD) + uint32_t reserved_20_31 : 12; + uint32_t rq : 20; +#else + uint32_t rq : 20; + uint32_t reserved_20_31 : 12; + +#endif +}; + +/* NIX receive multicast/mirror entry structure */ +struct nix_rx_mce_s { +#if defined(__BIG_ENDIAN_BITFIELD) /* W0 */ + uint64_t next : 16; + uint64_t pf_func : 16; + uint64_t rsvd_31_24 : 8; + uint64_t index : 20; + uint64_t eol : 1; + uint64_t rsvd_2 : 1; + uint64_t op : 2; +#else + uint64_t op : 2; + uint64_t rsvd_2 : 1; + uint64_t eol : 1; + uint64_t index : 20; + uint64_t rsvd_31_24 : 8; + uint64_t pf_func : 16; + uint64_t next : 16; +#endif +}; + +enum nix_lsoalg { + NIX_LSOALG_NOP, + NIX_LSOALG_ADD_SEGNUM, + NIX_LSOALG_ADD_PAYLEN, + NIX_LSOALG_ADD_OFFSET, + NIX_LSOALG_TCP_FLAGS, +}; + +enum nix_txlayer { + NIX_TXLAYER_OL3, + NIX_TXLAYER_OL4, + NIX_TXLAYER_IL3, + NIX_TXLAYER_IL4, +}; + +struct nix_lso_format { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 rsvd_19_63 : 45; + u64 alg : 3; + u64 rsvd_14_15 : 2; + u64 sizem1 : 2; + u64 rsvd_10_11 : 2; + u64 layer : 2; + u64 offset : 8; +#else + u64 offset : 8; + u64 layer : 2; + u64 rsvd_10_11 : 2; + u64 sizem1 : 2; + u64 rsvd_14_15 : 2; + u64 alg : 3; + u64 rsvd_19_63 : 45; +#endif +}; + +struct nix_rx_flowkey_alg { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 reserved_35_63 :29; + u64 ltype_match :4; + u64 ltype_mask :4; + u64 sel_chan :1; + u64 ena :1; + u64 reserved_24_24 :1; + u64 lid :3; + u64 bytesm1 :5; + u64 hdr_offset :8; + u64 fn_mask :1; + u64 ln_mask :1; + u64 key_offset :6; +#else + u64 key_offset :6; + u64 ln_mask :1; + u64 fn_mask :1; + u64 hdr_offset :8; + u64 bytesm1 :5; + u64 lid :3; + u64 reserved_24_24 :1; + u64 ena :1; + u64 sel_chan :1; + u64 ltype_mask :4; + u64 ltype_match :4; + u64 reserved_35_63 :29; +#endif +}; + +/* NIX VTAG size */ +enum nix_vtag_size { + VTAGSIZE_T4 = 0x0, + VTAGSIZE_T8 = 0x1, +}; +#endif /* 
RVU_STRUCT_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c new file mode 100644 index 000000000..56f90cf9c --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Admin Function driver tracepoints + * + * Copyright (C) 2020 Marvell International Ltd. + */ + +#define CREATE_TRACE_POINTS +#include "rvu_trace.h" + +EXPORT_TRACEPOINT_SYMBOL(otx2_msg_alloc); +EXPORT_TRACEPOINT_SYMBOL(otx2_msg_interrupt); +EXPORT_TRACEPOINT_SYMBOL(otx2_msg_process); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h new file mode 100644 index 000000000..e6609068e --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell OcteonTx2 RVU Admin Function driver tracepoints + * + * Copyright (C) 2020 Marvell International Ltd. + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM rvu + +#if !defined(__RVU_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define __RVU_TRACE_H + +#include <linux/types.h> +#include <linux/tracepoint.h> +#include <linux/pci.h> + +TRACE_EVENT(otx2_msg_alloc, + TP_PROTO(const struct pci_dev *pdev, u16 id, u64 size), + TP_ARGS(pdev, id, size), + TP_STRUCT__entry(__string(dev, pci_name(pdev)) + __field(u16, id) + __field(u64, size) + ), + TP_fast_assign(__assign_str(dev, pci_name(pdev)) + __entry->id = id; + __entry->size = size; + ), + TP_printk("[%s] msg:(0x%x) size:%lld\n", __get_str(dev), + __entry->id, __entry->size) +); + +TRACE_EVENT(otx2_msg_send, + TP_PROTO(const struct pci_dev *pdev, u16 num_msgs, u64 msg_size), + TP_ARGS(pdev, num_msgs, msg_size), + TP_STRUCT__entry(__string(dev, pci_name(pdev)) + __field(u16, num_msgs) + __field(u64, msg_size) + ), + TP_fast_assign(__assign_str(dev, pci_name(pdev)) + __entry->num_msgs = num_msgs; + __entry->msg_size = msg_size; + ), + TP_printk("[%s] sent %d msg(s) of size:%lld\n", __get_str(dev), + __entry->num_msgs, __entry->msg_size) +); + +TRACE_EVENT(otx2_msg_check, + TP_PROTO(const struct pci_dev *pdev, u16 reqid, u16 rspid, int rc), + TP_ARGS(pdev, reqid, rspid, rc), + TP_STRUCT__entry(__string(dev, pci_name(pdev)) + __field(u16, reqid) + __field(u16, rspid) + __field(int, rc) + ), + TP_fast_assign(__assign_str(dev, pci_name(pdev)) + __entry->reqid = reqid; + __entry->rspid = rspid; + __entry->rc = rc; + ), + TP_printk("[%s] req->id:0x%x rsp->id:0x%x resp_code:%d\n", + __get_str(dev), __entry->reqid, + __entry->rspid, __entry->rc) +); + +TRACE_EVENT(otx2_msg_interrupt, + TP_PROTO(const struct pci_dev *pdev, const char *msg, u64 intr), + TP_ARGS(pdev, msg, intr), + TP_STRUCT__entry(__string(dev, pci_name(pdev)) + __string(str, msg) + __field(u64, intr) + ), + TP_fast_assign(__assign_str(dev, pci_name(pdev)) + __assign_str(str, msg) + __entry->intr = intr; + ), + TP_printk("[%s] mbox interrupt %s (0x%llx)\n", __get_str(dev), + __get_str(str), __entry->intr) +); + +TRACE_EVENT(otx2_msg_process, + TP_PROTO(const struct pci_dev *pdev, u16 id, int err), + TP_ARGS(pdev, id, err), + TP_STRUCT__entry(__string(dev, pci_name(pdev)) + __field(u16, id) + __field(int, err) + ), + TP_fast_assign(__assign_str(dev, pci_name(pdev)) + __entry->id = id; + __entry->err = err; + ), + TP_printk("[%s] msg:(0x%x) error:%d\n", __get_str(dev), + __entry->id, __entry->err) +); + +#endif /* __RVU_TRACE_H */ + +#undef TRACE_INCLUDE_PATH +#define 
TRACE_INCLUDE_PATH . + +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE rvu_trace + +#include <trace/define_trace.h> diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile new file mode 100644 index 000000000..b2c638570 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for Marvell's OcteonTX2 ethernet device drivers +# + +obj-$(CONFIG_OCTEONTX2_PF) += octeontx2_nicpf.o +obj-$(CONFIG_OCTEONTX2_VF) += octeontx2_nicvf.o + +octeontx2_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \ + otx2_ptp.o +octeontx2_nicvf-y := otx2_vf.o + +ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c new file mode 100644 index 000000000..b062ed062 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -0,0 +1,1597 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Ethernet driver + * + * Copyright (C) 2020 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <net/tso.h> + +#include "otx2_reg.h" +#include "otx2_common.h" +#include "otx2_struct.h" + +static void otx2_nix_rq_op_stats(struct queue_stats *stats, + struct otx2_nic *pfvf, int qidx) +{ + u64 incr = (u64)qidx << 32; + u64 *ptr; + + ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_OCTS); + stats->bytes = otx2_atomic64_add(incr, ptr); + + ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_PKTS); + stats->pkts = otx2_atomic64_add(incr, ptr); +} + +static void otx2_nix_sq_op_stats(struct queue_stats *stats, + struct otx2_nic *pfvf, int qidx) +{ + u64 incr = (u64)qidx << 32; + u64 *ptr; + + ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_OCTS); + stats->bytes = otx2_atomic64_add(incr, ptr); + + ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_PKTS); + stats->pkts = otx2_atomic64_add(incr, ptr); +} + +void otx2_update_lmac_stats(struct otx2_nic *pfvf) +{ + struct msg_req *req; + + if (!netif_running(pfvf->netdev)) + return; + + mutex_lock(&pfvf->mbox.lock); + req = otx2_mbox_alloc_msg_cgx_stats(&pfvf->mbox); + if (!req) { + mutex_unlock(&pfvf->mbox.lock); + return; + } + + otx2_sync_mbox_msg(&pfvf->mbox); + mutex_unlock(&pfvf->mbox.lock); +} + +int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx) +{ + struct otx2_rcv_queue *rq = &pfvf->qset.rq[qidx]; + + if (!pfvf->qset.rq) + return 0; + + otx2_nix_rq_op_stats(&rq->stats, pfvf, qidx); + return 1; +} + +int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx) +{ + struct otx2_snd_queue *sq = &pfvf->qset.sq[qidx]; + + if (!pfvf->qset.sq) + return 0; + + otx2_nix_sq_op_stats(&sq->stats, pfvf, qidx); + return 1; +} + +void otx2_get_dev_stats(struct otx2_nic *pfvf) +{ + struct otx2_dev_stats *dev_stats = &pfvf->hw.dev_stats; + +#define OTX2_GET_RX_STATS(reg) \ + otx2_read64(pfvf, NIX_LF_RX_STATX(reg)) +#define OTX2_GET_TX_STATS(reg) \ + otx2_read64(pfvf, NIX_LF_TX_STATX(reg)) + + dev_stats->rx_bytes = OTX2_GET_RX_STATS(RX_OCTS); + dev_stats->rx_drops = OTX2_GET_RX_STATS(RX_DROP); + dev_stats->rx_bcast_frames = OTX2_GET_RX_STATS(RX_BCAST); + dev_stats->rx_mcast_frames = OTX2_GET_RX_STATS(RX_MCAST); + 
dev_stats->rx_ucast_frames = OTX2_GET_RX_STATS(RX_UCAST); + dev_stats->rx_frames = dev_stats->rx_bcast_frames + + dev_stats->rx_mcast_frames + + dev_stats->rx_ucast_frames; + + dev_stats->tx_bytes = OTX2_GET_TX_STATS(TX_OCTS); + dev_stats->tx_drops = OTX2_GET_TX_STATS(TX_DROP); + dev_stats->tx_bcast_frames = OTX2_GET_TX_STATS(TX_BCAST); + dev_stats->tx_mcast_frames = OTX2_GET_TX_STATS(TX_MCAST); + dev_stats->tx_ucast_frames = OTX2_GET_TX_STATS(TX_UCAST); + dev_stats->tx_frames = dev_stats->tx_bcast_frames + + dev_stats->tx_mcast_frames + + dev_stats->tx_ucast_frames; +} + +void otx2_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + struct otx2_dev_stats *dev_stats; + + otx2_get_dev_stats(pfvf); + + dev_stats = &pfvf->hw.dev_stats; + stats->rx_bytes = dev_stats->rx_bytes; + stats->rx_packets = dev_stats->rx_frames; + stats->rx_dropped = dev_stats->rx_drops; + stats->multicast = dev_stats->rx_mcast_frames; + + stats->tx_bytes = dev_stats->tx_bytes; + stats->tx_packets = dev_stats->tx_frames; + stats->tx_dropped = dev_stats->tx_drops; +} +EXPORT_SYMBOL(otx2_get_stats64); + +/* Sync MAC address with RVU AF */ +static int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, u8 *mac) +{ + struct nix_set_mac_addr *req; + int err; + + mutex_lock(&pfvf->mbox.lock); + req = otx2_mbox_alloc_msg_nix_set_mac_addr(&pfvf->mbox); + if (!req) { + mutex_unlock(&pfvf->mbox.lock); + return -ENOMEM; + } + + ether_addr_copy(req->mac_addr, mac); + + err = otx2_sync_mbox_msg(&pfvf->mbox); + mutex_unlock(&pfvf->mbox.lock); + return err; +} + +static int otx2_hw_get_mac_addr(struct otx2_nic *pfvf, + struct net_device *netdev) +{ + struct nix_get_mac_addr_rsp *rsp; + struct mbox_msghdr *msghdr; + struct msg_req *req; + int err; + + mutex_lock(&pfvf->mbox.lock); + req = otx2_mbox_alloc_msg_nix_get_mac_addr(&pfvf->mbox); + if (!req) { + mutex_unlock(&pfvf->mbox.lock); + return -ENOMEM; + } + + err = otx2_sync_mbox_msg(&pfvf->mbox); + if (err) { + mutex_unlock(&pfvf->mbox.lock); + return err; + } + + msghdr = otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); + if (IS_ERR(msghdr)) { + mutex_unlock(&pfvf->mbox.lock); + return PTR_ERR(msghdr); + } + rsp = (struct nix_get_mac_addr_rsp *)msghdr; + ether_addr_copy(netdev->dev_addr, rsp->mac_addr); + mutex_unlock(&pfvf->mbox.lock); + + return 0; +} + +int otx2_set_mac_address(struct net_device *netdev, void *p) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data)) + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + else + return -EPERM; + + return 0; +} +EXPORT_SYMBOL(otx2_set_mac_address); + +int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu) +{ + struct nix_frs_cfg *req; + int err; + + mutex_lock(&pfvf->mbox.lock); + req = otx2_mbox_alloc_msg_nix_set_hw_frs(&pfvf->mbox); + if (!req) { + mutex_unlock(&pfvf->mbox.lock); + return -ENOMEM; + } + + pfvf->max_frs = mtu + OTX2_ETH_HLEN; + req->maxlen = pfvf->max_frs; + + err = otx2_sync_mbox_msg(&pfvf->mbox); + mutex_unlock(&pfvf->mbox.lock); + return err; +} + +int otx2_config_pause_frm(struct otx2_nic *pfvf) +{ + struct cgx_pause_frm_cfg *req; + int err; + + if (is_otx2_lbkvf(pfvf->pdev)) + return 0; + + mutex_lock(&pfvf->mbox.lock); + req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox); + if (!req) { + err = -ENOMEM; + goto unlock; + } + + req->rx_pause = !!(pfvf->flags & OTX2_FLAG_RX_PAUSE_ENABLED); 
+ req->tx_pause = !!(pfvf->flags & OTX2_FLAG_TX_PAUSE_ENABLED); + req->set = 1; + + err = otx2_sync_mbox_msg(&pfvf->mbox); +unlock: + mutex_unlock(&pfvf->mbox.lock); + return err; +} + +int otx2_set_flowkey_cfg(struct otx2_nic *pfvf) +{ + struct otx2_rss_info *rss = &pfvf->hw.rss_info; + struct nix_rss_flowkey_cfg *req; + int err; + + mutex_lock(&pfvf->mbox.lock); + req = otx2_mbox_alloc_msg_nix_rss_flowkey_cfg(&pfvf->mbox); + if (!req) { + mutex_unlock(&pfvf->mbox.lock); + return -ENOMEM; + } + req->mcam_index = -1; /* Default or reserved index */ + req->flowkey_cfg = rss->flowkey_cfg; + req->group = DEFAULT_RSS_CONTEXT_GROUP; + + err = otx2_sync_mbox_msg(&pfvf->mbox); + mutex_unlock(&pfvf->mbox.lock); + return err; +} + +int otx2_set_rss_table(struct otx2_nic *pfvf) +{ + struct otx2_rss_info *rss = &pfvf->hw.rss_info; + struct mbox *mbox = &pfvf->mbox; + struct nix_aq_enq_req *aq; + int idx, err; + + mutex_lock(&mbox->lock); + /* Get memory to put this msg */ + for (idx = 0; idx < rss->rss_size; idx++) { + aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); + if (!aq) { + /* The shared memory buffer can be full. + * Flush it and retry + */ + err = otx2_sync_mbox_msg(mbox); + if (err) { + mutex_unlock(&mbox->lock); + return err; + } + aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); + if (!aq) { + mutex_unlock(&mbox->lock); + return -ENOMEM; + } + } + + aq->rss.rq = rss->ind_tbl[idx]; + + /* Fill AQ info */ + aq->qidx = idx; + aq->ctype = NIX_AQ_CTYPE_RSS; + aq->op = NIX_AQ_INSTOP_INIT; + } + err = otx2_sync_mbox_msg(mbox); + mutex_unlock(&mbox->lock); + return err; +} + +void otx2_set_rss_key(struct otx2_nic *pfvf) +{ + struct otx2_rss_info *rss = &pfvf->hw.rss_info; + u64 *key = (u64 *)&rss->key[4]; + int idx; + + /* 352bit or 44byte key needs to be configured as below + * NIX_LF_RX_SECRETX0 = key<351:288> + * NIX_LF_RX_SECRETX1 = key<287:224> + * NIX_LF_RX_SECRETX2 = key<223:160> + * NIX_LF_RX_SECRETX3 = key<159:96> + * NIX_LF_RX_SECRETX4 = key<95:32> + * NIX_LF_RX_SECRETX5<63:32> = key<31:0> + */ + otx2_write64(pfvf, NIX_LF_RX_SECRETX(5), + (u64)(*((u32 *)&rss->key)) << 32); + idx = sizeof(rss->key) / sizeof(u64); + while (idx > 0) { + idx--; + otx2_write64(pfvf, NIX_LF_RX_SECRETX(idx), *key++); + } +} + +int otx2_rss_init(struct otx2_nic *pfvf) +{ + struct otx2_rss_info *rss = &pfvf->hw.rss_info; + int idx, ret = 0; + + rss->rss_size = sizeof(rss->ind_tbl); + + /* Init RSS key if it is not setup already */ + if (!rss->enable) + netdev_rss_key_fill(rss->key, sizeof(rss->key)); + otx2_set_rss_key(pfvf); + + if (!netif_is_rxfh_configured(pfvf->netdev)) { + /* Default indirection table */ + for (idx = 0; idx < rss->rss_size; idx++) + rss->ind_tbl[idx] = + ethtool_rxfh_indir_default(idx, + pfvf->hw.rx_queues); + } + ret = otx2_set_rss_table(pfvf); + if (ret) + return ret; + + /* Flowkey or hash config to be used for generating flow tag */ + rss->flowkey_cfg = rss->enable ? 
rss->flowkey_cfg : + NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6 | + NIX_FLOW_KEY_TYPE_TCP | NIX_FLOW_KEY_TYPE_UDP | + NIX_FLOW_KEY_TYPE_SCTP | NIX_FLOW_KEY_TYPE_VLAN; + + ret = otx2_set_flowkey_cfg(pfvf); + if (ret) + return ret; + + rss->enable = true; + return 0; +} + +/* Setup UDP segmentation algorithm in HW */ +static void otx2_setup_udp_segmentation(struct nix_lso_format_cfg *lso, bool v4) +{ + struct nix_lso_format *field; + + field = (struct nix_lso_format *)&lso->fields[0]; + lso->field_mask = GENMASK(18, 0); + + /* IP's Length field */ + field->layer = NIX_TXLAYER_OL3; + /* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */ + field->offset = v4 ? 2 : 4; + field->sizem1 = 1; /* i.e 2 bytes */ + field->alg = NIX_LSOALG_ADD_PAYLEN; + field++; + + /* No ID field in IPv6 header */ + if (v4) { + /* Increment IPID */ + field->layer = NIX_TXLAYER_OL3; + field->offset = 4; + field->sizem1 = 1; /* i.e 2 bytes */ + field->alg = NIX_LSOALG_ADD_SEGNUM; + field++; + } + + /* Update length in UDP header */ + field->layer = NIX_TXLAYER_OL4; + field->offset = 4; + field->sizem1 = 1; + field->alg = NIX_LSOALG_ADD_PAYLEN; +} + +/* Setup segmentation algorithms in HW and retrieve algorithm index */ +void otx2_setup_segmentation(struct otx2_nic *pfvf) +{ + struct nix_lso_format_cfg_rsp *rsp; + struct nix_lso_format_cfg *lso; + struct otx2_hw *hw = &pfvf->hw; + int err; + + mutex_lock(&pfvf->mbox.lock); + + /* UDPv4 segmentation */ + lso = otx2_mbox_alloc_msg_nix_lso_format_cfg(&pfvf->mbox); + if (!lso) + goto fail; + + /* Setup UDP/IP header fields that HW should update per segment */ + otx2_setup_udp_segmentation(lso, true); + + err = otx2_sync_mbox_msg(&pfvf->mbox); + if (err) + goto fail; + + rsp = (struct nix_lso_format_cfg_rsp *) + otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &lso->hdr); + if (IS_ERR(rsp)) + goto fail; + + hw->lso_udpv4_idx = rsp->lso_format_idx; + + /* UDPv6 segmentation */ + lso = otx2_mbox_alloc_msg_nix_lso_format_cfg(&pfvf->mbox); + if (!lso) + goto fail; + + /* Setup UDP/IP header fields that HW should update per segment */ + otx2_setup_udp_segmentation(lso, false); + + err = otx2_sync_mbox_msg(&pfvf->mbox); + if (err) + goto fail; + + rsp = (struct nix_lso_format_cfg_rsp *) + otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &lso->hdr); + if (IS_ERR(rsp)) + goto fail; + + hw->lso_udpv6_idx = rsp->lso_format_idx; + mutex_unlock(&pfvf->mbox.lock); + return; +fail: + mutex_unlock(&pfvf->mbox.lock); + netdev_info(pfvf->netdev, + "Failed to get LSO index for UDP GSO offload, disabling\n"); + pfvf->netdev->hw_features &= ~NETIF_F_GSO_UDP_L4; +} + +void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx) +{ + /* Configure CQE interrupt coalescing parameters + * + * HW triggers an irq when ECOUNT > cq_ecount_wait, hence + * set 1 less than cq_ecount_wait. And cq_time_wait is in + * usecs, convert that to 100ns count. 
+ */ + otx2_write64(pfvf, NIX_LF_CINTX_WAIT(qidx), + ((u64)(pfvf->hw.cq_time_wait * 10) << 48) | + ((u64)pfvf->hw.cq_qcount_wait << 32) | + (pfvf->hw.cq_ecount_wait - 1)); +} + +dma_addr_t __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool) +{ + dma_addr_t iova; + u8 *buf; + + buf = napi_alloc_frag(pool->rbsize + OTX2_ALIGN); + if (unlikely(!buf)) + return -ENOMEM; + + buf = PTR_ALIGN(buf, OTX2_ALIGN); + iova = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize, + DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); + if (unlikely(dma_mapping_error(pfvf->dev, iova))) { + page_frag_free(buf); + return -ENOMEM; + } + + return iova; +} + +static dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool) +{ + dma_addr_t addr; + + local_bh_disable(); + addr = __otx2_alloc_rbuf(pfvf, pool); + local_bh_enable(); + return addr; +} + +void otx2_tx_timeout(struct net_device *netdev, unsigned int txq) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + + schedule_work(&pfvf->reset_task); +} +EXPORT_SYMBOL(otx2_tx_timeout); + +void otx2_get_mac_from_af(struct net_device *netdev) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + int err; + + err = otx2_hw_get_mac_addr(pfvf, netdev); + if (err) + dev_warn(pfvf->dev, "Failed to read mac from hardware\n"); + + /* If AF doesn't provide a valid MAC, generate a random one */ + if (!is_valid_ether_addr(netdev->dev_addr)) + eth_hw_addr_random(netdev); +} +EXPORT_SYMBOL(otx2_get_mac_from_af); + +static int otx2_get_link(struct otx2_nic *pfvf) +{ + int link = 0; + u16 map; + + /* cgx lmac link */ + if (pfvf->hw.tx_chan_base >= CGX_CHAN_BASE) { + map = pfvf->hw.tx_chan_base & 0x7FF; + link = 4 * ((map >> 8) & 0xF) + ((map >> 4) & 0xF); + } + /* LBK channel */ + if (pfvf->hw.tx_chan_base < SDP_CHAN_BASE) + link = 12; + + return link; +} + +int otx2_txschq_config(struct otx2_nic *pfvf, int lvl) +{ + struct otx2_hw *hw = &pfvf->hw; + struct nix_txschq_config *req; + u64 schq, parent; + + req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox); + if (!req) + return -ENOMEM; + + req->lvl = lvl; + req->num_regs = 1; + + schq = hw->txschq_list[lvl][0]; + /* Set topology e.t.c configuration */ + if (lvl == NIX_TXSCH_LVL_SMQ) { + req->reg[0] = NIX_AF_SMQX_CFG(schq); + req->regval[0] = ((OTX2_MAX_MTU + OTX2_ETH_HLEN) << 8) | + OTX2_MIN_MTU; + + req->regval[0] |= (0x20ULL << 51) | (0x80ULL << 39) | + (0x2ULL << 36); + req->num_regs++; + /* MDQ config */ + parent = hw->txschq_list[NIX_TXSCH_LVL_TL4][0]; + req->reg[1] = NIX_AF_MDQX_PARENT(schq); + req->regval[1] = parent << 16; + req->num_regs++; + /* Set DWRR quantum */ + req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq); + req->regval[2] = DFLT_RR_QTM; + } else if (lvl == NIX_TXSCH_LVL_TL4) { + parent = hw->txschq_list[NIX_TXSCH_LVL_TL3][0]; + req->reg[0] = NIX_AF_TL4X_PARENT(schq); + req->regval[0] = parent << 16; + req->num_regs++; + req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq); + req->regval[1] = DFLT_RR_QTM; + } else if (lvl == NIX_TXSCH_LVL_TL3) { + parent = hw->txschq_list[NIX_TXSCH_LVL_TL2][0]; + req->reg[0] = NIX_AF_TL3X_PARENT(schq); + req->regval[0] = parent << 16; + req->num_regs++; + req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq); + req->regval[1] = DFLT_RR_QTM; + } else if (lvl == NIX_TXSCH_LVL_TL2) { + parent = hw->txschq_list[NIX_TXSCH_LVL_TL1][0]; + req->reg[0] = NIX_AF_TL2X_PARENT(schq); + req->regval[0] = parent << 16; + + req->num_regs++; + req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq); + req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | DFLT_RR_QTM; + + req->num_regs++; + req->reg[2] = 
NIX_AF_TL3_TL2X_LINKX_CFG(schq, + otx2_get_link(pfvf)); + /* Enable this queue and backpressure */ + req->regval[2] = BIT_ULL(13) | BIT_ULL(12); + + } else if (lvl == NIX_TXSCH_LVL_TL1) { + /* Default config for TL1. + * For VF this is always ignored. + */ + + /* Set DWRR quantum */ + req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq); + req->regval[0] = TXSCH_TL1_DFLT_RR_QTM; + + req->num_regs++; + req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq); + req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1); + + req->num_regs++; + req->reg[2] = NIX_AF_TL1X_CIR(schq); + req->regval[2] = 0; + } + + return otx2_sync_mbox_msg(&pfvf->mbox); +} + +int otx2_txsch_alloc(struct otx2_nic *pfvf) +{ + struct nix_txsch_alloc_req *req; + int lvl; + + /* Get memory to put this msg */ + req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox); + if (!req) + return -ENOMEM; + + /* Request one schq per level */ + for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) + req->schq[lvl] = 1; + + return otx2_sync_mbox_msg(&pfvf->mbox); +} + +int otx2_txschq_stop(struct otx2_nic *pfvf) +{ + struct nix_txsch_free_req *free_req; + int lvl, schq, err; + + mutex_lock(&pfvf->mbox.lock); + /* Free the transmit schedulers */ + free_req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox); + if (!free_req) { + mutex_unlock(&pfvf->mbox.lock); + return -ENOMEM; + } + + free_req->flags = TXSCHQ_FREE_ALL; + err = otx2_sync_mbox_msg(&pfvf->mbox); + mutex_unlock(&pfvf->mbox.lock); + + /* Clear the txschq list */ + for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { + for (schq = 0; schq < MAX_TXSCHQ_PER_FUNC; schq++) + pfvf->hw.txschq_list[lvl][schq] = 0; + } + return err; +} + +void otx2_sqb_flush(struct otx2_nic *pfvf) +{ + int qidx, sqe_tail, sqe_head; + u64 incr, *ptr, val; + int timeout = 1000; + + ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS); + for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) { + incr = (u64)qidx << 32; + while (timeout) { + val = otx2_atomic64_add(incr, ptr); + sqe_head = (val >> 20) & 0x3F; + sqe_tail = (val >> 28) & 0x3F; + if (sqe_head == sqe_tail) + break; + usleep_range(1, 3); + timeout--; + } + } +} + +/* RED and drop levels of CQ on packet reception. + * For CQ level is measure of emptiness ( 0x0 = full, 255 = empty). + */ +#define RQ_PASS_LVL_CQ(skid, qsize) ((((skid) + 16) * 256) / (qsize)) +#define RQ_DROP_LVL_CQ(skid, qsize) (((skid) * 256) / (qsize)) + +/* RED and drop levels of AURA for packet reception. + * For AURA level is measure of fullness (0x0 = empty, 255 = full). + * Eg: For RQ length 1K, for pass/drop level 204/230. + * RED accepts pkts if free pointers > 102 & <= 205. + * Drops pkts if free pointers < 102. + */ +#define RQ_BP_LVL_AURA (255 - ((85 * 256) / 100)) /* BP when 85% is full */ +#define RQ_PASS_LVL_AURA (255 - ((95 * 256) / 100)) /* RED when 95% is full */ +#define RQ_DROP_LVL_AURA (255 - ((99 * 256) / 100)) /* Drop when 99% is full */ + +/* Send skid of 2000 packets required for CQ size of 4K CQEs. 
*/ +#define SEND_CQ_SKID 2000 + +static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura) +{ + struct otx2_qset *qset = &pfvf->qset; + struct nix_aq_enq_req *aq; + + /* Get memory to put this msg */ + aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox); + if (!aq) + return -ENOMEM; + + aq->rq.cq = qidx; + aq->rq.ena = 1; + aq->rq.pb_caching = 1; + aq->rq.lpb_aura = lpb_aura; /* Use large packet buffer aura */ + aq->rq.lpb_sizem1 = (DMA_BUFFER_LEN(pfvf->rbsize) / 8) - 1; + aq->rq.xqe_imm_size = 0; /* Copying of packet to CQE not needed */ + aq->rq.flow_tagw = 32; /* Copy full 32bit flow_tag to CQE header */ + aq->rq.qint_idx = 0; + aq->rq.lpb_drop_ena = 1; /* Enable RED dropping for AURA */ + aq->rq.xqe_drop_ena = 1; /* Enable RED dropping for CQ/SSO */ + aq->rq.xqe_pass = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt); + aq->rq.xqe_drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt); + aq->rq.lpb_aura_pass = RQ_PASS_LVL_AURA; + aq->rq.lpb_aura_drop = RQ_DROP_LVL_AURA; + + /* Fill AQ info */ + aq->qidx = qidx; + aq->ctype = NIX_AQ_CTYPE_RQ; + aq->op = NIX_AQ_INSTOP_INIT; + + return otx2_sync_mbox_msg(&pfvf->mbox); +} + +static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura) +{ + struct otx2_qset *qset = &pfvf->qset; + struct otx2_snd_queue *sq; + struct nix_aq_enq_req *aq; + struct otx2_pool *pool; + int err; + + pool = &pfvf->qset.pool[sqb_aura]; + sq = &qset->sq[qidx]; + sq->sqe_size = NIX_SQESZ_W16 ? 64 : 128; + sq->sqe_cnt = qset->sqe_cnt; + + err = qmem_alloc(pfvf->dev, &sq->sqe, 1, sq->sqe_size); + if (err) + return err; + + err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt, + TSO_HEADER_SIZE); + if (err) + return err; + + sq->sqe_base = sq->sqe->base; + sq->sg = kcalloc(qset->sqe_cnt, sizeof(struct sg_list), GFP_KERNEL); + if (!sq->sg) + return -ENOMEM; + + if (pfvf->ptp) { + err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt, + sizeof(*sq->timestamps)); + if (err) + return err; + } + + sq->head = 0; + sq->sqe_per_sqb = (pfvf->hw.sqb_size / sq->sqe_size) - 1; + sq->num_sqbs = (qset->sqe_cnt + sq->sqe_per_sqb) / sq->sqe_per_sqb; + /* Set SQE threshold to 10% of total SQEs */ + sq->sqe_thresh = ((sq->num_sqbs * sq->sqe_per_sqb) * 10) / 100; + sq->aura_id = sqb_aura; + sq->aura_fc_addr = pool->fc_addr->base; + sq->lmt_addr = (__force u64 *)(pfvf->reg_base + LMT_LF_LMTLINEX(qidx)); + sq->io_addr = (__force u64)otx2_get_regaddr(pfvf, NIX_LF_OP_SENDX(0)); + + sq->stats.bytes = 0; + sq->stats.pkts = 0; + + /* Get memory to put this msg */ + aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox); + if (!aq) + return -ENOMEM; + + aq->sq.cq = pfvf->hw.rx_queues + qidx; + aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */ + aq->sq.cq_ena = 1; + aq->sq.ena = 1; + /* Only one SMQ is allocated, map all SQ's to that SMQ */ + aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0]; + aq->sq.smq_rr_quantum = DFLT_RR_QTM; + aq->sq.default_chan = pfvf->hw.tx_chan_base; + aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */ + aq->sq.sqb_aura = sqb_aura; + aq->sq.sq_int_ena = NIX_SQINT_BITS; + aq->sq.qint_idx = 0; + /* Due pipelining impact minimum 2000 unused SQ CQE's + * need to maintain to avoid CQ overflow. 
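+ * The limit below is the skid scaled by 256 / CQ size, e.g.
+ * 2000 * 256 / 4096 = 125 for a 4K send CQ.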
+ */ + aq->sq.cq_limit = ((SEND_CQ_SKID * 256) / (sq->sqe_cnt)); + + /* Fill AQ info */ + aq->qidx = qidx; + aq->ctype = NIX_AQ_CTYPE_SQ; + aq->op = NIX_AQ_INSTOP_INIT; + + return otx2_sync_mbox_msg(&pfvf->mbox); +} + +static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx) +{ + struct otx2_qset *qset = &pfvf->qset; + struct nix_aq_enq_req *aq; + struct otx2_cq_queue *cq; + int err, pool_id; + + cq = &qset->cq[qidx]; + cq->cq_idx = qidx; + if (qidx < pfvf->hw.rx_queues) { + cq->cq_type = CQ_RX; + cq->cint_idx = qidx; + cq->cqe_cnt = qset->rqe_cnt; + } else { + cq->cq_type = CQ_TX; + cq->cint_idx = qidx - pfvf->hw.rx_queues; + cq->cqe_cnt = qset->sqe_cnt; + } + cq->cqe_size = pfvf->qset.xqe_size; + + /* Allocate memory for CQEs */ + err = qmem_alloc(pfvf->dev, &cq->cqe, cq->cqe_cnt, cq->cqe_size); + if (err) + return err; + + /* Save CQE CPU base for faster reference */ + cq->cqe_base = cq->cqe->base; + /* In case where all RQs auras point to single pool, + * all CQs receive buffer pool also point to same pool. + */ + pool_id = ((cq->cq_type == CQ_RX) && + (pfvf->hw.rqpool_cnt != pfvf->hw.rx_queues)) ? 0 : qidx; + cq->rbpool = &qset->pool[pool_id]; + cq->refill_task_sched = false; + + /* Get memory to put this msg */ + aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox); + if (!aq) + return -ENOMEM; + + aq->cq.ena = 1; + aq->cq.qsize = Q_SIZE(cq->cqe_cnt, 4); + aq->cq.caching = 1; + aq->cq.base = cq->cqe->iova; + aq->cq.cint_idx = cq->cint_idx; + aq->cq.cq_err_int_ena = NIX_CQERRINT_BITS; + aq->cq.qint_idx = 0; + aq->cq.avg_level = 255; + + if (qidx < pfvf->hw.rx_queues) { + aq->cq.drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, cq->cqe_cnt); + aq->cq.drop_ena = 1; + + /* Enable receive CQ backpressure */ + aq->cq.bp_ena = 1; + aq->cq.bpid = pfvf->bpid[0]; + + /* Set backpressure level is same as cq pass level */ + aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt); + } + + /* Fill AQ info */ + aq->qidx = qidx; + aq->ctype = NIX_AQ_CTYPE_CQ; + aq->op = NIX_AQ_INSTOP_INIT; + + return otx2_sync_mbox_msg(&pfvf->mbox); +} + +static void otx2_pool_refill_task(struct work_struct *work) +{ + struct otx2_cq_queue *cq; + struct otx2_pool *rbpool; + struct refill_work *wrk; + int qidx, free_ptrs = 0; + struct otx2_nic *pfvf; + s64 bufptr; + + wrk = container_of(work, struct refill_work, pool_refill_work.work); + pfvf = wrk->pf; + qidx = wrk - pfvf->refill_wrk; + cq = &pfvf->qset.cq[qidx]; + rbpool = cq->rbpool; + free_ptrs = cq->pool_ptrs; + + while (cq->pool_ptrs) { + bufptr = otx2_alloc_rbuf(pfvf, rbpool); + if (bufptr <= 0) { + /* Schedule a WQ if we fails to free atleast half of the + * pointers else enable napi for this RQ. 
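+ * I.e. if fewer than half of the needed pointers could be refilled, the
+ * work is re-queued after 100 msec; otherwise refill_task_sched is
+ * cleared and NAPI refills the remainder.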
+ */ + if (!((free_ptrs - cq->pool_ptrs) > free_ptrs / 2)) { + struct delayed_work *dwork; + + dwork = &wrk->pool_refill_work; + schedule_delayed_work(dwork, + msecs_to_jiffies(100)); + } else { + cq->refill_task_sched = false; + } + return; + } + otx2_aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM); + cq->pool_ptrs--; + } + cq->refill_task_sched = false; +} + +int otx2_config_nix_queues(struct otx2_nic *pfvf) +{ + int qidx, err; + + /* Initialize RX queues */ + for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) { + u16 lpb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx); + + err = otx2_rq_init(pfvf, qidx, lpb_aura); + if (err) + return err; + } + + /* Initialize TX queues */ + for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) { + u16 sqb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx); + + err = otx2_sq_init(pfvf, qidx, sqb_aura); + if (err) + return err; + } + + /* Initialize completion queues */ + for (qidx = 0; qidx < pfvf->qset.cq_cnt; qidx++) { + err = otx2_cq_init(pfvf, qidx); + if (err) + return err; + } + + /* Initialize work queue for receive buffer refill */ + pfvf->refill_wrk = devm_kcalloc(pfvf->dev, pfvf->qset.cq_cnt, + sizeof(struct refill_work), GFP_KERNEL); + if (!pfvf->refill_wrk) + return -ENOMEM; + + for (qidx = 0; qidx < pfvf->qset.cq_cnt; qidx++) { + pfvf->refill_wrk[qidx].pf = pfvf; + INIT_DELAYED_WORK(&pfvf->refill_wrk[qidx].pool_refill_work, + otx2_pool_refill_task); + } + return 0; +} + +int otx2_config_nix(struct otx2_nic *pfvf) +{ + struct nix_lf_alloc_req *nixlf; + struct nix_lf_alloc_rsp *rsp; + int err; + + pfvf->qset.xqe_size = NIX_XQESZ_W16 ? 128 : 512; + + /* Get memory to put this msg */ + nixlf = otx2_mbox_alloc_msg_nix_lf_alloc(&pfvf->mbox); + if (!nixlf) + return -ENOMEM; + + /* Set RQ/SQ/CQ counts */ + nixlf->rq_cnt = pfvf->hw.rx_queues; + nixlf->sq_cnt = pfvf->hw.tx_queues; + nixlf->cq_cnt = pfvf->qset.cq_cnt; + nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE; + nixlf->rss_grps = 1; /* Single RSS indir table supported, for now */ + nixlf->xqe_sz = NIX_XQESZ_W16; + /* We don't know absolute NPA LF idx attached. + * AF will replace 'RVU_DEFAULT_PF_FUNC' with + * NPA LF attached to this RVU PF/VF. + */ + nixlf->npa_func = RVU_DEFAULT_PF_FUNC; + /* Disable alignment pad, enable L2 length check, + * enable L4 TCP/UDP checksum verification. 
+ */ + nixlf->rx_cfg = BIT_ULL(33) | BIT_ULL(35) | BIT_ULL(37); + + err = otx2_sync_mbox_msg(&pfvf->mbox); + if (err) + return err; + + rsp = (struct nix_lf_alloc_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, + &nixlf->hdr); + if (IS_ERR(rsp)) + return PTR_ERR(rsp); + + if (rsp->qints < 1) + return -ENXIO; + + return rsp->hdr.rc; +} + +void otx2_sq_free_sqbs(struct otx2_nic *pfvf) +{ + struct otx2_qset *qset = &pfvf->qset; + struct otx2_hw *hw = &pfvf->hw; + struct otx2_snd_queue *sq; + int sqb, qidx; + u64 iova, pa; + + for (qidx = 0; qidx < hw->tx_queues; qidx++) { + sq = &qset->sq[qidx]; + if (!sq->sqb_ptrs) + continue; + for (sqb = 0; sqb < sq->sqb_count; sqb++) { + if (!sq->sqb_ptrs[sqb]) + continue; + iova = sq->sqb_ptrs[sqb]; + pa = otx2_iova_to_phys(pfvf->iommu_domain, iova); + dma_unmap_page_attrs(pfvf->dev, iova, hw->sqb_size, + DMA_FROM_DEVICE, + DMA_ATTR_SKIP_CPU_SYNC); + put_page(virt_to_page(phys_to_virt(pa))); + } + sq->sqb_count = 0; + } +} + +void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type) +{ + int pool_id, pool_start = 0, pool_end = 0, size = 0; + u64 iova, pa; + + if (type == AURA_NIX_SQ) { + pool_start = otx2_get_pool_idx(pfvf, type, 0); + pool_end = pool_start + pfvf->hw.sqpool_cnt; + size = pfvf->hw.sqb_size; + } + if (type == AURA_NIX_RQ) { + pool_start = otx2_get_pool_idx(pfvf, type, 0); + pool_end = pfvf->hw.rqpool_cnt; + size = pfvf->rbsize; + } + + /* Free SQB and RQB pointers from the aura pool */ + for (pool_id = pool_start; pool_id < pool_end; pool_id++) { + iova = otx2_aura_allocptr(pfvf, pool_id); + while (iova) { + if (type == AURA_NIX_RQ) + iova -= OTX2_HEAD_ROOM; + + pa = otx2_iova_to_phys(pfvf->iommu_domain, iova); + dma_unmap_page_attrs(pfvf->dev, iova, size, + DMA_FROM_DEVICE, + DMA_ATTR_SKIP_CPU_SYNC); + put_page(virt_to_page(phys_to_virt(pa))); + iova = otx2_aura_allocptr(pfvf, pool_id); + } + } +} + +void otx2_aura_pool_free(struct otx2_nic *pfvf) +{ + struct otx2_pool *pool; + int pool_id; + + if (!pfvf->qset.pool) + return; + + for (pool_id = 0; pool_id < pfvf->hw.pool_cnt; pool_id++) { + pool = &pfvf->qset.pool[pool_id]; + qmem_free(pfvf->dev, pool->stack); + qmem_free(pfvf->dev, pool->fc_addr); + } + devm_kfree(pfvf->dev, pfvf->qset.pool); + pfvf->qset.pool = NULL; +} + +static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id, + int pool_id, int numptrs) +{ + struct npa_aq_enq_req *aq; + struct otx2_pool *pool; + int err; + + pool = &pfvf->qset.pool[pool_id]; + + /* Allocate memory for HW to update Aura count. + * Alloc one cache line, so that it fits all FC_STYPE modes. 
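+ * Its IOVA is programmed below as the aura's fc_addr (with fc_ena set) so
+ * hardware posts free-count updates to it; otx2_sq_init() keeps a CPU
+ * pointer to the same buffer in sq->aura_fc_addr.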
+ */ + if (!pool->fc_addr) { + err = qmem_alloc(pfvf->dev, &pool->fc_addr, 1, OTX2_ALIGN); + if (err) + return err; + } + + /* Initialize this aura's context via AF */ + aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox); + if (!aq) { + /* Shared mbox memory buffer is full, flush it and retry */ + err = otx2_sync_mbox_msg(&pfvf->mbox); + if (err) + return err; + aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox); + if (!aq) + return -ENOMEM; + } + + aq->aura_id = aura_id; + /* Will be filled by AF with correct pool context address */ + aq->aura.pool_addr = pool_id; + aq->aura.pool_caching = 1; + aq->aura.shift = ilog2(numptrs) - 8; + aq->aura.count = numptrs; + aq->aura.limit = numptrs; + aq->aura.avg_level = 255; + aq->aura.ena = 1; + aq->aura.fc_ena = 1; + aq->aura.fc_addr = pool->fc_addr->iova; + aq->aura.fc_hyst_bits = 0; /* Store count on all updates */ + + /* Enable backpressure for RQ aura */ + if (aura_id < pfvf->hw.rqpool_cnt) { + aq->aura.bp_ena = 0; + /* If NIX1 LF is attached then specify NIX1_RX. + * + * Below NPA_AURA_S[BP_ENA] is set according to the + * NPA_BPINTF_E enumeration given as: + * 0x0 + a*0x1 where 'a' is 0 for NIX0_RX and 1 for NIX1_RX so + * NIX0_RX is 0x0 + 0*0x1 = 0 + * NIX1_RX is 0x0 + 1*0x1 = 1 + * But in HRM it is given that + * "NPA_AURA_S[BP_ENA](w1[33:32]) - Enable aura backpressure to + * NIX-RX based on [BP] level. One bit per NIX-RX; index + * enumerated by NPA_BPINTF_E." + */ + if (pfvf->nix_blkaddr == BLKADDR_NIX1) + aq->aura.bp_ena = 1; + aq->aura.nix0_bpid = pfvf->bpid[0]; + + /* Set backpressure level for RQ's Aura */ + aq->aura.bp = RQ_BP_LVL_AURA; + } + + /* Fill AQ info */ + aq->ctype = NPA_AQ_CTYPE_AURA; + aq->op = NPA_AQ_INSTOP_INIT; + + return 0; +} + +static int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id, + int stack_pages, int numptrs, int buf_size) +{ + struct npa_aq_enq_req *aq; + struct otx2_pool *pool; + int err; + + pool = &pfvf->qset.pool[pool_id]; + /* Alloc memory for stack which is used to store buffer pointers */ + err = qmem_alloc(pfvf->dev, &pool->stack, + stack_pages, pfvf->hw.stack_pg_bytes); + if (err) + return err; + + pool->rbsize = buf_size; + + /* Initialize this pool's context via AF */ + aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox); + if (!aq) { + /* Shared mbox memory buffer is full, flush it and retry */ + err = otx2_sync_mbox_msg(&pfvf->mbox); + if (err) { + qmem_free(pfvf->dev, pool->stack); + return err; + } + aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox); + if (!aq) { + qmem_free(pfvf->dev, pool->stack); + return -ENOMEM; + } + } + + aq->aura_id = pool_id; + aq->pool.stack_base = pool->stack->iova; + aq->pool.stack_caching = 1; + aq->pool.ena = 1; + aq->pool.buf_size = buf_size / 128; + aq->pool.stack_max_pages = stack_pages; + aq->pool.shift = ilog2(numptrs) - 8; + aq->pool.ptr_start = 0; + aq->pool.ptr_end = ~0ULL; + + /* Fill AQ info */ + aq->ctype = NPA_AQ_CTYPE_POOL; + aq->op = NPA_AQ_INSTOP_INIT; + + return 0; +} + +int otx2_sq_aura_pool_init(struct otx2_nic *pfvf) +{ + int qidx, pool_id, stack_pages, num_sqbs; + struct otx2_qset *qset = &pfvf->qset; + struct otx2_hw *hw = &pfvf->hw; + struct otx2_snd_queue *sq; + struct otx2_pool *pool; + int err, ptr; + s64 bufptr; + + /* Calculate number of SQBs needed. + * + * For a 128byte SQE, and 4K size SQB, 31 SQEs will fit in one SQB. + * Last SQE is used for pointing to next SQB. 
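+ * E.g. with a 4K SQB and a 4096-entry SQ this works out to
+ * (4096 + 31) / 31 = 133 SQBs.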
+ */ + num_sqbs = (hw->sqb_size / 128) - 1; + num_sqbs = (qset->sqe_cnt + num_sqbs) / num_sqbs; + + /* Get no of stack pages needed */ + stack_pages = + (num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs; + + for (qidx = 0; qidx < hw->tx_queues; qidx++) { + pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx); + /* Initialize aura context */ + err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs); + if (err) + goto fail; + + /* Initialize pool context */ + err = otx2_pool_init(pfvf, pool_id, stack_pages, + num_sqbs, hw->sqb_size); + if (err) + goto fail; + } + + /* Flush accumulated messages */ + err = otx2_sync_mbox_msg(&pfvf->mbox); + if (err) + goto fail; + + /* Allocate pointers and free them to aura/pool */ + for (qidx = 0; qidx < hw->tx_queues; qidx++) { + pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx); + pool = &pfvf->qset.pool[pool_id]; + + sq = &qset->sq[qidx]; + sq->sqb_count = 0; + sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(u64 *), GFP_KERNEL); + if (!sq->sqb_ptrs) + return -ENOMEM; + + for (ptr = 0; ptr < num_sqbs; ptr++) { + bufptr = otx2_alloc_rbuf(pfvf, pool); + if (bufptr <= 0) + return bufptr; + otx2_aura_freeptr(pfvf, pool_id, bufptr); + sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr; + } + } + + return 0; +fail: + otx2_mbox_reset(&pfvf->mbox.mbox, 0); + otx2_aura_pool_free(pfvf); + return err; +} + +int otx2_rq_aura_pool_init(struct otx2_nic *pfvf) +{ + struct otx2_hw *hw = &pfvf->hw; + int stack_pages, pool_id, rq; + struct otx2_pool *pool; + int err, ptr, num_ptrs; + s64 bufptr; + + num_ptrs = pfvf->qset.rqe_cnt; + + stack_pages = + (num_ptrs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs; + + for (rq = 0; rq < hw->rx_queues; rq++) { + pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, rq); + /* Initialize aura context */ + err = otx2_aura_init(pfvf, pool_id, pool_id, num_ptrs); + if (err) + goto fail; + } + for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) { + err = otx2_pool_init(pfvf, pool_id, stack_pages, + num_ptrs, pfvf->rbsize); + if (err) + goto fail; + } + + /* Flush accumulated messages */ + err = otx2_sync_mbox_msg(&pfvf->mbox); + if (err) + goto fail; + + /* Allocate pointers and free them to aura/pool */ + for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) { + pool = &pfvf->qset.pool[pool_id]; + for (ptr = 0; ptr < num_ptrs; ptr++) { + bufptr = otx2_alloc_rbuf(pfvf, pool); + if (bufptr <= 0) + return bufptr; + otx2_aura_freeptr(pfvf, pool_id, + bufptr + OTX2_HEAD_ROOM); + } + } + + return 0; +fail: + otx2_mbox_reset(&pfvf->mbox.mbox, 0); + otx2_aura_pool_free(pfvf); + return err; +} + +int otx2_config_npa(struct otx2_nic *pfvf) +{ + struct otx2_qset *qset = &pfvf->qset; + struct npa_lf_alloc_req *npalf; + struct otx2_hw *hw = &pfvf->hw; + int aura_cnt; + + /* Pool - Stack of free buffer pointers + * Aura - Alloc/frees pointers from/to pool for NIX DMA. + */ + + if (!hw->pool_cnt) + return -EINVAL; + + qset->pool = devm_kcalloc(pfvf->dev, hw->pool_cnt, + sizeof(struct otx2_pool), GFP_KERNEL); + if (!qset->pool) + return -ENOMEM; + + /* Get memory to put this msg */ + npalf = otx2_mbox_alloc_msg_npa_lf_alloc(&pfvf->mbox); + if (!npalf) + return -ENOMEM; + + /* Set aura and pool counts */ + npalf->nr_pools = hw->pool_cnt; + aura_cnt = ilog2(roundup_pow_of_two(hw->pool_cnt)); + npalf->aura_sz = (aura_cnt >= ilog2(128)) ? 
(aura_cnt - 6) : 1; + + return otx2_sync_mbox_msg(&pfvf->mbox); +} + +int otx2_detach_resources(struct mbox *mbox) +{ + struct rsrc_detach *detach; + + mutex_lock(&mbox->lock); + detach = otx2_mbox_alloc_msg_detach_resources(mbox); + if (!detach) { + mutex_unlock(&mbox->lock); + return -ENOMEM; + } + + /* detach all */ + detach->partial = false; + + /* Send detach request to AF */ + otx2_mbox_msg_send(&mbox->mbox, 0); + mutex_unlock(&mbox->lock); + return 0; +} +EXPORT_SYMBOL(otx2_detach_resources); + +int otx2_attach_npa_nix(struct otx2_nic *pfvf) +{ + struct rsrc_attach *attach; + struct msg_req *msix; + int err; + + mutex_lock(&pfvf->mbox.lock); + /* Get memory to put this msg */ + attach = otx2_mbox_alloc_msg_attach_resources(&pfvf->mbox); + if (!attach) { + mutex_unlock(&pfvf->mbox.lock); + return -ENOMEM; + } + + attach->npalf = true; + attach->nixlf = true; + + /* Send attach request to AF */ + err = otx2_sync_mbox_msg(&pfvf->mbox); + if (err) { + mutex_unlock(&pfvf->mbox.lock); + return err; + } + + pfvf->nix_blkaddr = BLKADDR_NIX0; + + /* If the platform has two NIX blocks then LF may be + * allocated from NIX1. + */ + if (otx2_read64(pfvf, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_NIX1)) & 0x1FFULL) + pfvf->nix_blkaddr = BLKADDR_NIX1; + + /* Get NPA and NIX MSIX vector offsets */ + msix = otx2_mbox_alloc_msg_msix_offset(&pfvf->mbox); + if (!msix) { + mutex_unlock(&pfvf->mbox.lock); + return -ENOMEM; + } + + err = otx2_sync_mbox_msg(&pfvf->mbox); + if (err) { + mutex_unlock(&pfvf->mbox.lock); + return err; + } + mutex_unlock(&pfvf->mbox.lock); + + if (pfvf->hw.npa_msixoff == MSIX_VECTOR_INVALID || + pfvf->hw.nix_msixoff == MSIX_VECTOR_INVALID) { + dev_err(pfvf->dev, + "RVUPF: Invalid MSIX vector offset for NPA/NIX\n"); + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL(otx2_attach_npa_nix); + +void otx2_ctx_disable(struct mbox *mbox, int type, bool npa) +{ + struct hwctx_disable_req *req; + + mutex_lock(&mbox->lock); + /* Request AQ to disable this context */ + if (npa) + req = otx2_mbox_alloc_msg_npa_hwctx_disable(mbox); + else + req = otx2_mbox_alloc_msg_nix_hwctx_disable(mbox); + + if (!req) { + mutex_unlock(&mbox->lock); + return; + } + + req->ctype = type; + + if (otx2_sync_mbox_msg(mbox)) + dev_err(mbox->pfvf->dev, "%s failed to disable context\n", + __func__); + + mutex_unlock(&mbox->lock); +} + +int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable) +{ + struct nix_bp_cfg_req *req; + + if (enable) + req = otx2_mbox_alloc_msg_nix_bp_enable(&pfvf->mbox); + else + req = otx2_mbox_alloc_msg_nix_bp_disable(&pfvf->mbox); + + if (!req) + return -ENOMEM; + + req->chan_base = 0; + req->chan_cnt = 1; + req->bpid_per_chan = 0; + + return otx2_sync_mbox_msg(&pfvf->mbox); +} + +/* Mbox message handlers */ +void mbox_handler_cgx_stats(struct otx2_nic *pfvf, + struct cgx_stats_rsp *rsp) +{ + int id; + + for (id = 0; id < CGX_RX_STATS_COUNT; id++) + pfvf->hw.cgx_rx_stats[id] = rsp->rx_stats[id]; + for (id = 0; id < CGX_TX_STATS_COUNT; id++) + pfvf->hw.cgx_tx_stats[id] = rsp->tx_stats[id]; +} + +void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf, + struct nix_txsch_alloc_rsp *rsp) +{ + int lvl, schq; + + /* Setup transmit scheduler list */ + for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) + for (schq = 0; schq < rsp->schq[lvl]; schq++) + pf->hw.txschq_list[lvl][schq] = + rsp->schq_list[lvl][schq]; +} +EXPORT_SYMBOL(mbox_handler_nix_txsch_alloc); + +void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf, + struct npa_lf_alloc_rsp *rsp) +{ + pfvf->hw.stack_pg_ptrs = rsp->stack_pg_ptrs; + 
pfvf->hw.stack_pg_bytes = rsp->stack_pg_bytes; +} +EXPORT_SYMBOL(mbox_handler_npa_lf_alloc); + +void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf, + struct nix_lf_alloc_rsp *rsp) +{ + pfvf->hw.sqb_size = rsp->sqb_size; + pfvf->hw.rx_chan_base = rsp->rx_chan_base; + pfvf->hw.tx_chan_base = rsp->tx_chan_base; + pfvf->hw.lso_tsov4_idx = rsp->lso_tsov4_idx; + pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx; +} +EXPORT_SYMBOL(mbox_handler_nix_lf_alloc); + +void mbox_handler_msix_offset(struct otx2_nic *pfvf, + struct msix_offset_rsp *rsp) +{ + pfvf->hw.npa_msixoff = rsp->npa_msixoff; + pfvf->hw.nix_msixoff = rsp->nix_msixoff; +} +EXPORT_SYMBOL(mbox_handler_msix_offset); + +void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf, + struct nix_bp_cfg_rsp *rsp) +{ + int chan, chan_id; + + for (chan = 0; chan < rsp->chan_cnt; chan++) { + chan_id = ((rsp->chan_bpid[chan] >> 10) & 0x7F); + pfvf->bpid[chan_id] = rsp->chan_bpid[chan] & 0x3FF; + } +} +EXPORT_SYMBOL(mbox_handler_nix_bp_enable); + +void otx2_free_cints(struct otx2_nic *pfvf, int n) +{ + struct otx2_qset *qset = &pfvf->qset; + struct otx2_hw *hw = &pfvf->hw; + int irq, qidx; + + for (qidx = 0, irq = hw->nix_msixoff + NIX_LF_CINT_VEC_START; + qidx < n; + qidx++, irq++) { + int vector = pci_irq_vector(pfvf->pdev, irq); + + irq_set_affinity_hint(vector, NULL); + free_cpumask_var(hw->affinity_mask[irq]); + free_irq(vector, &qset->napi[qidx]); + } +} + +void otx2_set_cints_affinity(struct otx2_nic *pfvf) +{ + struct otx2_hw *hw = &pfvf->hw; + int vec, cpu, irq, cint; + + vec = hw->nix_msixoff + NIX_LF_CINT_VEC_START; + cpu = cpumask_first(cpu_online_mask); + + /* CQ interrupts */ + for (cint = 0; cint < pfvf->hw.cint_cnt; cint++, vec++) { + if (!alloc_cpumask_var(&hw->affinity_mask[vec], GFP_KERNEL)) + return; + + cpumask_set_cpu(cpu, hw->affinity_mask[vec]); + + irq = pci_irq_vector(pfvf->pdev, vec); + irq_set_affinity_hint(irq, hw->affinity_mask[vec]); + + cpu = cpumask_next(cpu, cpu_online_mask); + if (unlikely(cpu >= nr_cpu_ids)) + cpu = 0; + } +} + +#define M(_name, _id, _fn_name, _req_type, _rsp_type) \ +int __weak \ +otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \ + struct _req_type *req, \ + struct _rsp_type *rsp) \ +{ \ + /* Nothing to do here */ \ + return 0; \ +} \ +EXPORT_SYMBOL(otx2_mbox_up_handler_ ## _fn_name); +MBOX_UP_CGX_MESSAGES +#undef M diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h new file mode 100644 index 000000000..d6253f2a4 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -0,0 +1,645 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell OcteonTx2 RVU Ethernet driver + * + * Copyright (C) 2020 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef OTX2_COMMON_H +#define OTX2_COMMON_H + +#include <linux/pci.h> +#include <linux/iommu.h> +#include <linux/net_tstamp.h> +#include <linux/ptp_clock_kernel.h> +#include <linux/timecounter.h> + +#include <mbox.h> +#include "otx2_reg.h" +#include "otx2_txrx.h" +#include <rvu_trace.h> + +/* PCI device IDs */ +#define PCI_DEVID_OCTEONTX2_RVU_PF 0xA063 +#define PCI_DEVID_OCTEONTX2_RVU_VF 0xA064 +#define PCI_DEVID_OCTEONTX2_RVU_AFVF 0xA0F8 + +#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200 + +/* PCI BAR nos */ +#define PCI_CFG_REG_BAR_NUM 2 +#define PCI_MBOX_BAR_NUM 4 + +#define NAME_SIZE 32 + +enum arua_mapped_qtypes { + AURA_NIX_RQ, + AURA_NIX_SQ, +}; + +/* NIX LF interrupts range*/ +#define NIX_LF_QINT_VEC_START 0x00 +#define NIX_LF_CINT_VEC_START 0x40 +#define NIX_LF_GINT_VEC 0x80 +#define NIX_LF_ERR_VEC 0x81 +#define NIX_LF_POISON_VEC 0x82 + +/* RSS configuration */ +struct otx2_rss_info { + u8 enable; + u32 flowkey_cfg; + u16 rss_size; + u8 ind_tbl[MAX_RSS_INDIR_TBL_SIZE]; +#define RSS_HASH_KEY_SIZE 44 /* 352 bit key */ + u8 key[RSS_HASH_KEY_SIZE]; +}; + +/* NIX (or NPC) RX errors */ +enum otx2_errlvl { + NPC_ERRLVL_RE, + NPC_ERRLVL_LID_LA, + NPC_ERRLVL_LID_LB, + NPC_ERRLVL_LID_LC, + NPC_ERRLVL_LID_LD, + NPC_ERRLVL_LID_LE, + NPC_ERRLVL_LID_LF, + NPC_ERRLVL_LID_LG, + NPC_ERRLVL_LID_LH, + NPC_ERRLVL_NIX = 0x0F, +}; + +enum otx2_errcodes_re { + /* NPC_ERRLVL_RE errcodes */ + ERRCODE_FCS = 0x7, + ERRCODE_FCS_RCV = 0x8, + ERRCODE_UNDERSIZE = 0x10, + ERRCODE_OVERSIZE = 0x11, + ERRCODE_OL2_LEN_MISMATCH = 0x12, + /* NPC_ERRLVL_NIX errcodes */ + ERRCODE_OL3_LEN = 0x10, + ERRCODE_OL4_LEN = 0x11, + ERRCODE_OL4_CSUM = 0x12, + ERRCODE_IL3_LEN = 0x20, + ERRCODE_IL4_LEN = 0x21, + ERRCODE_IL4_CSUM = 0x22, +}; + +/* NIX TX stats */ +enum nix_stat_lf_tx { + TX_UCAST = 0x0, + TX_BCAST = 0x1, + TX_MCAST = 0x2, + TX_DROP = 0x3, + TX_OCTS = 0x4, + TX_STATS_ENUM_LAST, +}; + +/* NIX RX stats */ +enum nix_stat_lf_rx { + RX_OCTS = 0x0, + RX_UCAST = 0x1, + RX_BCAST = 0x2, + RX_MCAST = 0x3, + RX_DROP = 0x4, + RX_DROP_OCTS = 0x5, + RX_FCS = 0x6, + RX_ERR = 0x7, + RX_DRP_BCAST = 0x8, + RX_DRP_MCAST = 0x9, + RX_DRP_L3BCAST = 0xa, + RX_DRP_L3MCAST = 0xb, + RX_STATS_ENUM_LAST, +}; + +struct otx2_dev_stats { + u64 rx_bytes; + u64 rx_frames; + u64 rx_ucast_frames; + u64 rx_bcast_frames; + u64 rx_mcast_frames; + u64 rx_drops; + + u64 tx_bytes; + u64 tx_frames; + u64 tx_ucast_frames; + u64 tx_bcast_frames; + u64 tx_mcast_frames; + u64 tx_drops; +}; + +/* Driver counted stats */ +struct otx2_drv_stats { + atomic_t rx_fcs_errs; + atomic_t rx_oversize_errs; + atomic_t rx_undersize_errs; + atomic_t rx_csum_errs; + atomic_t rx_len_errs; + atomic_t rx_other_errs; +}; + +struct mbox { + struct otx2_mbox mbox; + struct work_struct mbox_wrk; + struct otx2_mbox mbox_up; + struct work_struct mbox_up_wrk; + struct otx2_nic *pfvf; + void *bbuf_base; /* Bounce buffer for mbox memory */ + struct mutex lock; /* serialize mailbox access */ + int num_msgs; /* mbox number of messages */ + int up_num_msgs; /* mbox_up number of messages */ +}; + +struct otx2_hw { + struct pci_dev *pdev; + struct otx2_rss_info rss_info; + u16 rx_queues; + u16 tx_queues; + u16 max_queues; + u16 pool_cnt; + u16 rqpool_cnt; + u16 sqpool_cnt; + + /* NPA */ + u32 stack_pg_ptrs; /* No of ptrs per stack page */ + u32 stack_pg_bytes; /* Size of stack page */ + u16 sqb_size; + + /* NIX */ + u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC]; + + /* HW settings, coalescing etc */ + u16 rx_chan_base; + u16 tx_chan_base; + u16 cq_qcount_wait; + u16 
cq_ecount_wait; + u16 rq_skid; + u8 cq_time_wait; + + /* Segmentation */ + u8 lso_tsov4_idx; + u8 lso_tsov6_idx; + u8 lso_udpv4_idx; + u8 lso_udpv6_idx; + u8 hw_tso; + + /* MSI-X */ + u8 cint_cnt; /* CQ interrupt count */ + u16 npa_msixoff; /* Offset of NPA vectors */ + u16 nix_msixoff; /* Offset of NIX vectors */ + char *irq_name; + cpumask_var_t *affinity_mask; + + /* Stats */ + struct otx2_dev_stats dev_stats; + struct otx2_drv_stats drv_stats; + u64 cgx_rx_stats[CGX_RX_STATS_COUNT]; + u64 cgx_tx_stats[CGX_TX_STATS_COUNT]; +}; + +struct otx2_vf_config { + struct otx2_nic *pf; + struct delayed_work link_event_work; + bool intf_down; /* interface was either configured or not */ +}; + +struct flr_work { + struct work_struct work; + struct otx2_nic *pf; +}; + +struct refill_work { + struct delayed_work pool_refill_work; + struct otx2_nic *pf; +}; + +struct otx2_ptp { + struct ptp_clock_info ptp_info; + struct ptp_clock *ptp_clock; + struct otx2_nic *nic; + + struct cyclecounter cycle_counter; + struct timecounter time_counter; +}; + +#define OTX2_HW_TIMESTAMP_LEN 8 + +struct otx2_nic { + void __iomem *reg_base; + struct net_device *netdev; + void *iommu_domain; + u16 max_frs; + u16 rbsize; /* Receive buffer size */ + +#define OTX2_FLAG_RX_TSTAMP_ENABLED BIT_ULL(0) +#define OTX2_FLAG_TX_TSTAMP_ENABLED BIT_ULL(1) +#define OTX2_FLAG_INTF_DOWN BIT_ULL(2) +#define OTX2_FLAG_RX_PAUSE_ENABLED BIT_ULL(9) +#define OTX2_FLAG_TX_PAUSE_ENABLED BIT_ULL(10) + u64 flags; + + struct otx2_qset qset; + struct otx2_hw hw; + struct pci_dev *pdev; + struct device *dev; + + /* Mbox */ + struct mbox mbox; + struct mbox *mbox_pfvf; + struct workqueue_struct *mbox_wq; + struct workqueue_struct *mbox_pfvf_wq; + + u8 total_vfs; + u16 pcifunc; /* RVU PF_FUNC */ + u16 bpid[NIX_MAX_BPID_CHAN]; + struct otx2_vf_config *vf_configs; + struct cgx_link_user_info linfo; + + u64 reset_count; + struct work_struct reset_task; + struct workqueue_struct *flr_wq; + struct flr_work *flr_wrk; + struct refill_work *refill_wrk; + struct workqueue_struct *otx2_wq; + struct work_struct rx_mode_work; + + /* Ethtool stuff */ + u32 msg_enable; + + /* Block address of NIX either BLKADDR_NIX0 or BLKADDR_NIX1 */ + int nix_blkaddr; + + struct otx2_ptp *ptp; + struct hwtstamp_config tstamp; +}; + +static inline bool is_otx2_lbkvf(struct pci_dev *pdev) +{ + return pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF; +} + +static inline bool is_96xx_A0(struct pci_dev *pdev) +{ + return (pdev->revision == 0x00) && + (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF); +} + +static inline bool is_96xx_B0(struct pci_dev *pdev) +{ + return (pdev->revision == 0x01) && + (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF); +} + +static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf) +{ + struct otx2_hw *hw = &pfvf->hw; + + pfvf->hw.cq_time_wait = CQ_TIMER_THRESH_DEFAULT; + pfvf->hw.cq_ecount_wait = CQ_CQE_THRESH_DEFAULT; + pfvf->hw.cq_qcount_wait = CQ_QCOUNT_DEFAULT; + + hw->hw_tso = true; + + if (is_96xx_A0(pfvf->pdev)) { + hw->hw_tso = false; + + /* Time based irq coalescing is not supported */ + pfvf->hw.cq_qcount_wait = 0x0; + + /* Due to HW issue previous silicons required minimum + * 600 unused CQE to avoid CQ overflow. 
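+ * This skid feeds the RQ_PASS_LVL_CQ()/RQ_DROP_LVL_CQ() thresholds at
+ * RQ/CQ init time, and otx2_set_ringparam() bumps any smaller RX ring
+ * request up to at least this value.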
+ */ + pfvf->hw.rq_skid = 600; + pfvf->qset.rqe_cnt = Q_COUNT(Q_SIZE_1K); + } +} + +/* Register read/write APIs */ +static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset) +{ + u64 blkaddr; + + switch ((offset >> RVU_FUNC_BLKADDR_SHIFT) & RVU_FUNC_BLKADDR_MASK) { + case BLKTYPE_NIX: + blkaddr = nic->nix_blkaddr; + break; + case BLKTYPE_NPA: + blkaddr = BLKADDR_NPA; + break; + default: + blkaddr = BLKADDR_RVUM; + break; + } + + offset &= ~(RVU_FUNC_BLKADDR_MASK << RVU_FUNC_BLKADDR_SHIFT); + offset |= (blkaddr << RVU_FUNC_BLKADDR_SHIFT); + + return nic->reg_base + offset; +} + +static inline void otx2_write64(struct otx2_nic *nic, u64 offset, u64 val) +{ + void __iomem *addr = otx2_get_regaddr(nic, offset); + + writeq(val, addr); +} + +static inline u64 otx2_read64(struct otx2_nic *nic, u64 offset) +{ + void __iomem *addr = otx2_get_regaddr(nic, offset); + + return readq(addr); +} + +/* Mbox bounce buffer APIs */ +static inline int otx2_mbox_bbuf_init(struct mbox *mbox, struct pci_dev *pdev) +{ + struct otx2_mbox *otx2_mbox; + struct otx2_mbox_dev *mdev; + + mbox->bbuf_base = devm_kmalloc(&pdev->dev, MBOX_SIZE, GFP_KERNEL); + if (!mbox->bbuf_base) + return -ENOMEM; + + /* Overwrite mbox mbase to point to bounce buffer, so that PF/VF + * prepare all mbox messages in bounce buffer instead of directly + * in hw mbox memory. + */ + otx2_mbox = &mbox->mbox; + mdev = &otx2_mbox->dev[0]; + mdev->mbase = mbox->bbuf_base; + + otx2_mbox = &mbox->mbox_up; + mdev = &otx2_mbox->dev[0]; + mdev->mbase = mbox->bbuf_base; + return 0; +} + +static inline void otx2_sync_mbox_bbuf(struct otx2_mbox *mbox, int devid) +{ + u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN); + void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE); + struct otx2_mbox_dev *mdev = &mbox->dev[devid]; + struct mbox_hdr *hdr; + u64 msg_size; + + if (mdev->mbase == hw_mbase) + return; + + hdr = hw_mbase + mbox->rx_start; + msg_size = hdr->msg_size; + + if (msg_size > mbox->rx_size - msgs_offset) + msg_size = mbox->rx_size - msgs_offset; + + /* Copy mbox messages from mbox memory to bounce buffer */ + memcpy(mdev->mbase + mbox->rx_start, + hw_mbase + mbox->rx_start, msg_size + msgs_offset); +} + +/* With the absence of API for 128-bit IO memory access for arm64, + * implement required operations at place. + */ +#if defined(CONFIG_ARM64) +static inline void otx2_write128(u64 lo, u64 hi, void __iomem *addr) +{ + __asm__ volatile("stp %x[x0], %x[x1], [%x[p1],#0]!" 
+ ::[x0]"r"(lo), [x1]"r"(hi), [p1]"r"(addr)); +} + +static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr) +{ + u64 result; + + __asm__ volatile(".cpu generic+lse\n" + "ldadd %x[i], %x[r], [%[b]]" + : [r]"=r"(result), "+m"(*ptr) + : [i]"r"(incr), [b]"r"(ptr) + : "memory"); + return result; +} + +static inline u64 otx2_lmt_flush(uint64_t addr) +{ + u64 result = 0; + + __asm__ volatile(".cpu generic+lse\n" + "ldeor xzr,%x[rf],[%[rs]]" + : [rf]"=r"(result) + : [rs]"r"(addr)); + return result; +} + +#else +#define otx2_write128(lo, hi, addr) +#define otx2_atomic64_add(incr, ptr) ({ *ptr += incr; }) +#define otx2_lmt_flush(addr) ({ 0; }) +#endif + +/* Alloc pointer from pool/aura */ +static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura) +{ + u64 *ptr = (u64 *)otx2_get_regaddr(pfvf, + NPA_LF_AURA_OP_ALLOCX(0)); + u64 incr = (u64)aura | BIT_ULL(63); + + return otx2_atomic64_add(incr, ptr); +} + +/* Free pointer to a pool/aura */ +static inline void otx2_aura_freeptr(struct otx2_nic *pfvf, + int aura, s64 buf) +{ + otx2_write128((u64)buf, (u64)aura | BIT_ULL(63), + otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_FREE0)); +} + +static inline int otx2_get_pool_idx(struct otx2_nic *pfvf, int type, int idx) +{ + if (type == AURA_NIX_SQ) + return pfvf->hw.rqpool_cnt + idx; + + /* AURA_NIX_RQ */ + return idx; +} + +/* Mbox APIs */ +static inline int otx2_sync_mbox_msg(struct mbox *mbox) +{ + int err; + + if (!otx2_mbox_nonempty(&mbox->mbox, 0)) + return 0; + otx2_mbox_msg_send(&mbox->mbox, 0); + err = otx2_mbox_wait_for_rsp(&mbox->mbox, 0); + if (err) + return err; + + return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0); +} + +static inline int otx2_sync_mbox_up_msg(struct mbox *mbox, int devid) +{ + int err; + + if (!otx2_mbox_nonempty(&mbox->mbox_up, devid)) + return 0; + otx2_mbox_msg_send(&mbox->mbox_up, devid); + err = otx2_mbox_wait_for_rsp(&mbox->mbox_up, devid); + if (err) + return err; + + return otx2_mbox_check_rsp_msgs(&mbox->mbox_up, devid); +} + +/* Use this API to send mbox msgs in atomic context + * where sleeping is not allowed + */ +static inline int otx2_sync_mbox_msg_busy_poll(struct mbox *mbox) +{ + int err; + + if (!otx2_mbox_nonempty(&mbox->mbox, 0)) + return 0; + otx2_mbox_msg_send(&mbox->mbox, 0); + err = otx2_mbox_busy_poll_for_rsp(&mbox->mbox, 0); + if (err) + return err; + + return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0); +} + +#define M(_name, _id, _fn_name, _req_type, _rsp_type) \ +static struct _req_type __maybe_unused \ +*otx2_mbox_alloc_msg_ ## _fn_name(struct mbox *mbox) \ +{ \ + struct _req_type *req; \ + \ + req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \ + &mbox->mbox, 0, sizeof(struct _req_type), \ + sizeof(struct _rsp_type)); \ + if (!req) \ + return NULL; \ + req->hdr.sig = OTX2_MBOX_REQ_SIG; \ + req->hdr.id = _id; \ + trace_otx2_msg_alloc(mbox->mbox.pdev, _id, sizeof(*req)); \ + return req; \ +} + +MBOX_MESSAGES +#undef M + +#define M(_name, _id, _fn_name, _req_type, _rsp_type) \ +int \ +otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \ + struct _req_type *req, \ + struct _rsp_type *rsp); \ + +MBOX_UP_CGX_MESSAGES +#undef M + +/* Time to wait before watchdog kicks off */ +#define OTX2_TX_TIMEOUT (100 * HZ) + +#define RVU_PFVF_PF_SHIFT 10 +#define RVU_PFVF_PF_MASK 0x3F +#define RVU_PFVF_FUNC_SHIFT 0 +#define RVU_PFVF_FUNC_MASK 0x3FF + +static inline int rvu_get_pf(u16 pcifunc) +{ + return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK; +} + +static inline dma_addr_t otx2_dma_map_page(struct otx2_nic *pfvf, + struct page *page, + size_t offset, 
size_t size, + enum dma_data_direction dir) +{ + dma_addr_t iova; + + iova = dma_map_page_attrs(pfvf->dev, page, + offset, size, dir, DMA_ATTR_SKIP_CPU_SYNC); + if (unlikely(dma_mapping_error(pfvf->dev, iova))) + return (dma_addr_t)NULL; + return iova; +} + +static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf, + dma_addr_t addr, size_t size, + enum dma_data_direction dir) +{ + dma_unmap_page_attrs(pfvf->dev, addr, size, + dir, DMA_ATTR_SKIP_CPU_SYNC); +} + +/* MSI-X APIs */ +void otx2_free_cints(struct otx2_nic *pfvf, int n); +void otx2_set_cints_affinity(struct otx2_nic *pfvf); +int otx2_set_mac_address(struct net_device *netdev, void *p); +int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu); +void otx2_tx_timeout(struct net_device *netdev, unsigned int txq); +void otx2_get_mac_from_af(struct net_device *netdev); +void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx); +int otx2_config_pause_frm(struct otx2_nic *pfvf); +void otx2_setup_segmentation(struct otx2_nic *pfvf); + +/* RVU block related APIs */ +int otx2_attach_npa_nix(struct otx2_nic *pfvf); +int otx2_detach_resources(struct mbox *mbox); +int otx2_config_npa(struct otx2_nic *pfvf); +int otx2_sq_aura_pool_init(struct otx2_nic *pfvf); +int otx2_rq_aura_pool_init(struct otx2_nic *pfvf); +void otx2_aura_pool_free(struct otx2_nic *pfvf); +void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type); +void otx2_sq_free_sqbs(struct otx2_nic *pfvf); +int otx2_config_nix(struct otx2_nic *pfvf); +int otx2_config_nix_queues(struct otx2_nic *pfvf); +int otx2_txschq_config(struct otx2_nic *pfvf, int lvl); +int otx2_txsch_alloc(struct otx2_nic *pfvf); +int otx2_txschq_stop(struct otx2_nic *pfvf); +void otx2_sqb_flush(struct otx2_nic *pfvf); +dma_addr_t __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool); +int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable); +void otx2_ctx_disable(struct mbox *mbox, int type, bool npa); +int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable); +void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq); +void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq); + +/* RSS configuration APIs*/ +int otx2_rss_init(struct otx2_nic *pfvf); +int otx2_set_flowkey_cfg(struct otx2_nic *pfvf); +void otx2_set_rss_key(struct otx2_nic *pfvf); +int otx2_set_rss_table(struct otx2_nic *pfvf); + +/* Mbox handlers */ +void mbox_handler_msix_offset(struct otx2_nic *pfvf, + struct msix_offset_rsp *rsp); +void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf, + struct npa_lf_alloc_rsp *rsp); +void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf, + struct nix_lf_alloc_rsp *rsp); +void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf, + struct nix_txsch_alloc_rsp *rsp); +void mbox_handler_cgx_stats(struct otx2_nic *pfvf, + struct cgx_stats_rsp *rsp); +void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf, + struct nix_bp_cfg_rsp *rsp); + +/* Device stats APIs */ +void otx2_get_dev_stats(struct otx2_nic *pfvf); +void otx2_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats); +void otx2_update_lmac_stats(struct otx2_nic *pfvf); +int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx); +int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx); +void otx2_set_ethtool_ops(struct net_device *netdev); +void otx2vf_set_ethtool_ops(struct net_device *netdev); + +int otx2_open(struct net_device *netdev); +int otx2_stop(struct net_device *netdev); +int otx2_set_real_num_queues(struct net_device *netdev, + int tx_queues, int rx_queues); +#endif /* 
OTX2_COMMON_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c new file mode 100644 index 000000000..fc4ca8246 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c @@ -0,0 +1,832 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Ethernet driver + * + * Copyright (C) 2020 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/pci.h> +#include <linux/ethtool.h> +#include <linux/stddef.h> +#include <linux/etherdevice.h> +#include <linux/log2.h> +#include <linux/net_tstamp.h> + +#include "otx2_common.h" +#include "otx2_ptp.h" + +#define DRV_NAME "octeontx2-nicpf" +#define DRV_VF_NAME "octeontx2-nicvf" + +struct otx2_stat { + char name[ETH_GSTRING_LEN]; + unsigned int index; +}; + +/* HW device stats */ +#define OTX2_DEV_STAT(stat) { \ + .name = #stat, \ + .index = offsetof(struct otx2_dev_stats, stat) / sizeof(u64), \ +} + +static const struct otx2_stat otx2_dev_stats[] = { + OTX2_DEV_STAT(rx_ucast_frames), + OTX2_DEV_STAT(rx_bcast_frames), + OTX2_DEV_STAT(rx_mcast_frames), + + OTX2_DEV_STAT(tx_ucast_frames), + OTX2_DEV_STAT(tx_bcast_frames), + OTX2_DEV_STAT(tx_mcast_frames), +}; + +/* Driver level stats */ +#define OTX2_DRV_STAT(stat) { \ + .name = #stat, \ + .index = offsetof(struct otx2_drv_stats, stat) / sizeof(atomic_t), \ +} + +static const struct otx2_stat otx2_drv_stats[] = { + OTX2_DRV_STAT(rx_fcs_errs), + OTX2_DRV_STAT(rx_oversize_errs), + OTX2_DRV_STAT(rx_undersize_errs), + OTX2_DRV_STAT(rx_csum_errs), + OTX2_DRV_STAT(rx_len_errs), + OTX2_DRV_STAT(rx_other_errs), +}; + +static const struct otx2_stat otx2_queue_stats[] = { + { "bytes", 0 }, + { "frames", 1 }, +}; + +static const unsigned int otx2_n_dev_stats = ARRAY_SIZE(otx2_dev_stats); +static const unsigned int otx2_n_drv_stats = ARRAY_SIZE(otx2_drv_stats); +static const unsigned int otx2_n_queue_stats = ARRAY_SIZE(otx2_queue_stats); + +static void otx2_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->bus_info, pci_name(pfvf->pdev), sizeof(info->bus_info)); +} + +static void otx2_get_qset_strings(struct otx2_nic *pfvf, u8 **data, int qset) +{ + int start_qidx = qset * pfvf->hw.rx_queues; + int qidx, stats; + + for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) { + for (stats = 0; stats < otx2_n_queue_stats; stats++) { + sprintf(*data, "rxq%d: %s", qidx + start_qidx, + otx2_queue_stats[stats].name); + *data += ETH_GSTRING_LEN; + } + } + for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) { + for (stats = 0; stats < otx2_n_queue_stats; stats++) { + sprintf(*data, "txq%d: %s", qidx + start_qidx, + otx2_queue_stats[stats].name); + *data += ETH_GSTRING_LEN; + } + } +} + +static void otx2_get_strings(struct net_device *netdev, u32 sset, u8 *data) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + int stats; + + if (sset != ETH_SS_STATS) + return; + + for (stats = 0; stats < otx2_n_dev_stats; stats++) { + memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + + for (stats = 0; stats < otx2_n_drv_stats; stats++) { + memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + + otx2_get_qset_strings(pfvf, &data, 0); + + 
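+ /* CGX (MAC) stats below are PF only; otx2vf_get_strings() skips them */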
for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++) { + sprintf(data, "cgx_rxstat%d: ", stats); + data += ETH_GSTRING_LEN; + } + + for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++) { + sprintf(data, "cgx_txstat%d: ", stats); + data += ETH_GSTRING_LEN; + } + + strcpy(data, "reset_count"); + data += ETH_GSTRING_LEN; +} + +static void otx2_get_qset_stats(struct otx2_nic *pfvf, + struct ethtool_stats *stats, u64 **data) +{ + int stat, qidx; + + if (!pfvf) + return; + for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) { + if (!otx2_update_rq_stats(pfvf, qidx)) { + for (stat = 0; stat < otx2_n_queue_stats; stat++) + *((*data)++) = 0; + continue; + } + for (stat = 0; stat < otx2_n_queue_stats; stat++) + *((*data)++) = ((u64 *)&pfvf->qset.rq[qidx].stats) + [otx2_queue_stats[stat].index]; + } + + for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) { + if (!otx2_update_sq_stats(pfvf, qidx)) { + for (stat = 0; stat < otx2_n_queue_stats; stat++) + *((*data)++) = 0; + continue; + } + for (stat = 0; stat < otx2_n_queue_stats; stat++) + *((*data)++) = ((u64 *)&pfvf->qset.sq[qidx].stats) + [otx2_queue_stats[stat].index]; + } +} + +/* Get device and per queue statistics */ +static void otx2_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + int stat; + + otx2_get_dev_stats(pfvf); + for (stat = 0; stat < otx2_n_dev_stats; stat++) + *(data++) = ((u64 *)&pfvf->hw.dev_stats) + [otx2_dev_stats[stat].index]; + + for (stat = 0; stat < otx2_n_drv_stats; stat++) + *(data++) = atomic_read(&((atomic_t *)&pfvf->hw.drv_stats) + [otx2_drv_stats[stat].index]); + + otx2_get_qset_stats(pfvf, stats, &data); + otx2_update_lmac_stats(pfvf); + for (stat = 0; stat < CGX_RX_STATS_COUNT; stat++) + *(data++) = pfvf->hw.cgx_rx_stats[stat]; + for (stat = 0; stat < CGX_TX_STATS_COUNT; stat++) + *(data++) = pfvf->hw.cgx_tx_stats[stat]; + *(data++) = pfvf->reset_count; +} + +static int otx2_get_sset_count(struct net_device *netdev, int sset) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + int qstats_count; + + if (sset != ETH_SS_STATS) + return -EINVAL; + + qstats_count = otx2_n_queue_stats * + (pfvf->hw.rx_queues + pfvf->hw.tx_queues); + + return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count + + CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT + 1; +} + +/* Get no of queues device supports and current queue count */ +static void otx2_get_channels(struct net_device *dev, + struct ethtool_channels *channel) +{ + struct otx2_nic *pfvf = netdev_priv(dev); + + channel->max_rx = pfvf->hw.max_queues; + channel->max_tx = pfvf->hw.max_queues; + + channel->rx_count = pfvf->hw.rx_queues; + channel->tx_count = pfvf->hw.tx_queues; +} + +/* Set no of Tx, Rx queues to be used */ +static int otx2_set_channels(struct net_device *dev, + struct ethtool_channels *channel) +{ + struct otx2_nic *pfvf = netdev_priv(dev); + bool if_up = netif_running(dev); + int err = 0; + + if (!channel->rx_count || !channel->tx_count) + return -EINVAL; + + if (if_up) + dev->netdev_ops->ndo_stop(dev); + + err = otx2_set_real_num_queues(dev, channel->tx_count, + channel->rx_count); + if (err) + return err; + + pfvf->hw.rx_queues = channel->rx_count; + pfvf->hw.tx_queues = channel->tx_count; + pfvf->qset.cq_cnt = pfvf->hw.tx_queues + pfvf->hw.rx_queues; + + if (if_up) + err = dev->netdev_ops->ndo_open(dev); + + netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n", + pfvf->hw.tx_queues, pfvf->hw.rx_queues); + + return err; +} + +static void otx2_get_pauseparam(struct 
net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + struct cgx_pause_frm_cfg *req, *rsp; + + if (is_otx2_lbkvf(pfvf->pdev)) + return; + + mutex_lock(&pfvf->mbox.lock); + req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox); + if (!req) { + mutex_unlock(&pfvf->mbox.lock); + return; + } + + if (!otx2_sync_mbox_msg(&pfvf->mbox)) { + rsp = (struct cgx_pause_frm_cfg *) + otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); + pause->rx_pause = rsp->rx_pause; + pause->tx_pause = rsp->tx_pause; + } + mutex_unlock(&pfvf->mbox.lock); +} + +static int otx2_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + + if (pause->autoneg) + return -EOPNOTSUPP; + + if (is_otx2_lbkvf(pfvf->pdev)) + return -EOPNOTSUPP; + + if (pause->rx_pause) + pfvf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED; + else + pfvf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED; + + if (pause->tx_pause) + pfvf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED; + else + pfvf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED; + + return otx2_config_pause_frm(pfvf); +} + +static void otx2_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + struct otx2_qset *qs = &pfvf->qset; + + ring->rx_max_pending = Q_COUNT(Q_SIZE_MAX); + ring->rx_pending = qs->rqe_cnt ? qs->rqe_cnt : Q_COUNT(Q_SIZE_256); + ring->tx_max_pending = Q_COUNT(Q_SIZE_MAX); + ring->tx_pending = qs->sqe_cnt ? qs->sqe_cnt : Q_COUNT(Q_SIZE_4K); +} + +static int otx2_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + bool if_up = netif_running(netdev); + struct otx2_qset *qs = &pfvf->qset; + u32 rx_count, tx_count; + + if (ring->rx_mini_pending || ring->rx_jumbo_pending) + return -EINVAL; + + /* Permitted lengths are 16 64 256 1K 4K 16K 64K 256K 1M */ + rx_count = ring->rx_pending; + /* On some silicon variants a skid or reserved CQEs are + * needed to avoid CQ overflow. + */ + if (rx_count < pfvf->hw.rq_skid) + rx_count = pfvf->hw.rq_skid; + rx_count = Q_COUNT(Q_SIZE(rx_count, 3)); + + /* Due pipelining impact minimum 2000 unused SQ CQE's + * need to be maintained to avoid CQ overflow, hence the + * minimum 4K size. + */ + tx_count = clamp_t(u32, ring->tx_pending, + Q_COUNT(Q_SIZE_4K), Q_COUNT(Q_SIZE_MAX)); + tx_count = Q_COUNT(Q_SIZE(tx_count, 3)); + + if (tx_count == qs->sqe_cnt && rx_count == qs->rqe_cnt) + return 0; + + if (if_up) + netdev->netdev_ops->ndo_stop(netdev); + + /* Assigned to the nearest possible exponent. */ + qs->sqe_cnt = tx_count; + qs->rqe_cnt = rx_count; + + if (if_up) + return netdev->netdev_ops->ndo_open(netdev); + + return 0; +} + +static int otx2_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *cmd) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + struct otx2_hw *hw = &pfvf->hw; + + cmd->rx_coalesce_usecs = hw->cq_time_wait; + cmd->rx_max_coalesced_frames = hw->cq_ecount_wait; + cmd->tx_coalesce_usecs = hw->cq_time_wait; + cmd->tx_max_coalesced_frames = hw->cq_ecount_wait; + + return 0; +} + +static int otx2_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + struct otx2_hw *hw = &pfvf->hw; + int qidx; + + if (!ec->rx_max_coalesced_frames || !ec->tx_max_coalesced_frames) + return 0; + + /* 'cq_time_wait' is 8bit and is in multiple of 100ns, + * so clamp the user given value to the range of 1 to 25usec. 
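+ * (The stored usec value is multiplied by 10, i.e. converted to 100ns
+ * units, when it is written to NIX_LF_CINTX_WAIT.)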
+ */ + ec->rx_coalesce_usecs = clamp_t(u32, ec->rx_coalesce_usecs, + 1, CQ_TIMER_THRESH_MAX); + ec->tx_coalesce_usecs = clamp_t(u32, ec->tx_coalesce_usecs, + 1, CQ_TIMER_THRESH_MAX); + + /* Rx and Tx are mapped to same CQ, check which one + * is changed, if both then choose the min. + */ + if (hw->cq_time_wait == ec->rx_coalesce_usecs) + hw->cq_time_wait = ec->tx_coalesce_usecs; + else if (hw->cq_time_wait == ec->tx_coalesce_usecs) + hw->cq_time_wait = ec->rx_coalesce_usecs; + else + hw->cq_time_wait = min_t(u8, ec->rx_coalesce_usecs, + ec->tx_coalesce_usecs); + + /* Max ecount_wait supported is 16bit, + * so clamp the user given value to the range of 1 to 64k. + */ + ec->rx_max_coalesced_frames = clamp_t(u32, ec->rx_max_coalesced_frames, + 1, U16_MAX); + ec->tx_max_coalesced_frames = clamp_t(u32, ec->tx_max_coalesced_frames, + 1, U16_MAX); + + /* Rx and Tx are mapped to same CQ, check which one + * is changed, if both then choose the min. + */ + if (hw->cq_ecount_wait == ec->rx_max_coalesced_frames) + hw->cq_ecount_wait = ec->tx_max_coalesced_frames; + else if (hw->cq_ecount_wait == ec->tx_max_coalesced_frames) + hw->cq_ecount_wait = ec->rx_max_coalesced_frames; + else + hw->cq_ecount_wait = min_t(u16, ec->rx_max_coalesced_frames, + ec->tx_max_coalesced_frames); + + if (netif_running(netdev)) { + for (qidx = 0; qidx < pfvf->hw.cint_cnt; qidx++) + otx2_config_irq_coalescing(pfvf, qidx); + } + + return 0; +} + +static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf, + struct ethtool_rxnfc *nfc) +{ + struct otx2_rss_info *rss = &pfvf->hw.rss_info; + + if (!(rss->flowkey_cfg & + (NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6))) + return 0; + + /* Mimimum is IPv4 and IPv6, SIP/DIP */ + nfc->data = RXH_IP_SRC | RXH_IP_DST; + if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_VLAN) + nfc->data |= RXH_VLAN; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_TCP) + nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case UDP_V4_FLOW: + case UDP_V6_FLOW: + if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_UDP) + nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case SCTP_V4_FLOW: + case SCTP_V6_FLOW: + if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_SCTP) + nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + break; + default: + return -EINVAL; + } + return 0; +} + +static int otx2_set_rss_hash_opts(struct otx2_nic *pfvf, + struct ethtool_rxnfc *nfc) +{ + struct otx2_rss_info *rss = &pfvf->hw.rss_info; + u32 rxh_l4 = RXH_L4_B_0_1 | RXH_L4_B_2_3; + u32 rss_cfg = rss->flowkey_cfg; + + if (!rss->enable) { + netdev_err(pfvf->netdev, + "RSS is disabled, cannot change settings\n"); + return -EIO; + } + + /* Mimimum is IPv4 and IPv6, SIP/DIP */ + if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST)) + return -EINVAL; + + if (nfc->data & RXH_VLAN) + rss_cfg |= NIX_FLOW_KEY_TYPE_VLAN; + else + rss_cfg &= ~NIX_FLOW_KEY_TYPE_VLAN; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + /* Different config for v4 and v6 is not supported. + * Both of them have to be either 4-tuple or 2-tuple. 
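+ * A single NIX_FLOW_KEY_TYPE_TCP flag covers both TCP_V4_FLOW and
+ * TCP_V6_FLOW, so the two address families cannot be configured
+ * differently.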
+ */ + switch (nfc->data & rxh_l4) { + case 0: + rss_cfg &= ~NIX_FLOW_KEY_TYPE_TCP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + rss_cfg |= NIX_FLOW_KEY_TYPE_TCP; + break; + default: + return -EINVAL; + } + break; + case UDP_V4_FLOW: + case UDP_V6_FLOW: + switch (nfc->data & rxh_l4) { + case 0: + rss_cfg &= ~NIX_FLOW_KEY_TYPE_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + rss_cfg |= NIX_FLOW_KEY_TYPE_UDP; + break; + default: + return -EINVAL; + } + break; + case SCTP_V4_FLOW: + case SCTP_V6_FLOW: + switch (nfc->data & rxh_l4) { + case 0: + rss_cfg &= ~NIX_FLOW_KEY_TYPE_SCTP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + rss_cfg |= NIX_FLOW_KEY_TYPE_SCTP; + break; + default: + return -EINVAL; + } + break; + case IPV4_FLOW: + case IPV6_FLOW: + rss_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6; + break; + default: + return -EINVAL; + } + + rss->flowkey_cfg = rss_cfg; + otx2_set_flowkey_cfg(pfvf); + return 0; +} + +static int otx2_get_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *nfc, u32 *rules) +{ + struct otx2_nic *pfvf = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (nfc->cmd) { + case ETHTOOL_GRXRINGS: + nfc->data = pfvf->hw.rx_queues; + ret = 0; + break; + case ETHTOOL_GRXFH: + return otx2_get_rss_hash_opts(pfvf, nfc); + default: + break; + } + return ret; +} + +static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc) +{ + struct otx2_nic *pfvf = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (nfc->cmd) { + case ETHTOOL_SRXFH: + ret = otx2_set_rss_hash_opts(pfvf, nfc); + break; + default: + break; + } + + return ret; +} + +static u32 otx2_get_rxfh_key_size(struct net_device *netdev) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + struct otx2_rss_info *rss; + + rss = &pfvf->hw.rss_info; + + return sizeof(rss->key); +} + +static u32 otx2_get_rxfh_indir_size(struct net_device *dev) +{ + struct otx2_nic *pfvf = netdev_priv(dev); + + return pfvf->hw.rss_info.rss_size; +} + +/* Get RSS configuration */ +static int otx2_get_rxfh(struct net_device *dev, u32 *indir, + u8 *hkey, u8 *hfunc) +{ + struct otx2_nic *pfvf = netdev_priv(dev); + struct otx2_rss_info *rss; + int idx; + + rss = &pfvf->hw.rss_info; + + if (indir) { + for (idx = 0; idx < rss->rss_size; idx++) + indir[idx] = rss->ind_tbl[idx]; + } + + if (hkey) + memcpy(hkey, rss->key, sizeof(rss->key)); + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + return 0; +} + +/* Configure RSS table and hash key */ +static int otx2_set_rxfh(struct net_device *dev, const u32 *indir, + const u8 *hkey, const u8 hfunc) +{ + struct otx2_nic *pfvf = netdev_priv(dev); + struct otx2_rss_info *rss; + int idx; + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + rss = &pfvf->hw.rss_info; + + if (!rss->enable) { + netdev_err(dev, "RSS is disabled, cannot change settings\n"); + return -EIO; + } + + if (indir) { + for (idx = 0; idx < rss->rss_size; idx++) + rss->ind_tbl[idx] = indir[idx]; + } + + if (hkey) { + memcpy(rss->key, hkey, sizeof(rss->key)); + otx2_set_rss_key(pfvf); + } + + otx2_set_rss_table(pfvf); + return 0; +} + +static u32 otx2_get_msglevel(struct net_device *netdev) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + + return pfvf->msg_enable; +} + +static void otx2_set_msglevel(struct net_device *netdev, u32 val) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + + pfvf->msg_enable = val; +} + +static u32 otx2_get_link(struct net_device *netdev) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + + /* LBK link is internal and always UP 
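+ * (LBK VFs are identified by is_otx2_lbkvf(), i.e. PCI device id
+ * PCI_DEVID_OCTEONTX2_RVU_AFVF.)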
*/ + if (is_otx2_lbkvf(pfvf->pdev)) + return 1; + return pfvf->linfo.link_up; +} + +static int otx2_get_ts_info(struct net_device *netdev, + struct ethtool_ts_info *info) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + + if (!pfvf->ptp) + return ethtool_op_get_ts_info(netdev, info); + + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + info->phc_index = otx2_ptp_clock_index(pfvf); + + info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); + + info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_ALL); + + return 0; +} + +static const struct ethtool_ops otx2_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES, + .get_link = otx2_get_link, + .get_drvinfo = otx2_get_drvinfo, + .get_strings = otx2_get_strings, + .get_ethtool_stats = otx2_get_ethtool_stats, + .get_sset_count = otx2_get_sset_count, + .set_channels = otx2_set_channels, + .get_channels = otx2_get_channels, + .get_ringparam = otx2_get_ringparam, + .set_ringparam = otx2_set_ringparam, + .get_coalesce = otx2_get_coalesce, + .set_coalesce = otx2_set_coalesce, + .get_rxnfc = otx2_get_rxnfc, + .set_rxnfc = otx2_set_rxnfc, + .get_rxfh_key_size = otx2_get_rxfh_key_size, + .get_rxfh_indir_size = otx2_get_rxfh_indir_size, + .get_rxfh = otx2_get_rxfh, + .set_rxfh = otx2_set_rxfh, + .get_msglevel = otx2_get_msglevel, + .set_msglevel = otx2_set_msglevel, + .get_pauseparam = otx2_get_pauseparam, + .set_pauseparam = otx2_set_pauseparam, + .get_ts_info = otx2_get_ts_info, +}; + +void otx2_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &otx2_ethtool_ops; +} + +/* VF's ethtool APIs */ +static void otx2vf_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) +{ + struct otx2_nic *vf = netdev_priv(netdev); + + strlcpy(info->driver, DRV_VF_NAME, sizeof(info->driver)); + strlcpy(info->bus_info, pci_name(vf->pdev), sizeof(info->bus_info)); +} + +static void otx2vf_get_strings(struct net_device *netdev, u32 sset, u8 *data) +{ + struct otx2_nic *vf = netdev_priv(netdev); + int stats; + + if (sset != ETH_SS_STATS) + return; + + for (stats = 0; stats < otx2_n_dev_stats; stats++) { + memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + + for (stats = 0; stats < otx2_n_drv_stats; stats++) { + memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + + otx2_get_qset_strings(vf, &data, 0); + + strcpy(data, "reset_count"); + data += ETH_GSTRING_LEN; +} + +static void otx2vf_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct otx2_nic *vf = netdev_priv(netdev); + int stat; + + otx2_get_dev_stats(vf); + for (stat = 0; stat < otx2_n_dev_stats; stat++) + *(data++) = ((u64 *)&vf->hw.dev_stats) + [otx2_dev_stats[stat].index]; + + for (stat = 0; stat < otx2_n_drv_stats; stat++) + *(data++) = atomic_read(&((atomic_t *)&vf->hw.drv_stats) + [otx2_drv_stats[stat].index]); + + otx2_get_qset_stats(vf, stats, &data); + *(data++) = vf->reset_count; +} + +static int otx2vf_get_sset_count(struct net_device *netdev, int sset) +{ + struct otx2_nic *vf = netdev_priv(netdev); + int qstats_count; + + if (sset != ETH_SS_STATS) + return -EINVAL; + + qstats_count = otx2_n_queue_stats * + (vf->hw.rx_queues + vf->hw.tx_queues); + + return otx2_n_dev_stats + otx2_n_drv_stats + 
qstats_count + 1; +} + +static const struct ethtool_ops otx2vf_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES, + .get_link = otx2_get_link, + .get_drvinfo = otx2vf_get_drvinfo, + .get_strings = otx2vf_get_strings, + .get_ethtool_stats = otx2vf_get_ethtool_stats, + .get_sset_count = otx2vf_get_sset_count, + .set_channels = otx2_set_channels, + .get_channels = otx2_get_channels, + .get_rxnfc = otx2_get_rxnfc, + .set_rxnfc = otx2_set_rxnfc, + .get_rxfh_key_size = otx2_get_rxfh_key_size, + .get_rxfh_indir_size = otx2_get_rxfh_indir_size, + .get_rxfh = otx2_get_rxfh, + .set_rxfh = otx2_set_rxfh, + .get_ringparam = otx2_get_ringparam, + .set_ringparam = otx2_set_ringparam, + .get_coalesce = otx2_get_coalesce, + .set_coalesce = otx2_set_coalesce, + .get_msglevel = otx2_get_msglevel, + .set_msglevel = otx2_set_msglevel, + .get_pauseparam = otx2_get_pauseparam, + .set_pauseparam = otx2_set_pauseparam, +}; + +void otx2vf_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &otx2vf_ethtool_ops; +} +EXPORT_SYMBOL(otx2vf_set_ethtool_ops); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c new file mode 100644 index 000000000..d6f7a2a58 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -0,0 +1,2363 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Physcial Function ethernet driver + * + * Copyright (C) 2020 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/etherdevice.h> +#include <linux/of.h> +#include <linux/if_vlan.h> +#include <linux/iommu.h> +#include <net/ip.h> + +#include "otx2_reg.h" +#include "otx2_common.h" +#include "otx2_txrx.h" +#include "otx2_struct.h" +#include "otx2_ptp.h" +#include <rvu_trace.h> + +#define DRV_NAME "octeontx2-nicpf" +#define DRV_STRING "Marvell OcteonTX2 NIC Physical Function Driver" + +/* Supported devices */ +static const struct pci_device_id otx2_pf_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_PF) }, + { 0, } /* end of table */ +}; + +MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>"); +MODULE_DESCRIPTION(DRV_STRING); +MODULE_LICENSE("GPL v2"); +MODULE_DEVICE_TABLE(pci, otx2_pf_id_table); + +enum { + TYPE_PFAF, + TYPE_PFVF, +}; + +static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable); +static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable); + +static int otx2_change_mtu(struct net_device *netdev, int new_mtu) +{ + bool if_up = netif_running(netdev); + int err = 0; + + if (if_up) + otx2_stop(netdev); + + netdev_info(netdev, "Changing MTU from %d to %d\n", + netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + if (if_up) + err = otx2_open(netdev); + + return err; +} + +static void otx2_disable_flr_me_intr(struct otx2_nic *pf) +{ + int irq, vfs = pf->total_vfs; + + /* Disable VFs ME interrupts */ + otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs)); + irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0); + free_irq(irq, pf); + + /* Disable VFs FLR interrupts */ + otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs)); + irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0); + free_irq(irq, pf); + + if (vfs <= 64) + return; + + 
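+ /* Each ME/FLR interrupt register pair covers only 64 VFs, so when
+ * more than 64 VFs are in use the second register set (index 1) and
+ * its IRQ vectors have to be disabled and freed as well.
+ */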
otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64)); + irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME1); + free_irq(irq, pf); + + otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64)); + irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR1); + free_irq(irq, pf); +} + +static void otx2_flr_wq_destroy(struct otx2_nic *pf) +{ + if (!pf->flr_wq) + return; + destroy_workqueue(pf->flr_wq); + pf->flr_wq = NULL; + devm_kfree(pf->dev, pf->flr_wrk); +} + +static void otx2_flr_handler(struct work_struct *work) +{ + struct flr_work *flrwork = container_of(work, struct flr_work, work); + struct otx2_nic *pf = flrwork->pf; + struct mbox *mbox = &pf->mbox; + struct msg_req *req; + int vf, reg = 0; + + vf = flrwork - pf->flr_wrk; + + mutex_lock(&mbox->lock); + req = otx2_mbox_alloc_msg_vf_flr(mbox); + if (!req) { + mutex_unlock(&mbox->lock); + return; + } + req->hdr.pcifunc &= RVU_PFVF_FUNC_MASK; + req->hdr.pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK; + + if (!otx2_sync_mbox_msg(&pf->mbox)) { + if (vf >= 64) { + reg = 1; + vf = vf - 64; + } + /* clear transcation pending bit */ + otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf)); + otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf)); + } + + mutex_unlock(&mbox->lock); +} + +static irqreturn_t otx2_pf_flr_intr_handler(int irq, void *pf_irq) +{ + struct otx2_nic *pf = (struct otx2_nic *)pf_irq; + int reg, dev, vf, start_vf, num_reg = 1; + u64 intr; + + if (pf->total_vfs > 64) + num_reg = 2; + + for (reg = 0; reg < num_reg; reg++) { + intr = otx2_read64(pf, RVU_PF_VFFLR_INTX(reg)); + if (!intr) + continue; + start_vf = 64 * reg; + for (vf = 0; vf < 64; vf++) { + if (!(intr & BIT_ULL(vf))) + continue; + dev = vf + start_vf; + queue_work(pf->flr_wq, &pf->flr_wrk[dev].work); + /* Clear interrupt */ + otx2_write64(pf, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf)); + /* Disable the interrupt */ + otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(reg), + BIT_ULL(vf)); + } + } + return IRQ_HANDLED; +} + +static irqreturn_t otx2_pf_me_intr_handler(int irq, void *pf_irq) +{ + struct otx2_nic *pf = (struct otx2_nic *)pf_irq; + int vf, reg, num_reg = 1; + u64 intr; + + if (pf->total_vfs > 64) + num_reg = 2; + + for (reg = 0; reg < num_reg; reg++) { + intr = otx2_read64(pf, RVU_PF_VFME_INTX(reg)); + if (!intr) + continue; + for (vf = 0; vf < 64; vf++) { + if (!(intr & BIT_ULL(vf))) + continue; + /* clear trpend bit */ + otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf)); + /* clear interrupt */ + otx2_write64(pf, RVU_PF_VFME_INTX(reg), BIT_ULL(vf)); + } + } + return IRQ_HANDLED; +} + +static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs) +{ + struct otx2_hw *hw = &pf->hw; + char *irq_name; + int ret; + + /* Register ME interrupt handler*/ + irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME0 * NAME_SIZE]; + snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME0", rvu_get_pf(pf->pcifunc)); + ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0), + otx2_pf_me_intr_handler, 0, irq_name, pf); + if (ret) { + dev_err(pf->dev, + "RVUPF: IRQ registration failed for ME0\n"); + } + + /* Register FLR interrupt handler */ + irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR0 * NAME_SIZE]; + snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR0", rvu_get_pf(pf->pcifunc)); + ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0), + otx2_pf_flr_intr_handler, 0, irq_name, pf); + if (ret) { + dev_err(pf->dev, + "RVUPF: IRQ registration failed for FLR0\n"); + return ret; + } + + if (numvfs > 64) { + irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME1 * 
NAME_SIZE]; + snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME1", + rvu_get_pf(pf->pcifunc)); + ret = request_irq(pci_irq_vector + (pf->pdev, RVU_PF_INT_VEC_VFME1), + otx2_pf_me_intr_handler, 0, irq_name, pf); + if (ret) { + dev_err(pf->dev, + "RVUPF: IRQ registration failed for ME1\n"); + } + irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR1 * NAME_SIZE]; + snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR1", + rvu_get_pf(pf->pcifunc)); + ret = request_irq(pci_irq_vector + (pf->pdev, RVU_PF_INT_VEC_VFFLR1), + otx2_pf_flr_intr_handler, 0, irq_name, pf); + if (ret) { + dev_err(pf->dev, + "RVUPF: IRQ registration failed for FLR1\n"); + return ret; + } + } + + /* Enable ME interrupt for all VFs*/ + otx2_write64(pf, RVU_PF_VFME_INTX(0), INTR_MASK(numvfs)); + otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(numvfs)); + + /* Enable FLR interrupt for all VFs*/ + otx2_write64(pf, RVU_PF_VFFLR_INTX(0), INTR_MASK(numvfs)); + otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(numvfs)); + + if (numvfs > 64) { + numvfs -= 64; + + otx2_write64(pf, RVU_PF_VFME_INTX(1), INTR_MASK(numvfs)); + otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(1), + INTR_MASK(numvfs)); + + otx2_write64(pf, RVU_PF_VFFLR_INTX(1), INTR_MASK(numvfs)); + otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(1), + INTR_MASK(numvfs)); + } + return 0; +} + +static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs) +{ + int vf; + + pf->flr_wq = alloc_workqueue("otx2_pf_flr_wq", + WQ_UNBOUND | WQ_HIGHPRI, 1); + if (!pf->flr_wq) + return -ENOMEM; + + pf->flr_wrk = devm_kcalloc(pf->dev, num_vfs, + sizeof(struct flr_work), GFP_KERNEL); + if (!pf->flr_wrk) { + destroy_workqueue(pf->flr_wq); + return -ENOMEM; + } + + for (vf = 0; vf < num_vfs; vf++) { + pf->flr_wrk[vf].pf = pf; + INIT_WORK(&pf->flr_wrk[vf].work, otx2_flr_handler); + } + + return 0; +} + +static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq, + int first, int mdevs, u64 intr, int type) +{ + struct otx2_mbox_dev *mdev; + struct otx2_mbox *mbox; + struct mbox_hdr *hdr; + int i; + + for (i = first; i < mdevs; i++) { + /* start from 0 */ + if (!(intr & BIT_ULL(i - first))) + continue; + + mbox = &mw->mbox; + mdev = &mbox->dev[i]; + if (type == TYPE_PFAF) + otx2_sync_mbox_bbuf(mbox, i); + hdr = mdev->mbase + mbox->rx_start; + /* The hdr->num_msgs is set to zero immediately in the interrupt + * handler to ensure that it holds a correct value next time + * when the interrupt handler is called. + * pf->mbox.num_msgs holds the data for use in pfaf_mbox_handler + * pf>mbox.up_num_msgs holds the data for use in + * pfaf_mbox_up_handler. 
+ */ + if (hdr->num_msgs) { + mw[i].num_msgs = hdr->num_msgs; + hdr->num_msgs = 0; + if (type == TYPE_PFAF) + memset(mbox->hwbase + mbox->rx_start, 0, + ALIGN(sizeof(struct mbox_hdr), + sizeof(u64))); + + queue_work(mbox_wq, &mw[i].mbox_wrk); + } + + mbox = &mw->mbox_up; + mdev = &mbox->dev[i]; + if (type == TYPE_PFAF) + otx2_sync_mbox_bbuf(mbox, i); + hdr = mdev->mbase + mbox->rx_start; + if (hdr->num_msgs) { + mw[i].up_num_msgs = hdr->num_msgs; + hdr->num_msgs = 0; + if (type == TYPE_PFAF) + memset(mbox->hwbase + mbox->rx_start, 0, + ALIGN(sizeof(struct mbox_hdr), + sizeof(u64))); + + queue_work(mbox_wq, &mw[i].mbox_up_wrk); + } + } +} + +static void otx2_forward_msg_pfvf(struct otx2_mbox_dev *mdev, + struct otx2_mbox *pfvf_mbox, void *bbuf_base, + int devid) +{ + struct otx2_mbox_dev *src_mdev = mdev; + int offset; + + /* Msgs are already copied, trigger VF's mbox irq */ + smp_wmb(); + + offset = pfvf_mbox->trigger | (devid << pfvf_mbox->tr_shift); + writeq(1, (void __iomem *)pfvf_mbox->reg_base + offset); + + /* Restore VF's mbox bounce buffer region address */ + src_mdev->mbase = bbuf_base; +} + +static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf, + struct otx2_mbox *src_mbox, + int dir, int vf, int num_msgs) +{ + struct otx2_mbox_dev *src_mdev, *dst_mdev; + struct mbox_hdr *mbox_hdr; + struct mbox_hdr *req_hdr; + struct mbox *dst_mbox; + int dst_size, err; + + if (dir == MBOX_DIR_PFAF) { + /* Set VF's mailbox memory as PF's bounce buffer memory, so + * that explicit copying of VF's msgs to PF=>AF mbox region + * and AF=>PF responses to VF's mbox region can be avoided. + */ + src_mdev = &src_mbox->dev[vf]; + mbox_hdr = src_mbox->hwbase + + src_mbox->rx_start + (vf * MBOX_SIZE); + + dst_mbox = &pf->mbox; + dst_size = dst_mbox->mbox.tx_size - + ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN); + /* Check if msgs fit into destination area and has valid size */ + if (mbox_hdr->msg_size > dst_size || !mbox_hdr->msg_size) + return -EINVAL; + + dst_mdev = &dst_mbox->mbox.dev[0]; + + mutex_lock(&pf->mbox.lock); + dst_mdev->mbase = src_mdev->mbase; + dst_mdev->msg_size = mbox_hdr->msg_size; + dst_mdev->num_msgs = num_msgs; + err = otx2_sync_mbox_msg(dst_mbox); + /* Error code -EIO indicate there is a communication failure + * to the AF. Rest of the error codes indicate that AF processed + * VF messages and set the error codes in response messages + * (if any) so simply forward responses to VF. + */ + if (err == -EIO) { + dev_warn(pf->dev, + "AF not responding to VF%d messages\n", vf); + /* restore PF mbase and exit */ + dst_mdev->mbase = pf->mbox.bbuf_base; + mutex_unlock(&pf->mbox.lock); + return err; + } + /* At this point, all the VF messages sent to AF are acked + * with proper responses and responses are copied to VF + * mailbox hence raise interrupt to VF. 
+ */ + req_hdr = (struct mbox_hdr *)(dst_mdev->mbase + + dst_mbox->mbox.rx_start); + req_hdr->num_msgs = num_msgs; + + otx2_forward_msg_pfvf(dst_mdev, &pf->mbox_pfvf[0].mbox, + pf->mbox.bbuf_base, vf); + mutex_unlock(&pf->mbox.lock); + } else if (dir == MBOX_DIR_PFVF_UP) { + src_mdev = &src_mbox->dev[0]; + mbox_hdr = src_mbox->hwbase + src_mbox->rx_start; + req_hdr = (struct mbox_hdr *)(src_mdev->mbase + + src_mbox->rx_start); + req_hdr->num_msgs = num_msgs; + + dst_mbox = &pf->mbox_pfvf[0]; + dst_size = dst_mbox->mbox_up.tx_size - + ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN); + /* Check if msgs fit into destination area */ + if (mbox_hdr->msg_size > dst_size) + return -EINVAL; + + dst_mdev = &dst_mbox->mbox_up.dev[vf]; + dst_mdev->mbase = src_mdev->mbase; + dst_mdev->msg_size = mbox_hdr->msg_size; + dst_mdev->num_msgs = mbox_hdr->num_msgs; + err = otx2_sync_mbox_up_msg(dst_mbox, vf); + if (err) { + dev_warn(pf->dev, + "VF%d is not responding to mailbox\n", vf); + return err; + } + } else if (dir == MBOX_DIR_VFPF_UP) { + req_hdr = (struct mbox_hdr *)(src_mbox->dev[0].mbase + + src_mbox->rx_start); + req_hdr->num_msgs = num_msgs; + otx2_forward_msg_pfvf(&pf->mbox_pfvf->mbox_up.dev[vf], + &pf->mbox.mbox_up, + pf->mbox_pfvf[vf].bbuf_base, + 0); + } + + return 0; +} + +static void otx2_pfvf_mbox_handler(struct work_struct *work) +{ + struct mbox_msghdr *msg = NULL; + int offset, vf_idx, id, err; + struct otx2_mbox_dev *mdev; + struct mbox_hdr *req_hdr; + struct otx2_mbox *mbox; + struct mbox *vf_mbox; + struct otx2_nic *pf; + + vf_mbox = container_of(work, struct mbox, mbox_wrk); + pf = vf_mbox->pfvf; + vf_idx = vf_mbox - pf->mbox_pfvf; + + mbox = &pf->mbox_pfvf[0].mbox; + mdev = &mbox->dev[vf_idx]; + req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); + + offset = ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN); + + for (id = 0; id < vf_mbox->num_msgs; id++) { + msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start + + offset); + + if (msg->sig != OTX2_MBOX_REQ_SIG) + goto inval_msg; + + /* Set VF's number in each of the msg */ + msg->pcifunc &= RVU_PFVF_FUNC_MASK; + msg->pcifunc |= (vf_idx + 1) & RVU_PFVF_FUNC_MASK; + offset = msg->next_msgoff; + } + err = otx2_forward_vf_mbox_msgs(pf, mbox, MBOX_DIR_PFAF, vf_idx, + vf_mbox->num_msgs); + if (err) + goto inval_msg; + return; + +inval_msg: + otx2_reply_invalid_msg(mbox, vf_idx, 0, msg->id); + otx2_mbox_msg_send(mbox, vf_idx); +} + +static void otx2_pfvf_mbox_up_handler(struct work_struct *work) +{ + struct mbox *vf_mbox = container_of(work, struct mbox, mbox_up_wrk); + struct otx2_nic *pf = vf_mbox->pfvf; + struct otx2_mbox_dev *mdev; + int offset, id, vf_idx = 0; + struct mbox_hdr *rsp_hdr; + struct mbox_msghdr *msg; + struct otx2_mbox *mbox; + + vf_idx = vf_mbox - pf->mbox_pfvf; + mbox = &pf->mbox_pfvf[0].mbox_up; + mdev = &mbox->dev[vf_idx]; + + rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); + offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); + + for (id = 0; id < vf_mbox->up_num_msgs; id++) { + msg = mdev->mbase + offset; + + if (msg->id >= MBOX_MSG_MAX) { + dev_err(pf->dev, + "Mbox msg with unknown ID 0x%x\n", msg->id); + goto end; + } + + if (msg->sig != OTX2_MBOX_RSP_SIG) { + dev_err(pf->dev, + "Mbox msg with wrong signature %x, ID 0x%x\n", + msg->sig, msg->id); + goto end; + } + + switch (msg->id) { + case MBOX_MSG_CGX_LINK_EVENT: + break; + default: + if (msg->rc) + dev_err(pf->dev, + "Mbox msg response has err %d, ID 0x%x\n", + msg->rc, msg->id); + break; + } + +end: + offset = mbox->rx_start + 
msg->next_msgoff; + if (mdev->msgs_acked == (vf_mbox->up_num_msgs - 1)) + __otx2_mbox_reset(mbox, 0); + mdev->msgs_acked++; + } +} + +static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq) +{ + struct otx2_nic *pf = (struct otx2_nic *)(pf_irq); + int vfs = pf->total_vfs; + struct mbox *mbox; + u64 intr; + + mbox = pf->mbox_pfvf; + /* Handle VF interrupts */ + if (vfs > 64) { + intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(1)); + otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr); + otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr, + TYPE_PFVF); + if (intr) + trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr); + vfs = 64; + } + + intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0)); + otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), intr); + + otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF); + + if (intr) + trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr); + + return IRQ_HANDLED; +} + +static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs) +{ + void __iomem *hwbase; + struct mbox *mbox; + int err, vf; + u64 base; + + if (!numvfs) + return -EINVAL; + + pf->mbox_pfvf = devm_kcalloc(&pf->pdev->dev, numvfs, + sizeof(struct mbox), GFP_KERNEL); + if (!pf->mbox_pfvf) + return -ENOMEM; + + pf->mbox_pfvf_wq = alloc_workqueue("otx2_pfvf_mailbox", + WQ_UNBOUND | WQ_HIGHPRI | + WQ_MEM_RECLAIM, 1); + if (!pf->mbox_pfvf_wq) + return -ENOMEM; + + base = readq((void __iomem *)((u64)pf->reg_base + RVU_PF_VF_BAR4_ADDR)); + hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs); + + if (!hwbase) { + err = -ENOMEM; + goto free_wq; + } + + mbox = &pf->mbox_pfvf[0]; + err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base, + MBOX_DIR_PFVF, numvfs); + if (err) + goto free_iomem; + + err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base, + MBOX_DIR_PFVF_UP, numvfs); + if (err) + goto free_iomem; + + for (vf = 0; vf < numvfs; vf++) { + mbox->pfvf = pf; + INIT_WORK(&mbox->mbox_wrk, otx2_pfvf_mbox_handler); + INIT_WORK(&mbox->mbox_up_wrk, otx2_pfvf_mbox_up_handler); + mbox++; + } + + return 0; + +free_iomem: + if (hwbase) + iounmap(hwbase); +free_wq: + destroy_workqueue(pf->mbox_pfvf_wq); + return err; +} + +static void otx2_pfvf_mbox_destroy(struct otx2_nic *pf) +{ + struct mbox *mbox = &pf->mbox_pfvf[0]; + + if (!mbox) + return; + + if (pf->mbox_pfvf_wq) { + destroy_workqueue(pf->mbox_pfvf_wq); + pf->mbox_pfvf_wq = NULL; + } + + if (mbox->mbox.hwbase) + iounmap(mbox->mbox.hwbase); + + otx2_mbox_destroy(&mbox->mbox); +} + +static void otx2_enable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs) +{ + /* Clear PF <=> VF mailbox IRQ */ + otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull); + otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull); + + /* Enable PF <=> VF mailbox IRQ */ + otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(numvfs)); + if (numvfs > 64) { + numvfs -= 64; + otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1), + INTR_MASK(numvfs)); + } +} + +static void otx2_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs) +{ + int vector; + + /* Disable PF <=> VF mailbox IRQ */ + otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ull); + otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ull); + + otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull); + vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0); + free_irq(vector, pf); + + if (numvfs > 64) { + otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull); + vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1); + free_irq(vector, pf); + } +} + +static int 
otx2_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs) +{ + struct otx2_hw *hw = &pf->hw; + char *irq_name; + int err; + + /* Register MBOX0 interrupt handler */ + irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX0 * NAME_SIZE]; + if (pf->pcifunc) + snprintf(irq_name, NAME_SIZE, + "RVUPF%d_VF Mbox0", rvu_get_pf(pf->pcifunc)); + else + snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox0"); + err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0), + otx2_pfvf_mbox_intr_handler, 0, irq_name, pf); + if (err) { + dev_err(pf->dev, + "RVUPF: IRQ registration failed for PFVF mbox0 irq\n"); + return err; + } + + if (numvfs > 64) { + /* Register MBOX1 interrupt handler */ + irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX1 * NAME_SIZE]; + if (pf->pcifunc) + snprintf(irq_name, NAME_SIZE, + "RVUPF%d_VF Mbox1", rvu_get_pf(pf->pcifunc)); + else + snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox1"); + err = request_irq(pci_irq_vector(pf->pdev, + RVU_PF_INT_VEC_VFPF_MBOX1), + otx2_pfvf_mbox_intr_handler, + 0, irq_name, pf); + if (err) { + dev_err(pf->dev, + "RVUPF: IRQ registration failed for PFVF mbox1 irq\n"); + return err; + } + } + + otx2_enable_pfvf_mbox_intr(pf, numvfs); + + return 0; +} + +static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf, + struct mbox_msghdr *msg) +{ + int devid; + + if (msg->id >= MBOX_MSG_MAX) { + dev_err(pf->dev, + "Mbox msg with unknown ID 0x%x\n", msg->id); + return; + } + + if (msg->sig != OTX2_MBOX_RSP_SIG) { + dev_err(pf->dev, + "Mbox msg with wrong signature %x, ID 0x%x\n", + msg->sig, msg->id); + return; + } + + /* message response heading VF */ + devid = msg->pcifunc & RVU_PFVF_FUNC_MASK; + if (devid) { + struct otx2_vf_config *config = &pf->vf_configs[devid - 1]; + struct delayed_work *dwork; + + switch (msg->id) { + case MBOX_MSG_NIX_LF_START_RX: + config->intf_down = false; + dwork = &config->link_event_work; + schedule_delayed_work(dwork, msecs_to_jiffies(100)); + break; + case MBOX_MSG_NIX_LF_STOP_RX: + config->intf_down = true; + break; + } + + return; + } + + switch (msg->id) { + case MBOX_MSG_READY: + pf->pcifunc = msg->pcifunc; + break; + case MBOX_MSG_MSIX_OFFSET: + mbox_handler_msix_offset(pf, (struct msix_offset_rsp *)msg); + break; + case MBOX_MSG_NPA_LF_ALLOC: + mbox_handler_npa_lf_alloc(pf, (struct npa_lf_alloc_rsp *)msg); + break; + case MBOX_MSG_NIX_LF_ALLOC: + mbox_handler_nix_lf_alloc(pf, (struct nix_lf_alloc_rsp *)msg); + break; + case MBOX_MSG_NIX_TXSCH_ALLOC: + mbox_handler_nix_txsch_alloc(pf, + (struct nix_txsch_alloc_rsp *)msg); + break; + case MBOX_MSG_NIX_BP_ENABLE: + mbox_handler_nix_bp_enable(pf, (struct nix_bp_cfg_rsp *)msg); + break; + case MBOX_MSG_CGX_STATS: + mbox_handler_cgx_stats(pf, (struct cgx_stats_rsp *)msg); + break; + default: + if (msg->rc) + dev_err(pf->dev, + "Mbox msg response has err %d, ID 0x%x\n", + msg->rc, msg->id); + break; + } +} + +static void otx2_pfaf_mbox_handler(struct work_struct *work) +{ + struct otx2_mbox_dev *mdev; + struct mbox_hdr *rsp_hdr; + struct mbox_msghdr *msg; + struct otx2_mbox *mbox; + struct mbox *af_mbox; + struct otx2_nic *pf; + int offset, id; + + af_mbox = container_of(work, struct mbox, mbox_wrk); + mbox = &af_mbox->mbox; + mdev = &mbox->dev[0]; + rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); + + offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); + pf = af_mbox->pfvf; + + for (id = 0; id < af_mbox->num_msgs; id++) { + msg = (struct mbox_msghdr *)(mdev->mbase + offset); + otx2_process_pfaf_mbox_msg(pf, msg); + offset = mbox->rx_start + 
msg->next_msgoff; + if (mdev->msgs_acked == (af_mbox->num_msgs - 1)) + __otx2_mbox_reset(mbox, 0); + mdev->msgs_acked++; + } + +} + +static void otx2_handle_link_event(struct otx2_nic *pf) +{ + struct cgx_link_user_info *linfo = &pf->linfo; + struct net_device *netdev = pf->netdev; + + pr_info("%s NIC Link is %s %d Mbps %s duplex\n", netdev->name, + linfo->link_up ? "UP" : "DOWN", linfo->speed, + linfo->full_duplex ? "Full" : "Half"); + if (linfo->link_up) { + netif_carrier_on(netdev); + netif_tx_start_all_queues(netdev); + } else { + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); + } +} + +int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf, + struct cgx_link_info_msg *msg, + struct msg_rsp *rsp) +{ + int i; + + /* Copy the link info sent by AF */ + pf->linfo = msg->link_info; + + /* notify VFs about link event */ + for (i = 0; i < pci_num_vf(pf->pdev); i++) { + struct otx2_vf_config *config = &pf->vf_configs[i]; + struct delayed_work *dwork = &config->link_event_work; + + if (config->intf_down) + continue; + + schedule_delayed_work(dwork, msecs_to_jiffies(100)); + } + + /* interface has not been fully configured yet */ + if (pf->flags & OTX2_FLAG_INTF_DOWN) + return 0; + + otx2_handle_link_event(pf); + return 0; +} + +static int otx2_process_mbox_msg_up(struct otx2_nic *pf, + struct mbox_msghdr *req) +{ + /* Check if valid, if not reply with a invalid msg */ + if (req->sig != OTX2_MBOX_REQ_SIG) { + otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id); + return -ENODEV; + } + + switch (req->id) { +#define M(_name, _id, _fn_name, _req_type, _rsp_type) \ + case _id: { \ + struct _rsp_type *rsp; \ + int err; \ + \ + rsp = (struct _rsp_type *)otx2_mbox_alloc_msg( \ + &pf->mbox.mbox_up, 0, \ + sizeof(struct _rsp_type)); \ + if (!rsp) \ + return -ENOMEM; \ + \ + rsp->hdr.id = _id; \ + rsp->hdr.sig = OTX2_MBOX_RSP_SIG; \ + rsp->hdr.pcifunc = 0; \ + rsp->hdr.rc = 0; \ + \ + err = otx2_mbox_up_handler_ ## _fn_name( \ + pf, (struct _req_type *)req, rsp); \ + return err; \ + } +MBOX_UP_CGX_MESSAGES +#undef M + break; + default: + otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id); + return -ENODEV; + } + return 0; +} + +static void otx2_pfaf_mbox_up_handler(struct work_struct *work) +{ + struct mbox *af_mbox = container_of(work, struct mbox, mbox_up_wrk); + struct otx2_mbox *mbox = &af_mbox->mbox_up; + struct otx2_mbox_dev *mdev = &mbox->dev[0]; + struct otx2_nic *pf = af_mbox->pfvf; + int offset, id, devid = 0; + struct mbox_hdr *rsp_hdr; + struct mbox_msghdr *msg; + + rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); + + offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); + + for (id = 0; id < af_mbox->up_num_msgs; id++) { + msg = (struct mbox_msghdr *)(mdev->mbase + offset); + + devid = msg->pcifunc & RVU_PFVF_FUNC_MASK; + /* Skip processing VF's messages */ + if (!devid) + otx2_process_mbox_msg_up(pf, msg); + offset = mbox->rx_start + msg->next_msgoff; + } + if (devid) { + otx2_forward_vf_mbox_msgs(pf, &pf->mbox.mbox_up, + MBOX_DIR_PFVF_UP, devid - 1, + af_mbox->up_num_msgs); + return; + } + + otx2_mbox_msg_send(mbox, 0); +} + +static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq) +{ + struct otx2_nic *pf = (struct otx2_nic *)pf_irq; + struct mbox *mbox; + + /* Clear the IRQ */ + otx2_write64(pf, RVU_PF_INT, BIT_ULL(0)); + + mbox = &pf->mbox; + + trace_otx2_msg_interrupt(mbox->mbox.pdev, "AF to PF", BIT_ULL(0)); + + otx2_queue_work(mbox, pf->mbox_wq, 0, 1, 1, TYPE_PFAF); + + return IRQ_HANDLED; +} + +static void 
otx2_disable_mbox_intr(struct otx2_nic *pf) +{ + int vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX); + + /* Disable AF => PF mailbox IRQ */ + otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0)); + free_irq(vector, pf); +} + +static int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af) +{ + struct otx2_hw *hw = &pf->hw; + struct msg_req *req; + char *irq_name; + int err; + + /* Register mailbox interrupt handler */ + irq_name = &hw->irq_name[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE]; + snprintf(irq_name, NAME_SIZE, "RVUPFAF Mbox"); + err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX), + otx2_pfaf_mbox_intr_handler, 0, irq_name, pf); + if (err) { + dev_err(pf->dev, + "RVUPF: IRQ registration failed for PFAF mbox irq\n"); + return err; + } + + /* Enable mailbox interrupt for msgs coming from AF. + * First clear to avoid spurious interrupts, if any. + */ + otx2_write64(pf, RVU_PF_INT, BIT_ULL(0)); + otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0)); + + if (!probe_af) + return 0; + + /* Check mailbox communication with AF */ + req = otx2_mbox_alloc_msg_ready(&pf->mbox); + if (!req) { + otx2_disable_mbox_intr(pf); + return -ENOMEM; + } + err = otx2_sync_mbox_msg(&pf->mbox); + if (err) { + dev_warn(pf->dev, + "AF not responding to mailbox, deferring probe\n"); + otx2_disable_mbox_intr(pf); + return -EPROBE_DEFER; + } + + return 0; +} + +static void otx2_pfaf_mbox_destroy(struct otx2_nic *pf) +{ + struct mbox *mbox = &pf->mbox; + + if (pf->mbox_wq) { + destroy_workqueue(pf->mbox_wq); + pf->mbox_wq = NULL; + } + + if (mbox->mbox.hwbase) + iounmap((void __iomem *)mbox->mbox.hwbase); + + otx2_mbox_destroy(&mbox->mbox); + otx2_mbox_destroy(&mbox->mbox_up); +} + +static int otx2_pfaf_mbox_init(struct otx2_nic *pf) +{ + struct mbox *mbox = &pf->mbox; + void __iomem *hwbase; + int err; + + mbox->pfvf = pf; + pf->mbox_wq = alloc_workqueue("otx2_pfaf_mailbox", + WQ_UNBOUND | WQ_HIGHPRI | + WQ_MEM_RECLAIM, 1); + if (!pf->mbox_wq) + return -ENOMEM; + + /* Mailbox is a reserved memory (in RAM) region shared between + * admin function (i.e AF) and this PF, shouldn't be mapped as + * device memory to allow unaligned accesses. 
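+ * That is why ioremap_wc() is used below rather than a plain
+ * ioremap(), which would create a device-type mapping on which
+ * unaligned accesses can fault.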
+ */ + hwbase = ioremap_wc(pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM), + pci_resource_len(pf->pdev, PCI_MBOX_BAR_NUM)); + if (!hwbase) { + dev_err(pf->dev, "Unable to map PFAF mailbox region\n"); + err = -ENOMEM; + goto exit; + } + + err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base, + MBOX_DIR_PFAF, 1); + if (err) + goto exit; + + err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base, + MBOX_DIR_PFAF_UP, 1); + if (err) + goto exit; + + err = otx2_mbox_bbuf_init(mbox, pf->pdev); + if (err) + goto exit; + + INIT_WORK(&mbox->mbox_wrk, otx2_pfaf_mbox_handler); + INIT_WORK(&mbox->mbox_up_wrk, otx2_pfaf_mbox_up_handler); + mutex_init(&mbox->lock); + + return 0; +exit: + otx2_pfaf_mbox_destroy(pf); + return err; +} + +static int otx2_cgx_config_linkevents(struct otx2_nic *pf, bool enable) +{ + struct msg_req *msg; + int err; + + mutex_lock(&pf->mbox.lock); + if (enable) + msg = otx2_mbox_alloc_msg_cgx_start_linkevents(&pf->mbox); + else + msg = otx2_mbox_alloc_msg_cgx_stop_linkevents(&pf->mbox); + + if (!msg) { + mutex_unlock(&pf->mbox.lock); + return -ENOMEM; + } + + err = otx2_sync_mbox_msg(&pf->mbox); + mutex_unlock(&pf->mbox.lock); + return err; +} + +static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable) +{ + struct msg_req *msg; + int err; + + mutex_lock(&pf->mbox.lock); + if (enable) + msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox); + else + msg = otx2_mbox_alloc_msg_cgx_intlbk_disable(&pf->mbox); + + if (!msg) { + mutex_unlock(&pf->mbox.lock); + return -ENOMEM; + } + + err = otx2_sync_mbox_msg(&pf->mbox); + mutex_unlock(&pf->mbox.lock); + return err; +} + +int otx2_set_real_num_queues(struct net_device *netdev, + int tx_queues, int rx_queues) +{ + int err; + + err = netif_set_real_num_tx_queues(netdev, tx_queues); + if (err) { + netdev_err(netdev, + "Failed to set no of Tx queues: %d\n", tx_queues); + return err; + } + + err = netif_set_real_num_rx_queues(netdev, rx_queues); + if (err) + netdev_err(netdev, + "Failed to set no of Rx queues: %d\n", rx_queues); + return err; +} +EXPORT_SYMBOL(otx2_set_real_num_queues); + +static irqreturn_t otx2_q_intr_handler(int irq, void *data) +{ + struct otx2_nic *pf = data; + u64 val, *ptr; + u64 qidx = 0; + + /* CQ */ + for (qidx = 0; qidx < pf->qset.cq_cnt; qidx++) { + ptr = otx2_get_regaddr(pf, NIX_LF_CQ_OP_INT); + val = otx2_atomic64_add((qidx << 44), ptr); + + otx2_write64(pf, NIX_LF_CQ_OP_INT, (qidx << 44) | + (val & NIX_CQERRINT_BITS)); + if (!(val & (NIX_CQERRINT_BITS | BIT_ULL(42)))) + continue; + + if (val & BIT_ULL(42)) { + netdev_err(pf->netdev, "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n", + qidx, otx2_read64(pf, NIX_LF_ERR_INT)); + } else { + if (val & BIT_ULL(NIX_CQERRINT_DOOR_ERR)) + netdev_err(pf->netdev, "CQ%lld: Doorbell error", + qidx); + if (val & BIT_ULL(NIX_CQERRINT_CQE_FAULT)) + netdev_err(pf->netdev, "CQ%lld: Memory fault on CQE write to LLC/DRAM", + qidx); + } + + schedule_work(&pf->reset_task); + } + + /* SQ */ + for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) { + ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT); + val = otx2_atomic64_add((qidx << 44), ptr); + otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) | + (val & NIX_SQINT_BITS)); + + if (!(val & (NIX_SQINT_BITS | BIT_ULL(42)))) + continue; + + if (val & BIT_ULL(42)) { + netdev_err(pf->netdev, "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n", + qidx, otx2_read64(pf, NIX_LF_ERR_INT)); + } else { + if (val & BIT_ULL(NIX_SQINT_LMT_ERR)) { + netdev_err(pf->netdev, "SQ%lld: LMT store 
error NIX_LF_SQ_OP_ERR_DBG:0x%llx", + qidx, + otx2_read64(pf, + NIX_LF_SQ_OP_ERR_DBG)); + otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG, + BIT_ULL(44)); + } + if (val & BIT_ULL(NIX_SQINT_MNQ_ERR)) { + netdev_err(pf->netdev, "SQ%lld: Meta-descriptor enqueue error NIX_LF_MNQ_ERR_DGB:0x%llx\n", + qidx, + otx2_read64(pf, NIX_LF_MNQ_ERR_DBG)); + otx2_write64(pf, NIX_LF_MNQ_ERR_DBG, + BIT_ULL(44)); + } + if (val & BIT_ULL(NIX_SQINT_SEND_ERR)) { + netdev_err(pf->netdev, "SQ%lld: Send error, NIX_LF_SEND_ERR_DBG 0x%llx", + qidx, + otx2_read64(pf, + NIX_LF_SEND_ERR_DBG)); + otx2_write64(pf, NIX_LF_SEND_ERR_DBG, + BIT_ULL(44)); + } + if (val & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL)) + netdev_err(pf->netdev, "SQ%lld: SQB allocation failed", + qidx); + } + + schedule_work(&pf->reset_task); + } + + return IRQ_HANDLED; +} + +static irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq) +{ + struct otx2_cq_poll *cq_poll = (struct otx2_cq_poll *)cq_irq; + struct otx2_nic *pf = (struct otx2_nic *)cq_poll->dev; + int qidx = cq_poll->cint_idx; + + /* Disable interrupts. + * + * Completion interrupts behave in a level-triggered interrupt + * fashion, and hence have to be cleared only after it is serviced. + */ + otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0)); + + /* Schedule NAPI */ + napi_schedule_irqoff(&cq_poll->napi); + + return IRQ_HANDLED; +} + +static void otx2_disable_napi(struct otx2_nic *pf) +{ + struct otx2_qset *qset = &pf->qset; + struct otx2_cq_poll *cq_poll; + int qidx; + + for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) { + cq_poll = &qset->napi[qidx]; + napi_disable(&cq_poll->napi); + netif_napi_del(&cq_poll->napi); + } +} + +static void otx2_free_cq_res(struct otx2_nic *pf) +{ + struct otx2_qset *qset = &pf->qset; + struct otx2_cq_queue *cq; + int qidx; + + /* Disable CQs */ + otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_CQ, false); + for (qidx = 0; qidx < qset->cq_cnt; qidx++) { + cq = &qset->cq[qidx]; + qmem_free(pf->dev, cq->cqe); + } +} + +static void otx2_free_sq_res(struct otx2_nic *pf) +{ + struct otx2_qset *qset = &pf->qset; + struct otx2_snd_queue *sq; + int qidx; + + /* Disable SQs */ + otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false); + /* Free SQB pointers */ + otx2_sq_free_sqbs(pf); + for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) { + sq = &qset->sq[qidx]; + qmem_free(pf->dev, sq->sqe); + qmem_free(pf->dev, sq->tso_hdrs); + kfree(sq->sg); + kfree(sq->sqb_ptrs); + } +} + +static int otx2_init_hw_resources(struct otx2_nic *pf) +{ + struct mbox *mbox = &pf->mbox; + struct otx2_hw *hw = &pf->hw; + struct msg_req *req; + int err = 0, lvl; + + /* Set required NPA LF's pool counts + * Auras and Pools are used in a 1:1 mapping, + * so, aura count = pool count. 
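+ * One pool/aura pair backs the receive buffers of each RQ and one
+ * backs the SQBs of each SQ, hence pool_cnt below is simply
+ * rx_queues + tx_queues.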
+ */ + hw->rqpool_cnt = hw->rx_queues; + hw->sqpool_cnt = hw->tx_queues; + hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt; + + /* Get the size of receive buffers to allocate */ + pf->rbsize = RCV_FRAG_LEN(OTX2_HW_TIMESTAMP_LEN + pf->netdev->mtu + + OTX2_ETH_HLEN); + + mutex_lock(&mbox->lock); + /* NPA init */ + err = otx2_config_npa(pf); + if (err) + goto exit; + + /* NIX init */ + err = otx2_config_nix(pf); + if (err) + goto err_free_npa_lf; + + /* Enable backpressure for CGX mapped PF/VFs */ + if (!is_otx2_lbkvf(pf->pdev)) + otx2_nix_config_bp(pf, true); + + /* Init Auras and pools used by NIX RQ, for free buffer ptrs */ + err = otx2_rq_aura_pool_init(pf); + if (err) { + mutex_unlock(&mbox->lock); + goto err_free_nix_lf; + } + /* Init Auras and pools used by NIX SQ, for queueing SQEs */ + err = otx2_sq_aura_pool_init(pf); + if (err) { + mutex_unlock(&mbox->lock); + goto err_free_rq_ptrs; + } + + err = otx2_txsch_alloc(pf); + if (err) { + mutex_unlock(&mbox->lock); + goto err_free_sq_ptrs; + } + + err = otx2_config_nix_queues(pf); + if (err) { + mutex_unlock(&mbox->lock); + goto err_free_txsch; + } + for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { + err = otx2_txschq_config(pf, lvl); + if (err) { + mutex_unlock(&mbox->lock); + goto err_free_nix_queues; + } + } + mutex_unlock(&mbox->lock); + return err; + +err_free_nix_queues: + otx2_free_sq_res(pf); + otx2_free_cq_res(pf); + otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false); +err_free_txsch: + if (otx2_txschq_stop(pf)) + dev_err(pf->dev, "%s failed to stop TX schedulers\n", __func__); +err_free_sq_ptrs: + otx2_sq_free_sqbs(pf); +err_free_rq_ptrs: + otx2_free_aura_ptr(pf, AURA_NIX_RQ); + otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true); + otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true); + otx2_aura_pool_free(pf); +err_free_nix_lf: + mutex_lock(&mbox->lock); + req = otx2_mbox_alloc_msg_nix_lf_free(mbox); + if (req) { + if (otx2_sync_mbox_msg(mbox)) + dev_err(pf->dev, "%s failed to free nixlf\n", __func__); + } +err_free_npa_lf: + /* Reset NPA LF */ + req = otx2_mbox_alloc_msg_npa_lf_free(mbox); + if (req) { + if (otx2_sync_mbox_msg(mbox)) + dev_err(pf->dev, "%s failed to free npalf\n", __func__); + } +exit: + mutex_unlock(&mbox->lock); + return err; +} + +static void otx2_free_hw_resources(struct otx2_nic *pf) +{ + struct otx2_qset *qset = &pf->qset; + struct mbox *mbox = &pf->mbox; + struct otx2_cq_queue *cq; + struct msg_req *req; + int qidx, err; + + /* Ensure all SQE are processed */ + otx2_sqb_flush(pf); + + /* Stop transmission */ + err = otx2_txschq_stop(pf); + if (err) + dev_err(pf->dev, "RVUPF: Failed to stop/free TX schedulers\n"); + + mutex_lock(&mbox->lock); + /* Disable backpressure */ + if (!(pf->pcifunc & RVU_PFVF_FUNC_MASK)) + otx2_nix_config_bp(pf, false); + mutex_unlock(&mbox->lock); + + /* Disable RQs */ + otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false); + + /*Dequeue all CQEs */ + for (qidx = 0; qidx < qset->cq_cnt; qidx++) { + cq = &qset->cq[qidx]; + if (cq->cq_type == CQ_RX) + otx2_cleanup_rx_cqes(pf, cq); + else + otx2_cleanup_tx_cqes(pf, cq); + } + + otx2_free_sq_res(pf); + + /* Free RQ buffer pointers*/ + otx2_free_aura_ptr(pf, AURA_NIX_RQ); + + otx2_free_cq_res(pf); + + mutex_lock(&mbox->lock); + /* Reset NIX LF */ + req = otx2_mbox_alloc_msg_nix_lf_free(mbox); + if (req) { + if (otx2_sync_mbox_msg(mbox)) + dev_err(pf->dev, "%s failed to free nixlf\n", __func__); + } + mutex_unlock(&mbox->lock); + + /* Disable NPA Pool and Aura hw context */ + otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true); + otx2_ctx_disable(mbox, 
NPA_AQ_CTYPE_AURA, true); + otx2_aura_pool_free(pf); + + mutex_lock(&mbox->lock); + /* Reset NPA LF */ + req = otx2_mbox_alloc_msg_npa_lf_free(mbox); + if (req) { + if (otx2_sync_mbox_msg(mbox)) + dev_err(pf->dev, "%s failed to free npalf\n", __func__); + } + mutex_unlock(&mbox->lock); +} + +int otx2_open(struct net_device *netdev) +{ + struct otx2_nic *pf = netdev_priv(netdev); + struct otx2_cq_poll *cq_poll = NULL; + struct otx2_qset *qset = &pf->qset; + int err = 0, qidx, vec; + char *irq_name; + + netif_carrier_off(netdev); + + pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tx_queues; + /* RQ and SQs are mapped to different CQs, + * so find out max CQ IRQs (i.e CINTs) needed. + */ + pf->hw.cint_cnt = max(pf->hw.rx_queues, pf->hw.tx_queues); + qset->napi = kcalloc(pf->hw.cint_cnt, sizeof(*cq_poll), GFP_KERNEL); + if (!qset->napi) + return -ENOMEM; + + /* CQ size of RQ */ + qset->rqe_cnt = qset->rqe_cnt ? qset->rqe_cnt : Q_COUNT(Q_SIZE_256); + /* CQ size of SQ */ + qset->sqe_cnt = qset->sqe_cnt ? qset->sqe_cnt : Q_COUNT(Q_SIZE_4K); + + err = -ENOMEM; + qset->cq = kcalloc(pf->qset.cq_cnt, + sizeof(struct otx2_cq_queue), GFP_KERNEL); + if (!qset->cq) + goto err_free_mem; + + qset->sq = kcalloc(pf->hw.tx_queues, + sizeof(struct otx2_snd_queue), GFP_KERNEL); + if (!qset->sq) + goto err_free_mem; + + qset->rq = kcalloc(pf->hw.rx_queues, + sizeof(struct otx2_rcv_queue), GFP_KERNEL); + if (!qset->rq) + goto err_free_mem; + + err = otx2_init_hw_resources(pf); + if (err) + goto err_free_mem; + + /* Register NAPI handler */ + for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) { + cq_poll = &qset->napi[qidx]; + cq_poll->cint_idx = qidx; + /* RQ0 & SQ0 are mapped to CINT0 and so on.. + * 'cq_ids[0]' points to RQ's CQ and + * 'cq_ids[1]' points to SQ's CQ and + */ + cq_poll->cq_ids[CQ_RX] = + (qidx < pf->hw.rx_queues) ? qidx : CINT_INVALID_CQ; + cq_poll->cq_ids[CQ_TX] = (qidx < pf->hw.tx_queues) ? 
+ qidx + pf->hw.rx_queues : CINT_INVALID_CQ; + cq_poll->dev = (void *)pf; + netif_napi_add(netdev, &cq_poll->napi, + otx2_napi_handler, NAPI_POLL_WEIGHT); + napi_enable(&cq_poll->napi); + } + + /* Set maximum frame size allowed in HW */ + err = otx2_hw_set_mtu(pf, netdev->mtu); + if (err) + goto err_disable_napi; + + /* Setup segmentation algorithms, if failed, clear offload capability */ + otx2_setup_segmentation(pf); + + /* Initialize RSS */ + err = otx2_rss_init(pf); + if (err) + goto err_disable_napi; + + /* Register Queue IRQ handlers */ + vec = pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START; + irq_name = &pf->hw.irq_name[vec * NAME_SIZE]; + + snprintf(irq_name, NAME_SIZE, "%s-qerr", pf->netdev->name); + + err = request_irq(pci_irq_vector(pf->pdev, vec), + otx2_q_intr_handler, 0, irq_name, pf); + if (err) { + dev_err(pf->dev, + "RVUPF%d: IRQ registration failed for QERR\n", + rvu_get_pf(pf->pcifunc)); + goto err_disable_napi; + } + + /* Enable QINT IRQ */ + otx2_write64(pf, NIX_LF_QINTX_ENA_W1S(0), BIT_ULL(0)); + + /* Register CQ IRQ handlers */ + vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START; + for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) { + irq_name = &pf->hw.irq_name[vec * NAME_SIZE]; + + snprintf(irq_name, NAME_SIZE, "%s-rxtx-%d", pf->netdev->name, + qidx); + + err = request_irq(pci_irq_vector(pf->pdev, vec), + otx2_cq_intr_handler, 0, irq_name, + &qset->napi[qidx]); + if (err) { + dev_err(pf->dev, + "RVUPF%d: IRQ registration failed for CQ%d\n", + rvu_get_pf(pf->pcifunc), qidx); + goto err_free_cints; + } + vec++; + + otx2_config_irq_coalescing(pf, qidx); + + /* Enable CQ IRQ */ + otx2_write64(pf, NIX_LF_CINTX_INT(qidx), BIT_ULL(0)); + otx2_write64(pf, NIX_LF_CINTX_ENA_W1S(qidx), BIT_ULL(0)); + } + + otx2_set_cints_affinity(pf); + + /* When reinitializing enable time stamping if it is enabled before */ + if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED) { + pf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED; + otx2_config_hw_tx_tstamp(pf, true); + } + if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED) { + pf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED; + otx2_config_hw_rx_tstamp(pf, true); + } + + pf->flags &= ~OTX2_FLAG_INTF_DOWN; + /* 'intf_down' may be checked on any cpu */ + smp_wmb(); + + /* we have already received link status notification */ + if (pf->linfo.link_up && !(pf->pcifunc & RVU_PFVF_FUNC_MASK)) + otx2_handle_link_event(pf); + + /* Restore pause frame settings */ + otx2_config_pause_frm(pf); + + err = otx2_rxtx_enable(pf, true); + /* If a mbox communication error happens at this point then interface + * will end up in a state such that it is in down state but hardware + * mcam entries are enabled to receive the packets. Hence disable the + * packet I/O. 
+ */ + if (err == EIO) + goto err_disable_rxtx; + else if (err) + goto err_tx_stop_queues; + + return 0; + +err_disable_rxtx: + otx2_rxtx_enable(pf, false); +err_tx_stop_queues: + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); + pf->flags |= OTX2_FLAG_INTF_DOWN; +err_free_cints: + otx2_free_cints(pf, qidx); + vec = pci_irq_vector(pf->pdev, + pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START); + otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0)); + synchronize_irq(vec); + free_irq(vec, pf); +err_disable_napi: + otx2_disable_napi(pf); + otx2_free_hw_resources(pf); +err_free_mem: + kfree(qset->sq); + kfree(qset->cq); + kfree(qset->rq); + kfree(qset->napi); + return err; +} +EXPORT_SYMBOL(otx2_open); + +int otx2_stop(struct net_device *netdev) +{ + struct otx2_nic *pf = netdev_priv(netdev); + struct otx2_cq_poll *cq_poll = NULL; + struct otx2_qset *qset = &pf->qset; + struct otx2_rss_info *rss; + int qidx, vec, wrk; + + /* If the DOWN flag is set resources are already freed */ + if (pf->flags & OTX2_FLAG_INTF_DOWN) + return 0; + + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + + pf->flags |= OTX2_FLAG_INTF_DOWN; + /* 'intf_down' may be checked on any cpu */ + smp_wmb(); + + /* First stop packet Rx/Tx */ + otx2_rxtx_enable(pf, false); + + /* Clear RSS enable flag */ + rss = &pf->hw.rss_info; + rss->enable = false; + + /* Cleanup Queue IRQ */ + vec = pci_irq_vector(pf->pdev, + pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START); + otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0)); + synchronize_irq(vec); + free_irq(vec, pf); + + /* Cleanup CQ NAPI and IRQ */ + vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START; + for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) { + /* Disable interrupt */ + otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0)); + + synchronize_irq(pci_irq_vector(pf->pdev, vec)); + + cq_poll = &qset->napi[qidx]; + napi_synchronize(&cq_poll->napi); + vec++; + } + + netif_tx_disable(netdev); + + otx2_free_hw_resources(pf); + otx2_free_cints(pf, pf->hw.cint_cnt); + otx2_disable_napi(pf); + + for (qidx = 0; qidx < netdev->num_tx_queues; qidx++) + netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx)); + + for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++) + cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work); + devm_kfree(pf->dev, pf->refill_wrk); + + kfree(qset->sq); + kfree(qset->cq); + kfree(qset->rq); + kfree(qset->napi); + /* Do not clear RQ/SQ ringsize settings */ + memset((void *)qset + offsetof(struct otx2_qset, sqe_cnt), 0, + sizeof(*qset) - offsetof(struct otx2_qset, sqe_cnt)); + return 0; +} +EXPORT_SYMBOL(otx2_stop); + +static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct otx2_nic *pf = netdev_priv(netdev); + int qidx = skb_get_queue_mapping(skb); + struct otx2_snd_queue *sq; + struct netdev_queue *txq; + + /* Check for minimum and maximum packet length */ + if (skb->len <= ETH_HLEN || + (!skb_shinfo(skb)->gso_size && skb->len > pf->max_frs)) { + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + + sq = &pf->qset.sq[qidx]; + txq = netdev_get_tx_queue(netdev, qidx); + + if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) { + netif_tx_stop_queue(txq); + + /* Check again, incase SQBs got freed up */ + smp_mb(); + if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb) + > sq->sqe_thresh) + netif_tx_wake_queue(txq); + + return NETDEV_TX_BUSY; + } + + return NETDEV_TX_OK; +} + +static void otx2_set_rx_mode(struct net_device *netdev) +{ + struct otx2_nic *pf = netdev_priv(netdev); + + queue_work(pf->otx2_wq, 
&pf->rx_mode_work); +} + +static void otx2_do_set_rx_mode(struct work_struct *work) +{ + struct otx2_nic *pf = container_of(work, struct otx2_nic, rx_mode_work); + struct net_device *netdev = pf->netdev; + struct nix_rx_mode *req; + + if (!(netdev->flags & IFF_UP)) + return; + + mutex_lock(&pf->mbox.lock); + req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox); + if (!req) { + mutex_unlock(&pf->mbox.lock); + return; + } + + req->mode = NIX_RX_MODE_UCAST; + + /* We don't support MAC address filtering yet */ + if (netdev->flags & IFF_PROMISC) + req->mode |= NIX_RX_MODE_PROMISC; + else if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST)) + req->mode |= NIX_RX_MODE_ALLMULTI; + + otx2_sync_mbox_msg(&pf->mbox); + mutex_unlock(&pf->mbox.lock); +} + +static int otx2_set_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t changed = features ^ netdev->features; + struct otx2_nic *pf = netdev_priv(netdev); + + if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev)) + return otx2_cgx_config_loopback(pf, + features & NETIF_F_LOOPBACK); + return 0; +} + +static void otx2_reset_task(struct work_struct *work) +{ + struct otx2_nic *pf = container_of(work, struct otx2_nic, reset_task); + + if (!netif_running(pf->netdev)) + return; + + rtnl_lock(); + otx2_stop(pf->netdev); + pf->reset_count++; + otx2_open(pf->netdev); + netif_trans_update(pf->netdev); + rtnl_unlock(); +} + +static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable) +{ + struct msg_req *req; + int err; + + if (pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED && enable) + return 0; + + mutex_lock(&pfvf->mbox.lock); + if (enable) + req = otx2_mbox_alloc_msg_cgx_ptp_rx_enable(&pfvf->mbox); + else + req = otx2_mbox_alloc_msg_cgx_ptp_rx_disable(&pfvf->mbox); + if (!req) { + mutex_unlock(&pfvf->mbox.lock); + return -ENOMEM; + } + + err = otx2_sync_mbox_msg(&pfvf->mbox); + if (err) { + mutex_unlock(&pfvf->mbox.lock); + return err; + } + + mutex_unlock(&pfvf->mbox.lock); + if (enable) + pfvf->flags |= OTX2_FLAG_RX_TSTAMP_ENABLED; + else + pfvf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED; + return 0; +} + +static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable) +{ + struct msg_req *req; + int err; + + if (pfvf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED && enable) + return 0; + + mutex_lock(&pfvf->mbox.lock); + if (enable) + req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_enable(&pfvf->mbox); + else + req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_disable(&pfvf->mbox); + if (!req) { + mutex_unlock(&pfvf->mbox.lock); + return -ENOMEM; + } + + err = otx2_sync_mbox_msg(&pfvf->mbox); + if (err) { + mutex_unlock(&pfvf->mbox.lock); + return err; + } + + mutex_unlock(&pfvf->mbox.lock); + if (enable) + pfvf->flags |= OTX2_FLAG_TX_TSTAMP_ENABLED; + else + pfvf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED; + return 0; +} + +static int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + struct hwtstamp_config config; + + if (!pfvf->ptp) + return -ENODEV; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + /* reserved for future extensions */ + if (config.flags) + return -EINVAL; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + otx2_config_hw_tx_tstamp(pfvf, false); + break; + case HWTSTAMP_TX_ON: + otx2_config_hw_tx_tstamp(pfvf, true); + break; + default: + return -ERANGE; + } + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + otx2_config_hw_rx_tstamp(pfvf, false); + break; + case HWTSTAMP_FILTER_ALL: + case 
HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ otx2_config_hw_rx_tstamp(pfvf, true);
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ memcpy(&pfvf->tstamp, &config, sizeof(config));
+
+ return copy_to_user(ifr->ifr_data, &config,
+ sizeof(config)) ? -EFAULT : 0;
+}
+
+static int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct hwtstamp_config *cfg = &pfvf->tstamp;
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return otx2_config_hwtstamp(netdev, req);
+ case SIOCGHWTSTAMP:
+ return copy_to_user(req->ifr_data, cfg,
+ sizeof(*cfg)) ? -EFAULT : 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct net_device_ops otx2_netdev_ops = {
+ .ndo_open = otx2_open,
+ .ndo_stop = otx2_stop,
+ .ndo_start_xmit = otx2_xmit,
+ .ndo_set_mac_address = otx2_set_mac_address,
+ .ndo_change_mtu = otx2_change_mtu,
+ .ndo_set_rx_mode = otx2_set_rx_mode,
+ .ndo_set_features = otx2_set_features,
+ .ndo_tx_timeout = otx2_tx_timeout,
+ .ndo_get_stats64 = otx2_get_stats64,
+ .ndo_do_ioctl = otx2_ioctl,
+};
+
+static int otx2_wq_init(struct otx2_nic *pf)
+{
+ pf->otx2_wq = create_singlethread_workqueue("otx2_wq");
+ if (!pf->otx2_wq)
+ return -ENOMEM;
+
+ INIT_WORK(&pf->rx_mode_work, otx2_do_set_rx_mode);
+ INIT_WORK(&pf->reset_task, otx2_reset_task);
+ return 0;
+}
+
+static int otx2_check_pf_usable(struct otx2_nic *nic)
+{
+ u64 rev;
+
+ rev = otx2_read64(nic, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
+ rev = (rev >> 12) & 0xFF;
+ /* Check if AF has set up the revision for the RVUM block,
+ * otherwise this driver probe should be deferred
+ * until the AF driver comes up.
+ */
+ if (!rev) {
+ dev_warn(nic->dev,
+ "AF is not initialized, deferring probe\n");
+ return -EPROBE_DEFER;
+ }
+ return 0;
+}
+
+static int otx2_realloc_msix_vectors(struct otx2_nic *pf)
+{
+ struct otx2_hw *hw = &pf->hw;
+ int num_vec, err;
+
+ /* NPA interrupts are not registered, so alloc only
+ * up to the NIX vector offset.
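+ * The NIX LF completion interrupt vectors begin at
+ * NIX_LF_CINT_VEC_START within the NIX block and one is needed per
+ * queue, which is what the num_vec arithmetic below accounts for.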
+ */ + num_vec = hw->nix_msixoff; + num_vec += NIX_LF_CINT_VEC_START + hw->max_queues; + + otx2_disable_mbox_intr(pf); + pci_free_irq_vectors(hw->pdev); + err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX); + if (err < 0) { + dev_err(pf->dev, "%s: Failed to realloc %d IRQ vectors\n", + __func__, num_vec); + return err; + } + + return otx2_register_mbox_intr(pf, false); +} + +static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct device *dev = &pdev->dev; + struct net_device *netdev; + struct otx2_nic *pf; + struct otx2_hw *hw; + int err, qcount; + int num_vec; + + err = pcim_enable_device(pdev); + if (err) { + dev_err(dev, "Failed to enable PCI device\n"); + return err; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + dev_err(dev, "PCI request regions failed 0x%x\n", err); + return err; + } + + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); + if (err) { + dev_err(dev, "DMA mask config failed, abort\n"); + goto err_release_regions; + } + + pci_set_master(pdev); + + /* Set number of queues */ + qcount = min_t(int, num_online_cpus(), OTX2_MAX_CQ_CNT); + + netdev = alloc_etherdev_mqs(sizeof(*pf), qcount, qcount); + if (!netdev) { + err = -ENOMEM; + goto err_release_regions; + } + + pci_set_drvdata(pdev, netdev); + SET_NETDEV_DEV(netdev, &pdev->dev); + pf = netdev_priv(netdev); + pf->netdev = netdev; + pf->pdev = pdev; + pf->dev = dev; + pf->total_vfs = pci_sriov_get_totalvfs(pdev); + pf->flags |= OTX2_FLAG_INTF_DOWN; + + hw = &pf->hw; + hw->pdev = pdev; + hw->rx_queues = qcount; + hw->tx_queues = qcount; + hw->max_queues = qcount; + + num_vec = pci_msix_vec_count(pdev); + hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE, + GFP_KERNEL); + if (!hw->irq_name) { + err = -ENOMEM; + goto err_free_netdev; + } + + hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec, + sizeof(cpumask_var_t), GFP_KERNEL); + if (!hw->affinity_mask) { + err = -ENOMEM; + goto err_free_netdev; + } + + /* Map CSRs */ + pf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); + if (!pf->reg_base) { + dev_err(dev, "Unable to map physical function CSRs, aborting\n"); + err = -ENOMEM; + goto err_free_netdev; + } + + err = otx2_check_pf_usable(pf); + if (err) + goto err_free_netdev; + + err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT, + RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX); + if (err < 0) { + dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n", + __func__, num_vec); + goto err_free_netdev; + } + + /* Init PF <=> AF mailbox stuff */ + err = otx2_pfaf_mbox_init(pf); + if (err) + goto err_free_irq_vectors; + + /* Register mailbox interrupt */ + err = otx2_register_mbox_intr(pf, true); + if (err) + goto err_mbox_destroy; + + /* Request AF to attach NPA and NIX LFs to this PF. + * NIX and NPA LFs are needed for this PF to function as a NIC. + */ + err = otx2_attach_npa_nix(pf); + if (err) + goto err_disable_mbox_intr; + + err = otx2_realloc_msix_vectors(pf); + if (err) + goto err_detach_rsrc; + + err = otx2_set_real_num_queues(netdev, hw->tx_queues, hw->rx_queues); + if (err) + goto err_detach_rsrc; + + otx2_setup_dev_hw_settings(pf); + + /* Assign default mac address */ + otx2_get_mac_from_af(netdev); + + /* Don't check for error. Proceed without ptp */ + otx2_ptp_init(pf); + + /* NPA's pool is a stack to which SW frees buffer pointers via Aura. + * HW allocates buffer pointer from stack and uses it for DMA'ing + * ingress packet. In some scenarios HW can free back allocated buffer + * pointers to pool. 
This makes it impossible for SW to maintain a + * parallel list where physical addresses of buffer pointers (IOVAs) + * given to HW can be saved for later reference. + * + * So the only way to convert Rx packet's buffer address is to use + * IOMMU's iova_to_phys() handler which translates the address by + * walking through the translation tables. + */ + pf->iommu_domain = iommu_get_domain_for_dev(dev); + + netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | NETIF_F_RXHASH | + NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_GSO_UDP_L4); + netdev->features |= netdev->hw_features; + + netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL; + + netdev->gso_max_segs = OTX2_MAX_GSO_SEGS; + netdev->watchdog_timeo = OTX2_TX_TIMEOUT; + + netdev->netdev_ops = &otx2_netdev_ops; + + /* MTU range: 64 - 9190 */ + netdev->min_mtu = OTX2_MIN_MTU; + netdev->max_mtu = OTX2_MAX_MTU; + + err = register_netdev(netdev); + if (err) { + dev_err(dev, "Failed to register netdevice\n"); + goto err_ptp_destroy; + } + + err = otx2_wq_init(pf); + if (err) + goto err_unreg_netdev; + + otx2_set_ethtool_ops(netdev); + + /* Enable link notifications */ + otx2_cgx_config_linkevents(pf, true); + + /* Enable pause frames by default */ + pf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED; + pf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED; + + return 0; + +err_unreg_netdev: + unregister_netdev(netdev); +err_ptp_destroy: + otx2_ptp_destroy(pf); +err_detach_rsrc: + otx2_detach_resources(&pf->mbox); +err_disable_mbox_intr: + otx2_disable_mbox_intr(pf); +err_mbox_destroy: + otx2_pfaf_mbox_destroy(pf); +err_free_irq_vectors: + pci_free_irq_vectors(hw->pdev); +err_free_netdev: + pci_set_drvdata(pdev, NULL); + free_netdev(netdev); +err_release_regions: + pci_release_regions(pdev); + return err; +} + +static void otx2_vf_link_event_task(struct work_struct *work) +{ + struct otx2_vf_config *config; + struct cgx_link_info_msg *req; + struct mbox_msghdr *msghdr; + struct otx2_nic *pf; + int vf_idx; + + config = container_of(work, struct otx2_vf_config, + link_event_work.work); + vf_idx = config - config->pf->vf_configs; + pf = config->pf; + + msghdr = otx2_mbox_alloc_msg_rsp(&pf->mbox_pfvf[0].mbox_up, vf_idx, + sizeof(*req), sizeof(struct msg_rsp)); + if (!msghdr) { + dev_err(pf->dev, "Failed to create VF%d link event\n", vf_idx); + return; + } + + req = (struct cgx_link_info_msg *)msghdr; + req->hdr.id = MBOX_MSG_CGX_LINK_EVENT; + req->hdr.sig = OTX2_MBOX_REQ_SIG; + memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info)); + + otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx); +} + +static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct otx2_nic *pf = netdev_priv(netdev); + int ret, i; + + /* Init PF <=> VF mailbox stuff */ + ret = otx2_pfvf_mbox_init(pf, numvfs); + if (ret) + return ret; + + ret = otx2_register_pfvf_mbox_intr(pf, numvfs); + if (ret) + goto free_mbox; + + pf->vf_configs = kcalloc(numvfs, sizeof(struct otx2_vf_config), + GFP_KERNEL); + if (!pf->vf_configs) { + ret = -ENOMEM; + goto free_intr; + } + + for (i = 0; i < numvfs; i++) { + pf->vf_configs[i].pf = pf; + pf->vf_configs[i].intf_down = true; + INIT_DELAYED_WORK(&pf->vf_configs[i].link_event_work, + otx2_vf_link_event_task); + } + + ret = otx2_pf_flr_init(pf, numvfs); + if (ret) + goto free_configs; + + ret = otx2_register_flr_me_intr(pf, numvfs); + if (ret) + goto free_flr; + + ret = pci_enable_sriov(pdev, numvfs); + if (ret) + goto free_flr_intr; + + return numvfs; 
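+/* Error unwind: release everything set up above in reverse order. */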
+free_flr_intr: + otx2_disable_flr_me_intr(pf); +free_flr: + otx2_flr_wq_destroy(pf); +free_configs: + kfree(pf->vf_configs); +free_intr: + otx2_disable_pfvf_mbox_intr(pf, numvfs); +free_mbox: + otx2_pfvf_mbox_destroy(pf); + return ret; +} + +static int otx2_sriov_disable(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct otx2_nic *pf = netdev_priv(netdev); + int numvfs = pci_num_vf(pdev); + int i; + + if (!numvfs) + return 0; + + pci_disable_sriov(pdev); + + for (i = 0; i < pci_num_vf(pdev); i++) + cancel_delayed_work_sync(&pf->vf_configs[i].link_event_work); + kfree(pf->vf_configs); + + otx2_disable_flr_me_intr(pf); + otx2_flr_wq_destroy(pf); + otx2_disable_pfvf_mbox_intr(pf, numvfs); + otx2_pfvf_mbox_destroy(pf); + + return 0; +} + +static int otx2_sriov_configure(struct pci_dev *pdev, int numvfs) +{ + if (numvfs == 0) + return otx2_sriov_disable(pdev); + else + return otx2_sriov_enable(pdev, numvfs); +} + +static void otx2_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct otx2_nic *pf; + + if (!netdev) + return; + + pf = netdev_priv(netdev); + + if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED) + otx2_config_hw_tx_tstamp(pf, false); + if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED) + otx2_config_hw_rx_tstamp(pf, false); + + cancel_work_sync(&pf->reset_task); + /* Disable link notifications */ + otx2_cgx_config_linkevents(pf, false); + + unregister_netdev(netdev); + otx2_sriov_disable(pf->pdev); + if (pf->otx2_wq) + destroy_workqueue(pf->otx2_wq); + + otx2_ptp_destroy(pf); + otx2_detach_resources(&pf->mbox); + otx2_disable_mbox_intr(pf); + otx2_pfaf_mbox_destroy(pf); + pci_free_irq_vectors(pf->pdev); + pci_set_drvdata(pdev, NULL); + free_netdev(netdev); + + pci_release_regions(pdev); +} + +static struct pci_driver otx2_pf_driver = { + .name = DRV_NAME, + .id_table = otx2_pf_id_table, + .probe = otx2_probe, + .shutdown = otx2_remove, + .remove = otx2_remove, + .sriov_configure = otx2_sriov_configure +}; + +static int __init otx2_rvupf_init_module(void) +{ + pr_info("%s: %s\n", DRV_NAME, DRV_STRING); + + return pci_register_driver(&otx2_pf_driver); +} + +static void __exit otx2_rvupf_cleanup_module(void) +{ + pci_unregister_driver(&otx2_pf_driver); +} + +module_init(otx2_rvupf_init_module); +module_exit(otx2_rvupf_cleanup_module); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c new file mode 100644 index 000000000..7bcf52463 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c @@ -0,0 +1,212 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 PTP support for ethernet driver + * + * Copyright (C) 2020 Marvell International Ltd. 
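+ *
+ * Note: ptp_cc_read() fetches the raw cycle count from the admin
+ * function over the mailbox (PTP_OP_GET_CLOCK); it is converted to
+ * nanoseconds through a software timecounter/cyclecounter pair with
+ * mult = 1 and shift = 0, i.e. the raw count is treated directly as
+ * nanoseconds.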
+ */ + +#include "otx2_common.h" +#include "otx2_ptp.h" + +static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm) +{ + struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, + ptp_info); + struct ptp_req *req; + int err; + + if (!ptp->nic) + return -ENODEV; + + req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox); + if (!req) + return -ENOMEM; + + req->op = PTP_OP_ADJFINE; + req->scaled_ppm = scaled_ppm; + + err = otx2_sync_mbox_msg(&ptp->nic->mbox); + if (err) + return err; + + return 0; +} + +static u64 ptp_cc_read(const struct cyclecounter *cc) +{ + struct otx2_ptp *ptp = container_of(cc, struct otx2_ptp, cycle_counter); + struct ptp_req *req; + struct ptp_rsp *rsp; + int err; + + if (!ptp->nic) + return 0; + + req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox); + if (!req) + return 0; + + req->op = PTP_OP_GET_CLOCK; + + err = otx2_sync_mbox_msg(&ptp->nic->mbox); + if (err) + return 0; + + rsp = (struct ptp_rsp *)otx2_mbox_get_rsp(&ptp->nic->mbox.mbox, 0, + &req->hdr); + if (IS_ERR(rsp)) + return 0; + + return rsp->clk; +} + +static int otx2_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta) +{ + struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, + ptp_info); + struct otx2_nic *pfvf = ptp->nic; + + mutex_lock(&pfvf->mbox.lock); + timecounter_adjtime(&ptp->time_counter, delta); + mutex_unlock(&pfvf->mbox.lock); + + return 0; +} + +static int otx2_ptp_gettime(struct ptp_clock_info *ptp_info, + struct timespec64 *ts) +{ + struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, + ptp_info); + struct otx2_nic *pfvf = ptp->nic; + u64 nsec; + + mutex_lock(&pfvf->mbox.lock); + nsec = timecounter_read(&ptp->time_counter); + mutex_unlock(&pfvf->mbox.lock); + + *ts = ns_to_timespec64(nsec); + + return 0; +} + +static int otx2_ptp_settime(struct ptp_clock_info *ptp_info, + const struct timespec64 *ts) +{ + struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, + ptp_info); + struct otx2_nic *pfvf = ptp->nic; + u64 nsec; + + nsec = timespec64_to_ns(ts); + + mutex_lock(&pfvf->mbox.lock); + timecounter_init(&ptp->time_counter, &ptp->cycle_counter, nsec); + mutex_unlock(&pfvf->mbox.lock); + + return 0; +} + +static int otx2_ptp_enable(struct ptp_clock_info *ptp_info, + struct ptp_clock_request *rq, int on) +{ + return -EOPNOTSUPP; +} + +int otx2_ptp_init(struct otx2_nic *pfvf) +{ + struct otx2_ptp *ptp_ptr; + struct cyclecounter *cc; + struct ptp_req *req; + int err; + + mutex_lock(&pfvf->mbox.lock); + /* check if PTP block is available */ + req = otx2_mbox_alloc_msg_ptp_op(&pfvf->mbox); + if (!req) { + mutex_unlock(&pfvf->mbox.lock); + return -ENOMEM; + } + + req->op = PTP_OP_GET_CLOCK; + + err = otx2_sync_mbox_msg(&pfvf->mbox); + if (err) { + mutex_unlock(&pfvf->mbox.lock); + return err; + } + mutex_unlock(&pfvf->mbox.lock); + + ptp_ptr = kzalloc(sizeof(*ptp_ptr), GFP_KERNEL); + if (!ptp_ptr) { + err = -ENOMEM; + goto error; + } + + ptp_ptr->nic = pfvf; + + cc = &ptp_ptr->cycle_counter; + cc->read = ptp_cc_read; + cc->mask = CYCLECOUNTER_MASK(64); + cc->mult = 1; + cc->shift = 0; + + timecounter_init(&ptp_ptr->time_counter, &ptp_ptr->cycle_counter, + ktime_to_ns(ktime_get_real())); + + ptp_ptr->ptp_info = (struct ptp_clock_info) { + .owner = THIS_MODULE, + .name = "OcteonTX2 PTP", + .max_adj = 1000000000ull, + .n_ext_ts = 0, + .n_pins = 0, + .pps = 0, + .adjfine = otx2_ptp_adjfine, + .adjtime = otx2_ptp_adjtime, + .gettime64 = otx2_ptp_gettime, + .settime64 = otx2_ptp_settime, + .enable = otx2_ptp_enable, + }; + + ptp_ptr->ptp_clock = 
ptp_clock_register(&ptp_ptr->ptp_info, pfvf->dev); + if (IS_ERR_OR_NULL(ptp_ptr->ptp_clock)) { + err = ptp_ptr->ptp_clock ? + PTR_ERR(ptp_ptr->ptp_clock) : -ENODEV; + kfree(ptp_ptr); + goto error; + } + + pfvf->ptp = ptp_ptr; + +error: + return err; +} + +void otx2_ptp_destroy(struct otx2_nic *pfvf) +{ + struct otx2_ptp *ptp = pfvf->ptp; + + if (!ptp) + return; + + ptp_clock_unregister(ptp->ptp_clock); + kfree(ptp); + pfvf->ptp = NULL; +} + +int otx2_ptp_clock_index(struct otx2_nic *pfvf) +{ + if (!pfvf->ptp) + return -ENODEV; + + return ptp_clock_index(pfvf->ptp->ptp_clock); +} + +int otx2_ptp_tstamp2time(struct otx2_nic *pfvf, u64 tstamp, u64 *tsns) +{ + if (!pfvf->ptp) + return -ENODEV; + + *tsns = timecounter_cyc2time(&pfvf->ptp->time_counter, tstamp); + + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h new file mode 100644 index 000000000..706d63a43 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell OcteonTx2 PTP support for ethernet driver */ + +#ifndef OTX2_PTP_H +#define OTX2_PTP_H + +int otx2_ptp_init(struct otx2_nic *pfvf); +void otx2_ptp_destroy(struct otx2_nic *pfvf); + +int otx2_ptp_clock_index(struct otx2_nic *pfvf); +int otx2_ptp_tstamp2time(struct otx2_nic *pfvf, u64 tstamp, u64 *tsns); + +#endif diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h new file mode 100644 index 000000000..867f646e0 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h @@ -0,0 +1,160 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell OcteonTx2 RVU Ethernet driver + * + * Copyright (C) 2020 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
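+ *
+ * The offsets below are relative to the PF/VF BAR mapped by the
+ * driver.  Parameterized macros fold the instance index into the
+ * offset, e.g. an "(a) << 3" stride for 8-byte register arrays and
+ * an "(a) << 12" stride for the per-queue interrupt registers.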
+ */ + +#ifndef OTX2_REG_H +#define OTX2_REG_H + +#include <rvu_struct.h> + +/* RVU PF registers */ +#define RVU_PF_VFX_PFVF_MBOX0 (0x00000) +#define RVU_PF_VFX_PFVF_MBOX1 (0x00008) +#define RVU_PF_VFX_PFVF_MBOXX(a, b) (0x0 | (a) << 12 | (b) << 3) +#define RVU_PF_VF_BAR4_ADDR (0x10) +#define RVU_PF_BLOCK_ADDRX_DISC(a) (0x200 | (a) << 3) +#define RVU_PF_VFME_STATUSX(a) (0x800 | (a) << 3) +#define RVU_PF_VFTRPENDX(a) (0x820 | (a) << 3) +#define RVU_PF_VFTRPEND_W1SX(a) (0x840 | (a) << 3) +#define RVU_PF_VFPF_MBOX_INTX(a) (0x880 | (a) << 3) +#define RVU_PF_VFPF_MBOX_INT_W1SX(a) (0x8A0 | (a) << 3) +#define RVU_PF_VFPF_MBOX_INT_ENA_W1SX(a) (0x8C0 | (a) << 3) +#define RVU_PF_VFPF_MBOX_INT_ENA_W1CX(a) (0x8E0 | (a) << 3) +#define RVU_PF_VFFLR_INTX(a) (0x900 | (a) << 3) +#define RVU_PF_VFFLR_INT_W1SX(a) (0x920 | (a) << 3) +#define RVU_PF_VFFLR_INT_ENA_W1SX(a) (0x940 | (a) << 3) +#define RVU_PF_VFFLR_INT_ENA_W1CX(a) (0x960 | (a) << 3) +#define RVU_PF_VFME_INTX(a) (0x980 | (a) << 3) +#define RVU_PF_VFME_INT_W1SX(a) (0x9A0 | (a) << 3) +#define RVU_PF_VFME_INT_ENA_W1SX(a) (0x9C0 | (a) << 3) +#define RVU_PF_VFME_INT_ENA_W1CX(a) (0x9E0 | (a) << 3) +#define RVU_PF_PFAF_MBOX0 (0xC00) +#define RVU_PF_PFAF_MBOX1 (0xC08) +#define RVU_PF_PFAF_MBOXX(a) (0xC00 | (a) << 3) +#define RVU_PF_INT (0xc20) +#define RVU_PF_INT_W1S (0xc28) +#define RVU_PF_INT_ENA_W1S (0xc30) +#define RVU_PF_INT_ENA_W1C (0xc38) +#define RVU_PF_MSIX_VECX_ADDR(a) (0x000 | (a) << 4) +#define RVU_PF_MSIX_VECX_CTL(a) (0x008 | (a) << 4) +#define RVU_PF_MSIX_PBAX(a) (0xF0000 | (a) << 3) + +/* RVU VF registers */ +#define RVU_VF_VFPF_MBOX0 (0x00000) +#define RVU_VF_VFPF_MBOX1 (0x00008) +#define RVU_VF_VFPF_MBOXX(a) (0x00 | (a) << 3) +#define RVU_VF_INT (0x20) +#define RVU_VF_INT_W1S (0x28) +#define RVU_VF_INT_ENA_W1S (0x30) +#define RVU_VF_INT_ENA_W1C (0x38) +#define RVU_VF_BLOCK_ADDRX_DISC(a) (0x200 | (a) << 3) +#define RVU_VF_MSIX_VECX_ADDR(a) (0x000 | (a) << 4) +#define RVU_VF_MSIX_VECX_CTL(a) (0x008 | (a) << 4) +#define RVU_VF_MSIX_PBAX(a) (0xF0000 | (a) << 3) + +#define RVU_FUNC_BLKADDR_SHIFT 20 +#define RVU_FUNC_BLKADDR_MASK 0x1FULL + +/* NPA LF registers */ +#define NPA_LFBASE (BLKTYPE_NPA << RVU_FUNC_BLKADDR_SHIFT) +#define NPA_LF_AURA_OP_ALLOCX(a) (NPA_LFBASE | 0x10 | (a) << 3) +#define NPA_LF_AURA_OP_FREE0 (NPA_LFBASE | 0x20) +#define NPA_LF_AURA_OP_FREE1 (NPA_LFBASE | 0x28) +#define NPA_LF_AURA_OP_CNT (NPA_LFBASE | 0x30) +#define NPA_LF_AURA_OP_LIMIT (NPA_LFBASE | 0x50) +#define NPA_LF_AURA_OP_INT (NPA_LFBASE | 0x60) +#define NPA_LF_AURA_OP_THRESH (NPA_LFBASE | 0x70) +#define NPA_LF_POOL_OP_PC (NPA_LFBASE | 0x100) +#define NPA_LF_POOL_OP_AVAILABLE (NPA_LFBASE | 0x110) +#define NPA_LF_POOL_OP_PTR_START0 (NPA_LFBASE | 0x120) +#define NPA_LF_POOL_OP_PTR_START1 (NPA_LFBASE | 0x128) +#define NPA_LF_POOL_OP_PTR_END0 (NPA_LFBASE | 0x130) +#define NPA_LF_POOL_OP_PTR_END1 (NPA_LFBASE | 0x138) +#define NPA_LF_POOL_OP_INT (NPA_LFBASE | 0x160) +#define NPA_LF_POOL_OP_THRESH (NPA_LFBASE | 0x170) +#define NPA_LF_ERR_INT (NPA_LFBASE | 0x200) +#define NPA_LF_ERR_INT_W1S (NPA_LFBASE | 0x208) +#define NPA_LF_ERR_INT_ENA_W1C (NPA_LFBASE | 0x210) +#define NPA_LF_ERR_INT_ENA_W1S (NPA_LFBASE | 0x218) +#define NPA_LF_RAS (NPA_LFBASE | 0x220) +#define NPA_LF_RAS_W1S (NPA_LFBASE | 0x228) +#define NPA_LF_RAS_ENA_W1C (NPA_LFBASE | 0x230) +#define NPA_LF_RAS_ENA_W1S (NPA_LFBASE | 0x238) +#define NPA_LF_QINTX_CNT(a) (NPA_LFBASE | 0x300 | (a) << 12) +#define NPA_LF_QINTX_INT(a) (NPA_LFBASE | 0x310 | (a) << 12) +#define NPA_LF_QINTX_INT_W1S(a) (NPA_LFBASE | 0x318 | 
(a) << 12) +#define NPA_LF_QINTX_ENA_W1S(a) (NPA_LFBASE | 0x320 | (a) << 12) +#define NPA_LF_QINTX_ENA_W1C(a) (NPA_LFBASE | 0x330 | (a) << 12) + +/* NIX LF registers */ +#define NIX_LFBASE (BLKTYPE_NIX << RVU_FUNC_BLKADDR_SHIFT) +#define NIX_LF_RX_SECRETX(a) (NIX_LFBASE | 0x0 | (a) << 3) +#define NIX_LF_CFG (NIX_LFBASE | 0x100) +#define NIX_LF_GINT (NIX_LFBASE | 0x200) +#define NIX_LF_GINT_W1S (NIX_LFBASE | 0x208) +#define NIX_LF_GINT_ENA_W1C (NIX_LFBASE | 0x210) +#define NIX_LF_GINT_ENA_W1S (NIX_LFBASE | 0x218) +#define NIX_LF_ERR_INT (NIX_LFBASE | 0x220) +#define NIX_LF_ERR_INT_W1S (NIX_LFBASE | 0x228) +#define NIX_LF_ERR_INT_ENA_W1C (NIX_LFBASE | 0x230) +#define NIX_LF_ERR_INT_ENA_W1S (NIX_LFBASE | 0x238) +#define NIX_LF_RAS (NIX_LFBASE | 0x240) +#define NIX_LF_RAS_W1S (NIX_LFBASE | 0x248) +#define NIX_LF_RAS_ENA_W1C (NIX_LFBASE | 0x250) +#define NIX_LF_RAS_ENA_W1S (NIX_LFBASE | 0x258) +#define NIX_LF_SQ_OP_ERR_DBG (NIX_LFBASE | 0x260) +#define NIX_LF_MNQ_ERR_DBG (NIX_LFBASE | 0x270) +#define NIX_LF_SEND_ERR_DBG (NIX_LFBASE | 0x280) +#define NIX_LF_TX_STATX(a) (NIX_LFBASE | 0x300 | (a) << 3) +#define NIX_LF_RX_STATX(a) (NIX_LFBASE | 0x400 | (a) << 3) +#define NIX_LF_OP_SENDX(a) (NIX_LFBASE | 0x800 | (a) << 3) +#define NIX_LF_RQ_OP_INT (NIX_LFBASE | 0x900) +#define NIX_LF_RQ_OP_OCTS (NIX_LFBASE | 0x910) +#define NIX_LF_RQ_OP_PKTS (NIX_LFBASE | 0x920) +#define NIX_LF_OP_IPSEC_DYNO_CN (NIX_LFBASE | 0x980) +#define NIX_LF_SQ_OP_INT (NIX_LFBASE | 0xa00) +#define NIX_LF_SQ_OP_OCTS (NIX_LFBASE | 0xa10) +#define NIX_LF_SQ_OP_PKTS (NIX_LFBASE | 0xa20) +#define NIX_LF_SQ_OP_STATUS (NIX_LFBASE | 0xa30) +#define NIX_LF_CQ_OP_INT (NIX_LFBASE | 0xb00) +#define NIX_LF_CQ_OP_DOOR (NIX_LFBASE | 0xb30) +#define NIX_LF_CQ_OP_STATUS (NIX_LFBASE | 0xb40) +#define NIX_LF_QINTX_CNT(a) (NIX_LFBASE | 0xC00 | (a) << 12) +#define NIX_LF_QINTX_INT(a) (NIX_LFBASE | 0xC10 | (a) << 12) +#define NIX_LF_QINTX_INT_W1S(a) (NIX_LFBASE | 0xC18 | (a) << 12) +#define NIX_LF_QINTX_ENA_W1S(a) (NIX_LFBASE | 0xC20 | (a) << 12) +#define NIX_LF_QINTX_ENA_W1C(a) (NIX_LFBASE | 0xC30 | (a) << 12) +#define NIX_LF_CINTX_CNT(a) (NIX_LFBASE | 0xD00 | (a) << 12) +#define NIX_LF_CINTX_WAIT(a) (NIX_LFBASE | 0xD10 | (a) << 12) +#define NIX_LF_CINTX_INT(a) (NIX_LFBASE | 0xD20 | (a) << 12) +#define NIX_LF_CINTX_INT_W1S(a) (NIX_LFBASE | 0xD30 | (a) << 12) +#define NIX_LF_CINTX_ENA_W1S(a) (NIX_LFBASE | 0xD40 | (a) << 12) +#define NIX_LF_CINTX_ENA_W1C(a) (NIX_LFBASE | 0xD50 | (a) << 12) + +/* NIX AF transmit scheduler registers */ +#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16) +#define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (a) << 16) +#define NIX_AF_TL1X_CIR(a) (0xC20 | (a) << 16) +#define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (a) << 16) +#define NIX_AF_TL2X_PARENT(a) (0xE88 | (a) << 16) +#define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (a) << 16) +#define NIX_AF_TL3X_PARENT(a) (0x1088 | (a) << 16) +#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (a) << 16) +#define NIX_AF_TL4X_PARENT(a) (0x1288 | (a) << 16) +#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (a) << 16) +#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (a) << 16) +#define NIX_AF_MDQX_PARENT(a) (0x1480 | (a) << 16) +#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (a) << 16 | (b) << 3) + +/* LMT LF registers */ +#define LMT_LFBASE BIT_ULL(RVU_FUNC_BLKADDR_SHIFT) +#define LMT_LF_LMTLINEX(a) (LMT_LFBASE | 0x000 | (a) << 12) +#define LMT_LF_LMTCANCEL (LMT_LFBASE | 0x400) + +#endif /* OTX2_REG_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h 
b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h new file mode 100644 index 000000000..cba59ddf7 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h @@ -0,0 +1,276 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell OcteonTx2 RVU Ethernet driver + * + * Copyright (C) 2020 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef OTX2_STRUCT_H +#define OTX2_STRUCT_H + +/* NIX WQE/CQE size 128 byte or 512 byte */ +enum nix_cqesz_e { + NIX_XQESZ_W64 = 0x0, + NIX_XQESZ_W16 = 0x1, +}; + +enum nix_sqes_e { + NIX_SQESZ_W16 = 0x0, + NIX_SQESZ_W8 = 0x1, +}; + +enum nix_send_ldtype { + NIX_SEND_LDTYPE_LDD = 0x0, + NIX_SEND_LDTYPE_LDT = 0x1, + NIX_SEND_LDTYPE_LDWB = 0x2, +}; + +/* CSUM offload */ +enum nix_sendl3type { + NIX_SENDL3TYPE_NONE = 0x0, + NIX_SENDL3TYPE_IP4 = 0x2, + NIX_SENDL3TYPE_IP4_CKSUM = 0x3, + NIX_SENDL3TYPE_IP6 = 0x4, +}; + +enum nix_sendl4type { + NIX_SENDL4TYPE_NONE, + NIX_SENDL4TYPE_TCP_CKSUM, + NIX_SENDL4TYPE_SCTP_CKSUM, + NIX_SENDL4TYPE_UDP_CKSUM, +}; + +/* NIX wqe/cqe types */ +enum nix_xqe_type { + NIX_XQE_TYPE_INVALID = 0x0, + NIX_XQE_TYPE_RX = 0x1, + NIX_XQE_TYPE_RX_IPSECS = 0x2, + NIX_XQE_TYPE_RX_IPSECH = 0x3, + NIX_XQE_TYPE_RX_IPSECD = 0x4, + NIX_XQE_TYPE_SEND = 0x8, +}; + +/* NIX CQE/SQE subdescriptor types */ +enum nix_subdc { + NIX_SUBDC_NOP = 0x0, + NIX_SUBDC_EXT = 0x1, + NIX_SUBDC_CRC = 0x2, + NIX_SUBDC_IMM = 0x3, + NIX_SUBDC_SG = 0x4, + NIX_SUBDC_MEM = 0x5, + NIX_SUBDC_JUMP = 0x6, + NIX_SUBDC_WORK = 0x7, + NIX_SUBDC_SOD = 0xf, +}; + +/* Algorithm for nix_sqe_mem_s header (value of the `alg` field) */ +enum nix_sendmemalg { + NIX_SENDMEMALG_E_SET = 0x0, + NIX_SENDMEMALG_E_SETTSTMP = 0x1, + NIX_SENDMEMALG_E_SETRSLT = 0x2, + NIX_SENDMEMALG_E_ADD = 0x8, + NIX_SENDMEMALG_E_SUB = 0x9, + NIX_SENDMEMALG_E_ADDLEN = 0xa, + NIX_SENDMEMALG_E_SUBLEN = 0xb, + NIX_SENDMEMALG_E_ADDMBUF = 0xc, + NIX_SENDMEMALG_E_SUBMBUF = 0xd, + NIX_SENDMEMALG_E_ENUM_LAST = 0xe, +}; + +/* NIX CQE header structure */ +struct nix_cqe_hdr_s { + u64 flow_tag : 32; + u64 q : 20; + u64 reserved_52_57 : 6; + u64 node : 2; + u64 cqe_type : 4; +}; + +/* NIX CQE RX parse structure */ +struct nix_rx_parse_s { + u64 chan : 12; + u64 desc_sizem1 : 5; + u64 rsvd_17 : 1; + u64 express : 1; + u64 wqwd : 1; + u64 errlev : 4; + u64 errcode : 8; + u64 latype : 4; + u64 lbtype : 4; + u64 lctype : 4; + u64 ldtype : 4; + u64 letype : 4; + u64 lftype : 4; + u64 lgtype : 4; + u64 lhtype : 4; + u64 pkt_lenm1 : 16; /* W1 */ + u64 l2m : 1; + u64 l2b : 1; + u64 l3m : 1; + u64 l3b : 1; + u64 vtag0_valid : 1; + u64 vtag0_gone : 1; + u64 vtag1_valid : 1; + u64 vtag1_gone : 1; + u64 pkind : 6; + u64 rsvd_95_94 : 2; + u64 vtag0_tci : 16; + u64 vtag1_tci : 16; + u64 laflags : 8; /* W2 */ + u64 lbflags : 8; + u64 lcflags : 8; + u64 ldflags : 8; + u64 leflags : 8; + u64 lfflags : 8; + u64 lgflags : 8; + u64 lhflags : 8; + u64 eoh_ptr : 8; /* W3 */ + u64 wqe_aura : 20; + u64 pb_aura : 20; + u64 match_id : 16; + u64 laptr : 8; /* W4 */ + u64 lbptr : 8; + u64 lcptr : 8; + u64 ldptr : 8; + u64 leptr : 8; + u64 lfptr : 8; + u64 lgptr : 8; + u64 lhptr : 8; + u64 vtag0_ptr : 8; /* W5 */ + u64 vtag1_ptr : 8; + u64 flow_key_alg : 5; + u64 rsvd_383_341 : 43; + u64 rsvd_447_384; /* W6 */ +}; + +/* NIX CQE RX scatter/gather subdescriptor structure */ +struct nix_rx_sg_s { + u64 seg_size : 16; /* W0 */ + u64 seg2_size : 16; + u64 
seg3_size : 16; + u64 segs : 2; + u64 rsvd_59_50 : 10; + u64 subdc : 4; + u64 seg_addr; + u64 seg2_addr; + u64 seg3_addr; +}; + +struct nix_send_comp_s { + u64 status : 8; + u64 sqe_id : 16; + u64 rsvd_24_63 : 40; +}; + +struct nix_cqe_rx_s { + struct nix_cqe_hdr_s hdr; + struct nix_rx_parse_s parse; + struct nix_rx_sg_s sg; +}; + +struct nix_cqe_tx_s { + struct nix_cqe_hdr_s hdr; + struct nix_send_comp_s comp; +}; + +/* NIX SQE header structure */ +struct nix_sqe_hdr_s { + u64 total : 18; /* W0 */ + u64 reserved_18 : 1; + u64 df : 1; + u64 aura : 20; + u64 sizem1 : 3; + u64 pnc : 1; + u64 sq : 20; + u64 ol3ptr : 8; /* W1 */ + u64 ol4ptr : 8; + u64 il3ptr : 8; + u64 il4ptr : 8; + u64 ol3type : 4; + u64 ol4type : 4; + u64 il3type : 4; + u64 il4type : 4; + u64 sqe_id : 16; + +}; + +/* NIX send extended header subdescriptor structure */ +struct nix_sqe_ext_s { + u64 lso_mps : 14; /* W0 */ + u64 lso : 1; + u64 tstmp : 1; + u64 lso_sb : 8; + u64 lso_format : 5; + u64 rsvd_31_29 : 3; + u64 shp_chg : 9; + u64 shp_dis : 1; + u64 shp_ra : 2; + u64 markptr : 8; + u64 markform : 7; + u64 mark_en : 1; + u64 subdc : 4; + u64 vlan0_ins_ptr : 8; /* W1 */ + u64 vlan0_ins_tci : 16; + u64 vlan1_ins_ptr : 8; + u64 vlan1_ins_tci : 16; + u64 vlan0_ins_ena : 1; + u64 vlan1_ins_ena : 1; + u64 rsvd_127_114 : 14; +}; + +struct nix_sqe_sg_s { + u64 seg1_size : 16; + u64 seg2_size : 16; + u64 seg3_size : 16; + u64 segs : 2; + u64 rsvd_54_50 : 5; + u64 i1 : 1; + u64 i2 : 1; + u64 i3 : 1; + u64 ld_type : 2; + u64 subdc : 4; +}; + +/* NIX send memory subdescriptor structure */ +struct nix_sqe_mem_s { + u64 offset : 16; /* W0 */ + u64 rsvd_52_16 : 37; + u64 wmem : 1; + u64 dsz : 2; + u64 alg : 4; + u64 subdc : 4; + u64 addr; /* W1 */ +}; + +enum nix_cqerrint_e { + NIX_CQERRINT_DOOR_ERR = 0, + NIX_CQERRINT_WR_FULL = 1, + NIX_CQERRINT_CQE_FAULT = 2, +}; + +#define NIX_CQERRINT_BITS (BIT_ULL(NIX_CQERRINT_DOOR_ERR) | \ + BIT_ULL(NIX_CQERRINT_CQE_FAULT)) + +enum nix_rqint_e { + NIX_RQINT_DROP = 0, + NIX_RQINT_RED = 1, +}; + +#define NIX_RQINT_BITS (BIT_ULL(NIX_RQINT_DROP) | BIT_ULL(NIX_RQINT_RED)) + +enum nix_sqint_e { + NIX_SQINT_LMT_ERR = 0, + NIX_SQINT_MNQ_ERR = 1, + NIX_SQINT_SEND_ERR = 2, + NIX_SQINT_SQB_ALLOC_FAIL = 3, +}; + +#define NIX_SQINT_BITS (BIT_ULL(NIX_SQINT_LMT_ERR) | \ + BIT_ULL(NIX_SQINT_MNQ_ERR) | \ + BIT_ULL(NIX_SQINT_SEND_ERR) | \ + BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL)) + +#endif /* OTX2_STRUCT_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c new file mode 100644 index 000000000..a0a6dadbc --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -0,0 +1,976 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Ethernet driver + * + * Copyright (C) 2020 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
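+ *
+ * This file implements the Tx/Rx fast path: the NAPI poll handler,
+ * Rx/Tx completion (CQE) processing, and SQE construction including
+ * the scatter/gather, extended-header and software TSO
+ * sub-descriptors.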
+ */ + +#include <linux/etherdevice.h> +#include <net/ip.h> +#include <net/tso.h> + +#include "otx2_reg.h" +#include "otx2_common.h" +#include "otx2_struct.h" +#include "otx2_txrx.h" +#include "otx2_ptp.h" + +#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx))) + +static struct nix_cqe_hdr_s *otx2_get_next_cqe(struct otx2_cq_queue *cq) +{ + struct nix_cqe_hdr_s *cqe_hdr; + + cqe_hdr = (struct nix_cqe_hdr_s *)CQE_ADDR(cq, cq->cq_head); + if (cqe_hdr->cqe_type == NIX_XQE_TYPE_INVALID) + return NULL; + + cq->cq_head++; + cq->cq_head &= (cq->cqe_cnt - 1); + + return cqe_hdr; +} + +static unsigned int frag_num(unsigned int i) +{ +#ifdef __BIG_ENDIAN + return (i & ~3) + 3 - (i & 3); +#else + return i; +#endif +} + +static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf, + struct sk_buff *skb, int seg, int *len) +{ + const skb_frag_t *frag; + struct page *page; + int offset; + + /* First segment is always skb->data */ + if (!seg) { + page = virt_to_page(skb->data); + offset = offset_in_page(skb->data); + *len = skb_headlen(skb); + } else { + frag = &skb_shinfo(skb)->frags[seg - 1]; + page = skb_frag_page(frag); + offset = skb_frag_off(frag); + *len = skb_frag_size(frag); + } + return otx2_dma_map_page(pfvf, page, offset, *len, DMA_TO_DEVICE); +} + +static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg) +{ + int seg; + + for (seg = 0; seg < sg->num_segs; seg++) { + otx2_dma_unmap_page(pfvf, sg->dma_addr[seg], + sg->size[seg], DMA_TO_DEVICE); + } + sg->num_segs = 0; +} + +static void otx2_snd_pkt_handler(struct otx2_nic *pfvf, + struct otx2_cq_queue *cq, + struct otx2_snd_queue *sq, + struct nix_cqe_tx_s *cqe, + int budget, int *tx_pkts, int *tx_bytes) +{ + struct nix_send_comp_s *snd_comp = &cqe->comp; + struct skb_shared_hwtstamps ts; + struct sk_buff *skb = NULL; + u64 timestamp, tsns; + struct sg_list *sg; + int err; + + if (unlikely(snd_comp->status) && netif_msg_tx_err(pfvf)) + net_err_ratelimited("%s: TX%d: Error in send CQ status:%x\n", + pfvf->netdev->name, cq->cint_idx, + snd_comp->status); + + sg = &sq->sg[snd_comp->sqe_id]; + skb = (struct sk_buff *)sg->skb; + if (unlikely(!skb)) + return; + + if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) { + timestamp = ((u64 *)sq->timestamps->base)[snd_comp->sqe_id]; + if (timestamp != 1) { + err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns); + if (!err) { + memset(&ts, 0, sizeof(ts)); + ts.hwtstamp = ns_to_ktime(tsns); + skb_tstamp_tx(skb, &ts); + } + } + } + + *tx_bytes += skb->len; + (*tx_pkts)++; + otx2_dma_unmap_skb_frags(pfvf, sg); + napi_consume_skb(skb, budget); + sg->skb = (u64)NULL; +} + +static void otx2_set_rxtstamp(struct otx2_nic *pfvf, + struct sk_buff *skb, void *data) +{ + u64 tsns; + int err; + + if (!(pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)) + return; + + /* The first 8 bytes is the timestamp */ + err = otx2_ptp_tstamp2time(pfvf, be64_to_cpu(*(__be64 *)data), &tsns); + if (err) + return; + + skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(tsns); +} + +static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb, + u64 iova, int len, struct nix_rx_parse_s *parse) +{ + struct page *page; + int off = 0; + void *va; + + va = phys_to_virt(otx2_iova_to_phys(pfvf->iommu_domain, iova)); + + if (likely(!skb_shinfo(skb)->nr_frags)) { + /* Check if data starts at some nonzero offset + * from the start of the buffer. For now the + * only possible offset is 8 bytes in the case + * where packet is prepended by a timestamp. 
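+ * When that happens the timestamp is consumed by otx2_set_rxtstamp()
+ * and OTX2_HW_TIMESTAMP_LEN bytes are skipped before the fragment is
+ * added to the skb.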
+ */ + if (parse->laptr) { + otx2_set_rxtstamp(pfvf, skb, va); + off = OTX2_HW_TIMESTAMP_LEN; + } + } + + page = virt_to_page(va); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + va - page_address(page) + off, len - off, pfvf->rbsize); + + otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM, + pfvf->rbsize, DMA_FROM_DEVICE); +} + +static void otx2_set_rxhash(struct otx2_nic *pfvf, + struct nix_cqe_rx_s *cqe, struct sk_buff *skb) +{ + enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE; + struct otx2_rss_info *rss; + u32 hash = 0; + + if (!(pfvf->netdev->features & NETIF_F_RXHASH)) + return; + + rss = &pfvf->hw.rss_info; + if (rss->flowkey_cfg) { + if (rss->flowkey_cfg & + ~(NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6)) + hash_type = PKT_HASH_TYPE_L4; + else + hash_type = PKT_HASH_TYPE_L3; + hash = cqe->hdr.flow_tag; + } + skb_set_hash(skb, hash, hash_type); +} + +static void otx2_free_rcv_seg(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe, + int qidx) +{ + struct nix_rx_sg_s *sg = &cqe->sg; + void *end, *start; + u64 *seg_addr; + int seg; + + start = (void *)sg; + end = start + ((cqe->parse.desc_sizem1 + 1) * 16); + while (start < end) { + sg = (struct nix_rx_sg_s *)start; + seg_addr = &sg->seg_addr; + for (seg = 0; seg < sg->segs; seg++, seg_addr++) + otx2_aura_freeptr(pfvf, qidx, *seg_addr & ~0x07ULL); + start += sizeof(*sg); + } +} + +static bool otx2_check_rcv_errors(struct otx2_nic *pfvf, + struct nix_cqe_rx_s *cqe, int qidx) +{ + struct otx2_drv_stats *stats = &pfvf->hw.drv_stats; + struct nix_rx_parse_s *parse = &cqe->parse; + + if (netif_msg_rx_err(pfvf)) + netdev_err(pfvf->netdev, + "RQ%d: Error pkt with errlev:0x%x errcode:0x%x\n", + qidx, parse->errlev, parse->errcode); + + if (parse->errlev == NPC_ERRLVL_RE) { + switch (parse->errcode) { + case ERRCODE_FCS: + case ERRCODE_FCS_RCV: + atomic_inc(&stats->rx_fcs_errs); + break; + case ERRCODE_UNDERSIZE: + atomic_inc(&stats->rx_undersize_errs); + break; + case ERRCODE_OVERSIZE: + atomic_inc(&stats->rx_oversize_errs); + break; + case ERRCODE_OL2_LEN_MISMATCH: + atomic_inc(&stats->rx_len_errs); + break; + default: + atomic_inc(&stats->rx_other_errs); + break; + } + } else if (parse->errlev == NPC_ERRLVL_NIX) { + switch (parse->errcode) { + case ERRCODE_OL3_LEN: + case ERRCODE_OL4_LEN: + case ERRCODE_IL3_LEN: + case ERRCODE_IL4_LEN: + atomic_inc(&stats->rx_len_errs); + break; + case ERRCODE_OL4_CSUM: + case ERRCODE_IL4_CSUM: + atomic_inc(&stats->rx_csum_errs); + break; + default: + atomic_inc(&stats->rx_other_errs); + break; + } + } else { + atomic_inc(&stats->rx_other_errs); + /* For now ignore all the NPC parser errors and + * pass the packets to stack. + */ + if (cqe->sg.segs == 1) + return false; + } + + /* If RXALL is enabled pass on packets to stack. 
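+ * Only single segment packets (sg.segs == 1) are passed up this way;
+ * multi-segment error packets are freed back to the pool below.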
*/ + if (cqe->sg.segs == 1 && (pfvf->netdev->features & NETIF_F_RXALL)) + return false; + + /* Free buffer back to pool */ + if (cqe->sg.segs) + otx2_free_rcv_seg(pfvf, cqe, qidx); + return true; +} + +static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf, + struct napi_struct *napi, + struct otx2_cq_queue *cq, + struct nix_cqe_rx_s *cqe) +{ + struct nix_rx_parse_s *parse = &cqe->parse; + struct sk_buff *skb = NULL; + + if (unlikely(parse->errlev || parse->errcode || cqe->sg.segs > 1)) { + if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx)) + return; + } + + skb = napi_get_frags(napi); + if (unlikely(!skb)) + return; + + otx2_skb_add_frag(pfvf, skb, cqe->sg.seg_addr, cqe->sg.seg_size, parse); + cq->pool_ptrs++; + + otx2_set_rxhash(pfvf, cqe, skb); + + skb_record_rx_queue(skb, cq->cq_idx); + if (pfvf->netdev->features & NETIF_F_RXCSUM) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + napi_gro_frags(napi); +} + +static int otx2_rx_napi_handler(struct otx2_nic *pfvf, + struct napi_struct *napi, + struct otx2_cq_queue *cq, int budget) +{ + struct nix_cqe_rx_s *cqe; + int processed_cqe = 0; + s64 bufptr; + + while (likely(processed_cqe < budget)) { + cqe = (struct nix_cqe_rx_s *)CQE_ADDR(cq, cq->cq_head); + if (cqe->hdr.cqe_type == NIX_XQE_TYPE_INVALID || + !cqe->sg.seg_addr) { + if (!processed_cqe) + return 0; + break; + } + cq->cq_head++; + cq->cq_head &= (cq->cqe_cnt - 1); + + otx2_rcv_pkt_handler(pfvf, napi, cq, cqe); + + cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID; + cqe->sg.seg_addr = 0x00; + processed_cqe++; + } + + /* Free CQEs to HW */ + otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR, + ((u64)cq->cq_idx << 32) | processed_cqe); + + if (unlikely(!cq->pool_ptrs)) + return 0; + + /* Refill pool with new buffers */ + while (cq->pool_ptrs) { + bufptr = __otx2_alloc_rbuf(pfvf, cq->rbpool); + if (unlikely(bufptr <= 0)) { + struct refill_work *work; + struct delayed_work *dwork; + + work = &pfvf->refill_wrk[cq->cq_idx]; + dwork = &work->pool_refill_work; + /* Schedule a task if no other task is running */ + if (!cq->refill_task_sched) { + cq->refill_task_sched = true; + schedule_delayed_work(dwork, + msecs_to_jiffies(100)); + } + break; + } + otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM); + cq->pool_ptrs--; + } + + return processed_cqe; +} + +static int otx2_tx_napi_handler(struct otx2_nic *pfvf, + struct otx2_cq_queue *cq, int budget) +{ + int tx_pkts = 0, tx_bytes = 0; + struct nix_cqe_tx_s *cqe; + int processed_cqe = 0; + + while (likely(processed_cqe < budget)) { + cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq); + if (unlikely(!cqe)) { + if (!processed_cqe) + return 0; + break; + } + otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[cq->cint_idx], + cqe, budget, &tx_pkts, &tx_bytes); + + cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID; + processed_cqe++; + } + + /* Free CQEs to HW */ + otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR, + ((u64)cq->cq_idx << 32) | processed_cqe); + + if (likely(tx_pkts)) { + struct netdev_queue *txq; + + txq = netdev_get_tx_queue(pfvf->netdev, cq->cint_idx); + netdev_tx_completed_queue(txq, tx_pkts, tx_bytes); + /* Check if queue was stopped earlier due to ring full */ + smp_mb(); + if (netif_tx_queue_stopped(txq) && + netif_carrier_ok(pfvf->netdev)) + netif_tx_wake_queue(txq); + } + return 0; +} + +int otx2_napi_handler(struct napi_struct *napi, int budget) +{ + struct otx2_cq_poll *cq_poll; + int workdone = 0, cq_idx, i; + struct otx2_cq_queue *cq; + struct otx2_qset *qset; + struct otx2_nic *pfvf; + + cq_poll = container_of(napi, struct otx2_cq_poll, napi); + pfvf = (struct 
otx2_nic *)cq_poll->dev; + qset = &pfvf->qset; + + for (i = CQS_PER_CINT - 1; i >= 0; i--) { + cq_idx = cq_poll->cq_ids[i]; + if (unlikely(cq_idx == CINT_INVALID_CQ)) + continue; + cq = &qset->cq[cq_idx]; + if (cq->cq_type == CQ_RX) { + /* If the RQ refill WQ task is running, skip napi + * scheduler for this queue. + */ + if (cq->refill_task_sched) + continue; + workdone += otx2_rx_napi_handler(pfvf, napi, + cq, budget); + } else { + workdone += otx2_tx_napi_handler(pfvf, cq, budget); + } + } + + /* Clear the IRQ */ + otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0)); + + if (workdone < budget && napi_complete_done(napi, workdone)) { + /* If interface is going down, don't re-enable IRQ */ + if (pfvf->flags & OTX2_FLAG_INTF_DOWN) + return workdone; + + /* Re-enable interrupts */ + otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx), + BIT_ULL(0)); + } + return workdone; +} + +static void otx2_sqe_flush(struct otx2_snd_queue *sq, int size) +{ + u64 status; + + /* Packet data stores should finish before SQE is flushed to HW */ + dma_wmb(); + + do { + memcpy(sq->lmt_addr, sq->sqe_base, size); + status = otx2_lmt_flush(sq->io_addr); + } while (status == 0); + + sq->head++; + sq->head &= (sq->sqe_cnt - 1); +} + +#define MAX_SEGS_PER_SG 3 +/* Add SQE scatter/gather subdescriptor structure */ +static bool otx2_sqe_add_sg(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, + struct sk_buff *skb, int num_segs, int *offset) +{ + struct nix_sqe_sg_s *sg = NULL; + u64 dma_addr, *iova = NULL; + u16 *sg_lens = NULL; + int seg, len; + + sq->sg[sq->head].num_segs = 0; + + for (seg = 0; seg < num_segs; seg++) { + if ((seg % MAX_SEGS_PER_SG) == 0) { + sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset); + sg->ld_type = NIX_SEND_LDTYPE_LDD; + sg->subdc = NIX_SUBDC_SG; + sg->segs = 0; + sg_lens = (void *)sg; + iova = (void *)sg + sizeof(*sg); + /* Next subdc always starts at a 16byte boundary. + * So if sg->segs is whether 2 or 3, offset += 16bytes. + */ + if ((num_segs - seg) >= (MAX_SEGS_PER_SG - 1)) + *offset += sizeof(*sg) + (3 * sizeof(u64)); + else + *offset += sizeof(*sg) + sizeof(u64); + } + dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len); + if (dma_mapping_error(pfvf->dev, dma_addr)) + return false; + + sg_lens[frag_num(seg % MAX_SEGS_PER_SG)] = len; + sg->segs++; + *iova++ = dma_addr; + + /* Save DMA mapping info for later unmapping */ + sq->sg[sq->head].dma_addr[seg] = dma_addr; + sq->sg[sq->head].size[seg] = len; + sq->sg[sq->head].num_segs++; + } + + sq->sg[sq->head].skb = (u64)skb; + return true; +} + +/* Add SQE extended header subdescriptor */ +static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, + struct sk_buff *skb, int *offset) +{ + struct nix_sqe_ext_s *ext; + + ext = (struct nix_sqe_ext_s *)(sq->sqe_base + *offset); + ext->subdc = NIX_SUBDC_EXT; + if (skb_shinfo(skb)->gso_size) { + ext->lso = 1; + ext->lso_sb = skb_transport_offset(skb) + tcp_hdrlen(skb); + ext->lso_mps = skb_shinfo(skb)->gso_size; + + /* Only TSOv4 and TSOv6 GSO offloads are supported */ + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { + ext->lso_format = pfvf->hw.lso_tsov4_idx; + + /* HW adds payload size to 'ip_hdr->tot_len' while + * sending TSO segment, hence set payload length + * in IP header of the packet to just header length. 
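+ * That header length is lso_sb minus the network header offset,
+ * i.e. the IPv4 + TCP header bytes.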
+ */ + ip_hdr(skb)->tot_len = + htons(ext->lso_sb - skb_network_offset(skb)); + } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) { + ext->lso_format = pfvf->hw.lso_tsov6_idx; + ipv6_hdr(skb)->payload_len = htons(tcp_hdrlen(skb)); + } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { + __be16 l3_proto = vlan_get_protocol(skb); + struct udphdr *udph = udp_hdr(skb); + u16 iplen; + + ext->lso_sb = skb_transport_offset(skb) + + sizeof(struct udphdr); + + /* HW adds payload size to length fields in IP and + * UDP headers while segmentation, hence adjust the + * lengths to just header sizes. + */ + iplen = htons(ext->lso_sb - skb_network_offset(skb)); + if (l3_proto == htons(ETH_P_IP)) { + ip_hdr(skb)->tot_len = iplen; + ext->lso_format = pfvf->hw.lso_udpv4_idx; + } else { + ipv6_hdr(skb)->payload_len = iplen; + ext->lso_format = pfvf->hw.lso_udpv6_idx; + } + + udph->len = htons(sizeof(struct udphdr)); + } + } else if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { + ext->tstmp = 1; + } + + *offset += sizeof(*ext); +} + +static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset, + int alg, u64 iova) +{ + struct nix_sqe_mem_s *mem; + + mem = (struct nix_sqe_mem_s *)(sq->sqe_base + *offset); + mem->subdc = NIX_SUBDC_MEM; + mem->alg = alg; + mem->wmem = 1; /* wait for the memory operation */ + mem->addr = iova; + + *offset += sizeof(*mem); +} + +/* Add SQE header subdescriptor structure */ +static void otx2_sqe_add_hdr(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, + struct nix_sqe_hdr_s *sqe_hdr, + struct sk_buff *skb, u16 qidx) +{ + int proto = 0; + + /* Check if SQE was framed before, if yes then no need to + * set these constants again and again. + */ + if (!sqe_hdr->total) { + /* Don't free Tx buffers to Aura */ + sqe_hdr->df = 1; + sqe_hdr->aura = sq->aura_id; + /* Post a CQE Tx after pkt transmission */ + sqe_hdr->pnc = 1; + sqe_hdr->sq = qidx; + } + sqe_hdr->total = skb->len; + /* Set SQE identifier which will be used later for freeing SKB */ + sqe_hdr->sqe_id = sq->head; + + /* Offload TCP/UDP checksum to HW */ + if (skb->ip_summed == CHECKSUM_PARTIAL) { + sqe_hdr->ol3ptr = skb_network_offset(skb); + sqe_hdr->ol4ptr = skb_transport_offset(skb); + /* get vlan protocol Ethertype */ + if (eth_type_vlan(skb->protocol)) + skb->protocol = vlan_get_protocol(skb); + + if (skb->protocol == htons(ETH_P_IP)) { + proto = ip_hdr(skb)->protocol; + /* In case of TSO, HW needs this to be explicitly set. + * So set this always, instead of adding a check. 
+ */ + sqe_hdr->ol3type = NIX_SENDL3TYPE_IP4_CKSUM; + } else if (skb->protocol == htons(ETH_P_IPV6)) { + proto = ipv6_hdr(skb)->nexthdr; + sqe_hdr->ol3type = NIX_SENDL3TYPE_IP6; + } + + if (proto == IPPROTO_TCP) + sqe_hdr->ol4type = NIX_SENDL4TYPE_TCP_CKSUM; + else if (proto == IPPROTO_UDP) + sqe_hdr->ol4type = NIX_SENDL4TYPE_UDP_CKSUM; + } +} + +static int otx2_dma_map_tso_skb(struct otx2_nic *pfvf, + struct otx2_snd_queue *sq, + struct sk_buff *skb, int sqe, int hdr_len) +{ + int num_segs = skb_shinfo(skb)->nr_frags + 1; + struct sg_list *sg = &sq->sg[sqe]; + u64 dma_addr; + int seg, len; + + sg->num_segs = 0; + + /* Get payload length at skb->data */ + len = skb_headlen(skb) - hdr_len; + + for (seg = 0; seg < num_segs; seg++) { + /* Skip skb->data, if there is no payload */ + if (!seg && !len) + continue; + dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len); + if (dma_mapping_error(pfvf->dev, dma_addr)) + goto unmap; + + /* Save DMA mapping info for later unmapping */ + sg->dma_addr[sg->num_segs] = dma_addr; + sg->size[sg->num_segs] = len; + sg->num_segs++; + } + return 0; +unmap: + otx2_dma_unmap_skb_frags(pfvf, sg); + return -EINVAL; +} + +static u64 otx2_tso_frag_dma_addr(struct otx2_snd_queue *sq, + struct sk_buff *skb, int seg, + u64 seg_addr, int hdr_len, int sqe) +{ + struct sg_list *sg = &sq->sg[sqe]; + const skb_frag_t *frag; + int offset; + + if (seg < 0) + return sg->dma_addr[0] + (seg_addr - (u64)skb->data); + + frag = &skb_shinfo(skb)->frags[seg]; + offset = seg_addr - (u64)skb_frag_address(frag); + if (skb_headlen(skb) - hdr_len) + seg++; + return sg->dma_addr[seg] + offset; +} + +static void otx2_sqe_tso_add_sg(struct otx2_snd_queue *sq, + struct sg_list *list, int *offset) +{ + struct nix_sqe_sg_s *sg = NULL; + u16 *sg_lens = NULL; + u64 *iova = NULL; + int seg; + + /* Add SG descriptors with buffer addresses */ + for (seg = 0; seg < list->num_segs; seg++) { + if ((seg % MAX_SEGS_PER_SG) == 0) { + sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset); + sg->ld_type = NIX_SEND_LDTYPE_LDD; + sg->subdc = NIX_SUBDC_SG; + sg->segs = 0; + sg_lens = (void *)sg; + iova = (void *)sg + sizeof(*sg); + /* Next subdc always starts at a 16byte boundary. + * So if sg->segs is whether 2 or 3, offset += 16bytes. + */ + if ((list->num_segs - seg) >= (MAX_SEGS_PER_SG - 1)) + *offset += sizeof(*sg) + (3 * sizeof(u64)); + else + *offset += sizeof(*sg) + sizeof(u64); + } + sg_lens[frag_num(seg % MAX_SEGS_PER_SG)] = list->size[seg]; + *iova++ = list->dma_addr[seg]; + sg->segs++; + } +} + +static void otx2_sq_append_tso(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, + struct sk_buff *skb, u16 qidx) +{ + struct netdev_queue *txq = netdev_get_tx_queue(pfvf->netdev, qidx); + int hdr_len, tcp_data, seg_len, pkt_len, offset; + struct nix_sqe_hdr_s *sqe_hdr; + int first_sqe = sq->head; + struct sg_list list; + struct tso_t tso; + + hdr_len = tso_start(skb, &tso); + + /* Map SKB's fragments to DMA. + * It's done here to avoid mapping for every TSO segment's packet. 
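+ * The per-segment SG lists built below reuse these mappings via
+ * otx2_tso_frag_dma_addr() instead of remapping the payload.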
+ */ + if (otx2_dma_map_tso_skb(pfvf, sq, skb, first_sqe, hdr_len)) { + dev_kfree_skb_any(skb); + return; + } + + netdev_tx_sent_queue(txq, skb->len); + + tcp_data = skb->len - hdr_len; + while (tcp_data > 0) { + char *hdr; + + seg_len = min_t(int, skb_shinfo(skb)->gso_size, tcp_data); + tcp_data -= seg_len; + + /* Set SQE's SEND_HDR */ + memset(sq->sqe_base, 0, sq->sqe_size); + sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base); + otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx); + offset = sizeof(*sqe_hdr); + + /* Add TSO segment's pkt header */ + hdr = sq->tso_hdrs->base + (sq->head * TSO_HEADER_SIZE); + tso_build_hdr(skb, hdr, &tso, seg_len, tcp_data == 0); + list.dma_addr[0] = + sq->tso_hdrs->iova + (sq->head * TSO_HEADER_SIZE); + list.size[0] = hdr_len; + list.num_segs = 1; + + /* Add TSO segment's payload data fragments */ + pkt_len = hdr_len; + while (seg_len > 0) { + int size; + + size = min_t(int, tso.size, seg_len); + + list.size[list.num_segs] = size; + list.dma_addr[list.num_segs] = + otx2_tso_frag_dma_addr(sq, skb, + tso.next_frag_idx - 1, + (u64)tso.data, hdr_len, + first_sqe); + list.num_segs++; + pkt_len += size; + seg_len -= size; + tso_build_data(skb, &tso, size); + } + sqe_hdr->total = pkt_len; + otx2_sqe_tso_add_sg(sq, &list, &offset); + + /* DMA mappings and skb needs to be freed only after last + * TSO segment is transmitted out. So set 'PNC' only for + * last segment. Also point last segment's sqe_id to first + * segment's SQE index where skb address and DMA mappings + * are saved. + */ + if (!tcp_data) { + sqe_hdr->pnc = 1; + sqe_hdr->sqe_id = first_sqe; + sq->sg[first_sqe].skb = (u64)skb; + } else { + sqe_hdr->pnc = 0; + } + + sqe_hdr->sizem1 = (offset / 16) - 1; + + /* Flush SQE to HW */ + otx2_sqe_flush(sq, offset); + } +} + +static bool is_hw_tso_supported(struct otx2_nic *pfvf, + struct sk_buff *skb) +{ + int payload_len, last_seg_size; + + if (!pfvf->hw.hw_tso) + return false; + + /* HW has an issue due to which when the payload of the last LSO + * segment is shorter than 16 bytes, some header fields may not + * be correctly modified, hence don't offload such TSO segments. + */ + if (!is_96xx_B0(pfvf->pdev)) + return true; + + payload_len = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb)); + last_seg_size = payload_len % skb_shinfo(skb)->gso_size; + if (last_seg_size && last_seg_size < 16) + return false; + + return true; +} + +static int otx2_get_sqe_count(struct otx2_nic *pfvf, struct sk_buff *skb) +{ + if (!skb_shinfo(skb)->gso_size) + return 1; + + /* HW TSO */ + if (is_hw_tso_supported(pfvf, skb)) + return 1; + + /* SW TSO */ + return skb_shinfo(skb)->gso_segs; +} + +static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb, + struct otx2_snd_queue *sq, int *offset) +{ + u64 iova; + + if (!skb_shinfo(skb)->gso_size && + skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + iova = sq->timestamps->iova + (sq->head * sizeof(u64)); + otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova); + } else { + skb_tx_timestamp(skb); + } +} + +bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq, + struct sk_buff *skb, u16 qidx) +{ + struct netdev_queue *txq = netdev_get_tx_queue(netdev, qidx); + struct otx2_nic *pfvf = netdev_priv(netdev); + int offset, num_segs, free_sqe; + struct nix_sqe_hdr_s *sqe_hdr; + + /* Check if there is room for new SQE. + * 'Num of SQBs freed to SQ's pool - SQ's Aura count' + * will give free SQE count. 
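+ * i.e. free SQBs = sq->num_sqbs - *sq->aura_fc_addr, and each SQB
+ * holds sq->sqe_per_sqb SQEs.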
+ */ + free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb; + + if (free_sqe < sq->sqe_thresh || + free_sqe < otx2_get_sqe_count(pfvf, skb)) + return false; + + num_segs = skb_shinfo(skb)->nr_frags + 1; + + /* If SKB doesn't fit in a single SQE, linearize it. + * TODO: Consider adding JUMP descriptor instead. + */ + if (unlikely(num_segs > OTX2_MAX_FRAGS_IN_SQE)) { + if (__skb_linearize(skb)) { + dev_kfree_skb_any(skb); + return true; + } + num_segs = skb_shinfo(skb)->nr_frags + 1; + } + + if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) { + otx2_sq_append_tso(pfvf, sq, skb, qidx); + return true; + } + + /* Set SQE's SEND_HDR. + * Do not clear the first 64bit as it contains constant info. + */ + memset(sq->sqe_base + 8, 0, sq->sqe_size - 8); + sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base); + otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx); + offset = sizeof(*sqe_hdr); + + /* Add extended header if needed */ + otx2_sqe_add_ext(pfvf, sq, skb, &offset); + + /* Add SG subdesc with data frags */ + if (!otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset)) { + otx2_dma_unmap_skb_frags(pfvf, &sq->sg[sq->head]); + return false; + } + + otx2_set_txtstamp(pfvf, skb, sq, &offset); + + sqe_hdr->sizem1 = (offset / 16) - 1; + + netdev_tx_sent_queue(txq, skb->len); + + /* Flush SQE to HW */ + otx2_sqe_flush(sq, offset); + + return true; +} +EXPORT_SYMBOL(otx2_sq_append_skb); + +void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq) +{ + struct nix_cqe_rx_s *cqe; + int processed_cqe = 0; + u64 iova, pa; + + while ((cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq))) { + if (!cqe->sg.subdc) + continue; + processed_cqe++; + if (cqe->sg.segs > 1) { + otx2_free_rcv_seg(pfvf, cqe, cq->cq_idx); + continue; + } + iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM; + pa = otx2_iova_to_phys(pfvf->iommu_domain, iova); + otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, DMA_FROM_DEVICE); + put_page(virt_to_page(phys_to_virt(pa))); + } + + /* Free CQEs to HW */ + otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR, + ((u64)cq->cq_idx << 32) | processed_cqe); +} + +void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq) +{ + struct sk_buff *skb = NULL; + struct otx2_snd_queue *sq; + struct nix_cqe_tx_s *cqe; + int processed_cqe = 0; + struct sg_list *sg; + + sq = &pfvf->qset.sq[cq->cint_idx]; + + while ((cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq))) { + sg = &sq->sg[cqe->comp.sqe_id]; + skb = (struct sk_buff *)sg->skb; + if (skb) { + otx2_dma_unmap_skb_frags(pfvf, sg); + dev_kfree_skb_any(skb); + sg->skb = (u64)NULL; + } + processed_cqe++; + } + + /* Free CQEs to HW */ + otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR, + ((u64)cq->cq_idx << 32) | processed_cqe); +} + +int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable) +{ + struct msg_req *msg; + int err; + + mutex_lock(&pfvf->mbox.lock); + if (enable) + msg = otx2_mbox_alloc_msg_nix_lf_start_rx(&pfvf->mbox); + else + msg = otx2_mbox_alloc_msg_nix_lf_stop_rx(&pfvf->mbox); + + if (!msg) { + mutex_unlock(&pfvf->mbox.lock); + return -ENOMEM; + } + + err = otx2_sync_mbox_msg(&pfvf->mbox); + mutex_unlock(&pfvf->mbox.lock); + return err; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h new file mode 100644 index 000000000..73af15685 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h @@ -0,0 +1,159 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell OcteonTx2 RVU Ethernet driver + * + * Copyright (C) 2020 Marvell International Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef OTX2_TXRX_H +#define OTX2_TXRX_H + +#include <linux/etherdevice.h> +#include <linux/iommu.h> +#include <linux/if_vlan.h> + +#define LBK_CHAN_BASE 0x000 +#define SDP_CHAN_BASE 0x700 +#define CGX_CHAN_BASE 0x800 + +#define OTX2_DATA_ALIGN(X) ALIGN(X, OTX2_ALIGN) +#define OTX2_HEAD_ROOM OTX2_ALIGN + +#define OTX2_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN) +#define OTX2_MIN_MTU 64 +#define OTX2_MAX_MTU (9212 - OTX2_ETH_HLEN) + +#define OTX2_MAX_GSO_SEGS 255 +#define OTX2_MAX_FRAGS_IN_SQE 9 + +/* Rx buffer size should be in multiples of 128bytes */ +#define RCV_FRAG_LEN1(x) \ + ((OTX2_HEAD_ROOM + OTX2_DATA_ALIGN(x)) + \ + OTX2_DATA_ALIGN(sizeof(struct skb_shared_info))) + +/* Prefer 2048 byte buffers for better last level cache + * utilization or data distribution across regions. + */ +#define RCV_FRAG_LEN(x) \ + ((RCV_FRAG_LEN1(x) < 2048) ? 2048 : RCV_FRAG_LEN1(x)) + +#define DMA_BUFFER_LEN(x) \ + ((x) - OTX2_HEAD_ROOM - \ + OTX2_DATA_ALIGN(sizeof(struct skb_shared_info))) + +/* IRQ triggered when NIX_LF_CINTX_CNT[ECOUNT] + * is equal to this value. + */ +#define CQ_CQE_THRESH_DEFAULT 10 + +/* IRQ triggered when NIX_LF_CINTX_CNT[ECOUNT] + * is nonzero and this much time elapses after that. + */ +#define CQ_TIMER_THRESH_DEFAULT 1 /* 1 usec */ +#define CQ_TIMER_THRESH_MAX 25 /* 25 usec */ + +/* Min number of CQs (of the ones mapped to this CINT) + * with valid CQEs. + */ +#define CQ_QCOUNT_DEFAULT 1 + +struct queue_stats { + u64 bytes; + u64 pkts; +}; + +struct otx2_rcv_queue { + struct queue_stats stats; +}; + +struct sg_list { + u16 num_segs; + u64 skb; + u64 size[OTX2_MAX_FRAGS_IN_SQE]; + u64 dma_addr[OTX2_MAX_FRAGS_IN_SQE]; +}; + +struct otx2_snd_queue { + u8 aura_id; + u16 head; + u16 sqe_size; + u32 sqe_cnt; + u16 num_sqbs; + u16 sqe_thresh; + u8 sqe_per_sqb; + u64 io_addr; + u64 *aura_fc_addr; + u64 *lmt_addr; + void *sqe_base; + struct qmem *sqe; + struct qmem *tso_hdrs; + struct sg_list *sg; + struct qmem *timestamps; + struct queue_stats stats; + u16 sqb_count; + u64 *sqb_ptrs; +} ____cacheline_aligned_in_smp; + +enum cq_type { + CQ_RX, + CQ_TX, + CQS_PER_CINT = 2, /* RQ + SQ */ +}; + +struct otx2_cq_poll { + void *dev; +#define CINT_INVALID_CQ 255 + u8 cint_idx; + u8 cq_ids[CQS_PER_CINT]; + struct napi_struct napi; +}; + +struct otx2_pool { + struct qmem *stack; + struct qmem *fc_addr; + u16 rbsize; +}; + +struct otx2_cq_queue { + u8 cq_idx; + u8 cq_type; + u8 cint_idx; /* CQ interrupt id */ + u8 refill_task_sched; + u16 cqe_size; + u16 pool_ptrs; + u32 cqe_cnt; + u32 cq_head; + void *cqe_base; + struct qmem *cqe; + struct otx2_pool *rbpool; +} ____cacheline_aligned_in_smp; + +struct otx2_qset { + u32 rqe_cnt; + u32 sqe_cnt; /* Keep these two at top */ +#define OTX2_MAX_CQ_CNT 64 + u16 cq_cnt; + u16 xqe_size; + struct otx2_pool *pool; + struct otx2_cq_poll *napi; + struct otx2_cq_queue *cq; + struct otx2_snd_queue *sq; + struct otx2_rcv_queue *rq; +}; + +/* Translate IOVA to physical address */ +static inline u64 otx2_iova_to_phys(void *iommu_domain, dma_addr_t dma_addr) +{ + /* Translation is installed only when IOMMU is present */ + if (likely(iommu_domain)) + return iommu_iova_to_phys(iommu_domain, dma_addr); + return dma_addr; +} + +int otx2_napi_handler(struct napi_struct *napi, int budget); +bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq, + struct 
sk_buff *skb, u16 qidx); +#endif /* OTX2_TXRX_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c new file mode 100644 index 000000000..5310b7179 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -0,0 +1,657 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Virtual Function ethernet driver */ + +#include <linux/etherdevice.h> +#include <linux/module.h> +#include <linux/pci.h> + +#include "otx2_common.h" +#include "otx2_reg.h" + +#define DRV_NAME "octeontx2-nicvf" +#define DRV_STRING "Marvell OcteonTX2 NIC Virtual Function Driver" + +static const struct pci_device_id otx2_vf_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AFVF) }, + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_VF) }, + { } +}; + +MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>"); +MODULE_DESCRIPTION(DRV_STRING); +MODULE_LICENSE("GPL v2"); +MODULE_DEVICE_TABLE(pci, otx2_vf_id_table); + +/* RVU VF Interrupt Vector Enumeration */ +enum { + RVU_VF_INT_VEC_MBOX = 0x0, +}; + +static void otx2vf_process_vfaf_mbox_msg(struct otx2_nic *vf, + struct mbox_msghdr *msg) +{ + if (msg->id >= MBOX_MSG_MAX) { + dev_err(vf->dev, + "Mbox msg with unknown ID %d\n", msg->id); + return; + } + + if (msg->sig != OTX2_MBOX_RSP_SIG) { + dev_err(vf->dev, + "Mbox msg with wrong signature %x, ID %d\n", + msg->sig, msg->id); + return; + } + + if (msg->rc == MBOX_MSG_INVALID) { + dev_err(vf->dev, + "PF/AF says the sent msg(s) %d were invalid\n", + msg->id); + return; + } + + switch (msg->id) { + case MBOX_MSG_READY: + vf->pcifunc = msg->pcifunc; + break; + case MBOX_MSG_MSIX_OFFSET: + mbox_handler_msix_offset(vf, (struct msix_offset_rsp *)msg); + break; + case MBOX_MSG_NPA_LF_ALLOC: + mbox_handler_npa_lf_alloc(vf, (struct npa_lf_alloc_rsp *)msg); + break; + case MBOX_MSG_NIX_LF_ALLOC: + mbox_handler_nix_lf_alloc(vf, (struct nix_lf_alloc_rsp *)msg); + break; + case MBOX_MSG_NIX_TXSCH_ALLOC: + mbox_handler_nix_txsch_alloc(vf, + (struct nix_txsch_alloc_rsp *)msg); + break; + case MBOX_MSG_NIX_BP_ENABLE: + mbox_handler_nix_bp_enable(vf, (struct nix_bp_cfg_rsp *)msg); + break; + default: + if (msg->rc) + dev_err(vf->dev, + "Mbox msg response has err %d, ID %d\n", + msg->rc, msg->id); + } +} + +static void otx2vf_vfaf_mbox_handler(struct work_struct *work) +{ + struct otx2_mbox_dev *mdev; + struct mbox_hdr *rsp_hdr; + struct mbox_msghdr *msg; + struct otx2_mbox *mbox; + struct mbox *af_mbox; + int offset, id; + + af_mbox = container_of(work, struct mbox, mbox_wrk); + mbox = &af_mbox->mbox; + mdev = &mbox->dev[0]; + rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); + if (af_mbox->num_msgs == 0) + return; + offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); + + for (id = 0; id < af_mbox->num_msgs; id++) { + msg = (struct mbox_msghdr *)(mdev->mbase + offset); + otx2vf_process_vfaf_mbox_msg(af_mbox->pfvf, msg); + offset = mbox->rx_start + msg->next_msgoff; + if (mdev->msgs_acked == (af_mbox->num_msgs - 1)) + __otx2_mbox_reset(mbox, 0); + mdev->msgs_acked++; + } +} + +static int otx2vf_process_mbox_msg_up(struct otx2_nic *vf, + struct mbox_msghdr *req) +{ + struct msg_rsp *rsp; + int err; + + /* Check if valid, if not reply with a invalid msg */ + if (req->sig != OTX2_MBOX_REQ_SIG) { + otx2_reply_invalid_msg(&vf->mbox.mbox_up, 0, 0, req->id); + return -ENODEV; + } + + switch (req->id) { + case MBOX_MSG_CGX_LINK_EVENT: + rsp = (struct msg_rsp *)otx2_mbox_alloc_msg( + 
&vf->mbox.mbox_up, 0, + sizeof(struct msg_rsp)); + if (!rsp) + return -ENOMEM; + + rsp->hdr.id = MBOX_MSG_CGX_LINK_EVENT; + rsp->hdr.sig = OTX2_MBOX_RSP_SIG; + rsp->hdr.pcifunc = 0; + rsp->hdr.rc = 0; + err = otx2_mbox_up_handler_cgx_link_event( + vf, (struct cgx_link_info_msg *)req, rsp); + return err; + default: + otx2_reply_invalid_msg(&vf->mbox.mbox_up, 0, 0, req->id); + return -ENODEV; + } + return 0; +} + +static void otx2vf_vfaf_mbox_up_handler(struct work_struct *work) +{ + struct otx2_mbox_dev *mdev; + struct mbox_hdr *rsp_hdr; + struct mbox_msghdr *msg; + struct otx2_mbox *mbox; + struct mbox *vf_mbox; + struct otx2_nic *vf; + int offset, id; + + vf_mbox = container_of(work, struct mbox, mbox_up_wrk); + vf = vf_mbox->pfvf; + mbox = &vf_mbox->mbox_up; + mdev = &mbox->dev[0]; + + rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); + if (vf_mbox->up_num_msgs == 0) + return; + + offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); + + for (id = 0; id < vf_mbox->up_num_msgs; id++) { + msg = (struct mbox_msghdr *)(mdev->mbase + offset); + otx2vf_process_mbox_msg_up(vf, msg); + offset = mbox->rx_start + msg->next_msgoff; + } + + otx2_mbox_msg_send(mbox, 0); +} + +static irqreturn_t otx2vf_vfaf_mbox_intr_handler(int irq, void *vf_irq) +{ + struct otx2_nic *vf = (struct otx2_nic *)vf_irq; + struct otx2_mbox_dev *mdev; + struct otx2_mbox *mbox; + struct mbox_hdr *hdr; + + /* Clear the IRQ */ + otx2_write64(vf, RVU_VF_INT, BIT_ULL(0)); + + /* Read latest mbox data */ + smp_rmb(); + + /* Check for PF => VF response messages */ + mbox = &vf->mbox.mbox; + mdev = &mbox->dev[0]; + otx2_sync_mbox_bbuf(mbox, 0); + + trace_otx2_msg_interrupt(mbox->pdev, "PF to VF", BIT_ULL(0)); + + hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); + if (hdr->num_msgs) { + vf->mbox.num_msgs = hdr->num_msgs; + hdr->num_msgs = 0; + memset(mbox->hwbase + mbox->rx_start, 0, + ALIGN(sizeof(struct mbox_hdr), sizeof(u64))); + queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk); + } + /* Check for PF => VF notification messages */ + mbox = &vf->mbox.mbox_up; + mdev = &mbox->dev[0]; + otx2_sync_mbox_bbuf(mbox, 0); + + hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); + if (hdr->num_msgs) { + vf->mbox.up_num_msgs = hdr->num_msgs; + hdr->num_msgs = 0; + memset(mbox->hwbase + mbox->rx_start, 0, + ALIGN(sizeof(struct mbox_hdr), sizeof(u64))); + queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk); + } + + return IRQ_HANDLED; +} + +static void otx2vf_disable_mbox_intr(struct otx2_nic *vf) +{ + int vector = pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX); + + /* Disable VF => PF mailbox IRQ */ + otx2_write64(vf, RVU_VF_INT_ENA_W1C, BIT_ULL(0)); + free_irq(vector, vf); +} + +static int otx2vf_register_mbox_intr(struct otx2_nic *vf, bool probe_pf) +{ + struct otx2_hw *hw = &vf->hw; + struct msg_req *req; + char *irq_name; + int err; + + /* Register mailbox interrupt handler */ + irq_name = &hw->irq_name[RVU_VF_INT_VEC_MBOX * NAME_SIZE]; + snprintf(irq_name, NAME_SIZE, "RVUVFAF Mbox"); + err = request_irq(pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX), + otx2vf_vfaf_mbox_intr_handler, 0, irq_name, vf); + if (err) { + dev_err(vf->dev, + "RVUPF: IRQ registration failed for VFAF mbox irq\n"); + return err; + } + + /* Enable mailbox interrupt for msgs coming from PF. + * First clear to avoid spurious interrupts, if any. 
+ */ + otx2_write64(vf, RVU_VF_INT, BIT_ULL(0)); + otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0)); + + if (!probe_pf) + return 0; + + /* Check mailbox communication with PF */ + req = otx2_mbox_alloc_msg_ready(&vf->mbox); + if (!req) { + otx2vf_disable_mbox_intr(vf); + return -ENOMEM; + } + + err = otx2_sync_mbox_msg(&vf->mbox); + if (err) { + dev_warn(vf->dev, + "AF not responding to mailbox, deferring probe\n"); + otx2vf_disable_mbox_intr(vf); + return -EPROBE_DEFER; + } + return 0; +} + +static void otx2vf_vfaf_mbox_destroy(struct otx2_nic *vf) +{ + struct mbox *mbox = &vf->mbox; + + if (vf->mbox_wq) { + flush_workqueue(vf->mbox_wq); + destroy_workqueue(vf->mbox_wq); + vf->mbox_wq = NULL; + } + + if (mbox->mbox.hwbase) + iounmap((void __iomem *)mbox->mbox.hwbase); + + otx2_mbox_destroy(&mbox->mbox); + otx2_mbox_destroy(&mbox->mbox_up); +} + +static int otx2vf_vfaf_mbox_init(struct otx2_nic *vf) +{ + struct mbox *mbox = &vf->mbox; + void __iomem *hwbase; + int err; + + mbox->pfvf = vf; + vf->mbox_wq = alloc_workqueue("otx2_vfaf_mailbox", + WQ_UNBOUND | WQ_HIGHPRI | + WQ_MEM_RECLAIM, 1); + if (!vf->mbox_wq) + return -ENOMEM; + + /* Mailbox is a reserved memory (in RAM) region shared between + * admin function (i.e PF0) and this VF, shouldn't be mapped as + * device memory to allow unaligned accesses. + */ + hwbase = ioremap_wc(pci_resource_start(vf->pdev, PCI_MBOX_BAR_NUM), + pci_resource_len(vf->pdev, PCI_MBOX_BAR_NUM)); + if (!hwbase) { + dev_err(vf->dev, "Unable to map VFAF mailbox region\n"); + err = -ENOMEM; + goto exit; + } + + err = otx2_mbox_init(&mbox->mbox, hwbase, vf->pdev, vf->reg_base, + MBOX_DIR_VFPF, 1); + if (err) + goto exit; + + err = otx2_mbox_init(&mbox->mbox_up, hwbase, vf->pdev, vf->reg_base, + MBOX_DIR_VFPF_UP, 1); + if (err) + goto exit; + + err = otx2_mbox_bbuf_init(mbox, vf->pdev); + if (err) + goto exit; + + INIT_WORK(&mbox->mbox_wrk, otx2vf_vfaf_mbox_handler); + INIT_WORK(&mbox->mbox_up_wrk, otx2vf_vfaf_mbox_up_handler); + mutex_init(&mbox->lock); + + return 0; +exit: + destroy_workqueue(vf->mbox_wq); + return err; +} + +static int otx2vf_open(struct net_device *netdev) +{ + struct otx2_nic *vf; + int err; + + err = otx2_open(netdev); + if (err) + return err; + + /* LBKs do not receive link events so tell everyone we are up here */ + vf = netdev_priv(netdev); + if (is_otx2_lbkvf(vf->pdev)) { + pr_info("%s NIC Link is UP\n", netdev->name); + netif_carrier_on(netdev); + netif_tx_start_all_queues(netdev); + } + + return 0; +} + +static int otx2vf_stop(struct net_device *netdev) +{ + return otx2_stop(netdev); +} + +static netdev_tx_t otx2vf_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct otx2_nic *vf = netdev_priv(netdev); + int qidx = skb_get_queue_mapping(skb); + struct otx2_snd_queue *sq; + struct netdev_queue *txq; + + sq = &vf->qset.sq[qidx]; + txq = netdev_get_tx_queue(netdev, qidx); + + if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) { + netif_tx_stop_queue(txq); + + /* Check again, incase SQBs got freed up */ + smp_mb(); + if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb) + > sq->sqe_thresh) + netif_tx_wake_queue(txq); + + return NETDEV_TX_BUSY; + } + + return NETDEV_TX_OK; +} + +static int otx2vf_change_mtu(struct net_device *netdev, int new_mtu) +{ + bool if_up = netif_running(netdev); + int err = 0; + + if (if_up) + otx2vf_stop(netdev); + + netdev_info(netdev, "Changing MTU from %d to %d\n", + netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + if (if_up) + err = otx2vf_open(netdev); + + return err; +} + +static void 
otx2vf_reset_task(struct work_struct *work) +{ + struct otx2_nic *vf = container_of(work, struct otx2_nic, reset_task); + + rtnl_lock(); + + if (netif_running(vf->netdev)) { + otx2vf_stop(vf->netdev); + vf->reset_count++; + otx2vf_open(vf->netdev); + } + + rtnl_unlock(); +} + +static const struct net_device_ops otx2vf_netdev_ops = { + .ndo_open = otx2vf_open, + .ndo_stop = otx2vf_stop, + .ndo_start_xmit = otx2vf_xmit, + .ndo_set_mac_address = otx2_set_mac_address, + .ndo_change_mtu = otx2vf_change_mtu, + .ndo_get_stats64 = otx2_get_stats64, + .ndo_tx_timeout = otx2_tx_timeout, +}; + +static int otx2vf_realloc_msix_vectors(struct otx2_nic *vf) +{ + struct otx2_hw *hw = &vf->hw; + int num_vec, err; + + num_vec = hw->nix_msixoff; + num_vec += NIX_LF_CINT_VEC_START + hw->max_queues; + + otx2vf_disable_mbox_intr(vf); + pci_free_irq_vectors(hw->pdev); + err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX); + if (err < 0) { + dev_err(vf->dev, "%s: Failed to realloc %d IRQ vectors\n", + __func__, num_vec); + return err; + } + + return otx2vf_register_mbox_intr(vf, false); +} + +static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + int num_vec = pci_msix_vec_count(pdev); + struct device *dev = &pdev->dev; + struct net_device *netdev; + struct otx2_nic *vf; + struct otx2_hw *hw; + int err, qcount; + + err = pcim_enable_device(pdev); + if (err) { + dev_err(dev, "Failed to enable PCI device\n"); + return err; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + dev_err(dev, "PCI request regions failed 0x%x\n", err); + return err; + } + + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); + if (err) { + dev_err(dev, "DMA mask config failed, abort\n"); + goto err_release_regions; + } + + pci_set_master(pdev); + + qcount = num_online_cpus(); + netdev = alloc_etherdev_mqs(sizeof(*vf), qcount, qcount); + if (!netdev) { + err = -ENOMEM; + goto err_release_regions; + } + + pci_set_drvdata(pdev, netdev); + SET_NETDEV_DEV(netdev, &pdev->dev); + vf = netdev_priv(netdev); + vf->netdev = netdev; + vf->pdev = pdev; + vf->dev = dev; + vf->iommu_domain = iommu_get_domain_for_dev(dev); + + vf->flags |= OTX2_FLAG_INTF_DOWN; + hw = &vf->hw; + hw->pdev = vf->pdev; + hw->rx_queues = qcount; + hw->tx_queues = qcount; + hw->max_queues = qcount; + + hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE, + GFP_KERNEL); + if (!hw->irq_name) { + err = -ENOMEM; + goto err_free_netdev; + } + + hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec, + sizeof(cpumask_var_t), GFP_KERNEL); + if (!hw->affinity_mask) { + err = -ENOMEM; + goto err_free_netdev; + } + + err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX); + if (err < 0) { + dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n", + __func__, num_vec); + goto err_free_netdev; + } + + vf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); + if (!vf->reg_base) { + dev_err(dev, "Unable to map physical function CSRs, aborting\n"); + err = -ENOMEM; + goto err_free_irq_vectors; + } + + /* Init VF <=> PF mailbox stuff */ + err = otx2vf_vfaf_mbox_init(vf); + if (err) + goto err_free_irq_vectors; + + /* Register mailbox interrupt */ + err = otx2vf_register_mbox_intr(vf, true); + if (err) + goto err_mbox_destroy; + + /* Request AF to attach NPA and LIX LFs to this AF */ + err = otx2_attach_npa_nix(vf); + if (err) + goto err_disable_mbox_intr; + + err = otx2vf_realloc_msix_vectors(vf); + if (err) + goto err_detach_rsrc; + + err = otx2_set_real_num_queues(netdev, qcount, qcount); + if 
(err) + goto err_detach_rsrc; + + otx2_setup_dev_hw_settings(vf); + + /* Assign default mac address */ + otx2_get_mac_from_af(netdev); + + netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | NETIF_F_RXHASH | + NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_GSO_UDP_L4; + netdev->features = netdev->hw_features; + + netdev->gso_max_segs = OTX2_MAX_GSO_SEGS; + netdev->watchdog_timeo = OTX2_TX_TIMEOUT; + + netdev->netdev_ops = &otx2vf_netdev_ops; + + /* MTU range: 68 - 9190 */ + netdev->min_mtu = OTX2_MIN_MTU; + netdev->max_mtu = OTX2_MAX_MTU; + + INIT_WORK(&vf->reset_task, otx2vf_reset_task); + + /* To distinguish, for LBK VFs set netdev name explicitly */ + if (is_otx2_lbkvf(vf->pdev)) { + int n; + + n = (vf->pcifunc >> RVU_PFVF_FUNC_SHIFT) & RVU_PFVF_FUNC_MASK; + /* Need to subtract 1 to get proper VF number */ + n -= 1; + snprintf(netdev->name, sizeof(netdev->name), "lbk%d", n); + } + + err = register_netdev(netdev); + if (err) { + dev_err(dev, "Failed to register netdevice\n"); + goto err_detach_rsrc; + } + + otx2vf_set_ethtool_ops(netdev); + + /* Enable pause frames by default */ + vf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED; + vf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED; + + return 0; + +err_detach_rsrc: + otx2_detach_resources(&vf->mbox); +err_disable_mbox_intr: + otx2vf_disable_mbox_intr(vf); +err_mbox_destroy: + otx2vf_vfaf_mbox_destroy(vf); +err_free_irq_vectors: + pci_free_irq_vectors(hw->pdev); +err_free_netdev: + pci_set_drvdata(pdev, NULL); + free_netdev(netdev); +err_release_regions: + pci_release_regions(pdev); + return err; +} + +static void otx2vf_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct otx2_nic *vf; + + if (!netdev) + return; + + vf = netdev_priv(netdev); + + cancel_work_sync(&vf->reset_task); + unregister_netdev(netdev); + otx2vf_disable_mbox_intr(vf); + + otx2_detach_resources(&vf->mbox); + otx2vf_vfaf_mbox_destroy(vf); + pci_free_irq_vectors(vf->pdev); + pci_set_drvdata(pdev, NULL); + free_netdev(netdev); + + pci_release_regions(pdev); +} + +static struct pci_driver otx2vf_driver = { + .name = DRV_NAME, + .id_table = otx2_vf_id_table, + .probe = otx2vf_probe, + .remove = otx2vf_remove, + .shutdown = otx2vf_remove, +}; + +static int __init otx2vf_init_module(void) +{ + pr_info("%s: %s\n", DRV_NAME, DRV_STRING); + + return pci_register_driver(&otx2vf_driver); +} + +static void __exit otx2vf_cleanup_module(void) +{ + pci_unregister_driver(&otx2vf_driver); +} + +module_init(otx2vf_init_module); +module_exit(otx2vf_cleanup_module); diff --git a/drivers/net/ethernet/marvell/prestera/Kconfig b/drivers/net/ethernet/marvell/prestera/Kconfig new file mode 100644 index 000000000..b6f20e203 --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/Kconfig @@ -0,0 +1,26 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Marvell Prestera drivers configuration +# + +config PRESTERA + tristate "Marvell Prestera Switch ASICs support" + depends on NET_SWITCHDEV && VLAN_8021Q + depends on BRIDGE || BRIDGE=n + select NET_DEVLINK + help + This driver supports Marvell Prestera Switch ASICs family. + + To compile this driver as a module, choose M here: the + module will be called prestera. + +config PRESTERA_PCI + tristate "PCI interface driver for Marvell Prestera Switch ASICs family" + depends on PCI && HAS_IOMEM && PRESTERA + default PRESTERA + help + This is implementation of PCI interface support for Marvell Prestera + Switch ASICs family. 
+ + To compile this driver as a module, choose M here: the + module will be called prestera_pci. diff --git a/drivers/net/ethernet/marvell/prestera/Makefile b/drivers/net/ethernet/marvell/prestera/Makefile new file mode 100644 index 000000000..93129e32e --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_PRESTERA) += prestera.o +prestera-objs := prestera_main.o prestera_hw.o prestera_dsa.o \ + prestera_rxtx.o prestera_devlink.o prestera_ethtool.o \ + prestera_switchdev.o + +obj-$(CONFIG_PRESTERA_PCI) += prestera_pci.o diff --git a/drivers/net/ethernet/marvell/prestera/prestera.h b/drivers/net/ethernet/marvell/prestera/prestera.h new file mode 100644 index 000000000..55aa4bf8a --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera.h @@ -0,0 +1,206 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */ + +#ifndef _PRESTERA_H_ +#define _PRESTERA_H_ + +#include <linux/notifier.h> +#include <linux/skbuff.h> +#include <linux/workqueue.h> +#include <net/devlink.h> +#include <uapi/linux/if_ether.h> + +#define PRESTERA_DRV_NAME "prestera" + +#define PRESTERA_DEFAULT_VID 1 + +struct prestera_fw_rev { + u16 maj; + u16 min; + u16 sub; +}; + +struct prestera_port_stats { + u64 good_octets_received; + u64 bad_octets_received; + u64 mac_trans_error; + u64 broadcast_frames_received; + u64 multicast_frames_received; + u64 frames_64_octets; + u64 frames_65_to_127_octets; + u64 frames_128_to_255_octets; + u64 frames_256_to_511_octets; + u64 frames_512_to_1023_octets; + u64 frames_1024_to_max_octets; + u64 excessive_collision; + u64 multicast_frames_sent; + u64 broadcast_frames_sent; + u64 fc_sent; + u64 fc_received; + u64 buffer_overrun; + u64 undersize; + u64 fragments; + u64 oversize; + u64 jabber; + u64 rx_error_frame_received; + u64 bad_crc; + u64 collisions; + u64 late_collision; + u64 unicast_frames_received; + u64 unicast_frames_sent; + u64 sent_multiple; + u64 sent_deferred; + u64 good_octets_sent; +}; + +struct prestera_port_caps { + u64 supp_link_modes; + u8 supp_fec; + u8 type; + u8 transceiver; +}; + +struct prestera_port { + struct net_device *dev; + struct prestera_switch *sw; + struct devlink_port dl_port; + u32 id; + u32 hw_id; + u32 dev_id; + u16 fp_id; + u16 pvid; + bool autoneg; + u64 adver_link_modes; + u8 adver_fec; + struct prestera_port_caps caps; + struct list_head list; + struct list_head vlans_list; + struct { + struct prestera_port_stats stats; + struct delayed_work caching_dw; + } cached_hw_stats; +}; + +struct prestera_device { + struct device *dev; + u8 __iomem *ctl_regs; + u8 __iomem *pp_regs; + struct prestera_fw_rev fw_rev; + void *priv; + + /* called by device driver to handle received packets */ + void (*recv_pkt)(struct prestera_device *dev); + + /* called by device driver to pass event up to the higher layer */ + int (*recv_msg)(struct prestera_device *dev, void *msg, size_t size); + + /* called by higher layer to send request to the firmware */ + int (*send_req)(struct prestera_device *dev, void *in_msg, + size_t in_size, void *out_msg, size_t out_size, + unsigned int wait); +}; + +enum prestera_event_type { + PRESTERA_EVENT_TYPE_UNSPEC, + + PRESTERA_EVENT_TYPE_PORT, + PRESTERA_EVENT_TYPE_FDB, + PRESTERA_EVENT_TYPE_RXTX, + + PRESTERA_EVENT_TYPE_MAX +}; + +enum prestera_rxtx_event_id { + PRESTERA_RXTX_EVENT_UNSPEC, + PRESTERA_RXTX_EVENT_RCV_PKT, +}; + +enum prestera_port_event_id { + 
PRESTERA_PORT_EVENT_UNSPEC, + PRESTERA_PORT_EVENT_STATE_CHANGED, +}; + +struct prestera_port_event { + u32 port_id; + union { + u32 oper_state; + } data; +}; + +enum prestera_fdb_event_id { + PRESTERA_FDB_EVENT_UNSPEC, + PRESTERA_FDB_EVENT_LEARNED, + PRESTERA_FDB_EVENT_AGED, +}; + +struct prestera_fdb_event { + u32 port_id; + u32 vid; + union { + u8 mac[ETH_ALEN]; + } data; +}; + +struct prestera_event { + u16 id; + union { + struct prestera_port_event port_evt; + struct prestera_fdb_event fdb_evt; + }; +}; + +struct prestera_switchdev; +struct prestera_rxtx; + +struct prestera_switch { + struct prestera_device *dev; + struct prestera_switchdev *swdev; + struct prestera_rxtx *rxtx; + struct list_head event_handlers; + struct notifier_block netdev_nb; + char base_mac[ETH_ALEN]; + struct list_head port_list; + rwlock_t port_list_lock; + u32 port_count; + u32 mtu_min; + u32 mtu_max; + u8 id; +}; + +struct prestera_rxtx_params { + bool use_sdma; + u32 map_addr; +}; + +#define prestera_dev(sw) ((sw)->dev->dev) + +static inline void prestera_write(const struct prestera_switch *sw, + unsigned int reg, u32 val) +{ + writel(val, sw->dev->pp_regs + reg); +} + +static inline u32 prestera_read(const struct prestera_switch *sw, + unsigned int reg) +{ + return readl(sw->dev->pp_regs + reg); +} + +int prestera_device_register(struct prestera_device *dev); +void prestera_device_unregister(struct prestera_device *dev); + +struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw, + u32 dev_id, u32 hw_id); + +int prestera_port_autoneg_set(struct prestera_port *port, bool enable, + u64 adver_link_modes, u8 adver_fec); + +struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id); + +struct prestera_port *prestera_port_dev_lower_find(struct net_device *dev); + +int prestera_port_pvid_set(struct prestera_port *port, u16 vid); + +bool prestera_netdev_check(const struct net_device *dev); + +#endif /* _PRESTERA_H_ */ diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c new file mode 100644 index 000000000..94c185a0e --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2019-2020 Marvell International Ltd. 
All rights reserved */ + +#include <net/devlink.h> + +#include "prestera_devlink.h" + +static int prestera_dl_info_get(struct devlink *dl, + struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ + struct prestera_switch *sw = devlink_priv(dl); + char buf[16]; + int err; + + err = devlink_info_driver_name_put(req, PRESTERA_DRV_NAME); + if (err) + return err; + + snprintf(buf, sizeof(buf), "%d.%d.%d", + sw->dev->fw_rev.maj, + sw->dev->fw_rev.min, + sw->dev->fw_rev.sub); + + return devlink_info_version_running_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW, + buf); +} + +static const struct devlink_ops prestera_dl_ops = { + .info_get = prestera_dl_info_get, +}; + +struct prestera_switch *prestera_devlink_alloc(void) +{ + struct devlink *dl; + + dl = devlink_alloc(&prestera_dl_ops, sizeof(struct prestera_switch)); + + return devlink_priv(dl); +} + +void prestera_devlink_free(struct prestera_switch *sw) +{ + struct devlink *dl = priv_to_devlink(sw); + + devlink_free(dl); +} + +int prestera_devlink_register(struct prestera_switch *sw) +{ + struct devlink *dl = priv_to_devlink(sw); + int err; + + err = devlink_register(dl, sw->dev->dev); + if (err) + dev_err(prestera_dev(sw), "devlink_register failed: %d\n", err); + + return err; +} + +void prestera_devlink_unregister(struct prestera_switch *sw) +{ + struct devlink *dl = priv_to_devlink(sw); + + devlink_unregister(dl); +} + +int prestera_devlink_port_register(struct prestera_port *port) +{ + struct prestera_switch *sw = port->sw; + struct devlink *dl = priv_to_devlink(sw); + struct devlink_port_attrs attrs = {}; + int err; + + attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; + attrs.phys.port_number = port->fp_id; + attrs.switch_id.id_len = sizeof(sw->id); + memcpy(attrs.switch_id.id, &sw->id, attrs.switch_id.id_len); + + devlink_port_attrs_set(&port->dl_port, &attrs); + + err = devlink_port_register(dl, &port->dl_port, port->fp_id); + if (err) { + dev_err(prestera_dev(sw), "devlink_port_register failed: %d\n", err); + return err; + } + + return 0; +} + +void prestera_devlink_port_unregister(struct prestera_port *port) +{ + devlink_port_unregister(&port->dl_port); +} + +void prestera_devlink_port_set(struct prestera_port *port) +{ + devlink_port_type_eth_set(&port->dl_port, port->dev); +} + +void prestera_devlink_port_clear(struct prestera_port *port) +{ + devlink_port_type_clear(&port->dl_port); +} + +struct devlink_port *prestera_devlink_get_port(struct net_device *dev) +{ + struct prestera_port *port = netdev_priv(dev); + + return &port->dl_port; +} diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.h b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h new file mode 100644 index 000000000..51bee9f75 --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. 
*/ + +#ifndef _PRESTERA_DEVLINK_H_ +#define _PRESTERA_DEVLINK_H_ + +#include "prestera.h" + +struct prestera_switch *prestera_devlink_alloc(void); +void prestera_devlink_free(struct prestera_switch *sw); + +int prestera_devlink_register(struct prestera_switch *sw); +void prestera_devlink_unregister(struct prestera_switch *sw); + +int prestera_devlink_port_register(struct prestera_port *port); +void prestera_devlink_port_unregister(struct prestera_port *port); + +void prestera_devlink_port_set(struct prestera_port *port); +void prestera_devlink_port_clear(struct prestera_port *port); + +struct devlink_port *prestera_devlink_get_port(struct net_device *dev); + +#endif /* _PRESTERA_DEVLINK_H_ */ diff --git a/drivers/net/ethernet/marvell/prestera/prestera_dsa.c b/drivers/net/ethernet/marvell/prestera/prestera_dsa.c new file mode 100644 index 000000000..a5e01c7a3 --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_dsa.c @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2020 Marvell International Ltd. All rights reserved */ + +#include <linux/bitfield.h> +#include <linux/bitops.h> +#include <linux/errno.h> +#include <linux/string.h> + +#include "prestera_dsa.h" + +#define PRESTERA_DSA_W0_CMD GENMASK(31, 30) +#define PRESTERA_DSA_W0_IS_TAGGED BIT(29) +#define PRESTERA_DSA_W0_DEV_NUM GENMASK(28, 24) +#define PRESTERA_DSA_W0_PORT_NUM GENMASK(23, 19) +#define PRESTERA_DSA_W0_VPT GENMASK(15, 13) +#define PRESTERA_DSA_W0_EXT_BIT BIT(12) +#define PRESTERA_DSA_W0_VID GENMASK(11, 0) + +#define PRESTERA_DSA_W1_EXT_BIT BIT(31) +#define PRESTERA_DSA_W1_CFI_BIT BIT(30) +#define PRESTERA_DSA_W1_PORT_NUM GENMASK(11, 10) + +#define PRESTERA_DSA_W2_EXT_BIT BIT(31) +#define PRESTERA_DSA_W2_PORT_NUM BIT(20) + +#define PRESTERA_DSA_W3_VID GENMASK(30, 27) +#define PRESTERA_DSA_W3_DST_EPORT GENMASK(23, 7) +#define PRESTERA_DSA_W3_DEV_NUM GENMASK(6, 0) + +#define PRESTERA_DSA_VID GENMASK(15, 12) +#define PRESTERA_DSA_DEV_NUM GENMASK(11, 5) + +int prestera_dsa_parse(struct prestera_dsa *dsa, const u8 *dsa_buf) +{ + __be32 *dsa_words = (__be32 *)dsa_buf; + enum prestera_dsa_cmd cmd; + u32 words[4]; + u32 field; + + words[0] = ntohl(dsa_words[0]); + words[1] = ntohl(dsa_words[1]); + words[2] = ntohl(dsa_words[2]); + words[3] = ntohl(dsa_words[3]); + + /* set the common parameters */ + cmd = (enum prestera_dsa_cmd)FIELD_GET(PRESTERA_DSA_W0_CMD, words[0]); + + /* only to CPU is supported */ + if (unlikely(cmd != PRESTERA_DSA_CMD_TO_CPU)) + return -EINVAL; + + if (FIELD_GET(PRESTERA_DSA_W0_EXT_BIT, words[0]) == 0) + return -EINVAL; + if (FIELD_GET(PRESTERA_DSA_W1_EXT_BIT, words[1]) == 0) + return -EINVAL; + if (FIELD_GET(PRESTERA_DSA_W2_EXT_BIT, words[2]) == 0) + return -EINVAL; + + field = FIELD_GET(PRESTERA_DSA_W3_VID, words[3]); + + dsa->vlan.is_tagged = FIELD_GET(PRESTERA_DSA_W0_IS_TAGGED, words[0]); + dsa->vlan.cfi_bit = FIELD_GET(PRESTERA_DSA_W1_CFI_BIT, words[1]); + dsa->vlan.vpt = FIELD_GET(PRESTERA_DSA_W0_VPT, words[0]); + dsa->vlan.vid = FIELD_GET(PRESTERA_DSA_W0_VID, words[0]); + dsa->vlan.vid &= ~PRESTERA_DSA_VID; + dsa->vlan.vid |= FIELD_PREP(PRESTERA_DSA_VID, field); + + field = FIELD_GET(PRESTERA_DSA_W3_DEV_NUM, words[3]); + + dsa->hw_dev_num = FIELD_GET(PRESTERA_DSA_W0_DEV_NUM, words[0]); + dsa->hw_dev_num |= FIELD_PREP(PRESTERA_DSA_DEV_NUM, field); + + dsa->port_num = (FIELD_GET(PRESTERA_DSA_W0_PORT_NUM, words[0]) << 0) | + (FIELD_GET(PRESTERA_DSA_W1_PORT_NUM, words[1]) << 5) | + (FIELD_GET(PRESTERA_DSA_W2_PORT_NUM, words[2]) << 7); + + return 0; +} + 
+int prestera_dsa_build(const struct prestera_dsa *dsa, u8 *dsa_buf) +{ + __be32 *dsa_words = (__be32 *)dsa_buf; + u32 dev_num = dsa->hw_dev_num; + u32 words[4] = { 0 }; + + words[0] |= FIELD_PREP(PRESTERA_DSA_W0_CMD, PRESTERA_DSA_CMD_FROM_CPU); + + words[0] |= FIELD_PREP(PRESTERA_DSA_W0_DEV_NUM, dev_num); + dev_num = FIELD_GET(PRESTERA_DSA_DEV_NUM, dev_num); + words[3] |= FIELD_PREP(PRESTERA_DSA_W3_DEV_NUM, dev_num); + + words[3] |= FIELD_PREP(PRESTERA_DSA_W3_DST_EPORT, dsa->port_num); + + words[0] |= FIELD_PREP(PRESTERA_DSA_W0_EXT_BIT, 1); + words[1] |= FIELD_PREP(PRESTERA_DSA_W1_EXT_BIT, 1); + words[2] |= FIELD_PREP(PRESTERA_DSA_W2_EXT_BIT, 1); + + dsa_words[0] = htonl(words[0]); + dsa_words[1] = htonl(words[1]); + dsa_words[2] = htonl(words[2]); + dsa_words[3] = htonl(words[3]); + + return 0; +} diff --git a/drivers/net/ethernet/marvell/prestera/prestera_dsa.h b/drivers/net/ethernet/marvell/prestera/prestera_dsa.h new file mode 100644 index 000000000..67018629b --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_dsa.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2020 Marvell International Ltd. All rights reserved. */ + +#ifndef __PRESTERA_DSA_H_ +#define __PRESTERA_DSA_H_ + +#include <linux/types.h> + +#define PRESTERA_DSA_HLEN 16 + +enum prestera_dsa_cmd { + /* DSA command is "To CPU" */ + PRESTERA_DSA_CMD_TO_CPU = 0, + + /* DSA command is "From CPU" */ + PRESTERA_DSA_CMD_FROM_CPU, +}; + +struct prestera_dsa_vlan { + u16 vid; + u8 vpt; + u8 cfi_bit; + bool is_tagged; +}; + +struct prestera_dsa { + struct prestera_dsa_vlan vlan; + u32 hw_dev_num; + u32 port_num; +}; + +int prestera_dsa_parse(struct prestera_dsa *dsa, const u8 *dsa_buf); +int prestera_dsa_build(const struct prestera_dsa *dsa, u8 *dsa_buf); + +#endif /* _PRESTERA_DSA_H_ */ diff --git a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c new file mode 100644 index 000000000..93a5e2baf --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c @@ -0,0 +1,780 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2019-2020 Marvell International Ltd. 
All rights reserved */ + +#include <linux/ethtool.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> + +#include "prestera_ethtool.h" +#include "prestera.h" +#include "prestera_hw.h" + +#define PRESTERA_STATS_CNT \ + (sizeof(struct prestera_port_stats) / sizeof(u64)) +#define PRESTERA_STATS_IDX(name) \ + (offsetof(struct prestera_port_stats, name) / sizeof(u64)) +#define PRESTERA_STATS_FIELD(name) \ + [PRESTERA_STATS_IDX(name)] = __stringify(name) + +static const char driver_kind[] = "prestera"; + +static const struct prestera_link_mode { + enum ethtool_link_mode_bit_indices eth_mode; + u32 speed; + u64 pr_mask; + u8 duplex; + u8 port_type; +} port_link_modes[PRESTERA_LINK_MODE_MAX] = { + [PRESTERA_LINK_MODE_10baseT_Half] = { + .eth_mode = ETHTOOL_LINK_MODE_10baseT_Half_BIT, + .speed = 10, + .pr_mask = 1 << PRESTERA_LINK_MODE_10baseT_Half, + .duplex = PRESTERA_PORT_DUPLEX_HALF, + .port_type = PRESTERA_PORT_TYPE_TP, + }, + [PRESTERA_LINK_MODE_10baseT_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_10baseT_Full_BIT, + .speed = 10, + .pr_mask = 1 << PRESTERA_LINK_MODE_10baseT_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_TP, + }, + [PRESTERA_LINK_MODE_100baseT_Half] = { + .eth_mode = ETHTOOL_LINK_MODE_100baseT_Half_BIT, + .speed = 100, + .pr_mask = 1 << PRESTERA_LINK_MODE_100baseT_Half, + .duplex = PRESTERA_PORT_DUPLEX_HALF, + .port_type = PRESTERA_PORT_TYPE_TP, + }, + [PRESTERA_LINK_MODE_100baseT_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_100baseT_Full_BIT, + .speed = 100, + .pr_mask = 1 << PRESTERA_LINK_MODE_100baseT_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_TP, + }, + [PRESTERA_LINK_MODE_1000baseT_Half] = { + .eth_mode = ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + .speed = 1000, + .pr_mask = 1 << PRESTERA_LINK_MODE_1000baseT_Half, + .duplex = PRESTERA_PORT_DUPLEX_HALF, + .port_type = PRESTERA_PORT_TYPE_TP, + }, + [PRESTERA_LINK_MODE_1000baseT_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + .speed = 1000, + .pr_mask = 1 << PRESTERA_LINK_MODE_1000baseT_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_TP, + }, + [PRESTERA_LINK_MODE_1000baseX_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_1000baseX_Full_BIT, + .speed = 1000, + .pr_mask = 1 << PRESTERA_LINK_MODE_1000baseX_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_FIBRE, + }, + [PRESTERA_LINK_MODE_1000baseKX_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, + .speed = 1000, + .pr_mask = 1 << PRESTERA_LINK_MODE_1000baseKX_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_TP, + }, + [PRESTERA_LINK_MODE_2500baseX_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_2500baseX_Full_BIT, + .speed = 2500, + .pr_mask = 1 << PRESTERA_LINK_MODE_2500baseX_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + }, + [PRESTERA_LINK_MODE_10GbaseKR_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, + .speed = 10000, + .pr_mask = 1 << PRESTERA_LINK_MODE_10GbaseKR_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_TP, + }, + [PRESTERA_LINK_MODE_10GbaseSR_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, + .speed = 10000, + .pr_mask = 1 << PRESTERA_LINK_MODE_10GbaseSR_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_FIBRE, + }, + [PRESTERA_LINK_MODE_10GbaseLR_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, + .speed = 10000, + .pr_mask = 1 << PRESTERA_LINK_MODE_10GbaseLR_Full, + .duplex = 
PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_FIBRE, + }, + [PRESTERA_LINK_MODE_20GbaseKR2_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, + .speed = 20000, + .pr_mask = 1 << PRESTERA_LINK_MODE_20GbaseKR2_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_TP, + }, + [PRESTERA_LINK_MODE_25GbaseCR_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, + .speed = 25000, + .pr_mask = 1 << PRESTERA_LINK_MODE_25GbaseCR_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_DA, + }, + [PRESTERA_LINK_MODE_25GbaseKR_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, + .speed = 25000, + .pr_mask = 1 << PRESTERA_LINK_MODE_25GbaseKR_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_TP, + }, + [PRESTERA_LINK_MODE_25GbaseSR_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, + .speed = 25000, + .pr_mask = 1 << PRESTERA_LINK_MODE_25GbaseSR_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_FIBRE, + }, + [PRESTERA_LINK_MODE_40GbaseKR4_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, + .speed = 40000, + .pr_mask = 1 << PRESTERA_LINK_MODE_40GbaseKR4_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_TP, + }, + [PRESTERA_LINK_MODE_40GbaseCR4_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, + .speed = 40000, + .pr_mask = 1 << PRESTERA_LINK_MODE_40GbaseCR4_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_DA, + }, + [PRESTERA_LINK_MODE_40GbaseSR4_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, + .speed = 40000, + .pr_mask = 1 << PRESTERA_LINK_MODE_40GbaseSR4_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_FIBRE, + }, + [PRESTERA_LINK_MODE_50GbaseCR2_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, + .speed = 50000, + .pr_mask = 1 << PRESTERA_LINK_MODE_50GbaseCR2_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_DA, + }, + [PRESTERA_LINK_MODE_50GbaseKR2_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, + .speed = 50000, + .pr_mask = 1 << PRESTERA_LINK_MODE_50GbaseKR2_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_TP, + }, + [PRESTERA_LINK_MODE_50GbaseSR2_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, + .speed = 50000, + .pr_mask = 1 << PRESTERA_LINK_MODE_50GbaseSR2_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_FIBRE, + }, + [PRESTERA_LINK_MODE_100GbaseKR4_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, + .speed = 100000, + .pr_mask = 1 << PRESTERA_LINK_MODE_100GbaseKR4_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_TP, + }, + [PRESTERA_LINK_MODE_100GbaseSR4_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, + .speed = 100000, + .pr_mask = 1 << PRESTERA_LINK_MODE_100GbaseSR4_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_FIBRE, + }, + [PRESTERA_LINK_MODE_100GbaseCR4_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, + .speed = 100000, + .pr_mask = 1 << PRESTERA_LINK_MODE_100GbaseCR4_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_DA, + } +}; + +static const struct prestera_fec { + u32 eth_fec; + enum ethtool_link_mode_bit_indices eth_mode; + u8 pr_fec; +} port_fec_caps[PRESTERA_PORT_FEC_MAX] = { + [PRESTERA_PORT_FEC_OFF] = { 
+ .eth_fec = ETHTOOL_FEC_OFF, + .eth_mode = ETHTOOL_LINK_MODE_FEC_NONE_BIT, + .pr_fec = 1 << PRESTERA_PORT_FEC_OFF, + }, + [PRESTERA_PORT_FEC_BASER] = { + .eth_fec = ETHTOOL_FEC_BASER, + .eth_mode = ETHTOOL_LINK_MODE_FEC_BASER_BIT, + .pr_fec = 1 << PRESTERA_PORT_FEC_BASER, + }, + [PRESTERA_PORT_FEC_RS] = { + .eth_fec = ETHTOOL_FEC_RS, + .eth_mode = ETHTOOL_LINK_MODE_FEC_RS_BIT, + .pr_fec = 1 << PRESTERA_PORT_FEC_RS, + } +}; + +static const struct prestera_port_type { + enum ethtool_link_mode_bit_indices eth_mode; + u8 eth_type; +} port_types[PRESTERA_PORT_TYPE_MAX] = { + [PRESTERA_PORT_TYPE_NONE] = { + .eth_mode = __ETHTOOL_LINK_MODE_MASK_NBITS, + .eth_type = PORT_NONE, + }, + [PRESTERA_PORT_TYPE_TP] = { + .eth_mode = ETHTOOL_LINK_MODE_TP_BIT, + .eth_type = PORT_TP, + }, + [PRESTERA_PORT_TYPE_AUI] = { + .eth_mode = ETHTOOL_LINK_MODE_AUI_BIT, + .eth_type = PORT_AUI, + }, + [PRESTERA_PORT_TYPE_MII] = { + .eth_mode = ETHTOOL_LINK_MODE_MII_BIT, + .eth_type = PORT_MII, + }, + [PRESTERA_PORT_TYPE_FIBRE] = { + .eth_mode = ETHTOOL_LINK_MODE_FIBRE_BIT, + .eth_type = PORT_FIBRE, + }, + [PRESTERA_PORT_TYPE_BNC] = { + .eth_mode = ETHTOOL_LINK_MODE_BNC_BIT, + .eth_type = PORT_BNC, + }, + [PRESTERA_PORT_TYPE_DA] = { + .eth_mode = ETHTOOL_LINK_MODE_TP_BIT, + .eth_type = PORT_TP, + }, + [PRESTERA_PORT_TYPE_OTHER] = { + .eth_mode = __ETHTOOL_LINK_MODE_MASK_NBITS, + .eth_type = PORT_OTHER, + } +}; + +static const char prestera_cnt_name[PRESTERA_STATS_CNT][ETH_GSTRING_LEN] = { + PRESTERA_STATS_FIELD(good_octets_received), + PRESTERA_STATS_FIELD(bad_octets_received), + PRESTERA_STATS_FIELD(mac_trans_error), + PRESTERA_STATS_FIELD(broadcast_frames_received), + PRESTERA_STATS_FIELD(multicast_frames_received), + PRESTERA_STATS_FIELD(frames_64_octets), + PRESTERA_STATS_FIELD(frames_65_to_127_octets), + PRESTERA_STATS_FIELD(frames_128_to_255_octets), + PRESTERA_STATS_FIELD(frames_256_to_511_octets), + PRESTERA_STATS_FIELD(frames_512_to_1023_octets), + PRESTERA_STATS_FIELD(frames_1024_to_max_octets), + PRESTERA_STATS_FIELD(excessive_collision), + PRESTERA_STATS_FIELD(multicast_frames_sent), + PRESTERA_STATS_FIELD(broadcast_frames_sent), + PRESTERA_STATS_FIELD(fc_sent), + PRESTERA_STATS_FIELD(fc_received), + PRESTERA_STATS_FIELD(buffer_overrun), + PRESTERA_STATS_FIELD(undersize), + PRESTERA_STATS_FIELD(fragments), + PRESTERA_STATS_FIELD(oversize), + PRESTERA_STATS_FIELD(jabber), + PRESTERA_STATS_FIELD(rx_error_frame_received), + PRESTERA_STATS_FIELD(bad_crc), + PRESTERA_STATS_FIELD(collisions), + PRESTERA_STATS_FIELD(late_collision), + PRESTERA_STATS_FIELD(unicast_frames_received), + PRESTERA_STATS_FIELD(unicast_frames_sent), + PRESTERA_STATS_FIELD(sent_multiple), + PRESTERA_STATS_FIELD(sent_deferred), + PRESTERA_STATS_FIELD(good_octets_sent), +}; + +static void prestera_ethtool_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) +{ + struct prestera_port *port = netdev_priv(dev); + struct prestera_switch *sw = port->sw; + + strlcpy(drvinfo->driver, driver_kind, sizeof(drvinfo->driver)); + strlcpy(drvinfo->bus_info, dev_name(prestera_dev(sw)), + sizeof(drvinfo->bus_info)); + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%d.%d.%d", + sw->dev->fw_rev.maj, + sw->dev->fw_rev.min, + sw->dev->fw_rev.sub); +} + +static u8 prestera_port_type_get(struct prestera_port *port) +{ + if (port->caps.type < PRESTERA_PORT_TYPE_MAX) + return port_types[port->caps.type].eth_type; + + return PORT_OTHER; +} + +static int prestera_port_type_set(const struct ethtool_link_ksettings *ecmd, + struct 
prestera_port *port) +{ + u32 new_mode = PRESTERA_LINK_MODE_MAX; + u32 type, mode; + int err; + + for (type = 0; type < PRESTERA_PORT_TYPE_MAX; type++) { + if (port_types[type].eth_type == ecmd->base.port && + test_bit(port_types[type].eth_mode, + ecmd->link_modes.supported)) { + break; + } + } + + if (type == port->caps.type) + return 0; + if (type != port->caps.type && ecmd->base.autoneg == AUTONEG_ENABLE) + return -EINVAL; + if (type == PRESTERA_PORT_TYPE_MAX) + return -EOPNOTSUPP; + + for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) { + if ((port_link_modes[mode].pr_mask & + port->caps.supp_link_modes) && + type == port_link_modes[mode].port_type) { + new_mode = mode; + } + } + + if (new_mode < PRESTERA_LINK_MODE_MAX) + err = prestera_hw_port_link_mode_set(port, new_mode); + else + err = -EINVAL; + + if (err) + return err; + + port->caps.type = type; + port->autoneg = false; + + return 0; +} + +static void prestera_modes_to_eth(unsigned long *eth_modes, u64 link_modes, + u8 fec, u8 type) +{ + u32 mode; + + for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) { + if ((port_link_modes[mode].pr_mask & link_modes) == 0) + continue; + + if (type != PRESTERA_PORT_TYPE_NONE && + port_link_modes[mode].port_type != type) + continue; + + __set_bit(port_link_modes[mode].eth_mode, eth_modes); + } + + for (mode = 0; mode < PRESTERA_PORT_FEC_MAX; mode++) { + if ((port_fec_caps[mode].pr_fec & fec) == 0) + continue; + + __set_bit(port_fec_caps[mode].eth_mode, eth_modes); + } +} + +static void prestera_modes_from_eth(const unsigned long *eth_modes, + u64 *link_modes, u8 *fec, u8 type) +{ + u64 adver_modes = 0; + u32 fec_modes = 0; + u32 mode; + + for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) { + if (!test_bit(port_link_modes[mode].eth_mode, eth_modes)) + continue; + + if (port_link_modes[mode].port_type != type) + continue; + + adver_modes |= port_link_modes[mode].pr_mask; + } + + for (mode = 0; mode < PRESTERA_PORT_FEC_MAX; mode++) { + if (!test_bit(port_fec_caps[mode].eth_mode, eth_modes)) + continue; + + fec_modes |= port_fec_caps[mode].pr_fec; + } + + *link_modes = adver_modes; + *fec = fec_modes; +} + +static void prestera_port_supp_types_get(struct ethtool_link_ksettings *ecmd, + struct prestera_port *port) +{ + u32 mode; + u8 ptype; + + for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) { + if ((port_link_modes[mode].pr_mask & + port->caps.supp_link_modes) == 0) + continue; + + ptype = port_link_modes[mode].port_type; + __set_bit(port_types[ptype].eth_mode, + ecmd->link_modes.supported); + } +} + +static void prestera_port_remote_cap_get(struct ethtool_link_ksettings *ecmd, + struct prestera_port *port) +{ + bool asym_pause; + bool pause; + u64 bitmap; + int err; + + err = prestera_hw_port_remote_cap_get(port, &bitmap); + if (!err) { + prestera_modes_to_eth(ecmd->link_modes.lp_advertising, + bitmap, 0, PRESTERA_PORT_TYPE_NONE); + + if (!bitmap_empty(ecmd->link_modes.lp_advertising, + __ETHTOOL_LINK_MODE_MASK_NBITS)) { + ethtool_link_ksettings_add_link_mode(ecmd, + lp_advertising, + Autoneg); + } + } + + err = prestera_hw_port_remote_fc_get(port, &pause, &asym_pause); + if (err) + return; + + if (pause) + ethtool_link_ksettings_add_link_mode(ecmd, + lp_advertising, + Pause); + if (asym_pause) + ethtool_link_ksettings_add_link_mode(ecmd, + lp_advertising, + Asym_Pause); +} + +static void prestera_port_speed_get(struct ethtool_link_ksettings *ecmd, + struct prestera_port *port) +{ + u32 speed; + int err; + + err = prestera_hw_port_speed_get(port, &speed); + ecmd->base.speed = err ? 
SPEED_UNKNOWN : speed; +} + +static void prestera_port_duplex_get(struct ethtool_link_ksettings *ecmd, + struct prestera_port *port) +{ + u8 duplex; + int err; + + err = prestera_hw_port_duplex_get(port, &duplex); + if (err) { + ecmd->base.duplex = DUPLEX_UNKNOWN; + return; + } + + ecmd->base.duplex = duplex == PRESTERA_PORT_DUPLEX_FULL ? + DUPLEX_FULL : DUPLEX_HALF; +} + +static int +prestera_ethtool_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *ecmd) +{ + struct prestera_port *port = netdev_priv(dev); + + ethtool_link_ksettings_zero_link_mode(ecmd, supported); + ethtool_link_ksettings_zero_link_mode(ecmd, advertising); + ethtool_link_ksettings_zero_link_mode(ecmd, lp_advertising); + + ecmd->base.autoneg = port->autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; + + if (port->caps.type == PRESTERA_PORT_TYPE_TP) { + ethtool_link_ksettings_add_link_mode(ecmd, supported, Autoneg); + + if (netif_running(dev) && + (port->autoneg || + port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER)) + ethtool_link_ksettings_add_link_mode(ecmd, advertising, + Autoneg); + } + + prestera_modes_to_eth(ecmd->link_modes.supported, + port->caps.supp_link_modes, + port->caps.supp_fec, + port->caps.type); + + prestera_port_supp_types_get(ecmd, port); + + if (netif_carrier_ok(dev)) { + prestera_port_speed_get(ecmd, port); + prestera_port_duplex_get(ecmd, port); + } else { + ecmd->base.speed = SPEED_UNKNOWN; + ecmd->base.duplex = DUPLEX_UNKNOWN; + } + + ecmd->base.port = prestera_port_type_get(port); + + if (port->autoneg) { + if (netif_running(dev)) + prestera_modes_to_eth(ecmd->link_modes.advertising, + port->adver_link_modes, + port->adver_fec, + port->caps.type); + + if (netif_carrier_ok(dev) && + port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER) + prestera_port_remote_cap_get(ecmd, port); + } + + if (port->caps.type == PRESTERA_PORT_TYPE_TP && + port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER) + prestera_hw_port_mdix_get(port, &ecmd->base.eth_tp_mdix, + &ecmd->base.eth_tp_mdix_ctrl); + + return 0; +} + +static int prestera_port_mdix_set(const struct ethtool_link_ksettings *ecmd, + struct prestera_port *port) +{ + if (ecmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_INVALID && + port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER && + port->caps.type == PRESTERA_PORT_TYPE_TP) + return prestera_hw_port_mdix_set(port, + ecmd->base.eth_tp_mdix_ctrl); + + return 0; +} + +static int prestera_port_link_mode_set(struct prestera_port *port, + u32 speed, u8 duplex, u8 type) +{ + u32 new_mode = PRESTERA_LINK_MODE_MAX; + u32 mode; + + for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) { + if (speed != port_link_modes[mode].speed) + continue; + + if (duplex != port_link_modes[mode].duplex) + continue; + + if (!(port_link_modes[mode].pr_mask & + port->caps.supp_link_modes)) + continue; + + if (type != port_link_modes[mode].port_type) + continue; + + new_mode = mode; + break; + } + + if (new_mode == PRESTERA_LINK_MODE_MAX) + return -EOPNOTSUPP; + + return prestera_hw_port_link_mode_set(port, new_mode); +} + +static int +prestera_port_speed_duplex_set(const struct ethtool_link_ksettings *ecmd, + struct prestera_port *port) +{ + u32 curr_mode; + u8 duplex; + u32 speed; + int err; + + err = prestera_hw_port_link_mode_get(port, &curr_mode); + if (err) + return err; + if (curr_mode >= PRESTERA_LINK_MODE_MAX) + return -EINVAL; + + if (ecmd->base.duplex != DUPLEX_UNKNOWN) + duplex = ecmd->base.duplex == DUPLEX_FULL ? 
+ PRESTERA_PORT_DUPLEX_FULL : PRESTERA_PORT_DUPLEX_HALF; + else + duplex = port_link_modes[curr_mode].duplex; + + if (ecmd->base.speed != SPEED_UNKNOWN) + speed = ecmd->base.speed; + else + speed = port_link_modes[curr_mode].speed; + + return prestera_port_link_mode_set(port, speed, duplex, + port->caps.type); +} + +static int +prestera_ethtool_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *ecmd) +{ + struct prestera_port *port = netdev_priv(dev); + u64 adver_modes; + u8 adver_fec; + int err; + + err = prestera_port_type_set(ecmd, port); + if (err) + return err; + + if (port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER) { + err = prestera_port_mdix_set(ecmd, port); + if (err) + return err; + } + + prestera_modes_from_eth(ecmd->link_modes.advertising, &adver_modes, + &adver_fec, port->caps.type); + + err = prestera_port_autoneg_set(port, + ecmd->base.autoneg == AUTONEG_ENABLE, + adver_modes, adver_fec); + if (err) + return err; + + if (ecmd->base.autoneg == AUTONEG_DISABLE) { + err = prestera_port_speed_duplex_set(ecmd, port); + if (err) + return err; + } + + return 0; +} + +static int prestera_ethtool_get_fecparam(struct net_device *dev, + struct ethtool_fecparam *fecparam) +{ + struct prestera_port *port = netdev_priv(dev); + u8 active; + u32 mode; + int err; + + err = prestera_hw_port_fec_get(port, &active); + if (err) + return err; + + fecparam->fec = 0; + + for (mode = 0; mode < PRESTERA_PORT_FEC_MAX; mode++) { + if ((port_fec_caps[mode].pr_fec & port->caps.supp_fec) == 0) + continue; + + fecparam->fec |= port_fec_caps[mode].eth_fec; + } + + if (active < PRESTERA_PORT_FEC_MAX) + fecparam->active_fec = port_fec_caps[active].eth_fec; + else + fecparam->active_fec = ETHTOOL_FEC_AUTO; + + return 0; +} + +static int prestera_ethtool_set_fecparam(struct net_device *dev, + struct ethtool_fecparam *fecparam) +{ + struct prestera_port *port = netdev_priv(dev); + u8 fec, active; + u32 mode; + int err; + + if (port->autoneg) { + netdev_err(dev, "FEC set is not allowed while autoneg is on\n"); + return -EINVAL; + } + + err = prestera_hw_port_fec_get(port, &active); + if (err) + return err; + + fec = PRESTERA_PORT_FEC_MAX; + for (mode = 0; mode < PRESTERA_PORT_FEC_MAX; mode++) { + if ((port_fec_caps[mode].eth_fec & fecparam->fec) && + (port_fec_caps[mode].pr_fec & port->caps.supp_fec)) { + fec = mode; + break; + } + } + + if (fec == active) + return 0; + + if (fec == PRESTERA_PORT_FEC_MAX) + return -EOPNOTSUPP; + + return prestera_hw_port_fec_set(port, fec); +} + +static int prestera_ethtool_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return PRESTERA_STATS_CNT; + default: + return -EOPNOTSUPP; + } +} + +static void prestera_ethtool_get_strings(struct net_device *dev, + u32 stringset, u8 *data) +{ + if (stringset != ETH_SS_STATS) + return; + + memcpy(data, prestera_cnt_name, sizeof(prestera_cnt_name)); +} + +static void prestera_ethtool_get_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct prestera_port *port = netdev_priv(dev); + struct prestera_port_stats *port_stats; + + port_stats = &port->cached_hw_stats.stats; + + memcpy(data, port_stats, sizeof(*port_stats)); +} + +static int prestera_ethtool_nway_reset(struct net_device *dev) +{ + struct prestera_port *port = netdev_priv(dev); + + if (netif_running(dev) && + port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER && + port->caps.type == PRESTERA_PORT_TYPE_TP) + return prestera_hw_port_autoneg_restart(port); + + return -EINVAL; 
+} + +const struct ethtool_ops prestera_ethtool_ops = { + .get_drvinfo = prestera_ethtool_get_drvinfo, + .get_link_ksettings = prestera_ethtool_get_link_ksettings, + .set_link_ksettings = prestera_ethtool_set_link_ksettings, + .get_fecparam = prestera_ethtool_get_fecparam, + .set_fecparam = prestera_ethtool_set_fecparam, + .get_sset_count = prestera_ethtool_get_sset_count, + .get_strings = prestera_ethtool_get_strings, + .get_ethtool_stats = prestera_ethtool_get_stats, + .get_link = ethtool_op_get_link, + .nway_reset = prestera_ethtool_nway_reset +}; diff --git a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h new file mode 100644 index 000000000..523ef1f59 --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */ + +#ifndef __PRESTERA_ETHTOOL_H_ +#define __PRESTERA_ETHTOOL_H_ + +#include <linux/ethtool.h> + +extern const struct ethtool_ops prestera_ethtool_ops; + +#endif /* _PRESTERA_ETHTOOL_H_ */ diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_hw.c new file mode 100644 index 000000000..0424718d5 --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.c @@ -0,0 +1,1253 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */ + +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/list.h> + +#include "prestera.h" +#include "prestera_hw.h" + +#define PRESTERA_SWITCH_INIT_TIMEOUT_MS (30 * 1000) + +#define PRESTERA_MIN_MTU 64 + +enum prestera_cmd_type_t { + PRESTERA_CMD_TYPE_SWITCH_INIT = 0x1, + PRESTERA_CMD_TYPE_SWITCH_ATTR_SET = 0x2, + + PRESTERA_CMD_TYPE_PORT_ATTR_SET = 0x100, + PRESTERA_CMD_TYPE_PORT_ATTR_GET = 0x101, + PRESTERA_CMD_TYPE_PORT_INFO_GET = 0x110, + + PRESTERA_CMD_TYPE_VLAN_CREATE = 0x200, + PRESTERA_CMD_TYPE_VLAN_DELETE = 0x201, + PRESTERA_CMD_TYPE_VLAN_PORT_SET = 0x202, + PRESTERA_CMD_TYPE_VLAN_PVID_SET = 0x203, + + PRESTERA_CMD_TYPE_FDB_ADD = 0x300, + PRESTERA_CMD_TYPE_FDB_DELETE = 0x301, + PRESTERA_CMD_TYPE_FDB_FLUSH_PORT = 0x310, + PRESTERA_CMD_TYPE_FDB_FLUSH_VLAN = 0x311, + PRESTERA_CMD_TYPE_FDB_FLUSH_PORT_VLAN = 0x312, + + PRESTERA_CMD_TYPE_BRIDGE_CREATE = 0x400, + PRESTERA_CMD_TYPE_BRIDGE_DELETE = 0x401, + PRESTERA_CMD_TYPE_BRIDGE_PORT_ADD = 0x402, + PRESTERA_CMD_TYPE_BRIDGE_PORT_DELETE = 0x403, + + PRESTERA_CMD_TYPE_RXTX_INIT = 0x800, + PRESTERA_CMD_TYPE_RXTX_PORT_INIT = 0x801, + + PRESTERA_CMD_TYPE_STP_PORT_SET = 0x1000, + + PRESTERA_CMD_TYPE_ACK = 0x10000, + PRESTERA_CMD_TYPE_MAX +}; + +enum { + PRESTERA_CMD_PORT_ATTR_ADMIN_STATE = 1, + PRESTERA_CMD_PORT_ATTR_MTU = 3, + PRESTERA_CMD_PORT_ATTR_MAC = 4, + PRESTERA_CMD_PORT_ATTR_SPEED = 5, + PRESTERA_CMD_PORT_ATTR_ACCEPT_FRAME_TYPE = 6, + PRESTERA_CMD_PORT_ATTR_LEARNING = 7, + PRESTERA_CMD_PORT_ATTR_FLOOD = 8, + PRESTERA_CMD_PORT_ATTR_CAPABILITY = 9, + PRESTERA_CMD_PORT_ATTR_REMOTE_CAPABILITY = 10, + PRESTERA_CMD_PORT_ATTR_REMOTE_FC = 11, + PRESTERA_CMD_PORT_ATTR_LINK_MODE = 12, + PRESTERA_CMD_PORT_ATTR_TYPE = 13, + PRESTERA_CMD_PORT_ATTR_FEC = 14, + PRESTERA_CMD_PORT_ATTR_AUTONEG = 15, + PRESTERA_CMD_PORT_ATTR_DUPLEX = 16, + PRESTERA_CMD_PORT_ATTR_STATS = 17, + PRESTERA_CMD_PORT_ATTR_MDIX = 18, + PRESTERA_CMD_PORT_ATTR_AUTONEG_RESTART = 19, +}; + +enum { + PRESTERA_CMD_SWITCH_ATTR_MAC = 1, + 
PRESTERA_CMD_SWITCH_ATTR_AGEING = 2, +}; + +enum { + PRESTERA_CMD_ACK_OK, + PRESTERA_CMD_ACK_FAILED, + + PRESTERA_CMD_ACK_MAX +}; + +enum { + PRESTERA_PORT_TP_NA, + PRESTERA_PORT_TP_MDI, + PRESTERA_PORT_TP_MDIX, + PRESTERA_PORT_TP_AUTO, +}; + +enum { + PRESTERA_PORT_GOOD_OCTETS_RCV_CNT, + PRESTERA_PORT_BAD_OCTETS_RCV_CNT, + PRESTERA_PORT_MAC_TRANSMIT_ERR_CNT, + PRESTERA_PORT_BRDC_PKTS_RCV_CNT, + PRESTERA_PORT_MC_PKTS_RCV_CNT, + PRESTERA_PORT_PKTS_64L_CNT, + PRESTERA_PORT_PKTS_65TO127L_CNT, + PRESTERA_PORT_PKTS_128TO255L_CNT, + PRESTERA_PORT_PKTS_256TO511L_CNT, + PRESTERA_PORT_PKTS_512TO1023L_CNT, + PRESTERA_PORT_PKTS_1024TOMAXL_CNT, + PRESTERA_PORT_EXCESSIVE_COLLISIONS_CNT, + PRESTERA_PORT_MC_PKTS_SENT_CNT, + PRESTERA_PORT_BRDC_PKTS_SENT_CNT, + PRESTERA_PORT_FC_SENT_CNT, + PRESTERA_PORT_GOOD_FC_RCV_CNT, + PRESTERA_PORT_DROP_EVENTS_CNT, + PRESTERA_PORT_UNDERSIZE_PKTS_CNT, + PRESTERA_PORT_FRAGMENTS_PKTS_CNT, + PRESTERA_PORT_OVERSIZE_PKTS_CNT, + PRESTERA_PORT_JABBER_PKTS_CNT, + PRESTERA_PORT_MAC_RCV_ERROR_CNT, + PRESTERA_PORT_BAD_CRC_CNT, + PRESTERA_PORT_COLLISIONS_CNT, + PRESTERA_PORT_LATE_COLLISIONS_CNT, + PRESTERA_PORT_GOOD_UC_PKTS_RCV_CNT, + PRESTERA_PORT_GOOD_UC_PKTS_SENT_CNT, + PRESTERA_PORT_MULTIPLE_PKTS_SENT_CNT, + PRESTERA_PORT_DEFERRED_PKTS_SENT_CNT, + PRESTERA_PORT_GOOD_OCTETS_SENT_CNT, + + PRESTERA_PORT_CNT_MAX +}; + +enum { + PRESTERA_FC_NONE, + PRESTERA_FC_SYMMETRIC, + PRESTERA_FC_ASYMMETRIC, + PRESTERA_FC_SYMM_ASYMM, +}; + +struct prestera_fw_event_handler { + struct list_head list; + struct rcu_head rcu; + enum prestera_event_type type; + prestera_event_cb_t func; + void *arg; +}; + +struct prestera_msg_cmd { + u32 type; +}; + +struct prestera_msg_ret { + struct prestera_msg_cmd cmd; + u32 status; +}; + +struct prestera_msg_common_req { + struct prestera_msg_cmd cmd; +}; + +struct prestera_msg_common_resp { + struct prestera_msg_ret ret; +}; + +union prestera_msg_switch_param { + u8 mac[ETH_ALEN]; + u32 ageing_timeout_ms; +}; + +struct prestera_msg_switch_attr_req { + struct prestera_msg_cmd cmd; + u32 attr; + union prestera_msg_switch_param param; +}; + +struct prestera_msg_switch_init_resp { + struct prestera_msg_ret ret; + u32 port_count; + u32 mtu_max; + u8 switch_id; +}; + +struct prestera_msg_port_autoneg_param { + u64 link_mode; + u8 enable; + u8 fec; +}; + +struct prestera_msg_port_cap_param { + u64 link_mode; + u8 type; + u8 fec; + u8 transceiver; +}; + +struct prestera_msg_port_mdix_param { + u8 status; + u8 admin_mode; +}; + +union prestera_msg_port_param { + u8 admin_state; + u8 oper_state; + u32 mtu; + u8 mac[ETH_ALEN]; + u8 accept_frm_type; + u32 speed; + u8 learning; + u8 flood; + u32 link_mode; + u8 type; + u8 duplex; + u8 fec; + u8 fc; + struct prestera_msg_port_mdix_param mdix; + struct prestera_msg_port_autoneg_param autoneg; + struct prestera_msg_port_cap_param cap; +}; + +struct prestera_msg_port_attr_req { + struct prestera_msg_cmd cmd; + u32 attr; + u32 port; + u32 dev; + union prestera_msg_port_param param; +}; + +struct prestera_msg_port_attr_resp { + struct prestera_msg_ret ret; + union prestera_msg_port_param param; +}; + +struct prestera_msg_port_stats_resp { + struct prestera_msg_ret ret; + u64 stats[PRESTERA_PORT_CNT_MAX]; +}; + +struct prestera_msg_port_info_req { + struct prestera_msg_cmd cmd; + u32 port; +}; + +struct prestera_msg_port_info_resp { + struct prestera_msg_ret ret; + u32 hw_id; + u32 dev_id; + u16 fp_id; +}; + +struct prestera_msg_vlan_req { + struct prestera_msg_cmd cmd; + u32 port; + u32 dev; + u16 vid; + u8 is_member; + u8 
is_tagged; +}; + +struct prestera_msg_fdb_req { + struct prestera_msg_cmd cmd; + u8 dest_type; + u32 port; + u32 dev; + u8 mac[ETH_ALEN]; + u16 vid; + u8 dynamic; + u32 flush_mode; +}; + +struct prestera_msg_bridge_req { + struct prestera_msg_cmd cmd; + u32 port; + u32 dev; + u16 bridge; +}; + +struct prestera_msg_bridge_resp { + struct prestera_msg_ret ret; + u16 bridge; +}; + +struct prestera_msg_stp_req { + struct prestera_msg_cmd cmd; + u32 port; + u32 dev; + u16 vid; + u8 state; +}; + +struct prestera_msg_rxtx_req { + struct prestera_msg_cmd cmd; + u8 use_sdma; +}; + +struct prestera_msg_rxtx_resp { + struct prestera_msg_ret ret; + u32 map_addr; +}; + +struct prestera_msg_rxtx_port_req { + struct prestera_msg_cmd cmd; + u32 port; + u32 dev; +}; + +struct prestera_msg_event { + u16 type; + u16 id; +}; + +union prestera_msg_event_port_param { + u32 oper_state; +}; + +struct prestera_msg_event_port { + struct prestera_msg_event id; + u32 port_id; + union prestera_msg_event_port_param param; +}; + +union prestera_msg_event_fdb_param { + u8 mac[ETH_ALEN]; +}; + +struct prestera_msg_event_fdb { + struct prestera_msg_event id; + u8 dest_type; + u32 port_id; + u32 vid; + union prestera_msg_event_fdb_param param; +}; + +static int __prestera_cmd_ret(struct prestera_switch *sw, + enum prestera_cmd_type_t type, + struct prestera_msg_cmd *cmd, size_t clen, + struct prestera_msg_ret *ret, size_t rlen, + int waitms) +{ + struct prestera_device *dev = sw->dev; + int err; + + cmd->type = type; + + err = dev->send_req(dev, cmd, clen, ret, rlen, waitms); + if (err) + return err; + + if (ret->cmd.type != PRESTERA_CMD_TYPE_ACK) + return -EBADE; + if (ret->status != PRESTERA_CMD_ACK_OK) + return -EINVAL; + + return 0; +} + +static int prestera_cmd_ret(struct prestera_switch *sw, + enum prestera_cmd_type_t type, + struct prestera_msg_cmd *cmd, size_t clen, + struct prestera_msg_ret *ret, size_t rlen) +{ + return __prestera_cmd_ret(sw, type, cmd, clen, ret, rlen, 0); +} + +static int prestera_cmd_ret_wait(struct prestera_switch *sw, + enum prestera_cmd_type_t type, + struct prestera_msg_cmd *cmd, size_t clen, + struct prestera_msg_ret *ret, size_t rlen, + int waitms) +{ + return __prestera_cmd_ret(sw, type, cmd, clen, ret, rlen, waitms); +} + +static int prestera_cmd(struct prestera_switch *sw, + enum prestera_cmd_type_t type, + struct prestera_msg_cmd *cmd, size_t clen) +{ + struct prestera_msg_common_resp resp; + + return prestera_cmd_ret(sw, type, cmd, clen, &resp.ret, sizeof(resp)); +} + +static int prestera_fw_parse_port_evt(void *msg, struct prestera_event *evt) +{ + struct prestera_msg_event_port *hw_evt = msg; + + if (evt->id != PRESTERA_PORT_EVENT_STATE_CHANGED) + return -EINVAL; + + evt->port_evt.data.oper_state = hw_evt->param.oper_state; + evt->port_evt.port_id = hw_evt->port_id; + + return 0; +} + +static int prestera_fw_parse_fdb_evt(void *msg, struct prestera_event *evt) +{ + struct prestera_msg_event_fdb *hw_evt = msg; + + evt->fdb_evt.port_id = hw_evt->port_id; + evt->fdb_evt.vid = hw_evt->vid; + + ether_addr_copy(evt->fdb_evt.data.mac, hw_evt->param.mac); + + return 0; +} + +static struct prestera_fw_evt_parser { + int (*func)(void *msg, struct prestera_event *evt); +} fw_event_parsers[PRESTERA_EVENT_TYPE_MAX] = { + [PRESTERA_EVENT_TYPE_PORT] = { .func = prestera_fw_parse_port_evt }, + [PRESTERA_EVENT_TYPE_FDB] = { .func = prestera_fw_parse_fdb_evt }, +}; + +static struct prestera_fw_event_handler * +__find_event_handler(const struct prestera_switch *sw, + enum prestera_event_type type) +{ 
+ struct prestera_fw_event_handler *eh; + + list_for_each_entry_rcu(eh, &sw->event_handlers, list) { + if (eh->type == type) + return eh; + } + + return NULL; +} + +static int prestera_find_event_handler(const struct prestera_switch *sw, + enum prestera_event_type type, + struct prestera_fw_event_handler *eh) +{ + struct prestera_fw_event_handler *tmp; + int err = 0; + + rcu_read_lock(); + tmp = __find_event_handler(sw, type); + if (tmp) + *eh = *tmp; + else + err = -ENOENT; + rcu_read_unlock(); + + return err; +} + +static int prestera_evt_recv(struct prestera_device *dev, void *buf, size_t size) +{ + struct prestera_switch *sw = dev->priv; + struct prestera_msg_event *msg = buf; + struct prestera_fw_event_handler eh; + struct prestera_event evt; + int err; + + if (msg->type >= PRESTERA_EVENT_TYPE_MAX) + return -EINVAL; + if (!fw_event_parsers[msg->type].func) + return -ENOENT; + + err = prestera_find_event_handler(sw, msg->type, &eh); + if (err) + return err; + + evt.id = msg->id; + + err = fw_event_parsers[msg->type].func(buf, &evt); + if (err) + return err; + + eh.func(sw, &evt, eh.arg); + + return 0; +} + +static void prestera_pkt_recv(struct prestera_device *dev) +{ + struct prestera_switch *sw = dev->priv; + struct prestera_fw_event_handler eh; + struct prestera_event ev; + int err; + + ev.id = PRESTERA_RXTX_EVENT_RCV_PKT; + + err = prestera_find_event_handler(sw, PRESTERA_EVENT_TYPE_RXTX, &eh); + if (err) + return; + + eh.func(sw, &ev, eh.arg); +} + +int prestera_hw_port_info_get(const struct prestera_port *port, + u32 *dev_id, u32 *hw_id, u16 *fp_id) +{ + struct prestera_msg_port_info_req req = { + .port = port->id, + }; + struct prestera_msg_port_info_resp resp; + int err; + + err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_INFO_GET, + &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + *dev_id = resp.dev_id; + *hw_id = resp.hw_id; + *fp_id = resp.fp_id; + + return 0; +} + +int prestera_hw_switch_mac_set(struct prestera_switch *sw, const char *mac) +{ + struct prestera_msg_switch_attr_req req = { + .attr = PRESTERA_CMD_SWITCH_ATTR_MAC, + }; + + ether_addr_copy(req.param.mac, mac); + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_SWITCH_ATTR_SET, + &req.cmd, sizeof(req)); +} + +int prestera_hw_switch_init(struct prestera_switch *sw) +{ + struct prestera_msg_switch_init_resp resp; + struct prestera_msg_common_req req; + int err; + + INIT_LIST_HEAD(&sw->event_handlers); + + err = prestera_cmd_ret_wait(sw, PRESTERA_CMD_TYPE_SWITCH_INIT, + &req.cmd, sizeof(req), + &resp.ret, sizeof(resp), + PRESTERA_SWITCH_INIT_TIMEOUT_MS); + if (err) + return err; + + sw->dev->recv_msg = prestera_evt_recv; + sw->dev->recv_pkt = prestera_pkt_recv; + sw->port_count = resp.port_count; + sw->mtu_min = PRESTERA_MIN_MTU; + sw->mtu_max = resp.mtu_max; + sw->id = resp.switch_id; + + return 0; +} + +void prestera_hw_switch_fini(struct prestera_switch *sw) +{ + WARN_ON(!list_empty(&sw->event_handlers)); +} + +int prestera_hw_switch_ageing_set(struct prestera_switch *sw, u32 ageing_ms) +{ + struct prestera_msg_switch_attr_req req = { + .attr = PRESTERA_CMD_SWITCH_ATTR_AGEING, + .param = { + .ageing_timeout_ms = ageing_ms, + }, + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_SWITCH_ATTR_SET, + &req.cmd, sizeof(req)); +} + +int prestera_hw_port_state_set(const struct prestera_port *port, + bool admin_state) +{ + struct prestera_msg_port_attr_req req = { + .attr = PRESTERA_CMD_PORT_ATTR_ADMIN_STATE, + .port = port->hw_id, + .dev = port->dev_id, + .param = { + .admin_state = 
admin_state, + } + }; + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET, + &req.cmd, sizeof(req)); +} + +int prestera_hw_port_mtu_set(const struct prestera_port *port, u32 mtu) +{ + struct prestera_msg_port_attr_req req = { + .attr = PRESTERA_CMD_PORT_ATTR_MTU, + .port = port->hw_id, + .dev = port->dev_id, + .param = { + .mtu = mtu, + } + }; + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET, + &req.cmd, sizeof(req)); +} + +int prestera_hw_port_mac_set(const struct prestera_port *port, const char *mac) +{ + struct prestera_msg_port_attr_req req = { + .attr = PRESTERA_CMD_PORT_ATTR_MAC, + .port = port->hw_id, + .dev = port->dev_id, + }; + + ether_addr_copy(req.param.mac, mac); + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET, + &req.cmd, sizeof(req)); +} + +int prestera_hw_port_accept_frm_type(struct prestera_port *port, + enum prestera_accept_frm_type type) +{ + struct prestera_msg_port_attr_req req = { + .attr = PRESTERA_CMD_PORT_ATTR_ACCEPT_FRAME_TYPE, + .port = port->hw_id, + .dev = port->dev_id, + .param = { + .accept_frm_type = type, + } + }; + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET, + &req.cmd, sizeof(req)); +} + +int prestera_hw_port_cap_get(const struct prestera_port *port, + struct prestera_port_caps *caps) +{ + struct prestera_msg_port_attr_req req = { + .attr = PRESTERA_CMD_PORT_ATTR_CAPABILITY, + .port = port->hw_id, + .dev = port->dev_id, + }; + struct prestera_msg_port_attr_resp resp; + int err; + + err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET, + &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + caps->supp_link_modes = resp.param.cap.link_mode; + caps->transceiver = resp.param.cap.transceiver; + caps->supp_fec = resp.param.cap.fec; + caps->type = resp.param.cap.type; + + return err; +} + +int prestera_hw_port_remote_cap_get(const struct prestera_port *port, + u64 *link_mode_bitmap) +{ + struct prestera_msg_port_attr_req req = { + .attr = PRESTERA_CMD_PORT_ATTR_REMOTE_CAPABILITY, + .port = port->hw_id, + .dev = port->dev_id, + }; + struct prestera_msg_port_attr_resp resp; + int err; + + err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET, + &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + *link_mode_bitmap = resp.param.cap.link_mode; + + return 0; +} + +int prestera_hw_port_remote_fc_get(const struct prestera_port *port, + bool *pause, bool *asym_pause) +{ + struct prestera_msg_port_attr_req req = { + .attr = PRESTERA_CMD_PORT_ATTR_REMOTE_FC, + .port = port->hw_id, + .dev = port->dev_id, + }; + struct prestera_msg_port_attr_resp resp; + int err; + + err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET, + &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + switch (resp.param.fc) { + case PRESTERA_FC_SYMMETRIC: + *pause = true; + *asym_pause = false; + break; + case PRESTERA_FC_ASYMMETRIC: + *pause = false; + *asym_pause = true; + break; + case PRESTERA_FC_SYMM_ASYMM: + *pause = true; + *asym_pause = true; + break; + default: + *pause = false; + *asym_pause = false; + } + + return 0; +} + +int prestera_hw_port_type_get(const struct prestera_port *port, u8 *type) +{ + struct prestera_msg_port_attr_req req = { + .attr = PRESTERA_CMD_PORT_ATTR_TYPE, + .port = port->hw_id, + .dev = port->dev_id, + }; + struct prestera_msg_port_attr_resp resp; + int err; + + err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET, + &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); + if (err) + 
return err; + + *type = resp.param.type; + + return 0; +} + +int prestera_hw_port_fec_get(const struct prestera_port *port, u8 *fec) +{ + struct prestera_msg_port_attr_req req = { + .attr = PRESTERA_CMD_PORT_ATTR_FEC, + .port = port->hw_id, + .dev = port->dev_id, + }; + struct prestera_msg_port_attr_resp resp; + int err; + + err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET, + &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + *fec = resp.param.fec; + + return 0; +} + +int prestera_hw_port_fec_set(const struct prestera_port *port, u8 fec) +{ + struct prestera_msg_port_attr_req req = { + .attr = PRESTERA_CMD_PORT_ATTR_FEC, + .port = port->hw_id, + .dev = port->dev_id, + .param = { + .fec = fec, + } + }; + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET, + &req.cmd, sizeof(req)); +} + +static u8 prestera_hw_mdix_to_eth(u8 mode) +{ + switch (mode) { + case PRESTERA_PORT_TP_MDI: + return ETH_TP_MDI; + case PRESTERA_PORT_TP_MDIX: + return ETH_TP_MDI_X; + case PRESTERA_PORT_TP_AUTO: + return ETH_TP_MDI_AUTO; + default: + return ETH_TP_MDI_INVALID; + } +} + +static u8 prestera_hw_mdix_from_eth(u8 mode) +{ + switch (mode) { + case ETH_TP_MDI: + return PRESTERA_PORT_TP_MDI; + case ETH_TP_MDI_X: + return PRESTERA_PORT_TP_MDIX; + case ETH_TP_MDI_AUTO: + return PRESTERA_PORT_TP_AUTO; + default: + return PRESTERA_PORT_TP_NA; + } +} + +int prestera_hw_port_mdix_get(const struct prestera_port *port, u8 *status, + u8 *admin_mode) +{ + struct prestera_msg_port_attr_req req = { + .attr = PRESTERA_CMD_PORT_ATTR_MDIX, + .port = port->hw_id, + .dev = port->dev_id, + }; + struct prestera_msg_port_attr_resp resp; + int err; + + err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET, + &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + *status = prestera_hw_mdix_to_eth(resp.param.mdix.status); + *admin_mode = prestera_hw_mdix_to_eth(resp.param.mdix.admin_mode); + + return 0; +} + +int prestera_hw_port_mdix_set(const struct prestera_port *port, u8 mode) +{ + struct prestera_msg_port_attr_req req = { + .attr = PRESTERA_CMD_PORT_ATTR_MDIX, + .port = port->hw_id, + .dev = port->dev_id, + }; + + req.param.mdix.admin_mode = prestera_hw_mdix_from_eth(mode); + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET, + &req.cmd, sizeof(req)); +} + +int prestera_hw_port_link_mode_set(const struct prestera_port *port, u32 mode) +{ + struct prestera_msg_port_attr_req req = { + .attr = PRESTERA_CMD_PORT_ATTR_LINK_MODE, + .port = port->hw_id, + .dev = port->dev_id, + .param = { + .link_mode = mode, + } + }; + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET, + &req.cmd, sizeof(req)); +} + +int prestera_hw_port_link_mode_get(const struct prestera_port *port, u32 *mode) +{ + struct prestera_msg_port_attr_req req = { + .attr = PRESTERA_CMD_PORT_ATTR_LINK_MODE, + .port = port->hw_id, + .dev = port->dev_id, + }; + struct prestera_msg_port_attr_resp resp; + int err; + + err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET, + &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + *mode = resp.param.link_mode; + + return 0; +} + +int prestera_hw_port_speed_get(const struct prestera_port *port, u32 *speed) +{ + struct prestera_msg_port_attr_req req = { + .attr = PRESTERA_CMD_PORT_ATTR_SPEED, + .port = port->hw_id, + .dev = port->dev_id, + }; + struct prestera_msg_port_attr_resp resp; + int err; + + err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET, + &req.cmd, 
sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + *speed = resp.param.speed; + + return 0; +} + +int prestera_hw_port_autoneg_set(const struct prestera_port *port, + bool autoneg, u64 link_modes, u8 fec) +{ + struct prestera_msg_port_attr_req req = { + .attr = PRESTERA_CMD_PORT_ATTR_AUTONEG, + .port = port->hw_id, + .dev = port->dev_id, + .param = { + .autoneg = { + .link_mode = link_modes, + .enable = autoneg, + .fec = fec, + } + } + }; + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET, + &req.cmd, sizeof(req)); +} + +int prestera_hw_port_autoneg_restart(struct prestera_port *port) +{ + struct prestera_msg_port_attr_req req = { + .attr = PRESTERA_CMD_PORT_ATTR_AUTONEG_RESTART, + .port = port->hw_id, + .dev = port->dev_id, + }; + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET, + &req.cmd, sizeof(req)); +} + +int prestera_hw_port_duplex_get(const struct prestera_port *port, u8 *duplex) +{ + struct prestera_msg_port_attr_req req = { + .attr = PRESTERA_CMD_PORT_ATTR_DUPLEX, + .port = port->hw_id, + .dev = port->dev_id, + }; + struct prestera_msg_port_attr_resp resp; + int err; + + err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET, + &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + *duplex = resp.param.duplex; + + return 0; +} + +int prestera_hw_port_stats_get(const struct prestera_port *port, + struct prestera_port_stats *st) +{ + struct prestera_msg_port_attr_req req = { + .attr = PRESTERA_CMD_PORT_ATTR_STATS, + .port = port->hw_id, + .dev = port->dev_id, + }; + struct prestera_msg_port_stats_resp resp; + u64 *hw = resp.stats; + int err; + + err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET, + &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + st->good_octets_received = hw[PRESTERA_PORT_GOOD_OCTETS_RCV_CNT]; + st->bad_octets_received = hw[PRESTERA_PORT_BAD_OCTETS_RCV_CNT]; + st->mac_trans_error = hw[PRESTERA_PORT_MAC_TRANSMIT_ERR_CNT]; + st->broadcast_frames_received = hw[PRESTERA_PORT_BRDC_PKTS_RCV_CNT]; + st->multicast_frames_received = hw[PRESTERA_PORT_MC_PKTS_RCV_CNT]; + st->frames_64_octets = hw[PRESTERA_PORT_PKTS_64L_CNT]; + st->frames_65_to_127_octets = hw[PRESTERA_PORT_PKTS_65TO127L_CNT]; + st->frames_128_to_255_octets = hw[PRESTERA_PORT_PKTS_128TO255L_CNT]; + st->frames_256_to_511_octets = hw[PRESTERA_PORT_PKTS_256TO511L_CNT]; + st->frames_512_to_1023_octets = hw[PRESTERA_PORT_PKTS_512TO1023L_CNT]; + st->frames_1024_to_max_octets = hw[PRESTERA_PORT_PKTS_1024TOMAXL_CNT]; + st->excessive_collision = hw[PRESTERA_PORT_EXCESSIVE_COLLISIONS_CNT]; + st->multicast_frames_sent = hw[PRESTERA_PORT_MC_PKTS_SENT_CNT]; + st->broadcast_frames_sent = hw[PRESTERA_PORT_BRDC_PKTS_SENT_CNT]; + st->fc_sent = hw[PRESTERA_PORT_FC_SENT_CNT]; + st->fc_received = hw[PRESTERA_PORT_GOOD_FC_RCV_CNT]; + st->buffer_overrun = hw[PRESTERA_PORT_DROP_EVENTS_CNT]; + st->undersize = hw[PRESTERA_PORT_UNDERSIZE_PKTS_CNT]; + st->fragments = hw[PRESTERA_PORT_FRAGMENTS_PKTS_CNT]; + st->oversize = hw[PRESTERA_PORT_OVERSIZE_PKTS_CNT]; + st->jabber = hw[PRESTERA_PORT_JABBER_PKTS_CNT]; + st->rx_error_frame_received = hw[PRESTERA_PORT_MAC_RCV_ERROR_CNT]; + st->bad_crc = hw[PRESTERA_PORT_BAD_CRC_CNT]; + st->collisions = hw[PRESTERA_PORT_COLLISIONS_CNT]; + st->late_collision = hw[PRESTERA_PORT_LATE_COLLISIONS_CNT]; + st->unicast_frames_received = hw[PRESTERA_PORT_GOOD_UC_PKTS_RCV_CNT]; + st->unicast_frames_sent = hw[PRESTERA_PORT_GOOD_UC_PKTS_SENT_CNT]; + st->sent_multiple = 
hw[PRESTERA_PORT_MULTIPLE_PKTS_SENT_CNT]; + st->sent_deferred = hw[PRESTERA_PORT_DEFERRED_PKTS_SENT_CNT]; + st->good_octets_sent = hw[PRESTERA_PORT_GOOD_OCTETS_SENT_CNT]; + + return 0; +} + +int prestera_hw_port_learning_set(struct prestera_port *port, bool enable) +{ + struct prestera_msg_port_attr_req req = { + .attr = PRESTERA_CMD_PORT_ATTR_LEARNING, + .port = port->hw_id, + .dev = port->dev_id, + .param = { + .learning = enable, + } + }; + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET, + &req.cmd, sizeof(req)); +} + +int prestera_hw_port_flood_set(struct prestera_port *port, bool flood) +{ + struct prestera_msg_port_attr_req req = { + .attr = PRESTERA_CMD_PORT_ATTR_FLOOD, + .port = port->hw_id, + .dev = port->dev_id, + .param = { + .flood = flood, + } + }; + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET, + &req.cmd, sizeof(req)); +} + +int prestera_hw_vlan_create(struct prestera_switch *sw, u16 vid) +{ + struct prestera_msg_vlan_req req = { + .vid = vid, + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_VLAN_CREATE, + &req.cmd, sizeof(req)); +} + +int prestera_hw_vlan_delete(struct prestera_switch *sw, u16 vid) +{ + struct prestera_msg_vlan_req req = { + .vid = vid, + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_VLAN_DELETE, + &req.cmd, sizeof(req)); +} + +int prestera_hw_vlan_port_set(struct prestera_port *port, u16 vid, + bool is_member, bool untagged) +{ + struct prestera_msg_vlan_req req = { + .port = port->hw_id, + .dev = port->dev_id, + .vid = vid, + .is_member = is_member, + .is_tagged = !untagged, + }; + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_VLAN_PORT_SET, + &req.cmd, sizeof(req)); +} + +int prestera_hw_vlan_port_vid_set(struct prestera_port *port, u16 vid) +{ + struct prestera_msg_vlan_req req = { + .port = port->hw_id, + .dev = port->dev_id, + .vid = vid, + }; + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_VLAN_PVID_SET, + &req.cmd, sizeof(req)); +} + +int prestera_hw_vlan_port_stp_set(struct prestera_port *port, u16 vid, u8 state) +{ + struct prestera_msg_stp_req req = { + .port = port->hw_id, + .dev = port->dev_id, + .vid = vid, + .state = state, + }; + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_STP_PORT_SET, + &req.cmd, sizeof(req)); +} + +int prestera_hw_fdb_add(struct prestera_port *port, const unsigned char *mac, + u16 vid, bool dynamic) +{ + struct prestera_msg_fdb_req req = { + .port = port->hw_id, + .dev = port->dev_id, + .vid = vid, + .dynamic = dynamic, + }; + + ether_addr_copy(req.mac, mac); + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_ADD, + &req.cmd, sizeof(req)); +} + +int prestera_hw_fdb_del(struct prestera_port *port, const unsigned char *mac, + u16 vid) +{ + struct prestera_msg_fdb_req req = { + .port = port->hw_id, + .dev = port->dev_id, + .vid = vid, + }; + + ether_addr_copy(req.mac, mac); + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_DELETE, + &req.cmd, sizeof(req)); +} + +int prestera_hw_fdb_flush_port(struct prestera_port *port, u32 mode) +{ + struct prestera_msg_fdb_req req = { + .port = port->hw_id, + .dev = port->dev_id, + .flush_mode = mode, + }; + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT, + &req.cmd, sizeof(req)); +} + +int prestera_hw_fdb_flush_vlan(struct prestera_switch *sw, u16 vid, u32 mode) +{ + struct prestera_msg_fdb_req req = { + .vid = vid, + .flush_mode = mode, + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_FLUSH_VLAN, + &req.cmd, sizeof(req)); +} + +int prestera_hw_fdb_flush_port_vlan(struct prestera_port *port, u16 vid, 
+ u32 mode) +{ + struct prestera_msg_fdb_req req = { + .port = port->hw_id, + .dev = port->dev_id, + .vid = vid, + .flush_mode = mode, + }; + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT_VLAN, + &req.cmd, sizeof(req)); +} + +int prestera_hw_bridge_create(struct prestera_switch *sw, u16 *bridge_id) +{ + struct prestera_msg_bridge_resp resp; + struct prestera_msg_bridge_req req; + int err; + + err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_BRIDGE_CREATE, + &req.cmd, sizeof(req), + &resp.ret, sizeof(resp)); + if (err) + return err; + + *bridge_id = resp.bridge; + + return 0; +} + +int prestera_hw_bridge_delete(struct prestera_switch *sw, u16 bridge_id) +{ + struct prestera_msg_bridge_req req = { + .bridge = bridge_id, + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_BRIDGE_DELETE, + &req.cmd, sizeof(req)); +} + +int prestera_hw_bridge_port_add(struct prestera_port *port, u16 bridge_id) +{ + struct prestera_msg_bridge_req req = { + .bridge = bridge_id, + .port = port->hw_id, + .dev = port->dev_id, + }; + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_BRIDGE_PORT_ADD, + &req.cmd, sizeof(req)); +} + +int prestera_hw_bridge_port_delete(struct prestera_port *port, u16 bridge_id) +{ + struct prestera_msg_bridge_req req = { + .bridge = bridge_id, + .port = port->hw_id, + .dev = port->dev_id, + }; + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_BRIDGE_PORT_DELETE, + &req.cmd, sizeof(req)); +} + +int prestera_hw_rxtx_init(struct prestera_switch *sw, + struct prestera_rxtx_params *params) +{ + struct prestera_msg_rxtx_resp resp; + struct prestera_msg_rxtx_req req; + int err; + + req.use_sdma = params->use_sdma; + + err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_RXTX_INIT, + &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + params->map_addr = resp.map_addr; + + return 0; +} + +int prestera_hw_rxtx_port_init(struct prestera_port *port) +{ + struct prestera_msg_rxtx_port_req req = { + .port = port->hw_id, + .dev = port->dev_id, + }; + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_RXTX_PORT_INIT, + &req.cmd, sizeof(req)); +} + +int prestera_hw_event_handler_register(struct prestera_switch *sw, + enum prestera_event_type type, + prestera_event_cb_t fn, + void *arg) +{ + struct prestera_fw_event_handler *eh; + + eh = __find_event_handler(sw, type); + if (eh) + return -EEXIST; + + eh = kmalloc(sizeof(*eh), GFP_KERNEL); + if (!eh) + return -ENOMEM; + + eh->type = type; + eh->func = fn; + eh->arg = arg; + + INIT_LIST_HEAD(&eh->list); + + list_add_rcu(&eh->list, &sw->event_handlers); + + return 0; +} + +void prestera_hw_event_handler_unregister(struct prestera_switch *sw, + enum prestera_event_type type, + prestera_event_cb_t fn) +{ + struct prestera_fw_event_handler *eh; + + eh = __find_event_handler(sw, type); + if (!eh) + return; + + list_del_rcu(&eh->list); + kfree_rcu(eh, rcu); +} diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_hw.h new file mode 100644 index 000000000..b2b5ac95b --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.h @@ -0,0 +1,182 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. 
*/ + +#ifndef _PRESTERA_HW_H_ +#define _PRESTERA_HW_H_ + +#include <linux/types.h> + +enum prestera_accept_frm_type { + PRESTERA_ACCEPT_FRAME_TYPE_TAGGED, + PRESTERA_ACCEPT_FRAME_TYPE_UNTAGGED, + PRESTERA_ACCEPT_FRAME_TYPE_ALL, +}; + +enum prestera_fdb_flush_mode { + PRESTERA_FDB_FLUSH_MODE_DYNAMIC = BIT(0), + PRESTERA_FDB_FLUSH_MODE_STATIC = BIT(1), + PRESTERA_FDB_FLUSH_MODE_ALL = PRESTERA_FDB_FLUSH_MODE_DYNAMIC + | PRESTERA_FDB_FLUSH_MODE_STATIC, +}; + +enum { + PRESTERA_LINK_MODE_10baseT_Half, + PRESTERA_LINK_MODE_10baseT_Full, + PRESTERA_LINK_MODE_100baseT_Half, + PRESTERA_LINK_MODE_100baseT_Full, + PRESTERA_LINK_MODE_1000baseT_Half, + PRESTERA_LINK_MODE_1000baseT_Full, + PRESTERA_LINK_MODE_1000baseX_Full, + PRESTERA_LINK_MODE_1000baseKX_Full, + PRESTERA_LINK_MODE_2500baseX_Full, + PRESTERA_LINK_MODE_10GbaseKR_Full, + PRESTERA_LINK_MODE_10GbaseSR_Full, + PRESTERA_LINK_MODE_10GbaseLR_Full, + PRESTERA_LINK_MODE_20GbaseKR2_Full, + PRESTERA_LINK_MODE_25GbaseCR_Full, + PRESTERA_LINK_MODE_25GbaseKR_Full, + PRESTERA_LINK_MODE_25GbaseSR_Full, + PRESTERA_LINK_MODE_40GbaseKR4_Full, + PRESTERA_LINK_MODE_40GbaseCR4_Full, + PRESTERA_LINK_MODE_40GbaseSR4_Full, + PRESTERA_LINK_MODE_50GbaseCR2_Full, + PRESTERA_LINK_MODE_50GbaseKR2_Full, + PRESTERA_LINK_MODE_50GbaseSR2_Full, + PRESTERA_LINK_MODE_100GbaseKR4_Full, + PRESTERA_LINK_MODE_100GbaseSR4_Full, + PRESTERA_LINK_MODE_100GbaseCR4_Full, + + PRESTERA_LINK_MODE_MAX +}; + +enum { + PRESTERA_PORT_TYPE_NONE, + PRESTERA_PORT_TYPE_TP, + PRESTERA_PORT_TYPE_AUI, + PRESTERA_PORT_TYPE_MII, + PRESTERA_PORT_TYPE_FIBRE, + PRESTERA_PORT_TYPE_BNC, + PRESTERA_PORT_TYPE_DA, + PRESTERA_PORT_TYPE_OTHER, + + PRESTERA_PORT_TYPE_MAX +}; + +enum { + PRESTERA_PORT_TCVR_COPPER, + PRESTERA_PORT_TCVR_SFP, + + PRESTERA_PORT_TCVR_MAX +}; + +enum { + PRESTERA_PORT_FEC_OFF, + PRESTERA_PORT_FEC_BASER, + PRESTERA_PORT_FEC_RS, + + PRESTERA_PORT_FEC_MAX +}; + +enum { + PRESTERA_PORT_DUPLEX_HALF, + PRESTERA_PORT_DUPLEX_FULL, +}; + +enum { + PRESTERA_STP_DISABLED, + PRESTERA_STP_BLOCK_LISTEN, + PRESTERA_STP_LEARN, + PRESTERA_STP_FORWARD, +}; + +struct prestera_switch; +struct prestera_port; +struct prestera_port_stats; +struct prestera_port_caps; +enum prestera_event_type; +struct prestera_event; + +typedef void (*prestera_event_cb_t) + (struct prestera_switch *sw, struct prestera_event *evt, void *arg); + +struct prestera_rxtx_params; + +/* Switch API */ +int prestera_hw_switch_init(struct prestera_switch *sw); +void prestera_hw_switch_fini(struct prestera_switch *sw); +int prestera_hw_switch_ageing_set(struct prestera_switch *sw, u32 ageing_ms); +int prestera_hw_switch_mac_set(struct prestera_switch *sw, const char *mac); + +/* Port API */ +int prestera_hw_port_info_get(const struct prestera_port *port, + u32 *dev_id, u32 *hw_id, u16 *fp_id); +int prestera_hw_port_state_set(const struct prestera_port *port, + bool admin_state); +int prestera_hw_port_mtu_set(const struct prestera_port *port, u32 mtu); +int prestera_hw_port_mtu_get(const struct prestera_port *port, u32 *mtu); +int prestera_hw_port_mac_set(const struct prestera_port *port, const char *mac); +int prestera_hw_port_mac_get(const struct prestera_port *port, char *mac); +int prestera_hw_port_cap_get(const struct prestera_port *port, + struct prestera_port_caps *caps); +int prestera_hw_port_remote_cap_get(const struct prestera_port *port, + u64 *link_mode_bitmap); +int prestera_hw_port_remote_fc_get(const struct prestera_port *port, + bool *pause, bool *asym_pause); +int prestera_hw_port_type_get(const struct prestera_port 
*port, u8 *type); +int prestera_hw_port_fec_get(const struct prestera_port *port, u8 *fec); +int prestera_hw_port_fec_set(const struct prestera_port *port, u8 fec); +int prestera_hw_port_autoneg_set(const struct prestera_port *port, + bool autoneg, u64 link_modes, u8 fec); +int prestera_hw_port_autoneg_restart(struct prestera_port *port); +int prestera_hw_port_duplex_get(const struct prestera_port *port, u8 *duplex); +int prestera_hw_port_stats_get(const struct prestera_port *port, + struct prestera_port_stats *stats); +int prestera_hw_port_link_mode_set(const struct prestera_port *port, u32 mode); +int prestera_hw_port_link_mode_get(const struct prestera_port *port, u32 *mode); +int prestera_hw_port_mdix_get(const struct prestera_port *port, u8 *status, + u8 *admin_mode); +int prestera_hw_port_mdix_set(const struct prestera_port *port, u8 mode); +int prestera_hw_port_speed_get(const struct prestera_port *port, u32 *speed); +int prestera_hw_port_learning_set(struct prestera_port *port, bool enable); +int prestera_hw_port_flood_set(struct prestera_port *port, bool flood); +int prestera_hw_port_accept_frm_type(struct prestera_port *port, + enum prestera_accept_frm_type type); +/* Vlan API */ +int prestera_hw_vlan_create(struct prestera_switch *sw, u16 vid); +int prestera_hw_vlan_delete(struct prestera_switch *sw, u16 vid); +int prestera_hw_vlan_port_set(struct prestera_port *port, u16 vid, + bool is_member, bool untagged); +int prestera_hw_vlan_port_vid_set(struct prestera_port *port, u16 vid); +int prestera_hw_vlan_port_stp_set(struct prestera_port *port, u16 vid, u8 state); + +/* FDB API */ +int prestera_hw_fdb_add(struct prestera_port *port, const unsigned char *mac, + u16 vid, bool dynamic); +int prestera_hw_fdb_del(struct prestera_port *port, const unsigned char *mac, + u16 vid); +int prestera_hw_fdb_flush_port(struct prestera_port *port, u32 mode); +int prestera_hw_fdb_flush_vlan(struct prestera_switch *sw, u16 vid, u32 mode); +int prestera_hw_fdb_flush_port_vlan(struct prestera_port *port, u16 vid, + u32 mode); + +/* Bridge API */ +int prestera_hw_bridge_create(struct prestera_switch *sw, u16 *bridge_id); +int prestera_hw_bridge_delete(struct prestera_switch *sw, u16 bridge_id); +int prestera_hw_bridge_port_add(struct prestera_port *port, u16 bridge_id); +int prestera_hw_bridge_port_delete(struct prestera_port *port, u16 bridge_id); + +/* Event handlers */ +int prestera_hw_event_handler_register(struct prestera_switch *sw, + enum prestera_event_type type, + prestera_event_cb_t fn, + void *arg); +void prestera_hw_event_handler_unregister(struct prestera_switch *sw, + enum prestera_event_type type, + prestera_event_cb_t fn); + +/* RX/TX */ +int prestera_hw_rxtx_init(struct prestera_switch *sw, + struct prestera_rxtx_params *params); +int prestera_hw_rxtx_port_init(struct prestera_port *port); + +#endif /* _PRESTERA_HW_H_ */ diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c new file mode 100644 index 000000000..f406f5b51 --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c @@ -0,0 +1,674 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2019-2020 Marvell International Ltd. 
All rights reserved */ + +#include <linux/etherdevice.h> +#include <linux/jiffies.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/netdev_features.h> +#include <linux/of.h> +#include <linux/of_net.h> + +#include "prestera.h" +#include "prestera_hw.h" +#include "prestera_rxtx.h" +#include "prestera_devlink.h" +#include "prestera_ethtool.h" +#include "prestera_switchdev.h" + +#define PRESTERA_MTU_DEFAULT 1536 + +#define PRESTERA_STATS_DELAY_MS 1000 + +#define PRESTERA_MAC_ADDR_NUM_MAX 255 + +static struct workqueue_struct *prestera_wq; + +int prestera_port_pvid_set(struct prestera_port *port, u16 vid) +{ + enum prestera_accept_frm_type frm_type; + int err; + + frm_type = PRESTERA_ACCEPT_FRAME_TYPE_TAGGED; + + if (vid) { + err = prestera_hw_vlan_port_vid_set(port, vid); + if (err) + return err; + + frm_type = PRESTERA_ACCEPT_FRAME_TYPE_ALL; + } + + err = prestera_hw_port_accept_frm_type(port, frm_type); + if (err && frm_type == PRESTERA_ACCEPT_FRAME_TYPE_ALL) + prestera_hw_vlan_port_vid_set(port, port->pvid); + + port->pvid = vid; + return 0; +} + +struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw, + u32 dev_id, u32 hw_id) +{ + struct prestera_port *port = NULL, *tmp; + + read_lock(&sw->port_list_lock); + list_for_each_entry(tmp, &sw->port_list, list) { + if (tmp->dev_id == dev_id && tmp->hw_id == hw_id) { + port = tmp; + break; + } + } + read_unlock(&sw->port_list_lock); + + return port; +} + +struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id) +{ + struct prestera_port *port = NULL, *tmp; + + read_lock(&sw->port_list_lock); + list_for_each_entry(tmp, &sw->port_list, list) { + if (tmp->id == id) { + port = tmp; + break; + } + } + read_unlock(&sw->port_list_lock); + + return port; +} + +static int prestera_port_open(struct net_device *dev) +{ + struct prestera_port *port = netdev_priv(dev); + int err; + + err = prestera_hw_port_state_set(port, true); + if (err) + return err; + + netif_start_queue(dev); + + return 0; +} + +static int prestera_port_close(struct net_device *dev) +{ + struct prestera_port *port = netdev_priv(dev); + int err; + + netif_stop_queue(dev); + + err = prestera_hw_port_state_set(port, false); + if (err) + return err; + + return 0; +} + +static netdev_tx_t prestera_port_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + return prestera_rxtx_xmit(netdev_priv(dev), skb); +} + +static int prestera_is_valid_mac_addr(struct prestera_port *port, u8 *addr) +{ + if (!is_valid_ether_addr(addr)) + return -EADDRNOTAVAIL; + + /* firmware requires that port's MAC address contains first 5 bytes + * of the base MAC address + */ + if (memcmp(port->sw->base_mac, addr, ETH_ALEN - 1)) + return -EINVAL; + + return 0; +} + +static int prestera_port_set_mac_address(struct net_device *dev, void *p) +{ + struct prestera_port *port = netdev_priv(dev); + struct sockaddr *addr = p; + int err; + + err = prestera_is_valid_mac_addr(port, addr->sa_data); + if (err) + return err; + + err = prestera_hw_port_mac_set(port, addr->sa_data); + if (err) + return err; + + ether_addr_copy(dev->dev_addr, addr->sa_data); + + return 0; +} + +static int prestera_port_change_mtu(struct net_device *dev, int mtu) +{ + struct prestera_port *port = netdev_priv(dev); + int err; + + err = prestera_hw_port_mtu_set(port, mtu); + if (err) + return err; + + dev->mtu = mtu; + + return 0; +} + +static void prestera_port_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct prestera_port *port = netdev_priv(dev); + struct 
prestera_port_stats *port_stats = &port->cached_hw_stats.stats; + + stats->rx_packets = port_stats->broadcast_frames_received + + port_stats->multicast_frames_received + + port_stats->unicast_frames_received; + + stats->tx_packets = port_stats->broadcast_frames_sent + + port_stats->multicast_frames_sent + + port_stats->unicast_frames_sent; + + stats->rx_bytes = port_stats->good_octets_received; + + stats->tx_bytes = port_stats->good_octets_sent; + + stats->rx_errors = port_stats->rx_error_frame_received; + stats->tx_errors = port_stats->mac_trans_error; + + stats->rx_dropped = port_stats->buffer_overrun; + stats->tx_dropped = 0; + + stats->multicast = port_stats->multicast_frames_received; + stats->collisions = port_stats->excessive_collision; + + stats->rx_crc_errors = port_stats->bad_crc; +} + +static void prestera_port_get_hw_stats(struct prestera_port *port) +{ + prestera_hw_port_stats_get(port, &port->cached_hw_stats.stats); +} + +static void prestera_port_stats_update(struct work_struct *work) +{ + struct prestera_port *port = + container_of(work, struct prestera_port, + cached_hw_stats.caching_dw.work); + + prestera_port_get_hw_stats(port); + + queue_delayed_work(prestera_wq, &port->cached_hw_stats.caching_dw, + msecs_to_jiffies(PRESTERA_STATS_DELAY_MS)); +} + +static const struct net_device_ops prestera_netdev_ops = { + .ndo_open = prestera_port_open, + .ndo_stop = prestera_port_close, + .ndo_start_xmit = prestera_port_xmit, + .ndo_change_mtu = prestera_port_change_mtu, + .ndo_get_stats64 = prestera_port_get_stats64, + .ndo_set_mac_address = prestera_port_set_mac_address, + .ndo_get_devlink_port = prestera_devlink_get_port, +}; + +int prestera_port_autoneg_set(struct prestera_port *port, bool enable, + u64 adver_link_modes, u8 adver_fec) +{ + bool refresh = false; + u64 link_modes; + int err; + u8 fec; + + if (port->caps.type != PRESTERA_PORT_TYPE_TP) + return enable ? 
-EINVAL : 0; + + if (!enable) + goto set_autoneg; + + link_modes = port->caps.supp_link_modes & adver_link_modes; + fec = port->caps.supp_fec & adver_fec; + + if (!link_modes && !fec) + return -EOPNOTSUPP; + + if (link_modes && port->adver_link_modes != link_modes) { + port->adver_link_modes = link_modes; + refresh = true; + } + + if (fec && port->adver_fec != fec) { + port->adver_fec = fec; + refresh = true; + } + +set_autoneg: + if (port->autoneg == enable && !refresh) + return 0; + + err = prestera_hw_port_autoneg_set(port, enable, port->adver_link_modes, + port->adver_fec); + if (err) + return err; + + port->autoneg = enable; + + return 0; +} + +static void prestera_port_list_add(struct prestera_port *port) +{ + write_lock(&port->sw->port_list_lock); + list_add(&port->list, &port->sw->port_list); + write_unlock(&port->sw->port_list_lock); +} + +static void prestera_port_list_del(struct prestera_port *port) +{ + write_lock(&port->sw->port_list_lock); + list_del(&port->list); + write_unlock(&port->sw->port_list_lock); +} + +static int prestera_port_create(struct prestera_switch *sw, u32 id) +{ + struct prestera_port *port; + struct net_device *dev; + int err; + + dev = alloc_etherdev(sizeof(*port)); + if (!dev) + return -ENOMEM; + + port = netdev_priv(dev); + + INIT_LIST_HEAD(&port->vlans_list); + port->pvid = PRESTERA_DEFAULT_VID; + port->dev = dev; + port->id = id; + port->sw = sw; + + err = prestera_hw_port_info_get(port, &port->dev_id, &port->hw_id, + &port->fp_id); + if (err) { + dev_err(prestera_dev(sw), "Failed to get port(%u) info\n", id); + goto err_port_info_get; + } + + err = prestera_devlink_port_register(port); + if (err) + goto err_dl_port_register; + + dev->features |= NETIF_F_NETNS_LOCAL; + dev->netdev_ops = &prestera_netdev_ops; + dev->ethtool_ops = &prestera_ethtool_ops; + + netif_carrier_off(dev); + + dev->mtu = min_t(unsigned int, sw->mtu_max, PRESTERA_MTU_DEFAULT); + dev->min_mtu = sw->mtu_min; + dev->max_mtu = sw->mtu_max; + + err = prestera_hw_port_mtu_set(port, dev->mtu); + if (err) { + dev_err(prestera_dev(sw), "Failed to set port(%u) mtu(%d)\n", + id, dev->mtu); + goto err_port_init; + } + + if (port->fp_id >= PRESTERA_MAC_ADDR_NUM_MAX) { + err = -EINVAL; + goto err_port_init; + } + + /* firmware requires that port's MAC address consist of the first + * 5 bytes of the base MAC address + */ + memcpy(dev->dev_addr, sw->base_mac, dev->addr_len - 1); + dev->dev_addr[dev->addr_len - 1] = port->fp_id; + + err = prestera_hw_port_mac_set(port, dev->dev_addr); + if (err) { + dev_err(prestera_dev(sw), "Failed to set port(%u) mac addr\n", id); + goto err_port_init; + } + + err = prestera_hw_port_cap_get(port, &port->caps); + if (err) { + dev_err(prestera_dev(sw), "Failed to get port(%u) caps\n", id); + goto err_port_init; + } + + port->adver_fec = BIT(PRESTERA_PORT_FEC_OFF); + prestera_port_autoneg_set(port, true, port->caps.supp_link_modes, + port->caps.supp_fec); + + err = prestera_hw_port_state_set(port, false); + if (err) { + dev_err(prestera_dev(sw), "Failed to set port(%u) down\n", id); + goto err_port_init; + } + + err = prestera_rxtx_port_init(port); + if (err) + goto err_port_init; + + INIT_DELAYED_WORK(&port->cached_hw_stats.caching_dw, + &prestera_port_stats_update); + + prestera_port_list_add(port); + + err = register_netdev(dev); + if (err) + goto err_register_netdev; + + prestera_devlink_port_set(port); + + return 0; + +err_register_netdev: + prestera_port_list_del(port); +err_port_init: + prestera_devlink_port_unregister(port); +err_dl_port_register: 
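+ /* Fall through: these failures only require the netdev allocated
+  * above to be released.
+  */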
+err_port_info_get: + free_netdev(dev); + return err; +} + +static void prestera_port_destroy(struct prestera_port *port) +{ + struct net_device *dev = port->dev; + + cancel_delayed_work_sync(&port->cached_hw_stats.caching_dw); + prestera_devlink_port_clear(port); + unregister_netdev(dev); + prestera_port_list_del(port); + prestera_devlink_port_unregister(port); + free_netdev(dev); +} + +static void prestera_destroy_ports(struct prestera_switch *sw) +{ + struct prestera_port *port, *tmp; + + list_for_each_entry_safe(port, tmp, &sw->port_list, list) + prestera_port_destroy(port); +} + +static int prestera_create_ports(struct prestera_switch *sw) +{ + struct prestera_port *port, *tmp; + u32 port_idx; + int err; + + for (port_idx = 0; port_idx < sw->port_count; port_idx++) { + err = prestera_port_create(sw, port_idx); + if (err) + goto err_port_create; + } + + return 0; + +err_port_create: + list_for_each_entry_safe(port, tmp, &sw->port_list, list) + prestera_port_destroy(port); + + return err; +} + +static void prestera_port_handle_event(struct prestera_switch *sw, + struct prestera_event *evt, void *arg) +{ + struct delayed_work *caching_dw; + struct prestera_port *port; + + port = prestera_find_port(sw, evt->port_evt.port_id); + if (!port || !port->dev) + return; + + caching_dw = &port->cached_hw_stats.caching_dw; + + if (evt->id == PRESTERA_PORT_EVENT_STATE_CHANGED) { + if (evt->port_evt.data.oper_state) { + netif_carrier_on(port->dev); + if (!delayed_work_pending(caching_dw)) + queue_delayed_work(prestera_wq, caching_dw, 0); + } else if (netif_running(port->dev) && + netif_carrier_ok(port->dev)) { + netif_carrier_off(port->dev); + if (delayed_work_pending(caching_dw)) + cancel_delayed_work(caching_dw); + } + } +} + +static int prestera_event_handlers_register(struct prestera_switch *sw) +{ + return prestera_hw_event_handler_register(sw, PRESTERA_EVENT_TYPE_PORT, + prestera_port_handle_event, + NULL); +} + +static void prestera_event_handlers_unregister(struct prestera_switch *sw) +{ + prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_PORT, + prestera_port_handle_event); +} + +static int prestera_switch_set_base_mac_addr(struct prestera_switch *sw) +{ + struct device_node *base_mac_np; + struct device_node *np; + const char *base_mac; + + np = of_find_compatible_node(NULL, NULL, "marvell,prestera"); + base_mac_np = of_parse_phandle(np, "base-mac-provider", 0); + + base_mac = of_get_mac_address(base_mac_np); + of_node_put(base_mac_np); + if (!IS_ERR(base_mac)) + ether_addr_copy(sw->base_mac, base_mac); + + if (!is_valid_ether_addr(sw->base_mac)) { + eth_random_addr(sw->base_mac); + dev_info(prestera_dev(sw), "using random base mac address\n"); + } + + return prestera_hw_switch_mac_set(sw, sw->base_mac); +} + +bool prestera_netdev_check(const struct net_device *dev) +{ + return dev->netdev_ops == &prestera_netdev_ops; +} + +static int prestera_lower_dev_walk(struct net_device *dev, + struct netdev_nested_priv *priv) +{ + struct prestera_port **pport = (struct prestera_port **)priv->data; + + if (prestera_netdev_check(dev)) { + *pport = netdev_priv(dev); + return 1; + } + + return 0; +} + +struct prestera_port *prestera_port_dev_lower_find(struct net_device *dev) +{ + struct prestera_port *port = NULL; + struct netdev_nested_priv priv = { + .data = (void *)&port, + }; + + if (prestera_netdev_check(dev)) + return netdev_priv(dev); + + netdev_walk_all_lower_dev(dev, prestera_lower_dev_walk, &priv); + + return port; +} + +static int prestera_netdev_port_event(struct net_device *dev, 
+ unsigned long event, void *ptr) +{ + switch (event) { + case NETDEV_PRECHANGEUPPER: + case NETDEV_CHANGEUPPER: + return prestera_bridge_port_event(dev, event, ptr); + default: + return 0; + } +} + +static int prestera_netdev_event_handler(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + int err = 0; + + if (prestera_netdev_check(dev)) + err = prestera_netdev_port_event(dev, event, ptr); + + return notifier_from_errno(err); +} + +static int prestera_netdev_event_handler_register(struct prestera_switch *sw) +{ + sw->netdev_nb.notifier_call = prestera_netdev_event_handler; + + return register_netdevice_notifier(&sw->netdev_nb); +} + +static void prestera_netdev_event_handler_unregister(struct prestera_switch *sw) +{ + unregister_netdevice_notifier(&sw->netdev_nb); +} + +static int prestera_switch_init(struct prestera_switch *sw) +{ + int err; + + err = prestera_hw_switch_init(sw); + if (err) { + dev_err(prestera_dev(sw), "Failed to init Switch device\n"); + return err; + } + + rwlock_init(&sw->port_list_lock); + INIT_LIST_HEAD(&sw->port_list); + + err = prestera_switch_set_base_mac_addr(sw); + if (err) + return err; + + err = prestera_netdev_event_handler_register(sw); + if (err) + return err; + + err = prestera_switchdev_init(sw); + if (err) + goto err_swdev_register; + + err = prestera_rxtx_switch_init(sw); + if (err) + goto err_rxtx_register; + + err = prestera_event_handlers_register(sw); + if (err) + goto err_handlers_register; + + err = prestera_devlink_register(sw); + if (err) + goto err_dl_register; + + err = prestera_create_ports(sw); + if (err) + goto err_ports_create; + + return 0; + +err_ports_create: + prestera_devlink_unregister(sw); +err_dl_register: + prestera_event_handlers_unregister(sw); +err_handlers_register: + prestera_rxtx_switch_fini(sw); +err_rxtx_register: + prestera_switchdev_fini(sw); +err_swdev_register: + prestera_netdev_event_handler_unregister(sw); + prestera_hw_switch_fini(sw); + + return err; +} + +static void prestera_switch_fini(struct prestera_switch *sw) +{ + prestera_destroy_ports(sw); + prestera_devlink_unregister(sw); + prestera_event_handlers_unregister(sw); + prestera_rxtx_switch_fini(sw); + prestera_switchdev_fini(sw); + prestera_netdev_event_handler_unregister(sw); + prestera_hw_switch_fini(sw); +} + +int prestera_device_register(struct prestera_device *dev) +{ + struct prestera_switch *sw; + int err; + + sw = prestera_devlink_alloc(); + if (!sw) + return -ENOMEM; + + dev->priv = sw; + sw->dev = dev; + + err = prestera_switch_init(sw); + if (err) { + prestera_devlink_free(sw); + return err; + } + + return 0; +} +EXPORT_SYMBOL(prestera_device_register); + +void prestera_device_unregister(struct prestera_device *dev) +{ + struct prestera_switch *sw = dev->priv; + + prestera_switch_fini(sw); + prestera_devlink_free(sw); +} +EXPORT_SYMBOL(prestera_device_unregister); + +static int __init prestera_module_init(void) +{ + prestera_wq = alloc_workqueue("prestera", 0, 0); + if (!prestera_wq) + return -ENOMEM; + + return 0; +} + +static void __exit prestera_module_exit(void) +{ + destroy_workqueue(prestera_wq); +} + +module_init(prestera_module_init); +module_exit(prestera_module_exit); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("Marvell Prestera switch driver"); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c new file mode 100644 index 000000000..be5677623 --- /dev/null +++ 
b/drivers/net/ethernet/marvell/prestera/prestera_pci.c @@ -0,0 +1,772 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */ + +#include <linux/circ_buf.h> +#include <linux/device.h> +#include <linux/firmware.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> + +#include "prestera.h" + +#define PRESTERA_MSG_MAX_SIZE 1500 + +#define PRESTERA_SUPP_FW_MAJ_VER 2 +#define PRESTERA_SUPP_FW_MIN_VER 0 + +#define PRESTERA_FW_PATH_FMT "mrvl/prestera/mvsw_prestera_fw-v%u.%u.img" + +#define PRESTERA_FW_HDR_MAGIC 0x351D9D06 +#define PRESTERA_FW_DL_TIMEOUT_MS 50000 +#define PRESTERA_FW_BLK_SZ 1024 + +#define PRESTERA_FW_VER_MAJ_MUL 1000000 +#define PRESTERA_FW_VER_MIN_MUL 1000 + +#define PRESTERA_FW_VER_MAJ(v) ((v) / PRESTERA_FW_VER_MAJ_MUL) + +#define PRESTERA_FW_VER_MIN(v) \ + (((v) - (PRESTERA_FW_VER_MAJ(v) * PRESTERA_FW_VER_MAJ_MUL)) / \ + PRESTERA_FW_VER_MIN_MUL) + +#define PRESTERA_FW_VER_PATCH(v) \ + ((v) - (PRESTERA_FW_VER_MAJ(v) * PRESTERA_FW_VER_MAJ_MUL) - \ + (PRESTERA_FW_VER_MIN(v) * PRESTERA_FW_VER_MIN_MUL)) + +enum prestera_pci_bar_t { + PRESTERA_PCI_BAR_FW = 2, + PRESTERA_PCI_BAR_PP = 4, +}; + +struct prestera_fw_header { + __be32 magic_number; + __be32 version_value; + u8 reserved[8]; +}; + +struct prestera_ldr_regs { + u32 ldr_ready; + u32 pad1; + + u32 ldr_img_size; + u32 ldr_ctl_flags; + + u32 ldr_buf_offs; + u32 ldr_buf_size; + + u32 ldr_buf_rd; + u32 pad2; + u32 ldr_buf_wr; + + u32 ldr_status; +}; + +#define PRESTERA_LDR_REG_OFFSET(f) offsetof(struct prestera_ldr_regs, f) + +#define PRESTERA_LDR_READY_MAGIC 0xf00dfeed + +#define PRESTERA_LDR_STATUS_IMG_DL BIT(0) +#define PRESTERA_LDR_STATUS_START_FW BIT(1) +#define PRESTERA_LDR_STATUS_INVALID_IMG BIT(2) +#define PRESTERA_LDR_STATUS_NOMEM BIT(3) + +#define PRESTERA_LDR_REG_BASE(fw) ((fw)->ldr_regs) +#define PRESTERA_LDR_REG_ADDR(fw, reg) (PRESTERA_LDR_REG_BASE(fw) + (reg)) + +/* fw loader registers */ +#define PRESTERA_LDR_READY_REG PRESTERA_LDR_REG_OFFSET(ldr_ready) +#define PRESTERA_LDR_IMG_SIZE_REG PRESTERA_LDR_REG_OFFSET(ldr_img_size) +#define PRESTERA_LDR_CTL_REG PRESTERA_LDR_REG_OFFSET(ldr_ctl_flags) +#define PRESTERA_LDR_BUF_SIZE_REG PRESTERA_LDR_REG_OFFSET(ldr_buf_size) +#define PRESTERA_LDR_BUF_OFFS_REG PRESTERA_LDR_REG_OFFSET(ldr_buf_offs) +#define PRESTERA_LDR_BUF_RD_REG PRESTERA_LDR_REG_OFFSET(ldr_buf_rd) +#define PRESTERA_LDR_BUF_WR_REG PRESTERA_LDR_REG_OFFSET(ldr_buf_wr) +#define PRESTERA_LDR_STATUS_REG PRESTERA_LDR_REG_OFFSET(ldr_status) + +#define PRESTERA_LDR_CTL_DL_START BIT(0) + +#define PRESTERA_EVT_QNUM_MAX 4 + +struct prestera_fw_evtq_regs { + u32 rd_idx; + u32 pad1; + u32 wr_idx; + u32 pad2; + u32 offs; + u32 len; +}; + +struct prestera_fw_regs { + u32 fw_ready; + u32 pad; + u32 cmd_offs; + u32 cmd_len; + u32 evt_offs; + u32 evt_qnum; + + u32 cmd_req_ctl; + u32 cmd_req_len; + u32 cmd_rcv_ctl; + u32 cmd_rcv_len; + + u32 fw_status; + u32 rx_status; + + struct prestera_fw_evtq_regs evtq_list[PRESTERA_EVT_QNUM_MAX]; +}; + +#define PRESTERA_FW_REG_OFFSET(f) offsetof(struct prestera_fw_regs, f) + +#define PRESTERA_FW_READY_MAGIC 0xcafebabe + +/* fw registers */ +#define PRESTERA_FW_READY_REG PRESTERA_FW_REG_OFFSET(fw_ready) + +#define PRESTERA_CMD_BUF_OFFS_REG PRESTERA_FW_REG_OFFSET(cmd_offs) +#define PRESTERA_CMD_BUF_LEN_REG PRESTERA_FW_REG_OFFSET(cmd_len) +#define PRESTERA_EVT_BUF_OFFS_REG PRESTERA_FW_REG_OFFSET(evt_offs) +#define PRESTERA_EVT_QNUM_REG 
PRESTERA_FW_REG_OFFSET(evt_qnum) + +#define PRESTERA_CMD_REQ_CTL_REG PRESTERA_FW_REG_OFFSET(cmd_req_ctl) +#define PRESTERA_CMD_REQ_LEN_REG PRESTERA_FW_REG_OFFSET(cmd_req_len) + +#define PRESTERA_CMD_RCV_CTL_REG PRESTERA_FW_REG_OFFSET(cmd_rcv_ctl) +#define PRESTERA_CMD_RCV_LEN_REG PRESTERA_FW_REG_OFFSET(cmd_rcv_len) +#define PRESTERA_FW_STATUS_REG PRESTERA_FW_REG_OFFSET(fw_status) +#define PRESTERA_RX_STATUS_REG PRESTERA_FW_REG_OFFSET(rx_status) + +/* PRESTERA_CMD_REQ_CTL_REG flags */ +#define PRESTERA_CMD_F_REQ_SENT BIT(0) +#define PRESTERA_CMD_F_REPL_RCVD BIT(1) + +/* PRESTERA_CMD_RCV_CTL_REG flags */ +#define PRESTERA_CMD_F_REPL_SENT BIT(0) + +#define PRESTERA_EVTQ_REG_OFFSET(q, f) \ + (PRESTERA_FW_REG_OFFSET(evtq_list) + \ + (q) * sizeof(struct prestera_fw_evtq_regs) + \ + offsetof(struct prestera_fw_evtq_regs, f)) + +#define PRESTERA_EVTQ_RD_IDX_REG(q) PRESTERA_EVTQ_REG_OFFSET(q, rd_idx) +#define PRESTERA_EVTQ_WR_IDX_REG(q) PRESTERA_EVTQ_REG_OFFSET(q, wr_idx) +#define PRESTERA_EVTQ_OFFS_REG(q) PRESTERA_EVTQ_REG_OFFSET(q, offs) +#define PRESTERA_EVTQ_LEN_REG(q) PRESTERA_EVTQ_REG_OFFSET(q, len) + +#define PRESTERA_FW_REG_BASE(fw) ((fw)->dev.ctl_regs) +#define PRESTERA_FW_REG_ADDR(fw, reg) PRESTERA_FW_REG_BASE((fw)) + (reg) + +#define PRESTERA_FW_CMD_DEFAULT_WAIT_MS 30000 +#define PRESTERA_FW_READY_WAIT_MS 20000 + +struct prestera_fw_evtq { + u8 __iomem *addr; + size_t len; +}; + +struct prestera_fw { + struct workqueue_struct *wq; + struct prestera_device dev; + u8 __iomem *ldr_regs; + u8 __iomem *ldr_ring_buf; + u32 ldr_buf_len; + u32 ldr_wr_idx; + struct mutex cmd_mtx; /* serialize access to dev->send_req */ + size_t cmd_mbox_len; + u8 __iomem *cmd_mbox; + struct prestera_fw_evtq evt_queue[PRESTERA_EVT_QNUM_MAX]; + u8 evt_qnum; + struct work_struct evt_work; + u8 __iomem *evt_buf; + u8 *evt_msg; +}; + +static int prestera_fw_load(struct prestera_fw *fw); + +static void prestera_fw_write(struct prestera_fw *fw, u32 reg, u32 val) +{ + writel(val, PRESTERA_FW_REG_ADDR(fw, reg)); +} + +static u32 prestera_fw_read(struct prestera_fw *fw, u32 reg) +{ + return readl(PRESTERA_FW_REG_ADDR(fw, reg)); +} + +static u32 prestera_fw_evtq_len(struct prestera_fw *fw, u8 qid) +{ + return fw->evt_queue[qid].len; +} + +static u32 prestera_fw_evtq_avail(struct prestera_fw *fw, u8 qid) +{ + u32 wr_idx = prestera_fw_read(fw, PRESTERA_EVTQ_WR_IDX_REG(qid)); + u32 rd_idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid)); + + return CIRC_CNT(wr_idx, rd_idx, prestera_fw_evtq_len(fw, qid)); +} + +static void prestera_fw_evtq_rd_set(struct prestera_fw *fw, + u8 qid, u32 idx) +{ + u32 rd_idx = idx & (prestera_fw_evtq_len(fw, qid) - 1); + + prestera_fw_write(fw, PRESTERA_EVTQ_RD_IDX_REG(qid), rd_idx); +} + +static u8 __iomem *prestera_fw_evtq_buf(struct prestera_fw *fw, u8 qid) +{ + return fw->evt_queue[qid].addr; +} + +static u32 prestera_fw_evtq_read32(struct prestera_fw *fw, u8 qid) +{ + u32 rd_idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid)); + u32 val; + + val = readl(prestera_fw_evtq_buf(fw, qid) + rd_idx); + prestera_fw_evtq_rd_set(fw, qid, rd_idx + 4); + return val; +} + +static ssize_t prestera_fw_evtq_read_buf(struct prestera_fw *fw, + u8 qid, void *buf, size_t len) +{ + u32 idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid)); + u8 __iomem *evtq_addr = prestera_fw_evtq_buf(fw, qid); + u32 *buf32 = buf; + int i; + + for (i = 0; i < len / 4; buf32++, i++) { + *buf32 = readl_relaxed(evtq_addr + idx); + idx = (idx + 4) & (prestera_fw_evtq_len(fw, qid) - 1); + } + + 
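/* Publish the updated read index back to the firmware so that the
+  * consumed ring space can be reused.
+  */
+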
prestera_fw_evtq_rd_set(fw, qid, idx); + + return i; +} + +static u8 prestera_fw_evtq_pick(struct prestera_fw *fw) +{ + int qid; + + for (qid = 0; qid < fw->evt_qnum; qid++) { + if (prestera_fw_evtq_avail(fw, qid) >= 4) + return qid; + } + + return PRESTERA_EVT_QNUM_MAX; +} + +static void prestera_fw_evt_work_fn(struct work_struct *work) +{ + struct prestera_fw *fw; + void *msg; + u8 qid; + + fw = container_of(work, struct prestera_fw, evt_work); + msg = fw->evt_msg; + + while ((qid = prestera_fw_evtq_pick(fw)) < PRESTERA_EVT_QNUM_MAX) { + u32 idx; + u32 len; + + len = prestera_fw_evtq_read32(fw, qid); + idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid)); + + WARN_ON(prestera_fw_evtq_avail(fw, qid) < len); + + if (WARN_ON(len > PRESTERA_MSG_MAX_SIZE)) { + prestera_fw_evtq_rd_set(fw, qid, idx + len); + continue; + } + + prestera_fw_evtq_read_buf(fw, qid, msg, len); + + if (fw->dev.recv_msg) + fw->dev.recv_msg(&fw->dev, msg, len); + } +} + +static int prestera_fw_wait_reg32(struct prestera_fw *fw, u32 reg, u32 cmp, + unsigned int waitms) +{ + u8 __iomem *addr = PRESTERA_FW_REG_ADDR(fw, reg); + u32 val; + + return readl_poll_timeout(addr, val, cmp == val, + 1 * USEC_PER_MSEC, waitms * USEC_PER_MSEC); +} + +static int prestera_fw_cmd_send(struct prestera_fw *fw, + void *in_msg, size_t in_size, + void *out_msg, size_t out_size, + unsigned int waitms) +{ + u32 ret_size; + int err; + + if (!waitms) + waitms = PRESTERA_FW_CMD_DEFAULT_WAIT_MS; + + if (ALIGN(in_size, 4) > fw->cmd_mbox_len) + return -EMSGSIZE; + + /* wait for finish previous reply from FW */ + err = prestera_fw_wait_reg32(fw, PRESTERA_CMD_RCV_CTL_REG, 0, 30); + if (err) { + dev_err(fw->dev.dev, "finish reply from FW is timed out\n"); + return err; + } + + prestera_fw_write(fw, PRESTERA_CMD_REQ_LEN_REG, in_size); + memcpy_toio(fw->cmd_mbox, in_msg, in_size); + + prestera_fw_write(fw, PRESTERA_CMD_REQ_CTL_REG, PRESTERA_CMD_F_REQ_SENT); + + /* wait for reply from FW */ + err = prestera_fw_wait_reg32(fw, PRESTERA_CMD_RCV_CTL_REG, + PRESTERA_CMD_F_REPL_SENT, waitms); + if (err) { + dev_err(fw->dev.dev, "reply from FW is timed out\n"); + goto cmd_exit; + } + + ret_size = prestera_fw_read(fw, PRESTERA_CMD_RCV_LEN_REG); + if (ret_size > out_size) { + dev_err(fw->dev.dev, "ret_size (%u) > out_len(%zu)\n", + ret_size, out_size); + err = -EMSGSIZE; + goto cmd_exit; + } + + memcpy_fromio(out_msg, fw->cmd_mbox + in_size, ret_size); + +cmd_exit: + prestera_fw_write(fw, PRESTERA_CMD_REQ_CTL_REG, PRESTERA_CMD_F_REPL_RCVD); + return err; +} + +static int prestera_fw_send_req(struct prestera_device *dev, + void *in_msg, size_t in_size, void *out_msg, + size_t out_size, unsigned int waitms) +{ + struct prestera_fw *fw; + ssize_t ret; + + fw = container_of(dev, struct prestera_fw, dev); + + mutex_lock(&fw->cmd_mtx); + ret = prestera_fw_cmd_send(fw, in_msg, in_size, out_msg, out_size, waitms); + mutex_unlock(&fw->cmd_mtx); + + return ret; +} + +static int prestera_fw_init(struct prestera_fw *fw) +{ + u8 __iomem *base; + int err; + u8 qid; + + fw->dev.send_req = prestera_fw_send_req; + fw->ldr_regs = fw->dev.ctl_regs; + + err = prestera_fw_load(fw); + if (err) + return err; + + err = prestera_fw_wait_reg32(fw, PRESTERA_FW_READY_REG, + PRESTERA_FW_READY_MAGIC, + PRESTERA_FW_READY_WAIT_MS); + if (err) { + dev_err(fw->dev.dev, "FW failed to start\n"); + return err; + } + + base = fw->dev.ctl_regs; + + fw->cmd_mbox = base + prestera_fw_read(fw, PRESTERA_CMD_BUF_OFFS_REG); + fw->cmd_mbox_len = prestera_fw_read(fw, PRESTERA_CMD_BUF_LEN_REG); + 
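/* The command mailbox is shared by every caller of ->send_req(), so
+  * serialize access to it with a mutex.
+  */
+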
mutex_init(&fw->cmd_mtx); + + fw->evt_buf = base + prestera_fw_read(fw, PRESTERA_EVT_BUF_OFFS_REG); + fw->evt_qnum = prestera_fw_read(fw, PRESTERA_EVT_QNUM_REG); + fw->evt_msg = kmalloc(PRESTERA_MSG_MAX_SIZE, GFP_KERNEL); + if (!fw->evt_msg) + return -ENOMEM; + + for (qid = 0; qid < fw->evt_qnum; qid++) { + u32 offs = prestera_fw_read(fw, PRESTERA_EVTQ_OFFS_REG(qid)); + struct prestera_fw_evtq *evtq = &fw->evt_queue[qid]; + + evtq->len = prestera_fw_read(fw, PRESTERA_EVTQ_LEN_REG(qid)); + evtq->addr = fw->evt_buf + offs; + } + + return 0; +} + +static void prestera_fw_uninit(struct prestera_fw *fw) +{ + kfree(fw->evt_msg); +} + +static irqreturn_t prestera_pci_irq_handler(int irq, void *dev_id) +{ + struct prestera_fw *fw = dev_id; + + if (prestera_fw_read(fw, PRESTERA_RX_STATUS_REG)) { + prestera_fw_write(fw, PRESTERA_RX_STATUS_REG, 0); + + if (fw->dev.recv_pkt) + fw->dev.recv_pkt(&fw->dev); + } + + queue_work(fw->wq, &fw->evt_work); + + return IRQ_HANDLED; +} + +static void prestera_ldr_write(struct prestera_fw *fw, u32 reg, u32 val) +{ + writel(val, PRESTERA_LDR_REG_ADDR(fw, reg)); +} + +static u32 prestera_ldr_read(struct prestera_fw *fw, u32 reg) +{ + return readl(PRESTERA_LDR_REG_ADDR(fw, reg)); +} + +static int prestera_ldr_wait_reg32(struct prestera_fw *fw, + u32 reg, u32 cmp, unsigned int waitms) +{ + u8 __iomem *addr = PRESTERA_LDR_REG_ADDR(fw, reg); + u32 val; + + return readl_poll_timeout(addr, val, cmp == val, + 10 * USEC_PER_MSEC, waitms * USEC_PER_MSEC); +} + +static u32 prestera_ldr_wait_buf(struct prestera_fw *fw, size_t len) +{ + u8 __iomem *addr = PRESTERA_LDR_REG_ADDR(fw, PRESTERA_LDR_BUF_RD_REG); + u32 buf_len = fw->ldr_buf_len; + u32 wr_idx = fw->ldr_wr_idx; + u32 rd_idx; + + return readl_poll_timeout(addr, rd_idx, + CIRC_SPACE(wr_idx, rd_idx, buf_len) >= len, + 1 * USEC_PER_MSEC, 100 * USEC_PER_MSEC); +} + +static int prestera_ldr_wait_dl_finish(struct prestera_fw *fw) +{ + u8 __iomem *addr = PRESTERA_LDR_REG_ADDR(fw, PRESTERA_LDR_STATUS_REG); + unsigned long mask = ~(PRESTERA_LDR_STATUS_IMG_DL); + u32 val; + int err; + + err = readl_poll_timeout(addr, val, val & mask, 10 * USEC_PER_MSEC, + PRESTERA_FW_DL_TIMEOUT_MS * USEC_PER_MSEC); + if (err) { + dev_err(fw->dev.dev, "Timeout to load FW img [state=%d]", + prestera_ldr_read(fw, PRESTERA_LDR_STATUS_REG)); + return err; + } + + return 0; +} + +static void prestera_ldr_wr_idx_move(struct prestera_fw *fw, unsigned int n) +{ + fw->ldr_wr_idx = (fw->ldr_wr_idx + (n)) & (fw->ldr_buf_len - 1); +} + +static void prestera_ldr_wr_idx_commit(struct prestera_fw *fw) +{ + prestera_ldr_write(fw, PRESTERA_LDR_BUF_WR_REG, fw->ldr_wr_idx); +} + +static u8 __iomem *prestera_ldr_wr_ptr(struct prestera_fw *fw) +{ + return fw->ldr_ring_buf + fw->ldr_wr_idx; +} + +static int prestera_ldr_send(struct prestera_fw *fw, const u8 *buf, size_t len) +{ + int err; + int i; + + err = prestera_ldr_wait_buf(fw, len); + if (err) { + dev_err(fw->dev.dev, "failed wait for sending firmware\n"); + return err; + } + + for (i = 0; i < len; i += 4) { + writel_relaxed(*(u32 *)(buf + i), prestera_ldr_wr_ptr(fw)); + prestera_ldr_wr_idx_move(fw, 4); + } + + prestera_ldr_wr_idx_commit(fw); + return 0; +} + +static int prestera_ldr_fw_send(struct prestera_fw *fw, + const char *img, u32 fw_size) +{ + u32 status; + u32 pos; + int err; + + err = prestera_ldr_wait_reg32(fw, PRESTERA_LDR_STATUS_REG, + PRESTERA_LDR_STATUS_IMG_DL, + 5 * MSEC_PER_SEC); + if (err) { + dev_err(fw->dev.dev, "Loader is not ready to load image\n"); + return err; + } + + for (pos = 0; pos < 
fw_size; pos += PRESTERA_FW_BLK_SZ) { + if (pos + PRESTERA_FW_BLK_SZ > fw_size) + break; + + err = prestera_ldr_send(fw, img + pos, PRESTERA_FW_BLK_SZ); + if (err) + return err; + } + + if (pos < fw_size) { + err = prestera_ldr_send(fw, img + pos, fw_size - pos); + if (err) + return err; + } + + err = prestera_ldr_wait_dl_finish(fw); + if (err) + return err; + + status = prestera_ldr_read(fw, PRESTERA_LDR_STATUS_REG); + + switch (status) { + case PRESTERA_LDR_STATUS_INVALID_IMG: + dev_err(fw->dev.dev, "FW img has bad CRC\n"); + return -EINVAL; + case PRESTERA_LDR_STATUS_NOMEM: + dev_err(fw->dev.dev, "Loader has no enough mem\n"); + return -ENOMEM; + } + + return 0; +} + +static void prestera_fw_rev_parse(const struct prestera_fw_header *hdr, + struct prestera_fw_rev *rev) +{ + u32 version = be32_to_cpu(hdr->version_value); + + rev->maj = PRESTERA_FW_VER_MAJ(version); + rev->min = PRESTERA_FW_VER_MIN(version); + rev->sub = PRESTERA_FW_VER_PATCH(version); +} + +static int prestera_fw_rev_check(struct prestera_fw *fw) +{ + struct prestera_fw_rev *rev = &fw->dev.fw_rev; + u16 maj_supp = PRESTERA_SUPP_FW_MAJ_VER; + u16 min_supp = PRESTERA_SUPP_FW_MIN_VER; + + if (rev->maj == maj_supp && rev->min >= min_supp) + return 0; + + dev_err(fw->dev.dev, "Driver supports FW version only '%u.%u.x'", + PRESTERA_SUPP_FW_MAJ_VER, PRESTERA_SUPP_FW_MIN_VER); + + return -EINVAL; +} + +static int prestera_fw_hdr_parse(struct prestera_fw *fw, + const struct firmware *img) +{ + struct prestera_fw_header *hdr = (struct prestera_fw_header *)img->data; + struct prestera_fw_rev *rev = &fw->dev.fw_rev; + u32 magic; + + magic = be32_to_cpu(hdr->magic_number); + if (magic != PRESTERA_FW_HDR_MAGIC) { + dev_err(fw->dev.dev, "FW img hdr magic is invalid"); + return -EINVAL; + } + + prestera_fw_rev_parse(hdr, rev); + + dev_info(fw->dev.dev, "FW version '%u.%u.%u'\n", + rev->maj, rev->min, rev->sub); + + return prestera_fw_rev_check(fw); +} + +static int prestera_fw_load(struct prestera_fw *fw) +{ + size_t hlen = sizeof(struct prestera_fw_header); + const struct firmware *f; + char fw_path[128]; + int err; + + err = prestera_ldr_wait_reg32(fw, PRESTERA_LDR_READY_REG, + PRESTERA_LDR_READY_MAGIC, + 5 * MSEC_PER_SEC); + if (err) { + dev_err(fw->dev.dev, "waiting for FW loader is timed out"); + return err; + } + + fw->ldr_ring_buf = fw->ldr_regs + + prestera_ldr_read(fw, PRESTERA_LDR_BUF_OFFS_REG); + + fw->ldr_buf_len = + prestera_ldr_read(fw, PRESTERA_LDR_BUF_SIZE_REG); + + fw->ldr_wr_idx = 0; + + snprintf(fw_path, sizeof(fw_path), PRESTERA_FW_PATH_FMT, + PRESTERA_SUPP_FW_MAJ_VER, PRESTERA_SUPP_FW_MIN_VER); + + err = request_firmware_direct(&f, fw_path, fw->dev.dev); + if (err) { + dev_err(fw->dev.dev, "failed to request firmware file\n"); + return err; + } + + err = prestera_fw_hdr_parse(fw, f); + if (err) { + dev_err(fw->dev.dev, "FW image header is invalid\n"); + goto out_release; + } + + prestera_ldr_write(fw, PRESTERA_LDR_IMG_SIZE_REG, f->size - hlen); + prestera_ldr_write(fw, PRESTERA_LDR_CTL_REG, PRESTERA_LDR_CTL_DL_START); + + dev_info(fw->dev.dev, "Loading %s ...", fw_path); + + err = prestera_ldr_fw_send(fw, f->data + hlen, f->size - hlen); + +out_release: + release_firmware(f); + return err; +} + +static int prestera_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + const char *driver_name = pdev->driver->name; + struct prestera_fw *fw; + int err; + + err = pcim_enable_device(pdev); + if (err) + return err; + + err = pcim_iomap_regions(pdev, BIT(PRESTERA_PCI_BAR_FW) | + BIT(PRESTERA_PCI_BAR_PP), + 
pci_name(pdev)); + if (err) + return err; + + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(30)); + if (err) { + dev_err(&pdev->dev, "fail to set DMA mask\n"); + goto err_dma_mask; + } + + pci_set_master(pdev); + + fw = devm_kzalloc(&pdev->dev, sizeof(*fw), GFP_KERNEL); + if (!fw) { + err = -ENOMEM; + goto err_pci_dev_alloc; + } + + fw->dev.ctl_regs = pcim_iomap_table(pdev)[PRESTERA_PCI_BAR_FW]; + fw->dev.pp_regs = pcim_iomap_table(pdev)[PRESTERA_PCI_BAR_PP]; + fw->dev.dev = &pdev->dev; + + pci_set_drvdata(pdev, fw); + + err = prestera_fw_init(fw); + if (err) + goto err_prestera_fw_init; + + dev_info(fw->dev.dev, "Prestera FW is ready\n"); + + fw->wq = alloc_workqueue("prestera_fw_wq", WQ_HIGHPRI, 1); + if (!fw->wq) { + err = -ENOMEM; + goto err_wq_alloc; + } + + INIT_WORK(&fw->evt_work, prestera_fw_evt_work_fn); + + err = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI); + if (err < 0) { + dev_err(&pdev->dev, "MSI IRQ init failed\n"); + goto err_irq_alloc; + } + + err = request_irq(pci_irq_vector(pdev, 0), prestera_pci_irq_handler, + 0, driver_name, fw); + if (err) { + dev_err(&pdev->dev, "fail to request IRQ\n"); + goto err_request_irq; + } + + err = prestera_device_register(&fw->dev); + if (err) + goto err_prestera_dev_register; + + return 0; + +err_prestera_dev_register: + free_irq(pci_irq_vector(pdev, 0), fw); +err_request_irq: + pci_free_irq_vectors(pdev); +err_irq_alloc: + destroy_workqueue(fw->wq); +err_wq_alloc: + prestera_fw_uninit(fw); +err_prestera_fw_init: +err_pci_dev_alloc: +err_dma_mask: + return err; +} + +static void prestera_pci_remove(struct pci_dev *pdev) +{ + struct prestera_fw *fw = pci_get_drvdata(pdev); + + prestera_device_unregister(&fw->dev); + free_irq(pci_irq_vector(pdev, 0), fw); + pci_free_irq_vectors(pdev); + destroy_workqueue(fw->wq); + prestera_fw_uninit(fw); +} + +static const struct pci_device_id prestera_pci_devices[] = { + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0xC804) }, + { } +}; +MODULE_DEVICE_TABLE(pci, prestera_pci_devices); + +static struct pci_driver prestera_pci_driver = { + .name = "Prestera DX", + .id_table = prestera_pci_devices, + .probe = prestera_pci_probe, + .remove = prestera_pci_remove, +}; +module_pci_driver(prestera_pci_driver); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("Marvell Prestera switch PCI interface"); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c new file mode 100644 index 000000000..59a3ea02b --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c @@ -0,0 +1,825 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2019-2020 Marvell International Ltd. 
All rights reserved */ + +#include <linux/bitfield.h> +#include <linux/dmapool.h> +#include <linux/etherdevice.h> +#include <linux/if_vlan.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/of.h> +#include <linux/platform_device.h> + +#include "prestera_dsa.h" +#include "prestera.h" +#include "prestera_hw.h" +#include "prestera_rxtx.h" + +#define PRESTERA_SDMA_WAIT_MUL 10 + +struct prestera_sdma_desc { + __le32 word1; + __le32 word2; + __le32 buff; + __le32 next; +} __packed __aligned(16); + +#define PRESTERA_SDMA_BUFF_SIZE_MAX 1544 + +#define PRESTERA_SDMA_RX_DESC_PKT_LEN(desc) \ + ((le32_to_cpu((desc)->word2) >> 16) & GENMASK(13, 0)) + +#define PRESTERA_SDMA_RX_DESC_OWNER(desc) \ + ((le32_to_cpu((desc)->word1) & BIT(31)) >> 31) + +#define PRESTERA_SDMA_RX_DESC_IS_RCVD(desc) \ + (PRESTERA_SDMA_RX_DESC_OWNER(desc) == PRESTERA_SDMA_RX_DESC_CPU_OWN) + +#define PRESTERA_SDMA_RX_DESC_CPU_OWN 0 +#define PRESTERA_SDMA_RX_DESC_DMA_OWN 1 + +#define PRESTERA_SDMA_RX_QUEUE_NUM 8 + +#define PRESTERA_SDMA_RX_DESC_PER_Q 1000 + +#define PRESTERA_SDMA_TX_DESC_PER_Q 1000 +#define PRESTERA_SDMA_TX_MAX_BURST 64 + +#define PRESTERA_SDMA_TX_DESC_OWNER(desc) \ + ((le32_to_cpu((desc)->word1) & BIT(31)) >> 31) + +#define PRESTERA_SDMA_TX_DESC_CPU_OWN 0 +#define PRESTERA_SDMA_TX_DESC_DMA_OWN 1U + +#define PRESTERA_SDMA_TX_DESC_IS_SENT(desc) \ + (PRESTERA_SDMA_TX_DESC_OWNER(desc) == PRESTERA_SDMA_TX_DESC_CPU_OWN) + +#define PRESTERA_SDMA_TX_DESC_LAST BIT(20) +#define PRESTERA_SDMA_TX_DESC_FIRST BIT(21) +#define PRESTERA_SDMA_TX_DESC_CALC_CRC BIT(12) + +#define PRESTERA_SDMA_TX_DESC_SINGLE \ + (PRESTERA_SDMA_TX_DESC_FIRST | PRESTERA_SDMA_TX_DESC_LAST) + +#define PRESTERA_SDMA_TX_DESC_INIT \ + (PRESTERA_SDMA_TX_DESC_SINGLE | PRESTERA_SDMA_TX_DESC_CALC_CRC) + +#define PRESTERA_SDMA_RX_INTR_MASK_REG 0x2814 +#define PRESTERA_SDMA_RX_QUEUE_STATUS_REG 0x2680 +#define PRESTERA_SDMA_RX_QUEUE_DESC_REG(n) (0x260C + (n) * 16) + +#define PRESTERA_SDMA_TX_QUEUE_DESC_REG 0x26C0 +#define PRESTERA_SDMA_TX_QUEUE_START_REG 0x2868 + +struct prestera_sdma_buf { + struct prestera_sdma_desc *desc; + dma_addr_t desc_dma; + struct sk_buff *skb; + dma_addr_t buf_dma; + bool is_used; +}; + +struct prestera_rx_ring { + struct prestera_sdma_buf *bufs; + int next_rx; +}; + +struct prestera_tx_ring { + struct prestera_sdma_buf *bufs; + int next_tx; + int max_burst; + int burst; +}; + +struct prestera_sdma { + struct prestera_rx_ring rx_ring[PRESTERA_SDMA_RX_QUEUE_NUM]; + struct prestera_tx_ring tx_ring; + struct prestera_switch *sw; + struct dma_pool *desc_pool; + struct work_struct tx_work; + struct napi_struct rx_napi; + struct net_device napi_dev; + u32 map_addr; + u64 dma_mask; + /* protect SDMA with concurrrent access from multiple CPUs */ + spinlock_t tx_lock; +}; + +struct prestera_rxtx { + struct prestera_sdma sdma; +}; + +static int prestera_sdma_buf_init(struct prestera_sdma *sdma, + struct prestera_sdma_buf *buf) +{ + struct prestera_sdma_desc *desc; + dma_addr_t dma; + + desc = dma_pool_alloc(sdma->desc_pool, GFP_DMA | GFP_KERNEL, &dma); + if (!desc) + return -ENOMEM; + + buf->buf_dma = DMA_MAPPING_ERROR; + buf->desc_dma = dma; + buf->desc = desc; + buf->skb = NULL; + + return 0; +} + +static u32 prestera_sdma_map(struct prestera_sdma *sdma, dma_addr_t pa) +{ + return sdma->map_addr + pa; +} + +static void prestera_sdma_rx_desc_init(struct prestera_sdma *sdma, + struct prestera_sdma_desc *desc, + dma_addr_t buf) +{ + u32 word = le32_to_cpu(desc->word2); + + u32p_replace_bits(&word, 
PRESTERA_SDMA_BUFF_SIZE_MAX, GENMASK(15, 0)); + desc->word2 = cpu_to_le32(word); + + desc->buff = cpu_to_le32(prestera_sdma_map(sdma, buf)); + + /* make sure buffer is set before reset the descriptor */ + wmb(); + + desc->word1 = cpu_to_le32(0xA0000000); +} + +static void prestera_sdma_rx_desc_set_next(struct prestera_sdma *sdma, + struct prestera_sdma_desc *desc, + dma_addr_t next) +{ + desc->next = cpu_to_le32(prestera_sdma_map(sdma, next)); +} + +static int prestera_sdma_rx_skb_alloc(struct prestera_sdma *sdma, + struct prestera_sdma_buf *buf) +{ + struct device *dev = sdma->sw->dev->dev; + struct sk_buff *skb; + dma_addr_t dma; + + skb = alloc_skb(PRESTERA_SDMA_BUFF_SIZE_MAX, GFP_DMA | GFP_ATOMIC); + if (!skb) + return -ENOMEM; + + dma = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, dma)) + goto err_dma_map; + + if (buf->skb) + dma_unmap_single(dev, buf->buf_dma, buf->skb->len, + DMA_FROM_DEVICE); + + buf->buf_dma = dma; + buf->skb = skb; + + return 0; + +err_dma_map: + kfree_skb(skb); + + return -ENOMEM; +} + +static struct sk_buff *prestera_sdma_rx_skb_get(struct prestera_sdma *sdma, + struct prestera_sdma_buf *buf) +{ + dma_addr_t buf_dma = buf->buf_dma; + struct sk_buff *skb = buf->skb; + u32 len = skb->len; + int err; + + err = prestera_sdma_rx_skb_alloc(sdma, buf); + if (err) { + buf->buf_dma = buf_dma; + buf->skb = skb; + + skb = alloc_skb(skb->len, GFP_ATOMIC); + if (skb) { + skb_put(skb, len); + skb_copy_from_linear_data(buf->skb, skb->data, len); + } + } + + prestera_sdma_rx_desc_init(sdma, buf->desc, buf->buf_dma); + + return skb; +} + +static int prestera_rxtx_process_skb(struct prestera_sdma *sdma, + struct sk_buff *skb) +{ + const struct prestera_port *port; + struct prestera_dsa dsa; + u32 hw_port, dev_id; + int err; + + skb_pull(skb, ETH_HLEN); + + /* ethertype field is part of the dsa header */ + err = prestera_dsa_parse(&dsa, skb->data - ETH_TLEN); + if (err) + return err; + + dev_id = dsa.hw_dev_num; + hw_port = dsa.port_num; + + port = prestera_port_find_by_hwid(sdma->sw, dev_id, hw_port); + if (unlikely(!port)) { + dev_warn_ratelimited(prestera_dev(sdma->sw), "received pkt for non-existent port(%u, %u)\n", + dev_id, hw_port); + return -ENOENT; + } + + if (unlikely(!pskb_may_pull(skb, PRESTERA_DSA_HLEN))) + return -EINVAL; + + /* remove DSA tag and update checksum */ + skb_pull_rcsum(skb, PRESTERA_DSA_HLEN); + + memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - PRESTERA_DSA_HLEN, + ETH_ALEN * 2); + + skb_push(skb, ETH_HLEN); + + skb->protocol = eth_type_trans(skb, port->dev); + + if (dsa.vlan.is_tagged) { + u16 tci = dsa.vlan.vid & VLAN_VID_MASK; + + tci |= dsa.vlan.vpt << VLAN_PRIO_SHIFT; + if (dsa.vlan.cfi_bit) + tci |= VLAN_CFI_MASK; + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tci); + } + + return 0; +} + +static int prestera_sdma_next_rx_buf_idx(int buf_idx) +{ + return (buf_idx + 1) % PRESTERA_SDMA_RX_DESC_PER_Q; +} + +static int prestera_sdma_rx_poll(struct napi_struct *napi, int budget) +{ + int qnum = PRESTERA_SDMA_RX_QUEUE_NUM; + unsigned int rxq_done_map = 0; + struct prestera_sdma *sdma; + struct list_head rx_list; + unsigned int qmask; + int pkts_done = 0; + int q; + + qnum = PRESTERA_SDMA_RX_QUEUE_NUM; + qmask = GENMASK(qnum - 1, 0); + + INIT_LIST_HEAD(&rx_list); + + sdma = container_of(napi, struct prestera_sdma, rx_napi); + + while (pkts_done < budget && rxq_done_map != qmask) { + for (q = 0; q < qnum && pkts_done < budget; q++) { + struct prestera_rx_ring *ring = &sdma->rx_ring[q]; + struct 
prestera_sdma_desc *desc; + struct prestera_sdma_buf *buf; + int buf_idx = ring->next_rx; + struct sk_buff *skb; + + buf = &ring->bufs[buf_idx]; + desc = buf->desc; + + if (PRESTERA_SDMA_RX_DESC_IS_RCVD(desc)) { + rxq_done_map &= ~BIT(q); + } else { + rxq_done_map |= BIT(q); + continue; + } + + pkts_done++; + + __skb_trim(buf->skb, PRESTERA_SDMA_RX_DESC_PKT_LEN(desc)); + + skb = prestera_sdma_rx_skb_get(sdma, buf); + if (!skb) + goto rx_next_buf; + + if (unlikely(prestera_rxtx_process_skb(sdma, skb))) + goto rx_next_buf; + + list_add_tail(&skb->list, &rx_list); +rx_next_buf: + ring->next_rx = prestera_sdma_next_rx_buf_idx(buf_idx); + } + } + + if (pkts_done < budget && napi_complete_done(napi, pkts_done)) + prestera_write(sdma->sw, PRESTERA_SDMA_RX_INTR_MASK_REG, + GENMASK(9, 2)); + + netif_receive_skb_list(&rx_list); + + return pkts_done; +} + +static void prestera_sdma_rx_fini(struct prestera_sdma *sdma) +{ + int qnum = PRESTERA_SDMA_RX_QUEUE_NUM; + int q, b; + + /* disable all rx queues */ + prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG, + GENMASK(15, 8)); + + for (q = 0; q < qnum; q++) { + struct prestera_rx_ring *ring = &sdma->rx_ring[q]; + + if (!ring->bufs) + break; + + for (b = 0; b < PRESTERA_SDMA_RX_DESC_PER_Q; b++) { + struct prestera_sdma_buf *buf = &ring->bufs[b]; + + if (buf->desc_dma) + dma_pool_free(sdma->desc_pool, buf->desc, + buf->desc_dma); + + if (!buf->skb) + continue; + + if (buf->buf_dma != DMA_MAPPING_ERROR) + dma_unmap_single(sdma->sw->dev->dev, + buf->buf_dma, buf->skb->len, + DMA_FROM_DEVICE); + kfree_skb(buf->skb); + } + } +} + +static int prestera_sdma_rx_init(struct prestera_sdma *sdma) +{ + int bnum = PRESTERA_SDMA_RX_DESC_PER_Q; + int qnum = PRESTERA_SDMA_RX_QUEUE_NUM; + int err; + int q; + + /* disable all rx queues */ + prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG, + GENMASK(15, 8)); + + for (q = 0; q < qnum; q++) { + struct prestera_sdma_buf *head, *tail, *next, *prev; + struct prestera_rx_ring *ring = &sdma->rx_ring[q]; + + ring->bufs = kmalloc_array(bnum, sizeof(*head), GFP_KERNEL); + if (!ring->bufs) + return -ENOMEM; + + ring->next_rx = 0; + + tail = &ring->bufs[bnum - 1]; + head = &ring->bufs[0]; + next = head; + prev = next; + + do { + err = prestera_sdma_buf_init(sdma, next); + if (err) + return err; + + err = prestera_sdma_rx_skb_alloc(sdma, next); + if (err) + return err; + + prestera_sdma_rx_desc_init(sdma, next->desc, + next->buf_dma); + + prestera_sdma_rx_desc_set_next(sdma, prev->desc, + next->desc_dma); + + prev = next; + next++; + } while (prev != tail); + + /* join tail with head to make a circular list */ + prestera_sdma_rx_desc_set_next(sdma, tail->desc, head->desc_dma); + + prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_DESC_REG(q), + prestera_sdma_map(sdma, head->desc_dma)); + } + + /* make sure all rx descs are filled before enabling all rx queues */ + wmb(); + + prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG, + GENMASK(7, 0)); + + return 0; +} + +static void prestera_sdma_tx_desc_init(struct prestera_sdma *sdma, + struct prestera_sdma_desc *desc) +{ + desc->word1 = cpu_to_le32(PRESTERA_SDMA_TX_DESC_INIT); + desc->word2 = 0; +} + +static void prestera_sdma_tx_desc_set_next(struct prestera_sdma *sdma, + struct prestera_sdma_desc *desc, + dma_addr_t next) +{ + desc->next = cpu_to_le32(prestera_sdma_map(sdma, next)); +} + +static void prestera_sdma_tx_desc_set_buf(struct prestera_sdma *sdma, + struct prestera_sdma_desc *desc, + dma_addr_t buf, size_t len) +{ + u32 word = le32_to_cpu(desc->word2); 
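+	/* word2 bits 30:16 carry the frame byte count; ETH_FCS_LEN is added
+	 * because the SDMA engine appends the CRC itself (descriptors are
+	 * initialised with PRESTERA_SDMA_TX_DESC_CALC_CRC).
+	 */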
+ + u32p_replace_bits(&word, len + ETH_FCS_LEN, GENMASK(30, 16)); + + desc->buff = cpu_to_le32(prestera_sdma_map(sdma, buf)); + desc->word2 = cpu_to_le32(word); +} + +static void prestera_sdma_tx_desc_xmit(struct prestera_sdma_desc *desc) +{ + u32 word = le32_to_cpu(desc->word1); + + word |= PRESTERA_SDMA_TX_DESC_DMA_OWN << 31; + + /* make sure everything is written before enable xmit */ + wmb(); + + desc->word1 = cpu_to_le32(word); +} + +static int prestera_sdma_tx_buf_map(struct prestera_sdma *sdma, + struct prestera_sdma_buf *buf, + struct sk_buff *skb) +{ + struct device *dma_dev = sdma->sw->dev->dev; + dma_addr_t dma; + + dma = dma_map_single(dma_dev, skb->data, skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(dma_dev, dma)) + return -ENOMEM; + + buf->buf_dma = dma; + buf->skb = skb; + + return 0; +} + +static void prestera_sdma_tx_buf_unmap(struct prestera_sdma *sdma, + struct prestera_sdma_buf *buf) +{ + struct device *dma_dev = sdma->sw->dev->dev; + + dma_unmap_single(dma_dev, buf->buf_dma, buf->skb->len, DMA_TO_DEVICE); +} + +static void prestera_sdma_tx_recycle_work_fn(struct work_struct *work) +{ + int bnum = PRESTERA_SDMA_TX_DESC_PER_Q; + struct prestera_tx_ring *tx_ring; + struct prestera_sdma *sdma; + int b; + + sdma = container_of(work, struct prestera_sdma, tx_work); + + tx_ring = &sdma->tx_ring; + + for (b = 0; b < bnum; b++) { + struct prestera_sdma_buf *buf = &tx_ring->bufs[b]; + + if (!buf->is_used) + continue; + + if (!PRESTERA_SDMA_TX_DESC_IS_SENT(buf->desc)) + continue; + + prestera_sdma_tx_buf_unmap(sdma, buf); + dev_consume_skb_any(buf->skb); + buf->skb = NULL; + + /* make sure everything is cleaned up */ + wmb(); + + buf->is_used = false; + } +} + +static int prestera_sdma_tx_init(struct prestera_sdma *sdma) +{ + struct prestera_sdma_buf *head, *tail, *next, *prev; + struct prestera_tx_ring *tx_ring = &sdma->tx_ring; + int bnum = PRESTERA_SDMA_TX_DESC_PER_Q; + int err; + + INIT_WORK(&sdma->tx_work, prestera_sdma_tx_recycle_work_fn); + spin_lock_init(&sdma->tx_lock); + + tx_ring->bufs = kmalloc_array(bnum, sizeof(*head), GFP_KERNEL); + if (!tx_ring->bufs) + return -ENOMEM; + + tail = &tx_ring->bufs[bnum - 1]; + head = &tx_ring->bufs[0]; + next = head; + prev = next; + + tx_ring->max_burst = PRESTERA_SDMA_TX_MAX_BURST; + tx_ring->burst = tx_ring->max_burst; + tx_ring->next_tx = 0; + + do { + err = prestera_sdma_buf_init(sdma, next); + if (err) + return err; + + next->is_used = false; + + prestera_sdma_tx_desc_init(sdma, next->desc); + + prestera_sdma_tx_desc_set_next(sdma, prev->desc, + next->desc_dma); + + prev = next; + next++; + } while (prev != tail); + + /* join tail with head to make a circular list */ + prestera_sdma_tx_desc_set_next(sdma, tail->desc, head->desc_dma); + + /* make sure descriptors are written */ + wmb(); + + prestera_write(sdma->sw, PRESTERA_SDMA_TX_QUEUE_DESC_REG, + prestera_sdma_map(sdma, head->desc_dma)); + + return 0; +} + +static void prestera_sdma_tx_fini(struct prestera_sdma *sdma) +{ + struct prestera_tx_ring *ring = &sdma->tx_ring; + int bnum = PRESTERA_SDMA_TX_DESC_PER_Q; + int b; + + cancel_work_sync(&sdma->tx_work); + + if (!ring->bufs) + return; + + for (b = 0; b < bnum; b++) { + struct prestera_sdma_buf *buf = &ring->bufs[b]; + + if (buf->desc) + dma_pool_free(sdma->desc_pool, buf->desc, + buf->desc_dma); + + if (!buf->skb) + continue; + + dma_unmap_single(sdma->sw->dev->dev, buf->buf_dma, + buf->skb->len, DMA_TO_DEVICE); + + dev_consume_skb_any(buf->skb); + } +} + +static void prestera_rxtx_handle_event(struct prestera_switch 
*sw, + struct prestera_event *evt, + void *arg) +{ + struct prestera_sdma *sdma = arg; + + if (evt->id != PRESTERA_RXTX_EVENT_RCV_PKT) + return; + + prestera_write(sdma->sw, PRESTERA_SDMA_RX_INTR_MASK_REG, 0); + napi_schedule(&sdma->rx_napi); +} + +static int prestera_sdma_switch_init(struct prestera_switch *sw) +{ + struct prestera_sdma *sdma = &sw->rxtx->sdma; + struct device *dev = sw->dev->dev; + struct prestera_rxtx_params p; + int err; + + p.use_sdma = true; + + err = prestera_hw_rxtx_init(sw, &p); + if (err) { + dev_err(dev, "failed to init rxtx by hw\n"); + return err; + } + + sdma->dma_mask = dma_get_mask(dev); + sdma->map_addr = p.map_addr; + sdma->sw = sw; + + sdma->desc_pool = dma_pool_create("desc_pool", dev, + sizeof(struct prestera_sdma_desc), + 16, 0); + if (!sdma->desc_pool) + return -ENOMEM; + + err = prestera_sdma_rx_init(sdma); + if (err) { + dev_err(dev, "failed to init rx ring\n"); + goto err_rx_init; + } + + err = prestera_sdma_tx_init(sdma); + if (err) { + dev_err(dev, "failed to init tx ring\n"); + goto err_tx_init; + } + + err = prestera_hw_event_handler_register(sw, PRESTERA_EVENT_TYPE_RXTX, + prestera_rxtx_handle_event, + sdma); + if (err) + goto err_evt_register; + + init_dummy_netdev(&sdma->napi_dev); + + netif_napi_add(&sdma->napi_dev, &sdma->rx_napi, prestera_sdma_rx_poll, 64); + napi_enable(&sdma->rx_napi); + + return 0; + +err_evt_register: +err_tx_init: + prestera_sdma_tx_fini(sdma); +err_rx_init: + prestera_sdma_rx_fini(sdma); + + dma_pool_destroy(sdma->desc_pool); + return err; +} + +static void prestera_sdma_switch_fini(struct prestera_switch *sw) +{ + struct prestera_sdma *sdma = &sw->rxtx->sdma; + + napi_disable(&sdma->rx_napi); + netif_napi_del(&sdma->rx_napi); + prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_RXTX, + prestera_rxtx_handle_event); + prestera_sdma_tx_fini(sdma); + prestera_sdma_rx_fini(sdma); + dma_pool_destroy(sdma->desc_pool); +} + +static bool prestera_sdma_is_ready(struct prestera_sdma *sdma) +{ + return !(prestera_read(sdma->sw, PRESTERA_SDMA_TX_QUEUE_START_REG) & 1); +} + +static int prestera_sdma_tx_wait(struct prestera_sdma *sdma, + struct prestera_tx_ring *tx_ring) +{ + int tx_wait_num = PRESTERA_SDMA_WAIT_MUL * tx_ring->max_burst; + + do { + if (prestera_sdma_is_ready(sdma)) + return 0; + + udelay(1); + } while (--tx_wait_num); + + return -EBUSY; +} + +static void prestera_sdma_tx_start(struct prestera_sdma *sdma) +{ + prestera_write(sdma->sw, PRESTERA_SDMA_TX_QUEUE_START_REG, 1); + schedule_work(&sdma->tx_work); +} + +static netdev_tx_t prestera_sdma_xmit(struct prestera_sdma *sdma, + struct sk_buff *skb) +{ + struct device *dma_dev = sdma->sw->dev->dev; + struct net_device *dev = skb->dev; + struct prestera_tx_ring *tx_ring; + struct prestera_sdma_buf *buf; + int err; + + spin_lock(&sdma->tx_lock); + + tx_ring = &sdma->tx_ring; + + buf = &tx_ring->bufs[tx_ring->next_tx]; + if (buf->is_used) { + schedule_work(&sdma->tx_work); + goto drop_skb; + } + + if (unlikely(eth_skb_pad(skb))) + goto drop_skb_nofree; + + err = prestera_sdma_tx_buf_map(sdma, buf, skb); + if (err) + goto drop_skb; + + prestera_sdma_tx_desc_set_buf(sdma, buf->desc, buf->buf_dma, skb->len); + + dma_sync_single_for_device(dma_dev, buf->buf_dma, skb->len, + DMA_TO_DEVICE); + + if (tx_ring->burst) { + tx_ring->burst--; + } else { + tx_ring->burst = tx_ring->max_burst; + + err = prestera_sdma_tx_wait(sdma, tx_ring); + if (err) + goto drop_skb_unmap; + } + + tx_ring->next_tx = (tx_ring->next_tx + 1) % PRESTERA_SDMA_TX_DESC_PER_Q; + 
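+	/* Hand the descriptor to the SDMA engine only now that the buffer
+	 * address and length are set; prestera_sdma_tx_desc_xmit() issues a
+	 * write barrier before flipping the ownership bit.
+	 */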
prestera_sdma_tx_desc_xmit(buf->desc); + buf->is_used = true; + + prestera_sdma_tx_start(sdma); + + goto tx_done; + +drop_skb_unmap: + prestera_sdma_tx_buf_unmap(sdma, buf); +drop_skb: + dev_consume_skb_any(skb); +drop_skb_nofree: + dev->stats.tx_dropped++; +tx_done: + spin_unlock(&sdma->tx_lock); + return NETDEV_TX_OK; +} + +int prestera_rxtx_switch_init(struct prestera_switch *sw) +{ + struct prestera_rxtx *rxtx; + int err; + + rxtx = kzalloc(sizeof(*rxtx), GFP_KERNEL); + if (!rxtx) + return -ENOMEM; + + sw->rxtx = rxtx; + + err = prestera_sdma_switch_init(sw); + if (err) + kfree(rxtx); + + return err; +} + +void prestera_rxtx_switch_fini(struct prestera_switch *sw) +{ + prestera_sdma_switch_fini(sw); + kfree(sw->rxtx); +} + +int prestera_rxtx_port_init(struct prestera_port *port) +{ + int err; + + err = prestera_hw_rxtx_port_init(port); + if (err) + return err; + + port->dev->needed_headroom = PRESTERA_DSA_HLEN; + + return 0; +} + +netdev_tx_t prestera_rxtx_xmit(struct prestera_port *port, struct sk_buff *skb) +{ + struct prestera_dsa dsa; + + dsa.hw_dev_num = port->dev_id; + dsa.port_num = port->hw_id; + + if (skb_cow_head(skb, PRESTERA_DSA_HLEN) < 0) + return NET_XMIT_DROP; + + skb_push(skb, PRESTERA_DSA_HLEN); + memmove(skb->data, skb->data + PRESTERA_DSA_HLEN, 2 * ETH_ALEN); + + if (prestera_dsa_build(&dsa, skb->data + 2 * ETH_ALEN) != 0) + return NET_XMIT_DROP; + + return prestera_sdma_xmit(&port->sw->rxtx->sdma, skb); +} diff --git a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.h b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.h new file mode 100644 index 000000000..882a1225c --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */ + +#ifndef _PRESTERA_RXTX_H_ +#define _PRESTERA_RXTX_H_ + +#include <linux/netdevice.h> + +struct prestera_switch; +struct prestera_port; + +int prestera_rxtx_switch_init(struct prestera_switch *sw); +void prestera_rxtx_switch_fini(struct prestera_switch *sw); + +int prestera_rxtx_port_init(struct prestera_port *port); + +netdev_tx_t prestera_rxtx_xmit(struct prestera_port *port, struct sk_buff *skb); + +#endif /* _PRESTERA_RXTX_H_ */ diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c new file mode 100644 index 000000000..9101d00e9 --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c @@ -0,0 +1,1275 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2019-2020 Marvell International Ltd. 
All rights reserved */ + +#include <linux/if_bridge.h> +#include <linux/if_vlan.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/notifier.h> +#include <net/netevent.h> +#include <net/switchdev.h> + +#include "prestera.h" +#include "prestera_hw.h" +#include "prestera_switchdev.h" + +#define PRESTERA_VID_ALL (0xffff) + +#define PRESTERA_DEFAULT_AGEING_TIME_MS 300000 +#define PRESTERA_MAX_AGEING_TIME_MS 1000000000 +#define PRESTERA_MIN_AGEING_TIME_MS 32000 + +struct prestera_fdb_event_work { + struct work_struct work; + struct switchdev_notifier_fdb_info fdb_info; + struct net_device *dev; + unsigned long event; +}; + +struct prestera_switchdev { + struct prestera_switch *sw; + struct list_head bridge_list; + bool bridge_8021q_exists; + struct notifier_block swdev_nb_blk; + struct notifier_block swdev_nb; +}; + +struct prestera_bridge { + struct list_head head; + struct net_device *dev; + struct prestera_switchdev *swdev; + struct list_head port_list; + bool vlan_enabled; + u16 bridge_id; +}; + +struct prestera_bridge_port { + struct list_head head; + struct net_device *dev; + struct prestera_bridge *bridge; + struct list_head vlan_list; + refcount_t ref_count; + unsigned long flags; + u8 stp_state; +}; + +struct prestera_bridge_vlan { + struct list_head head; + struct list_head port_vlan_list; + u16 vid; +}; + +struct prestera_port_vlan { + struct list_head br_vlan_head; + struct list_head port_head; + struct prestera_port *port; + struct prestera_bridge_port *br_port; + u16 vid; +}; + +static struct workqueue_struct *swdev_wq; + +static void prestera_bridge_port_put(struct prestera_bridge_port *br_port); + +static int prestera_port_vid_stp_set(struct prestera_port *port, u16 vid, + u8 state); + +static struct prestera_bridge_vlan * +prestera_bridge_vlan_create(struct prestera_bridge_port *br_port, u16 vid) +{ + struct prestera_bridge_vlan *br_vlan; + + br_vlan = kzalloc(sizeof(*br_vlan), GFP_KERNEL); + if (!br_vlan) + return NULL; + + INIT_LIST_HEAD(&br_vlan->port_vlan_list); + br_vlan->vid = vid; + list_add(&br_vlan->head, &br_port->vlan_list); + + return br_vlan; +} + +static void prestera_bridge_vlan_destroy(struct prestera_bridge_vlan *br_vlan) +{ + list_del(&br_vlan->head); + WARN_ON(!list_empty(&br_vlan->port_vlan_list)); + kfree(br_vlan); +} + +static struct prestera_bridge_vlan * +prestera_bridge_vlan_by_vid(struct prestera_bridge_port *br_port, u16 vid) +{ + struct prestera_bridge_vlan *br_vlan; + + list_for_each_entry(br_vlan, &br_port->vlan_list, head) { + if (br_vlan->vid == vid) + return br_vlan; + } + + return NULL; +} + +static int prestera_bridge_vlan_port_count(struct prestera_bridge *bridge, + u16 vid) +{ + struct prestera_bridge_port *br_port; + struct prestera_bridge_vlan *br_vlan; + int count = 0; + + list_for_each_entry(br_port, &bridge->port_list, head) { + list_for_each_entry(br_vlan, &br_port->vlan_list, head) { + if (br_vlan->vid == vid) { + count += 1; + break; + } + } + } + + return count; +} + +static void prestera_bridge_vlan_put(struct prestera_bridge_vlan *br_vlan) +{ + if (list_empty(&br_vlan->port_vlan_list)) + prestera_bridge_vlan_destroy(br_vlan); +} + +static struct prestera_port_vlan * +prestera_port_vlan_by_vid(struct prestera_port *port, u16 vid) +{ + struct prestera_port_vlan *port_vlan; + + list_for_each_entry(port_vlan, &port->vlans_list, port_head) { + if (port_vlan->vid == vid) + return port_vlan; + } + + return NULL; +} + +static struct prestera_port_vlan * +prestera_port_vlan_create(struct prestera_port *port, u16 vid, 
bool untagged) +{ + struct prestera_port_vlan *port_vlan; + int err; + + port_vlan = prestera_port_vlan_by_vid(port, vid); + if (port_vlan) + return ERR_PTR(-EEXIST); + + err = prestera_hw_vlan_port_set(port, vid, true, untagged); + if (err) + return ERR_PTR(err); + + port_vlan = kzalloc(sizeof(*port_vlan), GFP_KERNEL); + if (!port_vlan) { + err = -ENOMEM; + goto err_port_vlan_alloc; + } + + port_vlan->port = port; + port_vlan->vid = vid; + + list_add(&port_vlan->port_head, &port->vlans_list); + + return port_vlan; + +err_port_vlan_alloc: + prestera_hw_vlan_port_set(port, vid, false, false); + return ERR_PTR(err); +} + +static void +prestera_port_vlan_bridge_leave(struct prestera_port_vlan *port_vlan) +{ + u32 fdb_flush_mode = PRESTERA_FDB_FLUSH_MODE_DYNAMIC; + struct prestera_port *port = port_vlan->port; + struct prestera_bridge_vlan *br_vlan; + struct prestera_bridge_port *br_port; + bool last_port, last_vlan; + u16 vid = port_vlan->vid; + int port_count; + + br_port = port_vlan->br_port; + port_count = prestera_bridge_vlan_port_count(br_port->bridge, vid); + br_vlan = prestera_bridge_vlan_by_vid(br_port, vid); + + last_vlan = list_is_singular(&br_port->vlan_list); + last_port = port_count == 1; + + if (last_vlan) + prestera_hw_fdb_flush_port(port, fdb_flush_mode); + else if (last_port) + prestera_hw_fdb_flush_vlan(port->sw, vid, fdb_flush_mode); + else + prestera_hw_fdb_flush_port_vlan(port, vid, fdb_flush_mode); + + list_del(&port_vlan->br_vlan_head); + prestera_bridge_vlan_put(br_vlan); + prestera_bridge_port_put(br_port); + port_vlan->br_port = NULL; +} + +static void prestera_port_vlan_destroy(struct prestera_port_vlan *port_vlan) +{ + struct prestera_port *port = port_vlan->port; + u16 vid = port_vlan->vid; + + if (port_vlan->br_port) + prestera_port_vlan_bridge_leave(port_vlan); + + prestera_hw_vlan_port_set(port, vid, false, false); + list_del(&port_vlan->port_head); + kfree(port_vlan); +} + +static struct prestera_bridge * +prestera_bridge_create(struct prestera_switchdev *swdev, struct net_device *dev) +{ + bool vlan_enabled = br_vlan_enabled(dev); + struct prestera_bridge *bridge; + u16 bridge_id; + int err; + + if (vlan_enabled && swdev->bridge_8021q_exists) { + netdev_err(dev, "Only one VLAN-aware bridge is supported\n"); + return ERR_PTR(-EINVAL); + } + + bridge = kzalloc(sizeof(*bridge), GFP_KERNEL); + if (!bridge) + return ERR_PTR(-ENOMEM); + + if (vlan_enabled) { + swdev->bridge_8021q_exists = true; + } else { + err = prestera_hw_bridge_create(swdev->sw, &bridge_id); + if (err) { + kfree(bridge); + return ERR_PTR(err); + } + + bridge->bridge_id = bridge_id; + } + + bridge->vlan_enabled = vlan_enabled; + bridge->swdev = swdev; + bridge->dev = dev; + + INIT_LIST_HEAD(&bridge->port_list); + + list_add(&bridge->head, &swdev->bridge_list); + + return bridge; +} + +static void prestera_bridge_destroy(struct prestera_bridge *bridge) +{ + struct prestera_switchdev *swdev = bridge->swdev; + + list_del(&bridge->head); + + if (bridge->vlan_enabled) + swdev->bridge_8021q_exists = false; + else + prestera_hw_bridge_delete(swdev->sw, bridge->bridge_id); + + WARN_ON(!list_empty(&bridge->port_list)); + kfree(bridge); +} + +static void prestera_bridge_put(struct prestera_bridge *bridge) +{ + if (list_empty(&bridge->port_list)) + prestera_bridge_destroy(bridge); +} + +static +struct prestera_bridge *prestera_bridge_by_dev(struct prestera_switchdev *swdev, + const struct net_device *dev) +{ + struct prestera_bridge *bridge; + + list_for_each_entry(bridge, &swdev->bridge_list, head) + if 
(bridge->dev == dev) + return bridge; + + return NULL; +} + +static struct prestera_bridge_port * +__prestera_bridge_port_by_dev(struct prestera_bridge *bridge, + struct net_device *dev) +{ + struct prestera_bridge_port *br_port; + + list_for_each_entry(br_port, &bridge->port_list, head) { + if (br_port->dev == dev) + return br_port; + } + + return NULL; +} + +static struct prestera_bridge_port * +prestera_bridge_port_by_dev(struct prestera_switchdev *swdev, + struct net_device *dev) +{ + struct net_device *br_dev = netdev_master_upper_dev_get(dev); + struct prestera_bridge *bridge; + + if (!br_dev) + return NULL; + + bridge = prestera_bridge_by_dev(swdev, br_dev); + if (!bridge) + return NULL; + + return __prestera_bridge_port_by_dev(bridge, dev); +} + +static struct prestera_bridge_port * +prestera_bridge_port_create(struct prestera_bridge *bridge, + struct net_device *dev) +{ + struct prestera_bridge_port *br_port; + + br_port = kzalloc(sizeof(*br_port), GFP_KERNEL); + if (!br_port) + return NULL; + + br_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC | + BR_MCAST_FLOOD; + br_port->stp_state = BR_STATE_DISABLED; + refcount_set(&br_port->ref_count, 1); + br_port->bridge = bridge; + br_port->dev = dev; + + INIT_LIST_HEAD(&br_port->vlan_list); + list_add(&br_port->head, &bridge->port_list); + + return br_port; +} + +static void +prestera_bridge_port_destroy(struct prestera_bridge_port *br_port) +{ + list_del(&br_port->head); + WARN_ON(!list_empty(&br_port->vlan_list)); + kfree(br_port); +} + +static void prestera_bridge_port_get(struct prestera_bridge_port *br_port) +{ + refcount_inc(&br_port->ref_count); +} + +static void prestera_bridge_port_put(struct prestera_bridge_port *br_port) +{ + struct prestera_bridge *bridge = br_port->bridge; + + if (refcount_dec_and_test(&br_port->ref_count)) { + prestera_bridge_port_destroy(br_port); + prestera_bridge_put(bridge); + } +} + +static struct prestera_bridge_port * +prestera_bridge_port_add(struct prestera_bridge *bridge, struct net_device *dev) +{ + struct prestera_bridge_port *br_port; + + br_port = __prestera_bridge_port_by_dev(bridge, dev); + if (br_port) { + prestera_bridge_port_get(br_port); + return br_port; + } + + br_port = prestera_bridge_port_create(bridge, dev); + if (!br_port) + return ERR_PTR(-ENOMEM); + + return br_port; +} + +static int +prestera_bridge_1d_port_join(struct prestera_bridge_port *br_port) +{ + struct prestera_port *port = netdev_priv(br_port->dev); + struct prestera_bridge *bridge = br_port->bridge; + int err; + + err = prestera_hw_bridge_port_add(port, bridge->bridge_id); + if (err) + return err; + + err = prestera_hw_port_flood_set(port, br_port->flags & BR_FLOOD); + if (err) + goto err_port_flood_set; + + err = prestera_hw_port_learning_set(port, br_port->flags & BR_LEARNING); + if (err) + goto err_port_learning_set; + + return 0; + +err_port_learning_set: + prestera_hw_port_flood_set(port, false); +err_port_flood_set: + prestera_hw_bridge_port_delete(port, bridge->bridge_id); + + return err; +} + +static int prestera_port_bridge_join(struct prestera_port *port, + struct net_device *upper) +{ + struct prestera_switchdev *swdev = port->sw->swdev; + struct prestera_bridge_port *br_port; + struct prestera_bridge *bridge; + int err; + + bridge = prestera_bridge_by_dev(swdev, upper); + if (!bridge) { + bridge = prestera_bridge_create(swdev, upper); + if (IS_ERR(bridge)) + return PTR_ERR(bridge); + } + + br_port = prestera_bridge_port_add(bridge, port->dev); + if (IS_ERR(br_port)) { + 
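+		/* If the bridge was created just above it still has no ports,
+		 * so this put tears it down again.
+		 */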
prestera_bridge_put(bridge); + return PTR_ERR(br_port); + } + + if (bridge->vlan_enabled) + return 0; + + err = prestera_bridge_1d_port_join(br_port); + if (err) + goto err_port_join; + + return 0; + +err_port_join: + prestera_bridge_port_put(br_port); + return err; +} + +static void prestera_bridge_1q_port_leave(struct prestera_bridge_port *br_port) +{ + struct prestera_port *port = netdev_priv(br_port->dev); + + prestera_hw_fdb_flush_port(port, PRESTERA_FDB_FLUSH_MODE_ALL); + prestera_port_pvid_set(port, PRESTERA_DEFAULT_VID); +} + +static void prestera_bridge_1d_port_leave(struct prestera_bridge_port *br_port) +{ + struct prestera_port *port = netdev_priv(br_port->dev); + + prestera_hw_fdb_flush_port(port, PRESTERA_FDB_FLUSH_MODE_ALL); + prestera_hw_bridge_port_delete(port, br_port->bridge->bridge_id); +} + +static int prestera_port_vid_stp_set(struct prestera_port *port, u16 vid, + u8 state) +{ + u8 hw_state = state; + + switch (state) { + case BR_STATE_DISABLED: + hw_state = PRESTERA_STP_DISABLED; + break; + + case BR_STATE_BLOCKING: + case BR_STATE_LISTENING: + hw_state = PRESTERA_STP_BLOCK_LISTEN; + break; + + case BR_STATE_LEARNING: + hw_state = PRESTERA_STP_LEARN; + break; + + case BR_STATE_FORWARDING: + hw_state = PRESTERA_STP_FORWARD; + break; + + default: + return -EINVAL; + } + + return prestera_hw_vlan_port_stp_set(port, vid, hw_state); +} + +static void prestera_port_bridge_leave(struct prestera_port *port, + struct net_device *upper) +{ + struct prestera_switchdev *swdev = port->sw->swdev; + struct prestera_bridge_port *br_port; + struct prestera_bridge *bridge; + + bridge = prestera_bridge_by_dev(swdev, upper); + if (!bridge) + return; + + br_port = __prestera_bridge_port_by_dev(bridge, port->dev); + if (!br_port) + return; + + bridge = br_port->bridge; + + if (bridge->vlan_enabled) + prestera_bridge_1q_port_leave(br_port); + else + prestera_bridge_1d_port_leave(br_port); + + prestera_hw_port_learning_set(port, false); + prestera_hw_port_flood_set(port, false); + prestera_port_vid_stp_set(port, PRESTERA_VID_ALL, BR_STATE_FORWARDING); + prestera_bridge_port_put(br_port); +} + +int prestera_bridge_port_event(struct net_device *dev, unsigned long event, + void *ptr) +{ + struct netdev_notifier_changeupper_info *info = ptr; + struct netlink_ext_ack *extack; + struct prestera_port *port; + struct net_device *upper; + int err; + + extack = netdev_notifier_info_to_extack(&info->info); + port = netdev_priv(dev); + upper = info->upper_dev; + + switch (event) { + case NETDEV_PRECHANGEUPPER: + if (!netif_is_bridge_master(upper)) { + NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); + return -EINVAL; + } + + if (!info->linking) + break; + + if (netdev_has_any_upper_dev(upper)) { + NL_SET_ERR_MSG_MOD(extack, "Upper device is already enslaved"); + return -EINVAL; + } + break; + + case NETDEV_CHANGEUPPER: + if (!netif_is_bridge_master(upper)) + break; + + if (info->linking) { + err = prestera_port_bridge_join(port, upper); + if (err) + return err; + } else { + prestera_port_bridge_leave(port, upper); + } + break; + } + + return 0; +} + +static int prestera_port_attr_br_flags_set(struct prestera_port *port, + struct switchdev_trans *trans, + struct net_device *dev, + unsigned long flags) +{ + struct prestera_bridge_port *br_port; + int err; + + if (switchdev_trans_ph_prepare(trans)) + return 0; + + br_port = prestera_bridge_port_by_dev(port->sw->swdev, dev); + if (!br_port) + return 0; + + err = prestera_hw_port_flood_set(port, flags & BR_FLOOD); + if (err) + return err; + + err = 
prestera_hw_port_learning_set(port, flags & BR_LEARNING); + if (err) + return err; + + memcpy(&br_port->flags, &flags, sizeof(flags)); + + return 0; +} + +static int prestera_port_attr_br_ageing_set(struct prestera_port *port, + struct switchdev_trans *trans, + unsigned long ageing_clock_t) +{ + unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t); + u32 ageing_time_ms = jiffies_to_msecs(ageing_jiffies); + struct prestera_switch *sw = port->sw; + + if (switchdev_trans_ph_prepare(trans)) { + if (ageing_time_ms < PRESTERA_MIN_AGEING_TIME_MS || + ageing_time_ms > PRESTERA_MAX_AGEING_TIME_MS) + return -ERANGE; + else + return 0; + } + + return prestera_hw_switch_ageing_set(sw, ageing_time_ms); +} + +static int prestera_port_attr_br_vlan_set(struct prestera_port *port, + struct switchdev_trans *trans, + struct net_device *dev, + bool vlan_enabled) +{ + struct prestera_switch *sw = port->sw; + struct prestera_bridge *bridge; + + if (!switchdev_trans_ph_prepare(trans)) + return 0; + + bridge = prestera_bridge_by_dev(sw->swdev, dev); + if (WARN_ON(!bridge)) + return -EINVAL; + + if (bridge->vlan_enabled == vlan_enabled) + return 0; + + netdev_err(bridge->dev, "VLAN filtering can't be changed for existing bridge\n"); + + return -EINVAL; +} + +static int prestera_port_bridge_vlan_stp_set(struct prestera_port *port, + struct prestera_bridge_vlan *br_vlan, + u8 state) +{ + struct prestera_port_vlan *port_vlan; + + list_for_each_entry(port_vlan, &br_vlan->port_vlan_list, br_vlan_head) { + if (port_vlan->port != port) + continue; + + return prestera_port_vid_stp_set(port, br_vlan->vid, state); + } + + return 0; +} + +static int presterar_port_attr_stp_state_set(struct prestera_port *port, + struct switchdev_trans *trans, + struct net_device *dev, + u8 state) +{ + struct prestera_bridge_port *br_port; + struct prestera_bridge_vlan *br_vlan; + int err; + u16 vid; + + if (switchdev_trans_ph_prepare(trans)) + return 0; + + br_port = prestera_bridge_port_by_dev(port->sw->swdev, dev); + if (!br_port) + return 0; + + if (!br_port->bridge->vlan_enabled) { + vid = br_port->bridge->bridge_id; + err = prestera_port_vid_stp_set(port, vid, state); + if (err) + goto err_port_stp_set; + } else { + list_for_each_entry(br_vlan, &br_port->vlan_list, head) { + err = prestera_port_bridge_vlan_stp_set(port, br_vlan, + state); + if (err) + goto err_port_vlan_stp_set; + } + } + + br_port->stp_state = state; + + return 0; + +err_port_vlan_stp_set: + list_for_each_entry_continue_reverse(br_vlan, &br_port->vlan_list, head) + prestera_port_bridge_vlan_stp_set(port, br_vlan, br_port->stp_state); + return err; + +err_port_stp_set: + prestera_port_vid_stp_set(port, vid, br_port->stp_state); + + return err; +} + +static int prestera_port_obj_attr_set(struct net_device *dev, + const struct switchdev_attr *attr, + struct switchdev_trans *trans) +{ + struct prestera_port *port = netdev_priv(dev); + int err = 0; + + switch (attr->id) { + case SWITCHDEV_ATTR_ID_PORT_STP_STATE: + err = presterar_port_attr_stp_state_set(port, trans, + attr->orig_dev, + attr->u.stp_state); + break; + case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS: + if (attr->u.brport_flags & + ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD)) + err = -EINVAL; + break; + case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: + err = prestera_port_attr_br_flags_set(port, trans, + attr->orig_dev, + attr->u.brport_flags); + break; + case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: + err = prestera_port_attr_br_ageing_set(port, trans, + attr->u.ageing_time); + break; + case 
SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: + err = prestera_port_attr_br_vlan_set(port, trans, + attr->orig_dev, + attr->u.vlan_filtering); + break; + default: + err = -EOPNOTSUPP; + } + + return err; +} + +static void +prestera_fdb_offload_notify(struct prestera_port *port, + struct switchdev_notifier_fdb_info *info) +{ + struct switchdev_notifier_fdb_info send_info; + + send_info.addr = info->addr; + send_info.vid = info->vid; + send_info.offloaded = true; + + call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, port->dev, + &send_info.info, NULL); +} + +static int prestera_port_fdb_set(struct prestera_port *port, + struct switchdev_notifier_fdb_info *fdb_info, + bool adding) +{ + struct prestera_switch *sw = port->sw; + struct prestera_bridge_port *br_port; + struct prestera_bridge *bridge; + int err; + u16 vid; + + br_port = prestera_bridge_port_by_dev(sw->swdev, port->dev); + if (!br_port) + return -EINVAL; + + bridge = br_port->bridge; + + if (bridge->vlan_enabled) + vid = fdb_info->vid; + else + vid = bridge->bridge_id; + + if (adding) + err = prestera_hw_fdb_add(port, fdb_info->addr, vid, false); + else + err = prestera_hw_fdb_del(port, fdb_info->addr, vid); + + return err; +} + +static void prestera_fdb_event_work(struct work_struct *work) +{ + struct switchdev_notifier_fdb_info *fdb_info; + struct prestera_fdb_event_work *swdev_work; + struct prestera_port *port; + struct net_device *dev; + int err; + + swdev_work = container_of(work, struct prestera_fdb_event_work, work); + dev = swdev_work->dev; + + rtnl_lock(); + + port = prestera_port_dev_lower_find(dev); + if (!port) + goto out_unlock; + + switch (swdev_work->event) { + case SWITCHDEV_FDB_ADD_TO_DEVICE: + fdb_info = &swdev_work->fdb_info; + if (!fdb_info->added_by_user) + break; + + err = prestera_port_fdb_set(port, fdb_info, true); + if (err) + break; + + prestera_fdb_offload_notify(port, fdb_info); + break; + + case SWITCHDEV_FDB_DEL_TO_DEVICE: + fdb_info = &swdev_work->fdb_info; + prestera_port_fdb_set(port, fdb_info, false); + break; + } + +out_unlock: + rtnl_unlock(); + + kfree(swdev_work->fdb_info.addr); + kfree(swdev_work); + dev_put(dev); +} + +static int prestera_switchdev_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); + struct switchdev_notifier_fdb_info *fdb_info; + struct switchdev_notifier_info *info = ptr; + struct prestera_fdb_event_work *swdev_work; + struct net_device *upper; + int err; + + if (event == SWITCHDEV_PORT_ATTR_SET) { + err = switchdev_handle_port_attr_set(dev, ptr, + prestera_netdev_check, + prestera_port_obj_attr_set); + return notifier_from_errno(err); + } + + if (!prestera_netdev_check(dev)) + return NOTIFY_DONE; + + upper = netdev_master_upper_dev_get_rcu(dev); + if (!upper) + return NOTIFY_DONE; + + if (!netif_is_bridge_master(upper)) + return NOTIFY_DONE; + + swdev_work = kzalloc(sizeof(*swdev_work), GFP_ATOMIC); + if (!swdev_work) + return NOTIFY_BAD; + + swdev_work->event = event; + swdev_work->dev = dev; + + switch (event) { + case SWITCHDEV_FDB_ADD_TO_DEVICE: + case SWITCHDEV_FDB_DEL_TO_DEVICE: + fdb_info = container_of(info, + struct switchdev_notifier_fdb_info, + info); + + INIT_WORK(&swdev_work->work, prestera_fdb_event_work); + memcpy(&swdev_work->fdb_info, ptr, + sizeof(swdev_work->fdb_info)); + + swdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); + if (!swdev_work->fdb_info.addr) + goto out_bad; + + ether_addr_copy((u8 *)swdev_work->fdb_info.addr, + fdb_info->addr); + dev_hold(dev); + 
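+		/* Matched by dev_put() in prestera_fdb_event_work() once the
+		 * deferred work has run.
+		 */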
break; + + default: + kfree(swdev_work); + return NOTIFY_DONE; + } + + queue_work(swdev_wq, &swdev_work->work); + return NOTIFY_DONE; + +out_bad: + kfree(swdev_work); + return NOTIFY_BAD; +} + +static int +prestera_port_vlan_bridge_join(struct prestera_port_vlan *port_vlan, + struct prestera_bridge_port *br_port) +{ + struct prestera_port *port = port_vlan->port; + struct prestera_bridge_vlan *br_vlan; + u16 vid = port_vlan->vid; + int err; + + if (port_vlan->br_port) + return 0; + + err = prestera_hw_port_flood_set(port, br_port->flags & BR_FLOOD); + if (err) + return err; + + err = prestera_hw_port_learning_set(port, br_port->flags & BR_LEARNING); + if (err) + goto err_port_learning_set; + + err = prestera_port_vid_stp_set(port, vid, br_port->stp_state); + if (err) + goto err_port_vid_stp_set; + + br_vlan = prestera_bridge_vlan_by_vid(br_port, vid); + if (!br_vlan) { + br_vlan = prestera_bridge_vlan_create(br_port, vid); + if (!br_vlan) { + err = -ENOMEM; + goto err_bridge_vlan_get; + } + } + + list_add(&port_vlan->br_vlan_head, &br_vlan->port_vlan_list); + + prestera_bridge_port_get(br_port); + port_vlan->br_port = br_port; + + return 0; + +err_bridge_vlan_get: + prestera_port_vid_stp_set(port, vid, BR_STATE_FORWARDING); +err_port_vid_stp_set: + prestera_hw_port_learning_set(port, false); +err_port_learning_set: + return err; +} + +static int +prestera_bridge_port_vlan_add(struct prestera_port *port, + struct prestera_bridge_port *br_port, + u16 vid, bool is_untagged, bool is_pvid, + struct netlink_ext_ack *extack) +{ + struct prestera_port_vlan *port_vlan; + u16 old_pvid = port->pvid; + u16 pvid; + int err; + + if (is_pvid) + pvid = vid; + else + pvid = port->pvid == vid ? 0 : port->pvid; + + port_vlan = prestera_port_vlan_by_vid(port, vid); + if (port_vlan && port_vlan->br_port != br_port) + return -EEXIST; + + if (!port_vlan) { + port_vlan = prestera_port_vlan_create(port, vid, is_untagged); + if (IS_ERR(port_vlan)) + return PTR_ERR(port_vlan); + } else { + err = prestera_hw_vlan_port_set(port, vid, true, is_untagged); + if (err) + goto err_port_vlan_set; + } + + err = prestera_port_pvid_set(port, pvid); + if (err) + goto err_port_pvid_set; + + err = prestera_port_vlan_bridge_join(port_vlan, br_port); + if (err) + goto err_port_vlan_bridge_join; + + return 0; + +err_port_vlan_bridge_join: + prestera_port_pvid_set(port, old_pvid); +err_port_pvid_set: + prestera_hw_vlan_port_set(port, vid, false, false); +err_port_vlan_set: + prestera_port_vlan_destroy(port_vlan); + + return err; +} + +static void +prestera_bridge_port_vlan_del(struct prestera_port *port, + struct prestera_bridge_port *br_port, u16 vid) +{ + u16 pvid = port->pvid == vid ? 
0 : port->pvid; + struct prestera_port_vlan *port_vlan; + + port_vlan = prestera_port_vlan_by_vid(port, vid); + if (WARN_ON(!port_vlan)) + return; + + prestera_port_vlan_bridge_leave(port_vlan); + prestera_port_pvid_set(port, pvid); + prestera_port_vlan_destroy(port_vlan); +} + +static int prestera_port_vlans_add(struct prestera_port *port, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans, + struct netlink_ext_ack *extack) +{ + bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; + struct net_device *dev = vlan->obj.orig_dev; + struct prestera_bridge_port *br_port; + struct prestera_switch *sw = port->sw; + struct prestera_bridge *bridge; + u16 vid; + + if (netif_is_bridge_master(dev)) + return 0; + + if (switchdev_trans_ph_commit(trans)) + return 0; + + br_port = prestera_bridge_port_by_dev(sw->swdev, dev); + if (WARN_ON(!br_port)) + return -EINVAL; + + bridge = br_port->bridge; + if (!bridge->vlan_enabled) + return 0; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { + int err; + + err = prestera_bridge_port_vlan_add(port, br_port, + vid, flag_untagged, + flag_pvid, extack); + if (err) + return err; + } + + return 0; +} + +static int prestera_port_obj_add(struct net_device *dev, + const struct switchdev_obj *obj, + struct switchdev_trans *trans, + struct netlink_ext_ack *extack) +{ + struct prestera_port *port = netdev_priv(dev); + const struct switchdev_obj_port_vlan *vlan; + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + vlan = SWITCHDEV_OBJ_PORT_VLAN(obj); + return prestera_port_vlans_add(port, vlan, trans, extack); + default: + return -EOPNOTSUPP; + } +} + +static int prestera_port_vlans_del(struct prestera_port *port, + const struct switchdev_obj_port_vlan *vlan) +{ + struct net_device *dev = vlan->obj.orig_dev; + struct prestera_bridge_port *br_port; + struct prestera_switch *sw = port->sw; + u16 vid; + + if (netif_is_bridge_master(dev)) + return -EOPNOTSUPP; + + br_port = prestera_bridge_port_by_dev(sw->swdev, dev); + if (WARN_ON(!br_port)) + return -EINVAL; + + if (!br_port->bridge->vlan_enabled) + return 0; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) + prestera_bridge_port_vlan_del(port, br_port, vid); + + return 0; +} + +static int prestera_port_obj_del(struct net_device *dev, + const struct switchdev_obj *obj) +{ + struct prestera_port *port = netdev_priv(dev); + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + return prestera_port_vlans_del(port, SWITCHDEV_OBJ_PORT_VLAN(obj)); + default: + return -EOPNOTSUPP; + } +} + +static int prestera_switchdev_blk_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); + int err; + + switch (event) { + case SWITCHDEV_PORT_OBJ_ADD: + err = switchdev_handle_port_obj_add(dev, ptr, + prestera_netdev_check, + prestera_port_obj_add); + break; + case SWITCHDEV_PORT_OBJ_DEL: + err = switchdev_handle_port_obj_del(dev, ptr, + prestera_netdev_check, + prestera_port_obj_del); + break; + case SWITCHDEV_PORT_ATTR_SET: + err = switchdev_handle_port_attr_set(dev, ptr, + prestera_netdev_check, + prestera_port_obj_attr_set); + break; + default: + err = -EOPNOTSUPP; + } + + return notifier_from_errno(err); +} + +static void prestera_fdb_event(struct prestera_switch *sw, + struct prestera_event *evt, void *arg) +{ + struct switchdev_notifier_fdb_info info; + struct prestera_port *port; + + port = prestera_find_port(sw, 
evt->fdb_evt.port_id); + if (!port) + return; + + info.addr = evt->fdb_evt.data.mac; + info.vid = evt->fdb_evt.vid; + info.offloaded = true; + + rtnl_lock(); + + switch (evt->id) { + case PRESTERA_FDB_EVENT_LEARNED: + call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, + port->dev, &info.info, NULL); + break; + case PRESTERA_FDB_EVENT_AGED: + call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, + port->dev, &info.info, NULL); + break; + } + + rtnl_unlock(); +} + +static int prestera_fdb_init(struct prestera_switch *sw) +{ + int err; + + err = prestera_hw_event_handler_register(sw, PRESTERA_EVENT_TYPE_FDB, + prestera_fdb_event, NULL); + if (err) + return err; + + err = prestera_hw_switch_ageing_set(sw, PRESTERA_DEFAULT_AGEING_TIME_MS); + if (err) + goto err_ageing_set; + + return 0; + +err_ageing_set: + prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_FDB, + prestera_fdb_event); + return err; +} + +static void prestera_fdb_fini(struct prestera_switch *sw) +{ + prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_FDB, + prestera_fdb_event); +} + +static int prestera_switchdev_handler_init(struct prestera_switchdev *swdev) +{ + int err; + + swdev->swdev_nb.notifier_call = prestera_switchdev_event; + err = register_switchdev_notifier(&swdev->swdev_nb); + if (err) + goto err_register_swdev_notifier; + + swdev->swdev_nb_blk.notifier_call = prestera_switchdev_blk_event; + err = register_switchdev_blocking_notifier(&swdev->swdev_nb_blk); + if (err) + goto err_register_blk_swdev_notifier; + + return 0; + +err_register_blk_swdev_notifier: + unregister_switchdev_notifier(&swdev->swdev_nb); +err_register_swdev_notifier: + destroy_workqueue(swdev_wq); + return err; +} + +static void prestera_switchdev_handler_fini(struct prestera_switchdev *swdev) +{ + unregister_switchdev_blocking_notifier(&swdev->swdev_nb_blk); + unregister_switchdev_notifier(&swdev->swdev_nb); +} + +int prestera_switchdev_init(struct prestera_switch *sw) +{ + struct prestera_switchdev *swdev; + int err; + + swdev = kzalloc(sizeof(*swdev), GFP_KERNEL); + if (!swdev) + return -ENOMEM; + + sw->swdev = swdev; + swdev->sw = sw; + + INIT_LIST_HEAD(&swdev->bridge_list); + + swdev_wq = alloc_ordered_workqueue("%s_ordered", 0, "prestera_br"); + if (!swdev_wq) { + err = -ENOMEM; + goto err_alloc_wq; + } + + err = prestera_switchdev_handler_init(swdev); + if (err) + goto err_swdev_init; + + err = prestera_fdb_init(sw); + if (err) + goto err_fdb_init; + + return 0; + +err_fdb_init: +err_swdev_init: + destroy_workqueue(swdev_wq); +err_alloc_wq: + kfree(swdev); + + return err; +} + +void prestera_switchdev_fini(struct prestera_switch *sw) +{ + struct prestera_switchdev *swdev = sw->swdev; + + prestera_fdb_fini(sw); + prestera_switchdev_handler_fini(swdev); + destroy_workqueue(swdev_wq); + kfree(swdev); +} diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h new file mode 100644 index 000000000..606e21d23 --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. 
*/ + +#ifndef _PRESTERA_SWITCHDEV_H_ +#define _PRESTERA_SWITCHDEV_H_ + +int prestera_switchdev_init(struct prestera_switch *sw); +void prestera_switchdev_fini(struct prestera_switch *sw); + +int prestera_bridge_port_event(struct net_device *dev, unsigned long event, + void *ptr); + +#endif /* _PRESTERA_SWITCHDEV_H_ */ diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c new file mode 100644 index 000000000..3712e1786 --- /dev/null +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -0,0 +1,1597 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * PXA168 ethernet driver. + * Most of the code is derived from mv643xx ethernet driver. + * + * Copyright (C) 2010 Marvell International Ltd. + * Sachin Sanap <ssanap@marvell.com> + * Zhangfei Gao <zgao6@marvell.com> + * Philip Rakity <prakity@marvell.com> + * Mark Brown <markb@marvell.com> + */ + +#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/in.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/ip.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_net.h> +#include <linux/phy.h> +#include <linux/platform_device.h> +#include <linux/pxa168_eth.h> +#include <linux/tcp.h> +#include <linux/types.h> +#include <linux/udp.h> +#include <linux/workqueue.h> +#include <linux/pgtable.h> + +#include <asm/cacheflush.h> + +#define DRIVER_NAME "pxa168-eth" +#define DRIVER_VERSION "0.3" + +/* + * Registers + */ + +#define PHY_ADDRESS 0x0000 +#define SMI 0x0010 +#define PORT_CONFIG 0x0400 +#define PORT_CONFIG_EXT 0x0408 +#define PORT_COMMAND 0x0410 +#define PORT_STATUS 0x0418 +#define HTPR 0x0428 +#define MAC_ADDR_LOW 0x0430 +#define MAC_ADDR_HIGH 0x0438 +#define SDMA_CONFIG 0x0440 +#define SDMA_CMD 0x0448 +#define INT_CAUSE 0x0450 +#define INT_W_CLEAR 0x0454 +#define INT_MASK 0x0458 +#define ETH_F_RX_DESC_0 0x0480 +#define ETH_C_RX_DESC_0 0x04A0 +#define ETH_C_TX_DESC_1 0x04E4 + +/* smi register */ +#define SMI_BUSY (1 << 28) /* 0 - Write, 1 - Read */ +#define SMI_R_VALID (1 << 27) /* 0 - Write, 1 - Read */ +#define SMI_OP_W (0 << 26) /* Write operation */ +#define SMI_OP_R (1 << 26) /* Read operation */ + +#define PHY_WAIT_ITERATIONS 10 + +#define PXA168_ETH_PHY_ADDR_DEFAULT 0 +/* RX & TX descriptor command */ +#define BUF_OWNED_BY_DMA (1 << 31) + +/* RX descriptor status */ +#define RX_EN_INT (1 << 23) +#define RX_FIRST_DESC (1 << 17) +#define RX_LAST_DESC (1 << 16) +#define RX_ERROR (1 << 15) + +/* TX descriptor command */ +#define TX_EN_INT (1 << 23) +#define TX_GEN_CRC (1 << 22) +#define TX_ZERO_PADDING (1 << 18) +#define TX_FIRST_DESC (1 << 17) +#define TX_LAST_DESC (1 << 16) +#define TX_ERROR (1 << 15) + +/* SDMA_CMD */ +#define SDMA_CMD_AT (1 << 31) +#define SDMA_CMD_TXDL (1 << 24) +#define SDMA_CMD_TXDH (1 << 23) +#define SDMA_CMD_AR (1 << 15) +#define SDMA_CMD_ERD (1 << 7) + +/* Bit definitions of the Port Config Reg */ +#define PCR_DUPLEX_FULL (1 << 15) +#define PCR_HS (1 << 12) +#define PCR_EN (1 << 7) +#define PCR_PM (1 << 0) + +/* Bit definitions of the Port Config Extend Reg */ +#define PCXR_2BSM (1 << 28) +#define PCXR_DSCP_EN (1 << 21) +#define PCXR_RMII_EN (1 << 20) +#define PCXR_AN_SPEED_DIS (1 << 19) +#define PCXR_SPEED_100 (1 << 18) +#define PCXR_MFL_1518 (0 << 14) +#define PCXR_MFL_1536 (1 << 14) +#define PCXR_MFL_2048 (2 << 14) +#define PCXR_MFL_64K (3 << 14) +#define PCXR_FLOWCTL_DIS (1 
<< 12) +#define PCXR_FLP (1 << 11) +#define PCXR_AN_FLOWCTL_DIS (1 << 10) +#define PCXR_AN_DUPLEX_DIS (1 << 9) +#define PCXR_PRIO_TX_OFF 3 +#define PCXR_TX_HIGH_PRI (7 << PCXR_PRIO_TX_OFF) + +/* Bit definitions of the SDMA Config Reg */ +#define SDCR_BSZ_OFF 12 +#define SDCR_BSZ8 (3 << SDCR_BSZ_OFF) +#define SDCR_BSZ4 (2 << SDCR_BSZ_OFF) +#define SDCR_BSZ2 (1 << SDCR_BSZ_OFF) +#define SDCR_BSZ1 (0 << SDCR_BSZ_OFF) +#define SDCR_BLMR (1 << 6) +#define SDCR_BLMT (1 << 7) +#define SDCR_RIFB (1 << 9) +#define SDCR_RC_OFF 2 +#define SDCR_RC_MAX_RETRANS (0xf << SDCR_RC_OFF) + +/* + * Bit definitions of the Interrupt Cause Reg + * and Interrupt MASK Reg is the same + */ +#define ICR_RXBUF (1 << 0) +#define ICR_TXBUF_H (1 << 2) +#define ICR_TXBUF_L (1 << 3) +#define ICR_TXEND_H (1 << 6) +#define ICR_TXEND_L (1 << 7) +#define ICR_RXERR (1 << 8) +#define ICR_TXERR_H (1 << 10) +#define ICR_TXERR_L (1 << 11) +#define ICR_TX_UDR (1 << 13) +#define ICR_MII_CH (1 << 28) + +#define ALL_INTS (ICR_TXBUF_H | ICR_TXBUF_L | ICR_TX_UDR |\ + ICR_TXERR_H | ICR_TXERR_L |\ + ICR_TXEND_H | ICR_TXEND_L |\ + ICR_RXBUF | ICR_RXERR | ICR_MII_CH) + +#define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */ + +#define NUM_RX_DESCS 64 +#define NUM_TX_DESCS 64 + +#define HASH_ADD 0 +#define HASH_DELETE 1 +#define HASH_ADDR_TABLE_SIZE 0x4000 /* 16K (1/2K address - PCR_HS == 1) */ +#define HOP_NUMBER 12 + +/* Bit definitions for Port status */ +#define PORT_SPEED_100 (1 << 0) +#define FULL_DUPLEX (1 << 1) +#define FLOW_CONTROL_DISABLED (1 << 2) +#define LINK_UP (1 << 3) + +/* Bit definitions for work to be done */ +#define WORK_TX_DONE (1 << 1) + +/* + * Misc definitions. + */ +#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES) + +struct rx_desc { + u32 cmd_sts; /* Descriptor command status */ + u16 byte_cnt; /* Descriptor buffer byte count */ + u16 buf_size; /* Buffer size */ + u32 buf_ptr; /* Descriptor buffer pointer */ + u32 next_desc_ptr; /* Next descriptor pointer */ +}; + +struct tx_desc { + u32 cmd_sts; /* Command/status field */ + u16 reserved; + u16 byte_cnt; /* buffer byte count */ + u32 buf_ptr; /* pointer to buffer for this descriptor */ + u32 next_desc_ptr; /* Pointer to next descriptor */ +}; + +struct pxa168_eth_private { + struct platform_device *pdev; + int port_num; /* User Ethernet port number */ + int phy_addr; + int phy_speed; + int phy_duplex; + phy_interface_t phy_intf; + + int rx_resource_err; /* Rx ring resource error flag */ + + /* Next available and first returning Rx resource */ + int rx_curr_desc_q, rx_used_desc_q; + + /* Next available and first returning Tx resource */ + int tx_curr_desc_q, tx_used_desc_q; + + struct rx_desc *p_rx_desc_area; + dma_addr_t rx_desc_dma; + int rx_desc_area_size; + struct sk_buff **rx_skb; + + struct tx_desc *p_tx_desc_area; + dma_addr_t tx_desc_dma; + int tx_desc_area_size; + struct sk_buff **tx_skb; + + struct work_struct tx_timeout_task; + + struct net_device *dev; + struct napi_struct napi; + u8 work_todo; + int skb_size; + + /* Size of Tx Ring per queue */ + int tx_ring_size; + /* Number of tx descriptors in use */ + int tx_desc_count; + /* Size of Rx Ring per queue */ + int rx_ring_size; + /* Number of rx descriptors in use */ + int rx_desc_count; + + /* + * Used in case RX Ring is empty, which can occur when + * system does not have resources (skb's) + */ + struct timer_list timeout; + struct mii_bus *smi_bus; + + /* clock */ + struct clk *clk; + struct pxa168_eth_platform_data *pd; + /* + * Ethernet controller base address. 
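+ * All register accesses go through this mapping via the rdl()/wrl()
+ * helpers below, which use readl_relaxed()/writel_relaxed().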
+ */ + void __iomem *base; + + /* Pointer to the hardware address filter table */ + void *htpr; + dma_addr_t htpr_dma; +}; + +struct addr_table_entry { + __le32 lo; + __le32 hi; +}; + +/* Bit fields of a Hash Table Entry */ +enum hash_table_entry { + HASH_ENTRY_VALID = 1, + SKIP = 2, + HASH_ENTRY_RECEIVE_DISCARD = 4, + HASH_ENTRY_RECEIVE_DISCARD_BIT = 2 +}; + +static int pxa168_init_hw(struct pxa168_eth_private *pep); +static int pxa168_init_phy(struct net_device *dev); +static void eth_port_reset(struct net_device *dev); +static void eth_port_start(struct net_device *dev); +static int pxa168_eth_open(struct net_device *dev); +static int pxa168_eth_stop(struct net_device *dev); + +static inline u32 rdl(struct pxa168_eth_private *pep, int offset) +{ + return readl_relaxed(pep->base + offset); +} + +static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data) +{ + writel_relaxed(data, pep->base + offset); +} + +static void abort_dma(struct pxa168_eth_private *pep) +{ + int delay; + int max_retries = 40; + + do { + wrl(pep, SDMA_CMD, SDMA_CMD_AR | SDMA_CMD_AT); + udelay(100); + + delay = 10; + while ((rdl(pep, SDMA_CMD) & (SDMA_CMD_AR | SDMA_CMD_AT)) + && delay-- > 0) { + udelay(10); + } + } while (max_retries-- > 0 && delay <= 0); + + if (max_retries <= 0) + netdev_err(pep->dev, "%s : DMA Stuck\n", __func__); +} + +static void rxq_refill(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + struct sk_buff *skb; + struct rx_desc *p_used_rx_desc; + int used_rx_desc; + + while (pep->rx_desc_count < pep->rx_ring_size) { + int size; + + skb = netdev_alloc_skb(dev, pep->skb_size); + if (!skb) + break; + if (SKB_DMA_REALIGN) + skb_reserve(skb, SKB_DMA_REALIGN); + pep->rx_desc_count++; + /* Get 'used' Rx descriptor */ + used_rx_desc = pep->rx_used_desc_q; + p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc]; + size = skb_end_pointer(skb) - skb->data; + p_used_rx_desc->buf_ptr = dma_map_single(&pep->pdev->dev, + skb->data, + size, + DMA_FROM_DEVICE); + p_used_rx_desc->buf_size = size; + pep->rx_skb[used_rx_desc] = skb; + + /* Return the descriptor to DMA ownership */ + dma_wmb(); + p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT; + dma_wmb(); + + /* Move the used descriptor pointer to the next descriptor */ + pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size; + + /* Any Rx return cancels the Rx resource error status */ + pep->rx_resource_err = 0; + + skb_reserve(skb, ETH_HW_IP_ALIGN); + } + + /* + * If RX ring is empty of SKB, set a timer to try allocating + * again at a later time. 
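+ * The retry fires roughly 100 ms later (HZ / 10 jiffies); its callback
+ * only schedules NAPI, and the poll routine finishes by calling
+ * rxq_refill() again.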
+ */ + if (pep->rx_desc_count == 0) { + pep->timeout.expires = jiffies + (HZ / 10); + add_timer(&pep->timeout); + } +} + +static inline void rxq_refill_timer_wrapper(struct timer_list *t) +{ + struct pxa168_eth_private *pep = from_timer(pep, t, timeout); + napi_schedule(&pep->napi); +} + +static inline u8 flip_8_bits(u8 x) +{ + return (((x) & 0x01) << 3) | (((x) & 0x02) << 1) + | (((x) & 0x04) >> 1) | (((x) & 0x08) >> 3) + | (((x) & 0x10) << 3) | (((x) & 0x20) << 1) + | (((x) & 0x40) >> 1) | (((x) & 0x80) >> 3); +} + +static void nibble_swap_every_byte(unsigned char *mac_addr) +{ + int i; + for (i = 0; i < ETH_ALEN; i++) { + mac_addr[i] = ((mac_addr[i] & 0x0f) << 4) | + ((mac_addr[i] & 0xf0) >> 4); + } +} + +static void inverse_every_nibble(unsigned char *mac_addr) +{ + int i; + for (i = 0; i < ETH_ALEN; i++) + mac_addr[i] = flip_8_bits(mac_addr[i]); +} + +/* + * ---------------------------------------------------------------------------- + * This function will calculate the hash function of the address. + * Inputs + * mac_addr_orig - MAC address. + * Outputs + * return the calculated entry. + */ +static u32 hash_function(unsigned char *mac_addr_orig) +{ + u32 hash_result; + u32 addr0; + u32 addr1; + u32 addr2; + u32 addr3; + unsigned char mac_addr[ETH_ALEN]; + + /* Make a copy of MAC address since we are going to performe bit + * operations on it + */ + memcpy(mac_addr, mac_addr_orig, ETH_ALEN); + + nibble_swap_every_byte(mac_addr); + inverse_every_nibble(mac_addr); + + addr0 = (mac_addr[5] >> 2) & 0x3f; + addr1 = (mac_addr[5] & 0x03) | (((mac_addr[4] & 0x7f)) << 2); + addr2 = ((mac_addr[4] & 0x80) >> 7) | mac_addr[3] << 1; + addr3 = (mac_addr[2] & 0xff) | ((mac_addr[1] & 1) << 8); + + hash_result = (addr0 << 9) | (addr1 ^ addr2 ^ addr3); + hash_result = hash_result & 0x07ff; + return hash_result; +} + +/* + * ---------------------------------------------------------------------------- + * This function will add/del an entry to the address table. + * Inputs + * pep - ETHERNET . + * mac_addr - MAC address. + * skip - if 1, skip this address.Used in case of deleting an entry which is a + * part of chain in the hash table.We can't just delete the entry since + * that will break the chain.We need to defragment the tables time to + * time. + * rd - 0 Discard packet upon match. + * - 1 Receive packet upon match. + * Outputs + * address table entry is added/deleted. + * 0 if success. 
+ * -ENOSPC if table full + */ +static int add_del_hash_entry(struct pxa168_eth_private *pep, + unsigned char *mac_addr, + u32 rd, u32 skip, int del) +{ + struct addr_table_entry *entry, *start; + u32 new_high; + u32 new_low; + u32 i; + + new_low = (((mac_addr[1] >> 4) & 0xf) << 15) + | (((mac_addr[1] >> 0) & 0xf) << 11) + | (((mac_addr[0] >> 4) & 0xf) << 7) + | (((mac_addr[0] >> 0) & 0xf) << 3) + | (((mac_addr[3] >> 4) & 0x1) << 31) + | (((mac_addr[3] >> 0) & 0xf) << 27) + | (((mac_addr[2] >> 4) & 0xf) << 23) + | (((mac_addr[2] >> 0) & 0xf) << 19) + | (skip << SKIP) | (rd << HASH_ENTRY_RECEIVE_DISCARD_BIT) + | HASH_ENTRY_VALID; + + new_high = (((mac_addr[5] >> 4) & 0xf) << 15) + | (((mac_addr[5] >> 0) & 0xf) << 11) + | (((mac_addr[4] >> 4) & 0xf) << 7) + | (((mac_addr[4] >> 0) & 0xf) << 3) + | (((mac_addr[3] >> 5) & 0x7) << 0); + + /* + * Pick the appropriate table, start scanning for free/reusable + * entries at the index obtained by hashing the specified MAC address + */ + start = pep->htpr; + entry = start + hash_function(mac_addr); + for (i = 0; i < HOP_NUMBER; i++) { + if (!(le32_to_cpu(entry->lo) & HASH_ENTRY_VALID)) { + break; + } else { + /* if same address put in same position */ + if (((le32_to_cpu(entry->lo) & 0xfffffff8) == + (new_low & 0xfffffff8)) && + (le32_to_cpu(entry->hi) == new_high)) { + break; + } + } + if (entry == start + 0x7ff) + entry = start; + else + entry++; + } + + if (((le32_to_cpu(entry->lo) & 0xfffffff8) != (new_low & 0xfffffff8)) && + (le32_to_cpu(entry->hi) != new_high) && del) + return 0; + + if (i == HOP_NUMBER) { + if (!del) { + netdev_info(pep->dev, + "%s: table section is full, need to " + "move to 16kB implementation?\n", + __FILE__); + return -ENOSPC; + } else + return 0; + } + + /* + * Update the selected entry + */ + if (del) { + entry->hi = 0; + entry->lo = 0; + } else { + entry->hi = cpu_to_le32(new_high); + entry->lo = cpu_to_le32(new_low); + } + + return 0; +} + +/* + * ---------------------------------------------------------------------------- + * Create an addressTable entry from MAC address info + * found in the specifed net_device struct + * + * Input : pointer to ethernet interface network device structure + * Output : N/A + */ +static void update_hash_table_mac_address(struct pxa168_eth_private *pep, + unsigned char *oaddr, + unsigned char *addr) +{ + /* Delete old entry */ + if (oaddr) + add_del_hash_entry(pep, oaddr, 1, 0, HASH_DELETE); + /* Add new entry */ + add_del_hash_entry(pep, addr, 1, 0, HASH_ADD); +} + +static int init_hash_table(struct pxa168_eth_private *pep) +{ + /* + * Hardware expects CPU to build a hash table based on a predefined + * hash function and populate it based on hardware address. The + * location of the hash table is identified by 32-bit pointer stored + * in HTPR internal register. Two possible sizes exists for the hash + * table 8kB (256kB of DRAM required (4 x 64 kB banks)) and 1/2kB + * (16kB of DRAM required (4 x 4 kB banks)).We currently only support + * 1/2kB. + */ + /* TODO: Add support for 8kB hash table and alternative hash + * function.Driver can dynamically switch to them if the 1/2kB hash + * table is full. 
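+ * In the supported 1/2kB mode the driver keeps HASH_ADDR_TABLE_SIZE
+ * (16 kB) of coherent memory, i.e. 2048 eight-byte entries, and
+ * add_del_hash_entry() probes at most HOP_NUMBER (12) consecutive
+ * slots from the hashed index before reporting -ENOSPC.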
+ */ + if (!pep->htpr) { + pep->htpr = dma_alloc_coherent(pep->dev->dev.parent, + HASH_ADDR_TABLE_SIZE, + &pep->htpr_dma, GFP_KERNEL); + if (!pep->htpr) + return -ENOMEM; + } else { + memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE); + } + wrl(pep, HTPR, pep->htpr_dma); + return 0; +} + +static void pxa168_eth_set_rx_mode(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + struct netdev_hw_addr *ha; + u32 val; + + val = rdl(pep, PORT_CONFIG); + if (dev->flags & IFF_PROMISC) + val |= PCR_PM; + else + val &= ~PCR_PM; + wrl(pep, PORT_CONFIG, val); + + /* + * Remove the old list of MAC address and add dev->addr + * and multicast address. + */ + memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE); + update_hash_table_mac_address(pep, NULL, dev->dev_addr); + + netdev_for_each_mc_addr(ha, dev) + update_hash_table_mac_address(pep, NULL, ha->addr); +} + +static void pxa168_eth_get_mac_address(struct net_device *dev, + unsigned char *addr) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + unsigned int mac_h = rdl(pep, MAC_ADDR_HIGH); + unsigned int mac_l = rdl(pep, MAC_ADDR_LOW); + + addr[0] = (mac_h >> 24) & 0xff; + addr[1] = (mac_h >> 16) & 0xff; + addr[2] = (mac_h >> 8) & 0xff; + addr[3] = mac_h & 0xff; + addr[4] = (mac_l >> 8) & 0xff; + addr[5] = mac_l & 0xff; +} + +static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr) +{ + struct sockaddr *sa = addr; + struct pxa168_eth_private *pep = netdev_priv(dev); + unsigned char oldMac[ETH_ALEN]; + u32 mac_h, mac_l; + + if (!is_valid_ether_addr(sa->sa_data)) + return -EADDRNOTAVAIL; + memcpy(oldMac, dev->dev_addr, ETH_ALEN); + memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN); + + mac_h = dev->dev_addr[0] << 24; + mac_h |= dev->dev_addr[1] << 16; + mac_h |= dev->dev_addr[2] << 8; + mac_h |= dev->dev_addr[3]; + mac_l = dev->dev_addr[4] << 8; + mac_l |= dev->dev_addr[5]; + wrl(pep, MAC_ADDR_HIGH, mac_h); + wrl(pep, MAC_ADDR_LOW, mac_l); + + netif_addr_lock_bh(dev); + update_hash_table_mac_address(pep, oldMac, dev->dev_addr); + netif_addr_unlock_bh(dev); + return 0; +} + +static void eth_port_start(struct net_device *dev) +{ + unsigned int val = 0; + struct pxa168_eth_private *pep = netdev_priv(dev); + int tx_curr_desc, rx_curr_desc; + + phy_start(dev->phydev); + + /* Assignment of Tx CTRP of given queue */ + tx_curr_desc = pep->tx_curr_desc_q; + wrl(pep, ETH_C_TX_DESC_1, + (u32) (pep->tx_desc_dma + tx_curr_desc * sizeof(struct tx_desc))); + + /* Assignment of Rx CRDP of given queue */ + rx_curr_desc = pep->rx_curr_desc_q; + wrl(pep, ETH_C_RX_DESC_0, + (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc))); + + wrl(pep, ETH_F_RX_DESC_0, + (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc))); + + /* Clear all interrupts */ + wrl(pep, INT_CAUSE, 0); + + /* Enable all interrupts for receive, transmit and error. */ + wrl(pep, INT_MASK, ALL_INTS); + + val = rdl(pep, PORT_CONFIG); + val |= PCR_EN; + wrl(pep, PORT_CONFIG, val); + + /* Start RX DMA engine */ + val = rdl(pep, SDMA_CMD); + val |= SDMA_CMD_ERD; + wrl(pep, SDMA_CMD, val); +} + +static void eth_port_reset(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + unsigned int val = 0; + + /* Stop all interrupts for receive, transmit and error. */ + wrl(pep, INT_MASK, 0); + + /* Clear all interrupts */ + wrl(pep, INT_CAUSE, 0); + + /* Stop RX DMA */ + val = rdl(pep, SDMA_CMD); + val &= ~SDMA_CMD_ERD; /* abort dma command */ + + /* Abort any transmit and receive operations and put DMA + * in idle state. 
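+ * abort_dma() does this by writing SDMA_CMD_AR | SDMA_CMD_AT and
+ * polling for both bits to clear, retrying up to 40 times before
+ * reporting a stuck DMA engine.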
+ */ + abort_dma(pep); + + /* Disable port */ + val = rdl(pep, PORT_CONFIG); + val &= ~PCR_EN; + wrl(pep, PORT_CONFIG, val); + + phy_stop(dev->phydev); +} + +/* + * txq_reclaim - Free the tx desc data for completed descriptors + * If force is non-zero, frees uncompleted descriptors as well + */ +static int txq_reclaim(struct net_device *dev, int force) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + struct tx_desc *desc; + u32 cmd_sts; + struct sk_buff *skb; + int tx_index; + dma_addr_t addr; + int count; + int released = 0; + + netif_tx_lock(dev); + + pep->work_todo &= ~WORK_TX_DONE; + while (pep->tx_desc_count > 0) { + tx_index = pep->tx_used_desc_q; + desc = &pep->p_tx_desc_area[tx_index]; + cmd_sts = desc->cmd_sts; + if (!force && (cmd_sts & BUF_OWNED_BY_DMA)) { + if (released > 0) { + goto txq_reclaim_end; + } else { + released = -1; + goto txq_reclaim_end; + } + } + pep->tx_used_desc_q = (tx_index + 1) % pep->tx_ring_size; + pep->tx_desc_count--; + addr = desc->buf_ptr; + count = desc->byte_cnt; + skb = pep->tx_skb[tx_index]; + if (skb) + pep->tx_skb[tx_index] = NULL; + + if (cmd_sts & TX_ERROR) { + if (net_ratelimit()) + netdev_err(dev, "Error in TX\n"); + dev->stats.tx_errors++; + } + dma_unmap_single(&pep->pdev->dev, addr, count, DMA_TO_DEVICE); + if (skb) + dev_kfree_skb_irq(skb); + released++; + } +txq_reclaim_end: + netif_tx_unlock(dev); + return released; +} + +static void pxa168_eth_tx_timeout(struct net_device *dev, unsigned int txqueue) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + + netdev_info(dev, "TX timeout desc_count %d\n", pep->tx_desc_count); + + schedule_work(&pep->tx_timeout_task); +} + +static void pxa168_eth_tx_timeout_task(struct work_struct *work) +{ + struct pxa168_eth_private *pep = container_of(work, + struct pxa168_eth_private, + tx_timeout_task); + struct net_device *dev = pep->dev; + pxa168_eth_stop(dev); + pxa168_eth_open(dev); +} + +static int rxq_process(struct net_device *dev, int budget) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + unsigned int received_packets = 0; + struct sk_buff *skb; + + while (budget-- > 0) { + int rx_next_curr_desc, rx_curr_desc, rx_used_desc; + struct rx_desc *rx_desc; + unsigned int cmd_sts; + + /* Do not process Rx ring in case of Rx ring resource error */ + if (pep->rx_resource_err) + break; + rx_curr_desc = pep->rx_curr_desc_q; + rx_used_desc = pep->rx_used_desc_q; + rx_desc = &pep->p_rx_desc_area[rx_curr_desc]; + cmd_sts = rx_desc->cmd_sts; + dma_rmb(); + if (cmd_sts & (BUF_OWNED_BY_DMA)) + break; + skb = pep->rx_skb[rx_curr_desc]; + pep->rx_skb[rx_curr_desc] = NULL; + + rx_next_curr_desc = (rx_curr_desc + 1) % pep->rx_ring_size; + pep->rx_curr_desc_q = rx_next_curr_desc; + + /* Rx descriptors exhausted. */ + /* Set the Rx ring resource error flag */ + if (rx_next_curr_desc == rx_used_desc) + pep->rx_resource_err = 1; + pep->rx_desc_count--; + dma_unmap_single(&pep->pdev->dev, rx_desc->buf_ptr, + rx_desc->buf_size, + DMA_FROM_DEVICE); + received_packets++; + /* + * Update statistics. + * Note byte count includes 4 byte CRC count + */ + stats->rx_packets++; + stats->rx_bytes += rx_desc->byte_cnt; + /* + * In case received a packet without first / last bits on OR + * the error summary bit is on, the packets needs to be droped. 
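+ * The buffer was already unmapped above, so dropping the frame only
+ * requires freeing the skb and bumping the error counters.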
+ */ + if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) != + (RX_FIRST_DESC | RX_LAST_DESC)) + || (cmd_sts & RX_ERROR)) { + + stats->rx_dropped++; + if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) != + (RX_FIRST_DESC | RX_LAST_DESC)) { + if (net_ratelimit()) + netdev_err(dev, + "Rx pkt on multiple desc\n"); + } + if (cmd_sts & RX_ERROR) + stats->rx_errors++; + dev_kfree_skb_irq(skb); + } else { + /* + * The -4 is for the CRC in the trailer of the + * received packet + */ + skb_put(skb, rx_desc->byte_cnt - 4); + skb->protocol = eth_type_trans(skb, dev); + netif_receive_skb(skb); + } + } + /* Fill RX ring with skb's */ + rxq_refill(dev); + return received_packets; +} + +static int pxa168_eth_collect_events(struct pxa168_eth_private *pep, + struct net_device *dev) +{ + u32 icr; + int ret = 0; + + icr = rdl(pep, INT_CAUSE); + if (icr == 0) + return IRQ_NONE; + + wrl(pep, INT_CAUSE, ~icr); + if (icr & (ICR_TXBUF_H | ICR_TXBUF_L)) { + pep->work_todo |= WORK_TX_DONE; + ret = 1; + } + if (icr & ICR_RXBUF) + ret = 1; + return ret; +} + +static irqreturn_t pxa168_eth_int_handler(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct pxa168_eth_private *pep = netdev_priv(dev); + + if (unlikely(!pxa168_eth_collect_events(pep, dev))) + return IRQ_NONE; + /* Disable interrupts */ + wrl(pep, INT_MASK, 0); + napi_schedule(&pep->napi); + return IRQ_HANDLED; +} + +static void pxa168_eth_recalc_skb_size(struct pxa168_eth_private *pep) +{ + int skb_size; + + /* + * Reserve 2+14 bytes for an ethernet header (the hardware + * automatically prepends 2 bytes of dummy data to each + * received packet), 16 bytes for up to four VLAN tags, and + * 4 bytes for the trailing FCS -- 36 bytes total. + */ + skb_size = pep->dev->mtu + 36; + + /* + * Make sure that the skb size is a multiple of 8 bytes, as + * the lower three bits of the receive descriptor's buffer + * size field are ignored by the hardware. + */ + pep->skb_size = (skb_size + 7) & ~7; + + /* + * If NET_SKB_PAD is smaller than a cache line, + * netdev_alloc_skb() will cause skb->data to be misaligned + * to a cache line boundary. If this is the case, include + * some extra space to allow re-aligning the data area. 
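+ * For example, with the default 1500 byte MTU this works out to
+ * 1500 + 36 = 1536 bytes, which is already a multiple of 8, so only
+ * the SKB_DMA_REALIGN slack (if any) is added below.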
+ */ + pep->skb_size += SKB_DMA_REALIGN; + +} + +static int set_port_config_ext(struct pxa168_eth_private *pep) +{ + int skb_size; + + pxa168_eth_recalc_skb_size(pep); + if (pep->skb_size <= 1518) + skb_size = PCXR_MFL_1518; + else if (pep->skb_size <= 1536) + skb_size = PCXR_MFL_1536; + else if (pep->skb_size <= 2048) + skb_size = PCXR_MFL_2048; + else + skb_size = PCXR_MFL_64K; + + /* Extended Port Configuration */ + wrl(pep, PORT_CONFIG_EXT, + PCXR_AN_SPEED_DIS | /* Disable HW AN */ + PCXR_AN_DUPLEX_DIS | + PCXR_AN_FLOWCTL_DIS | + PCXR_2BSM | /* Two byte prefix aligns IP hdr */ + PCXR_DSCP_EN | /* Enable DSCP in IP */ + skb_size | PCXR_FLP | /* do not force link pass */ + PCXR_TX_HIGH_PRI); /* Transmit - high priority queue */ + + return 0; +} + +static void pxa168_eth_adjust_link(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + struct phy_device *phy = dev->phydev; + u32 cfg, cfg_o = rdl(pep, PORT_CONFIG); + u32 cfgext, cfgext_o = rdl(pep, PORT_CONFIG_EXT); + + cfg = cfg_o & ~PCR_DUPLEX_FULL; + cfgext = cfgext_o & ~(PCXR_SPEED_100 | PCXR_FLOWCTL_DIS | PCXR_RMII_EN); + + if (phy->interface == PHY_INTERFACE_MODE_RMII) + cfgext |= PCXR_RMII_EN; + if (phy->speed == SPEED_100) + cfgext |= PCXR_SPEED_100; + if (phy->duplex) + cfg |= PCR_DUPLEX_FULL; + if (!phy->pause) + cfgext |= PCXR_FLOWCTL_DIS; + + /* Bail out if there has nothing changed */ + if (cfg == cfg_o && cfgext == cfgext_o) + return; + + wrl(pep, PORT_CONFIG, cfg); + wrl(pep, PORT_CONFIG_EXT, cfgext); + + phy_print_status(phy); +} + +static int pxa168_init_phy(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + struct ethtool_link_ksettings cmd; + struct phy_device *phy = NULL; + int err; + + if (dev->phydev) + return 0; + + phy = mdiobus_scan(pep->smi_bus, pep->phy_addr); + if (IS_ERR(phy)) + return PTR_ERR(phy); + + err = phy_connect_direct(dev, phy, pxa168_eth_adjust_link, + pep->phy_intf); + if (err) + return err; + + cmd.base.phy_address = pep->phy_addr; + cmd.base.speed = pep->phy_speed; + cmd.base.duplex = pep->phy_duplex; + bitmap_copy(cmd.link_modes.advertising, PHY_BASIC_FEATURES, + __ETHTOOL_LINK_MODE_MASK_NBITS); + cmd.base.autoneg = AUTONEG_ENABLE; + + if (cmd.base.speed != 0) + cmd.base.autoneg = AUTONEG_DISABLE; + + return phy_ethtool_set_link_ksettings(dev, &cmd); +} + +static int pxa168_init_hw(struct pxa168_eth_private *pep) +{ + int err = 0; + + /* Disable interrupts */ + wrl(pep, INT_MASK, 0); + wrl(pep, INT_CAUSE, 0); + /* Write to ICR to clear interrupts. */ + wrl(pep, INT_W_CLEAR, 0); + /* Abort any transmit and receive operations and put DMA + * in idle state. 
+ */ + abort_dma(pep); + /* Initialize address hash table */ + err = init_hash_table(pep); + if (err) + return err; + /* SDMA configuration */ + wrl(pep, SDMA_CONFIG, SDCR_BSZ8 | /* Burst size = 32 bytes */ + SDCR_RIFB | /* Rx interrupt on frame */ + SDCR_BLMT | /* Little endian transmit */ + SDCR_BLMR | /* Little endian receive */ + SDCR_RC_MAX_RETRANS); /* Max retransmit count */ + /* Port Configuration */ + wrl(pep, PORT_CONFIG, PCR_HS); /* Hash size is 1/2kb */ + set_port_config_ext(pep); + + return err; +} + +static int rxq_init(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + struct rx_desc *p_rx_desc; + int size = 0, i = 0; + int rx_desc_num = pep->rx_ring_size; + + /* Allocate RX skb rings */ + pep->rx_skb = kcalloc(rx_desc_num, sizeof(*pep->rx_skb), GFP_KERNEL); + if (!pep->rx_skb) + return -ENOMEM; + + /* Allocate RX ring */ + pep->rx_desc_count = 0; + size = pep->rx_ring_size * sizeof(struct rx_desc); + pep->rx_desc_area_size = size; + pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size, + &pep->rx_desc_dma, + GFP_KERNEL); + if (!pep->p_rx_desc_area) + goto out; + + /* initialize the next_desc_ptr links in the Rx descriptors ring */ + p_rx_desc = pep->p_rx_desc_area; + for (i = 0; i < rx_desc_num; i++) { + p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma + + ((i + 1) % rx_desc_num) * sizeof(struct rx_desc); + } + /* Save Rx desc pointer to driver struct. */ + pep->rx_curr_desc_q = 0; + pep->rx_used_desc_q = 0; + pep->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc); + return 0; +out: + kfree(pep->rx_skb); + return -ENOMEM; +} + +static void rxq_deinit(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + int curr; + + /* Free preallocated skb's on RX rings */ + for (curr = 0; pep->rx_desc_count && curr < pep->rx_ring_size; curr++) { + if (pep->rx_skb[curr]) { + dev_kfree_skb(pep->rx_skb[curr]); + pep->rx_desc_count--; + } + } + if (pep->rx_desc_count) + netdev_err(dev, "Error in freeing Rx Ring. 
%d skb's still\n", + pep->rx_desc_count); + /* Free RX ring */ + if (pep->p_rx_desc_area) + dma_free_coherent(pep->dev->dev.parent, pep->rx_desc_area_size, + pep->p_rx_desc_area, pep->rx_desc_dma); + kfree(pep->rx_skb); +} + +static int txq_init(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + struct tx_desc *p_tx_desc; + int size = 0, i = 0; + int tx_desc_num = pep->tx_ring_size; + + pep->tx_skb = kcalloc(tx_desc_num, sizeof(*pep->tx_skb), GFP_KERNEL); + if (!pep->tx_skb) + return -ENOMEM; + + /* Allocate TX ring */ + pep->tx_desc_count = 0; + size = pep->tx_ring_size * sizeof(struct tx_desc); + pep->tx_desc_area_size = size; + pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size, + &pep->tx_desc_dma, + GFP_KERNEL); + if (!pep->p_tx_desc_area) + goto out; + /* Initialize the next_desc_ptr links in the Tx descriptors ring */ + p_tx_desc = pep->p_tx_desc_area; + for (i = 0; i < tx_desc_num; i++) { + p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma + + ((i + 1) % tx_desc_num) * sizeof(struct tx_desc); + } + pep->tx_curr_desc_q = 0; + pep->tx_used_desc_q = 0; + pep->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc); + return 0; +out: + kfree(pep->tx_skb); + return -ENOMEM; +} + +static void txq_deinit(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + + /* Free outstanding skb's on TX ring */ + txq_reclaim(dev, 1); + BUG_ON(pep->tx_used_desc_q != pep->tx_curr_desc_q); + /* Free TX ring */ + if (pep->p_tx_desc_area) + dma_free_coherent(pep->dev->dev.parent, pep->tx_desc_area_size, + pep->p_tx_desc_area, pep->tx_desc_dma); + kfree(pep->tx_skb); +} + +static int pxa168_eth_open(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + int err; + + err = pxa168_init_phy(dev); + if (err) + return err; + + err = request_irq(dev->irq, pxa168_eth_int_handler, 0, dev->name, dev); + if (err) { + dev_err(&dev->dev, "can't assign irq\n"); + return -EAGAIN; + } + pep->rx_resource_err = 0; + err = rxq_init(dev); + if (err != 0) + goto out_free_irq; + err = txq_init(dev); + if (err != 0) + goto out_free_rx_skb; + pep->rx_used_desc_q = 0; + pep->rx_curr_desc_q = 0; + + /* Fill RX ring with skb's */ + rxq_refill(dev); + pep->rx_used_desc_q = 0; + pep->rx_curr_desc_q = 0; + netif_carrier_off(dev); + napi_enable(&pep->napi); + eth_port_start(dev); + return 0; +out_free_rx_skb: + rxq_deinit(dev); +out_free_irq: + free_irq(dev->irq, dev); + return err; +} + +static int pxa168_eth_stop(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + eth_port_reset(dev); + + /* Disable interrupts */ + wrl(pep, INT_MASK, 0); + wrl(pep, INT_CAUSE, 0); + /* Write to ICR to clear interrupts. */ + wrl(pep, INT_W_CLEAR, 0); + napi_disable(&pep->napi); + del_timer_sync(&pep->timeout); + netif_carrier_off(dev); + free_irq(dev->irq, dev); + rxq_deinit(dev); + txq_deinit(dev); + + return 0; +} + +static int pxa168_eth_change_mtu(struct net_device *dev, int mtu) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + + dev->mtu = mtu; + set_port_config_ext(pep); + + if (!netif_running(dev)) + return 0; + + /* + * Stop and then re-open the interface. This will allocate RX + * skbs of the new MTU. + * There is a possible danger that the open will not succeed, + * due to memory being full. 
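+ * If that happens the failure is only logged and the function still
+ * returns 0, leaving the interface down with the new MTU recorded in
+ * dev->mtu.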
+ */ + pxa168_eth_stop(dev); + if (pxa168_eth_open(dev)) { + dev_err(&dev->dev, + "fatal error on re-opening device after MTU change\n"); + } + + return 0; +} + +static int eth_alloc_tx_desc_index(struct pxa168_eth_private *pep) +{ + int tx_desc_curr; + + tx_desc_curr = pep->tx_curr_desc_q; + pep->tx_curr_desc_q = (tx_desc_curr + 1) % pep->tx_ring_size; + BUG_ON(pep->tx_curr_desc_q == pep->tx_used_desc_q); + pep->tx_desc_count++; + + return tx_desc_curr; +} + +static int pxa168_rx_poll(struct napi_struct *napi, int budget) +{ + struct pxa168_eth_private *pep = + container_of(napi, struct pxa168_eth_private, napi); + struct net_device *dev = pep->dev; + int work_done = 0; + + /* + * We call txq_reclaim every time since in NAPI interupts are disabled + * and due to this we miss the TX_DONE interrupt,which is not updated in + * interrupt status register. + */ + txq_reclaim(dev, 0); + if (netif_queue_stopped(dev) + && pep->tx_ring_size - pep->tx_desc_count > 1) { + netif_wake_queue(dev); + } + work_done = rxq_process(dev, budget); + if (work_done < budget) { + napi_complete_done(napi, work_done); + wrl(pep, INT_MASK, ALL_INTS); + } + + return work_done; +} + +static netdev_tx_t +pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + struct tx_desc *desc; + int tx_index; + int length; + + tx_index = eth_alloc_tx_desc_index(pep); + desc = &pep->p_tx_desc_area[tx_index]; + length = skb->len; + pep->tx_skb[tx_index] = skb; + desc->byte_cnt = length; + desc->buf_ptr = dma_map_single(&pep->pdev->dev, skb->data, length, + DMA_TO_DEVICE); + + skb_tx_timestamp(skb); + + dma_wmb(); + desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC | + TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT; + wmb(); + wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD); + + stats->tx_bytes += length; + stats->tx_packets++; + netif_trans_update(dev); + if (pep->tx_ring_size - pep->tx_desc_count <= 1) { + /* We handled the current skb, but now we are out of space.*/ + netif_stop_queue(dev); + } + + return NETDEV_TX_OK; +} + +static int smi_wait_ready(struct pxa168_eth_private *pep) +{ + int i = 0; + + /* wait for the SMI register to become available */ + for (i = 0; rdl(pep, SMI) & SMI_BUSY; i++) { + if (i == PHY_WAIT_ITERATIONS) + return -ETIMEDOUT; + msleep(10); + } + + return 0; +} + +static int pxa168_smi_read(struct mii_bus *bus, int phy_addr, int regnum) +{ + struct pxa168_eth_private *pep = bus->priv; + int i = 0; + int val; + + if (smi_wait_ready(pep)) { + netdev_warn(pep->dev, "pxa168_eth: SMI bus busy timeout\n"); + return -ETIMEDOUT; + } + wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | SMI_OP_R); + /* now wait for the data to be valid */ + for (i = 0; !((val = rdl(pep, SMI)) & SMI_R_VALID); i++) { + if (i == PHY_WAIT_ITERATIONS) { + netdev_warn(pep->dev, + "pxa168_eth: SMI bus read not valid\n"); + return -ENODEV; + } + msleep(10); + } + + return val & 0xffff; +} + +static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum, + u16 value) +{ + struct pxa168_eth_private *pep = bus->priv; + + if (smi_wait_ready(pep)) { + netdev_warn(pep->dev, "pxa168_eth: SMI bus busy timeout\n"); + return -ETIMEDOUT; + } + + wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | + SMI_OP_W | (value & 0xffff)); + + if (smi_wait_ready(pep)) { + netdev_err(pep->dev, "pxa168_eth: SMI bus busy timeout\n"); + return -ETIMEDOUT; + } + + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void 
pxa168_eth_netpoll(struct net_device *dev) +{ + disable_irq(dev->irq); + pxa168_eth_int_handler(dev->irq, dev); + enable_irq(dev->irq); +} +#endif + +static void pxa168_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver)); + strlcpy(info->version, DRIVER_VERSION, sizeof(info->version)); + strlcpy(info->fw_version, "N/A", sizeof(info->fw_version)); + strlcpy(info->bus_info, "N/A", sizeof(info->bus_info)); +} + +static const struct ethtool_ops pxa168_ethtool_ops = { + .get_drvinfo = pxa168_get_drvinfo, + .nway_reset = phy_ethtool_nway_reset, + .get_link = ethtool_op_get_link, + .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, +}; + +static const struct net_device_ops pxa168_eth_netdev_ops = { + .ndo_open = pxa168_eth_open, + .ndo_stop = pxa168_eth_stop, + .ndo_start_xmit = pxa168_eth_start_xmit, + .ndo_set_rx_mode = pxa168_eth_set_rx_mode, + .ndo_set_mac_address = pxa168_eth_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = phy_do_ioctl, + .ndo_change_mtu = pxa168_eth_change_mtu, + .ndo_tx_timeout = pxa168_eth_tx_timeout, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = pxa168_eth_netpoll, +#endif +}; + +static int pxa168_eth_probe(struct platform_device *pdev) +{ + struct pxa168_eth_private *pep = NULL; + struct net_device *dev = NULL; + struct resource *res; + struct clk *clk; + struct device_node *np; + const unsigned char *mac_addr = NULL; + int err; + + printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n"); + + clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(clk)) { + dev_err(&pdev->dev, "Fast Ethernet failed to get clock\n"); + return -ENODEV; + } + clk_prepare_enable(clk); + + dev = alloc_etherdev(sizeof(struct pxa168_eth_private)); + if (!dev) { + err = -ENOMEM; + goto err_clk; + } + + platform_set_drvdata(pdev, dev); + pep = netdev_priv(dev); + pep->dev = dev; + pep->clk = clk; + + pep->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(pep->base)) { + err = PTR_ERR(pep->base); + goto err_netdev; + } + + res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + BUG_ON(!res); + dev->irq = res->start; + dev->netdev_ops = &pxa168_eth_netdev_ops; + dev->watchdog_timeo = 2 * HZ; + dev->base_addr = 0; + dev->ethtool_ops = &pxa168_ethtool_ops; + + /* MTU range: 68 - 9500 */ + dev->min_mtu = ETH_MIN_MTU; + dev->max_mtu = 9500; + + INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task); + + if (pdev->dev.of_node) + mac_addr = of_get_mac_address(pdev->dev.of_node); + + if (!IS_ERR_OR_NULL(mac_addr)) { + ether_addr_copy(dev->dev_addr, mac_addr); + } else { + /* try reading the mac address, if set by the bootloader */ + pxa168_eth_get_mac_address(dev, dev->dev_addr); + if (!is_valid_ether_addr(dev->dev_addr)) { + dev_info(&pdev->dev, "Using random mac address\n"); + eth_hw_addr_random(dev); + } + } + + pep->rx_ring_size = NUM_RX_DESCS; + pep->tx_ring_size = NUM_TX_DESCS; + + pep->pd = dev_get_platdata(&pdev->dev); + if (pep->pd) { + if (pep->pd->rx_queue_size) + pep->rx_ring_size = pep->pd->rx_queue_size; + + if (pep->pd->tx_queue_size) + pep->tx_ring_size = pep->pd->tx_queue_size; + + pep->port_num = pep->pd->port_number; + pep->phy_addr = pep->pd->phy_addr; + pep->phy_speed = pep->pd->speed; + pep->phy_duplex = pep->pd->duplex; + pep->phy_intf = pep->pd->intf; + + if (pep->pd->init) + pep->pd->init(); + } else if (pdev->dev.of_node) { + 
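+ /* Device-tree probing: the port number, PHY address and PHY mode all
+ * come from properties, e.g. (illustrative values, the phandle name
+ * is board specific):
+ *
+ *	port-id = <0>;
+ *	phy-handle = <&ethphy0>;
+ *	phy-mode = "rmii";
+ */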
of_property_read_u32(pdev->dev.of_node, "port-id", + &pep->port_num); + + np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); + if (!np) { + dev_err(&pdev->dev, "missing phy-handle\n"); + err = -EINVAL; + goto err_netdev; + } + of_property_read_u32(np, "reg", &pep->phy_addr); + of_node_put(np); + err = of_get_phy_mode(pdev->dev.of_node, &pep->phy_intf); + if (err && err != -ENODEV) + goto err_netdev; + } + + /* Hardware supports only 3 ports */ + BUG_ON(pep->port_num > 2); + netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size); + + memset(&pep->timeout, 0, sizeof(struct timer_list)); + timer_setup(&pep->timeout, rxq_refill_timer_wrapper, 0); + + pep->smi_bus = mdiobus_alloc(); + if (!pep->smi_bus) { + err = -ENOMEM; + goto err_netdev; + } + pep->smi_bus->priv = pep; + pep->smi_bus->name = "pxa168_eth smi"; + pep->smi_bus->read = pxa168_smi_read; + pep->smi_bus->write = pxa168_smi_write; + snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%s-%d", + pdev->name, pdev->id); + pep->smi_bus->parent = &pdev->dev; + pep->smi_bus->phy_mask = 0xffffffff; + err = mdiobus_register(pep->smi_bus); + if (err) + goto err_free_mdio; + + pep->pdev = pdev; + SET_NETDEV_DEV(dev, &pdev->dev); + pxa168_init_hw(pep); + err = register_netdev(dev); + if (err) + goto err_mdiobus; + return 0; + +err_mdiobus: + mdiobus_unregister(pep->smi_bus); +err_free_mdio: + mdiobus_free(pep->smi_bus); +err_netdev: + free_netdev(dev); +err_clk: + clk_disable_unprepare(clk); + return err; +} + +static int pxa168_eth_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct pxa168_eth_private *pep = netdev_priv(dev); + + if (pep->htpr) { + dma_free_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE, + pep->htpr, pep->htpr_dma); + pep->htpr = NULL; + } + if (dev->phydev) + phy_disconnect(dev->phydev); + + clk_disable_unprepare(pep->clk); + mdiobus_unregister(pep->smi_bus); + mdiobus_free(pep->smi_bus); + cancel_work_sync(&pep->tx_timeout_task); + unregister_netdev(dev); + free_netdev(dev); + return 0; +} + +static void pxa168_eth_shutdown(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + eth_port_reset(dev); +} + +#ifdef CONFIG_PM +static int pxa168_eth_resume(struct platform_device *pdev) +{ + return -ENOSYS; +} + +static int pxa168_eth_suspend(struct platform_device *pdev, pm_message_t state) +{ + return -ENOSYS; +} + +#else +#define pxa168_eth_resume NULL +#define pxa168_eth_suspend NULL +#endif + +static const struct of_device_id pxa168_eth_of_match[] = { + { .compatible = "marvell,pxa168-eth" }, + { }, +}; +MODULE_DEVICE_TABLE(of, pxa168_eth_of_match); + +static struct platform_driver pxa168_eth_driver = { + .probe = pxa168_eth_probe, + .remove = pxa168_eth_remove, + .shutdown = pxa168_eth_shutdown, + .resume = pxa168_eth_resume, + .suspend = pxa168_eth_suspend, + .driver = { + .name = DRIVER_NAME, + .of_match_table = of_match_ptr(pxa168_eth_of_match), + }, +}; + +module_platform_driver(pxa168_eth_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Ethernet driver for Marvell PXA168"); +MODULE_ALIAS("platform:pxa168_eth"); diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c new file mode 100644 index 000000000..8a9c0f490 --- /dev/null +++ b/drivers/net/ethernet/marvell/skge.c @@ -0,0 +1,4185 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * New driver for Marvell Yukon chipset and SysKonnect Gigabit + * Ethernet adapters. Based on earlier sk98lin, e100 and + * FreeBSD if_sk drivers. 
+ * + * This driver intentionally does not support all the features + * of the original driver such as link fail-over and link management because + * those should be done at higher levels. + * + * Copyright (C) 2004, 2005 Stephen Hemminger <shemminger@osdl.org> + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/in.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/pci.h> +#include <linux/if_vlan.h> +#include <linux/ip.h> +#include <linux/delay.h> +#include <linux/crc32.h> +#include <linux/dma-mapping.h> +#include <linux/debugfs.h> +#include <linux/sched.h> +#include <linux/seq_file.h> +#include <linux/mii.h> +#include <linux/slab.h> +#include <linux/dmi.h> +#include <linux/prefetch.h> +#include <asm/irq.h> + +#include "skge.h" + +#define DRV_NAME "skge" +#define DRV_VERSION "1.14" + +#define DEFAULT_TX_RING_SIZE 128 +#define DEFAULT_RX_RING_SIZE 512 +#define MAX_TX_RING_SIZE 1024 +#define TX_LOW_WATER (MAX_SKB_FRAGS + 1) +#define MAX_RX_RING_SIZE 4096 +#define RX_COPY_THRESHOLD 128 +#define RX_BUF_SIZE 1536 +#define PHY_RETRIES 1000 +#define ETH_JUMBO_MTU 9000 +#define TX_WATCHDOG (5 * HZ) +#define NAPI_WEIGHT 64 +#define BLINK_MS 250 +#define LINK_HZ HZ + +#define SKGE_EEPROM_MAGIC 0x9933aabb + + +MODULE_DESCRIPTION("SysKonnect Gigabit Ethernet driver"); +MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +static const u32 default_msg = (NETIF_MSG_DRV | NETIF_MSG_PROBE | + NETIF_MSG_LINK | NETIF_MSG_IFUP | + NETIF_MSG_IFDOWN); + +static int debug = -1; /* defaults above */ +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +static const struct pci_device_id skge_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_3COM, 0x1700) }, /* 3Com 3C940 */ + { PCI_DEVICE(PCI_VENDOR_ID_3COM, 0x80EB) }, /* 3Com 3C940B */ +#ifdef CONFIG_SKGE_GENESIS + { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x4300) }, /* SK-9xx */ +#endif + { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x4320) }, /* SK-98xx V2.0 */ + { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) }, /* D-Link DGE-530T (rev.B) */ + { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4c00) }, /* D-Link DGE-530T */ + { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302) }, /* D-Link DGE-530T Rev C1 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) }, /* Marvell Yukon 88E8001/8003/8010 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */ + { PCI_DEVICE(PCI_VENDOR_ID_CNET, 0x434E) }, /* CNet PowerG-2000 */ + { PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, 0x1064) }, /* Linksys EG1064 v2 */ + { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0015 }, /* Linksys EG1032 v2 */ + { 0 } +}; +MODULE_DEVICE_TABLE(pci, skge_id_table); + +static int skge_up(struct net_device *dev); +static int skge_down(struct net_device *dev); +static void skge_phy_reset(struct skge_port *skge); +static void skge_tx_clean(struct net_device *dev); +static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val); +static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val); +static void genesis_get_stats(struct skge_port *skge, u64 *data); +static void yukon_get_stats(struct skge_port *skge, u64 *data); +static void yukon_init(struct skge_hw *hw, int port); +static void genesis_mac_init(struct skge_hw *hw, int port); +static void genesis_link_up(struct skge_port *skge); +static void skge_set_multicast(struct net_device *dev); +static 
irqreturn_t skge_intr(int irq, void *dev_id); + +/* Avoid conditionals by using array */ +static const int txqaddr[] = { Q_XA1, Q_XA2 }; +static const int rxqaddr[] = { Q_R1, Q_R2 }; +static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F }; +static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F }; +static const u32 napimask[] = { IS_R1_F|IS_XA1_F, IS_R2_F|IS_XA2_F }; +static const u32 portmask[] = { IS_PORT_1, IS_PORT_2 }; + +static inline bool is_genesis(const struct skge_hw *hw) +{ +#ifdef CONFIG_SKGE_GENESIS + return hw->chip_id == CHIP_ID_GENESIS; +#else + return false; +#endif +} + +static int skge_get_regs_len(struct net_device *dev) +{ + return 0x4000; +} + +/* + * Returns copy of whole control register region + * Note: skip RAM address register because accessing it will + * cause bus hangs! + */ +static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *p) +{ + const struct skge_port *skge = netdev_priv(dev); + const void __iomem *io = skge->hw->regs; + + regs->version = 1; + memset(p, 0, regs->len); + memcpy_fromio(p, io, B3_RAM_ADDR); + + if (regs->len > B3_RI_WTO_R1) { + memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1, + regs->len - B3_RI_WTO_R1); + } +} + +/* Wake on Lan only supported on Yukon chips with rev 1 or above */ +static u32 wol_supported(const struct skge_hw *hw) +{ + if (is_genesis(hw)) + return 0; + + if (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0) + return 0; + + return WAKE_MAGIC | WAKE_PHY; +} + +static void skge_wol_init(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + u16 ctrl; + + skge_write16(hw, B0_CTST, CS_RST_CLR); + skge_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR); + + /* Turn on Vaux */ + skge_write8(hw, B0_POWER_CTRL, + PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF); + + /* WA code for COMA mode -- clear PHY reset */ + if (hw->chip_id == CHIP_ID_YUKON_LITE && + hw->chip_rev >= CHIP_REV_YU_LITE_A3) { + u32 reg = skge_read32(hw, B2_GP_IO); + reg |= GP_DIR_9; + reg &= ~GP_IO_9; + skge_write32(hw, B2_GP_IO, reg); + } + + skge_write32(hw, SK_REG(port, GPHY_CTRL), + GPC_DIS_SLEEP | + GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0 | + GPC_ANEG_1 | GPC_RST_SET); + + skge_write32(hw, SK_REG(port, GPHY_CTRL), + GPC_DIS_SLEEP | + GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0 | + GPC_ANEG_1 | GPC_RST_CLR); + + skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR); + + /* Force to 10/100 skge_reset will re-enable on resume */ + gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, + (PHY_AN_100FULL | PHY_AN_100HALF | + PHY_AN_10FULL | PHY_AN_10HALF | PHY_AN_CSMA)); + /* no 1000 HD/FD */ + gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, 0); + gm_phy_write(hw, port, PHY_MARV_CTRL, + PHY_CT_RESET | PHY_CT_SPS_LSB | PHY_CT_ANE | + PHY_CT_RE_CFG | PHY_CT_DUP_MD); + + + /* Set GMAC to no flow control and auto update for speed/duplex */ + gma_write16(hw, port, GM_GP_CTRL, + GM_GPCR_FC_TX_DIS|GM_GPCR_TX_ENA|GM_GPCR_RX_ENA| + GM_GPCR_DUP_FULL|GM_GPCR_FC_RX_DIS|GM_GPCR_AU_FCT_DIS); + + /* Set WOL address */ + memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR), + skge->netdev->dev_addr, ETH_ALEN); + + /* Turn on appropriate WOL control bits */ + skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT); + ctrl = 0; + if (skge->wol & WAKE_PHY) + ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT; + else + ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT; + + if (skge->wol & WAKE_MAGIC) + ctrl |= 
WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT; + else + ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT; + + ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT; + skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl); + + /* block receiver */ + skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); +} + +static void skge_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct skge_port *skge = netdev_priv(dev); + + wol->supported = wol_supported(skge->hw); + wol->wolopts = skge->wol; +} + +static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + + if ((wol->wolopts & ~wol_supported(hw)) || + !device_can_wakeup(&hw->pdev->dev)) + return -EOPNOTSUPP; + + skge->wol = wol->wolopts; + + device_set_wakeup_enable(&hw->pdev->dev, skge->wol); + + return 0; +} + +/* Determine supported/advertised modes based on hardware. + * Note: ethtool ADVERTISED_xxx == SUPPORTED_xxx + */ +static u32 skge_supported_modes(const struct skge_hw *hw) +{ + u32 supported; + + if (hw->copper) { + supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_TP); + + if (is_genesis(hw)) + supported &= ~(SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full); + + else if (hw->chip_id == CHIP_ID_YUKON) + supported &= ~SUPPORTED_1000baseT_Half; + } else + supported = (SUPPORTED_1000baseT_Full | + SUPPORTED_1000baseT_Half | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg); + + return supported; +} + +static int skge_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + u32 supported, advertising; + + supported = skge_supported_modes(hw); + + if (hw->copper) { + cmd->base.port = PORT_TP; + cmd->base.phy_address = hw->phy_addr; + } else + cmd->base.port = PORT_FIBRE; + + advertising = skge->advertising; + cmd->base.autoneg = skge->autoneg; + cmd->base.speed = skge->speed; + cmd->base.duplex = skge->duplex; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + + return 0; +} + +static int skge_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) +{ + struct skge_port *skge = netdev_priv(dev); + const struct skge_hw *hw = skge->hw; + u32 supported = skge_supported_modes(hw); + int err = 0; + u32 advertising; + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + + if (cmd->base.autoneg == AUTONEG_ENABLE) { + advertising = supported; + skge->duplex = -1; + skge->speed = -1; + } else { + u32 setting; + u32 speed = cmd->base.speed; + + switch (speed) { + case SPEED_1000: + if (cmd->base.duplex == DUPLEX_FULL) + setting = SUPPORTED_1000baseT_Full; + else if (cmd->base.duplex == DUPLEX_HALF) + setting = SUPPORTED_1000baseT_Half; + else + return -EINVAL; + break; + case SPEED_100: + if (cmd->base.duplex == DUPLEX_FULL) + setting = SUPPORTED_100baseT_Full; + else if (cmd->base.duplex == DUPLEX_HALF) + setting = SUPPORTED_100baseT_Half; + else + return -EINVAL; + break; + + case SPEED_10: + if (cmd->base.duplex == DUPLEX_FULL) + setting = SUPPORTED_10baseT_Full; + else if (cmd->base.duplex == 
DUPLEX_HALF) + setting = SUPPORTED_10baseT_Half; + else + return -EINVAL; + break; + default: + return -EINVAL; + } + + if ((setting & supported) == 0) + return -EINVAL; + + skge->speed = speed; + skge->duplex = cmd->base.duplex; + } + + skge->autoneg = cmd->base.autoneg; + skge->advertising = advertising; + + if (netif_running(dev)) { + skge_down(dev); + err = skge_up(dev); + if (err) { + dev_close(dev); + return err; + } + } + + return 0; +} + +static void skge_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct skge_port *skge = netdev_priv(dev); + + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(skge->hw->pdev), + sizeof(info->bus_info)); +} + +static const struct skge_stat { + char name[ETH_GSTRING_LEN]; + u16 xmac_offset; + u16 gma_offset; +} skge_stats[] = { + { "tx_bytes", XM_TXO_OK_HI, GM_TXO_OK_HI }, + { "rx_bytes", XM_RXO_OK_HI, GM_RXO_OK_HI }, + + { "tx_broadcast", XM_TXF_BC_OK, GM_TXF_BC_OK }, + { "rx_broadcast", XM_RXF_BC_OK, GM_RXF_BC_OK }, + { "tx_multicast", XM_TXF_MC_OK, GM_TXF_MC_OK }, + { "rx_multicast", XM_RXF_MC_OK, GM_RXF_MC_OK }, + { "tx_unicast", XM_TXF_UC_OK, GM_TXF_UC_OK }, + { "rx_unicast", XM_RXF_UC_OK, GM_RXF_UC_OK }, + { "tx_mac_pause", XM_TXF_MPAUSE, GM_TXF_MPAUSE }, + { "rx_mac_pause", XM_RXF_MPAUSE, GM_RXF_MPAUSE }, + + { "collisions", XM_TXF_SNG_COL, GM_TXF_SNG_COL }, + { "multi_collisions", XM_TXF_MUL_COL, GM_TXF_MUL_COL }, + { "aborted", XM_TXF_ABO_COL, GM_TXF_ABO_COL }, + { "late_collision", XM_TXF_LAT_COL, GM_TXF_LAT_COL }, + { "fifo_underrun", XM_TXE_FIFO_UR, GM_TXE_FIFO_UR }, + { "fifo_overflow", XM_RXE_FIFO_OV, GM_RXE_FIFO_OV }, + + { "rx_toolong", XM_RXF_LNG_ERR, GM_RXF_LNG_ERR }, + { "rx_jabber", XM_RXF_JAB_PKT, GM_RXF_JAB_PKT }, + { "rx_runt", XM_RXE_RUNT, GM_RXE_FRAG }, + { "rx_too_long", XM_RXF_LNG_ERR, GM_RXF_LNG_ERR }, + { "rx_fcs_error", XM_RXF_FCS_ERR, GM_RXF_FCS_ERR }, +}; + +static int skge_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(skge_stats); + default: + return -EOPNOTSUPP; + } +} + +static void skge_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct skge_port *skge = netdev_priv(dev); + + if (is_genesis(skge->hw)) + genesis_get_stats(skge, data); + else + yukon_get_stats(skge, data); +} + +/* Use hardware MIB variables for critical path statistics and + * transmit feedback not reported at interrupt. + * Other errors are accounted for in interrupt handler. 
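+ * data[] follows the skge_stats[] layout above: the sums below fold
+ * the broadcast/multicast/unicast frame counters (indices 2-7) into
+ * packet totals, while index 10 ("collisions") and index 12
+ * ("aborted") feed the error fields.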
+ */ +static struct net_device_stats *skge_get_stats(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + u64 data[ARRAY_SIZE(skge_stats)]; + + if (is_genesis(skge->hw)) + genesis_get_stats(skge, data); + else + yukon_get_stats(skge, data); + + dev->stats.tx_bytes = data[0]; + dev->stats.rx_bytes = data[1]; + dev->stats.tx_packets = data[2] + data[4] + data[6]; + dev->stats.rx_packets = data[3] + data[5] + data[7]; + dev->stats.multicast = data[3] + data[5]; + dev->stats.collisions = data[10]; + dev->stats.tx_aborted_errors = data[12]; + + return &dev->stats; +} + +static void skge_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(skge_stats); i++) + memcpy(data + i * ETH_GSTRING_LEN, + skge_stats[i].name, ETH_GSTRING_LEN); + break; + } +} + +static void skge_get_ring_param(struct net_device *dev, + struct ethtool_ringparam *p) +{ + struct skge_port *skge = netdev_priv(dev); + + p->rx_max_pending = MAX_RX_RING_SIZE; + p->tx_max_pending = MAX_TX_RING_SIZE; + + p->rx_pending = skge->rx_ring.count; + p->tx_pending = skge->tx_ring.count; +} + +static int skge_set_ring_param(struct net_device *dev, + struct ethtool_ringparam *p) +{ + struct skge_port *skge = netdev_priv(dev); + int err = 0; + + if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE || + p->tx_pending < TX_LOW_WATER || p->tx_pending > MAX_TX_RING_SIZE) + return -EINVAL; + + skge->rx_ring.count = p->rx_pending; + skge->tx_ring.count = p->tx_pending; + + if (netif_running(dev)) { + skge_down(dev); + err = skge_up(dev); + if (err) + dev_close(dev); + } + + return err; +} + +static u32 skge_get_msglevel(struct net_device *netdev) +{ + struct skge_port *skge = netdev_priv(netdev); + return skge->msg_enable; +} + +static void skge_set_msglevel(struct net_device *netdev, u32 value) +{ + struct skge_port *skge = netdev_priv(netdev); + skge->msg_enable = value; +} + +static int skge_nway_reset(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + + if (skge->autoneg != AUTONEG_ENABLE || !netif_running(dev)) + return -EINVAL; + + skge_phy_reset(skge); + return 0; +} + +static void skge_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *ecmd) +{ + struct skge_port *skge = netdev_priv(dev); + + ecmd->rx_pause = ((skge->flow_control == FLOW_MODE_SYMMETRIC) || + (skge->flow_control == FLOW_MODE_SYM_OR_REM)); + ecmd->tx_pause = (ecmd->rx_pause || + (skge->flow_control == FLOW_MODE_LOC_SEND)); + + ecmd->autoneg = ecmd->rx_pause || ecmd->tx_pause; +} + +static int skge_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *ecmd) +{ + struct skge_port *skge = netdev_priv(dev); + struct ethtool_pauseparam old; + int err = 0; + + skge_get_pauseparam(dev, &old); + + if (ecmd->autoneg != old.autoneg) + skge->flow_control = ecmd->autoneg ? FLOW_MODE_NONE : FLOW_MODE_SYMMETRIC; + else { + if (ecmd->rx_pause && ecmd->tx_pause) + skge->flow_control = FLOW_MODE_SYMMETRIC; + else if (ecmd->rx_pause && !ecmd->tx_pause) + skge->flow_control = FLOW_MODE_SYM_OR_REM; + else if (!ecmd->rx_pause && ecmd->tx_pause) + skge->flow_control = FLOW_MODE_LOC_SEND; + else + skge->flow_control = FLOW_MODE_NONE; + } + + if (netif_running(dev)) { + skge_down(dev); + err = skge_up(dev); + if (err) { + dev_close(dev); + return err; + } + } + + return 0; +} + +/* Chip internal frequency for clock calculations */ +static inline u32 hwkhz(const struct skge_hw *hw) +{ + return is_genesis(hw) ? 
53125 : 78125; +} + +/* Chip HZ to microseconds */ +static inline u32 skge_clk2usec(const struct skge_hw *hw, u32 ticks) +{ + return (ticks * 1000) / hwkhz(hw); +} + +/* Microseconds to chip HZ */ +static inline u32 skge_usecs2clk(const struct skge_hw *hw, u32 usec) +{ + return hwkhz(hw) * usec / 1000; +} + +static int skge_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ecmd) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int port = skge->port; + + ecmd->rx_coalesce_usecs = 0; + ecmd->tx_coalesce_usecs = 0; + + if (skge_read32(hw, B2_IRQM_CTRL) & TIM_START) { + u32 delay = skge_clk2usec(hw, skge_read32(hw, B2_IRQM_INI)); + u32 msk = skge_read32(hw, B2_IRQM_MSK); + + if (msk & rxirqmask[port]) + ecmd->rx_coalesce_usecs = delay; + if (msk & txirqmask[port]) + ecmd->tx_coalesce_usecs = delay; + } + + return 0; +} + +/* Note: interrupt timer is per board, but can turn on/off per port */ +static int skge_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ecmd) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int port = skge->port; + u32 msk = skge_read32(hw, B2_IRQM_MSK); + u32 delay = 25; + + if (ecmd->rx_coalesce_usecs == 0) + msk &= ~rxirqmask[port]; + else if (ecmd->rx_coalesce_usecs < 25 || + ecmd->rx_coalesce_usecs > 33333) + return -EINVAL; + else { + msk |= rxirqmask[port]; + delay = ecmd->rx_coalesce_usecs; + } + + if (ecmd->tx_coalesce_usecs == 0) + msk &= ~txirqmask[port]; + else if (ecmd->tx_coalesce_usecs < 25 || + ecmd->tx_coalesce_usecs > 33333) + return -EINVAL; + else { + msk |= txirqmask[port]; + delay = min(delay, ecmd->rx_coalesce_usecs); + } + + skge_write32(hw, B2_IRQM_MSK, msk); + if (msk == 0) + skge_write32(hw, B2_IRQM_CTRL, TIM_STOP); + else { + skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, delay)); + skge_write32(hw, B2_IRQM_CTRL, TIM_START); + } + return 0; +} + +enum led_mode { LED_MODE_OFF, LED_MODE_ON, LED_MODE_TST }; +static void skge_led(struct skge_port *skge, enum led_mode mode) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + + spin_lock_bh(&hw->phy_lock); + if (is_genesis(hw)) { + switch (mode) { + case LED_MODE_OFF: + if (hw->phy_type == SK_PHY_BCOM) + xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_OFF); + else { + skge_write32(hw, SK_REG(port, TX_LED_VAL), 0); + skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_T_OFF); + } + skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF); + skge_write32(hw, SK_REG(port, RX_LED_VAL), 0); + skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_T_OFF); + break; + + case LED_MODE_ON: + skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_ON); + skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_LINKSYNC_ON); + + skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START); + skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START); + + break; + + case LED_MODE_TST: + skge_write8(hw, SK_REG(port, RX_LED_TST), LED_T_ON); + skge_write32(hw, SK_REG(port, RX_LED_VAL), 100); + skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START); + + if (hw->phy_type == SK_PHY_BCOM) + xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_ON); + else { + skge_write8(hw, SK_REG(port, TX_LED_TST), LED_T_ON); + skge_write32(hw, SK_REG(port, TX_LED_VAL), 100); + skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START); + } + + } + } else { + switch (mode) { + case LED_MODE_OFF: + gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0); + gm_phy_write(hw, port, PHY_MARV_LED_OVER, + PHY_M_LED_MO_DUP(MO_LED_OFF) | + PHY_M_LED_MO_10(MO_LED_OFF) | + 
PHY_M_LED_MO_100(MO_LED_OFF) | + PHY_M_LED_MO_1000(MO_LED_OFF) | + PHY_M_LED_MO_RX(MO_LED_OFF)); + break; + case LED_MODE_ON: + gm_phy_write(hw, port, PHY_MARV_LED_CTRL, + PHY_M_LED_PULS_DUR(PULS_170MS) | + PHY_M_LED_BLINK_RT(BLINK_84MS) | + PHY_M_LEDC_TX_CTRL | + PHY_M_LEDC_DP_CTRL); + + gm_phy_write(hw, port, PHY_MARV_LED_OVER, + PHY_M_LED_MO_RX(MO_LED_OFF) | + (skge->speed == SPEED_100 ? + PHY_M_LED_MO_100(MO_LED_ON) : 0)); + break; + case LED_MODE_TST: + gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0); + gm_phy_write(hw, port, PHY_MARV_LED_OVER, + PHY_M_LED_MO_DUP(MO_LED_ON) | + PHY_M_LED_MO_10(MO_LED_ON) | + PHY_M_LED_MO_100(MO_LED_ON) | + PHY_M_LED_MO_1000(MO_LED_ON) | + PHY_M_LED_MO_RX(MO_LED_ON)); + } + } + spin_unlock_bh(&hw->phy_lock); +} + +/* blink LED's for finding board */ +static int skge_set_phys_id(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + struct skge_port *skge = netdev_priv(dev); + + switch (state) { + case ETHTOOL_ID_ACTIVE: + return 2; /* cycle on/off twice per second */ + + case ETHTOOL_ID_ON: + skge_led(skge, LED_MODE_TST); + break; + + case ETHTOOL_ID_OFF: + skge_led(skge, LED_MODE_OFF); + break; + + case ETHTOOL_ID_INACTIVE: + /* back to regular LED state */ + skge_led(skge, netif_running(dev) ? LED_MODE_ON : LED_MODE_OFF); + } + + return 0; +} + +static int skge_get_eeprom_len(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + u32 reg2; + + pci_read_config_dword(skge->hw->pdev, PCI_DEV_REG2, ®2); + return 1 << (((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8); +} + +static u32 skge_vpd_read(struct pci_dev *pdev, int cap, u16 offset) +{ + u32 val; + + pci_write_config_word(pdev, cap + PCI_VPD_ADDR, offset); + + do { + pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset); + } while (!(offset & PCI_VPD_ADDR_F)); + + pci_read_config_dword(pdev, cap + PCI_VPD_DATA, &val); + return val; +} + +static void skge_vpd_write(struct pci_dev *pdev, int cap, u16 offset, u32 val) +{ + pci_write_config_dword(pdev, cap + PCI_VPD_DATA, val); + pci_write_config_word(pdev, cap + PCI_VPD_ADDR, + offset | PCI_VPD_ADDR_F); + + do { + pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset); + } while (offset & PCI_VPD_ADDR_F); +} + +static int skge_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 *data) +{ + struct skge_port *skge = netdev_priv(dev); + struct pci_dev *pdev = skge->hw->pdev; + int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD); + int length = eeprom->len; + u16 offset = eeprom->offset; + + if (!cap) + return -EINVAL; + + eeprom->magic = SKGE_EEPROM_MAGIC; + + while (length > 0) { + u32 val = skge_vpd_read(pdev, cap, offset); + int n = min_t(int, length, sizeof(val)); + + memcpy(data, &val, n); + length -= n; + data += n; + offset += n; + } + return 0; +} + +static int skge_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 *data) +{ + struct skge_port *skge = netdev_priv(dev); + struct pci_dev *pdev = skge->hw->pdev; + int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD); + int length = eeprom->len; + u16 offset = eeprom->offset; + + if (!cap) + return -EINVAL; + + if (eeprom->magic != SKGE_EEPROM_MAGIC) + return -EINVAL; + + while (length > 0) { + u32 val; + int n = min_t(int, length, sizeof(val)); + + if (n < sizeof(val)) + val = skge_vpd_read(pdev, cap, offset); + memcpy(&val, data, n); + + skge_vpd_write(pdev, cap, offset, val); + + length -= n; + data += n; + offset += n; + } + return 0; +} + +static const struct ethtool_ops skge_ethtool_ops = { + .supported_coalesce_params = 
ETHTOOL_COALESCE_USECS, + .get_drvinfo = skge_get_drvinfo, + .get_regs_len = skge_get_regs_len, + .get_regs = skge_get_regs, + .get_wol = skge_get_wol, + .set_wol = skge_set_wol, + .get_msglevel = skge_get_msglevel, + .set_msglevel = skge_set_msglevel, + .nway_reset = skge_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = skge_get_eeprom_len, + .get_eeprom = skge_get_eeprom, + .set_eeprom = skge_set_eeprom, + .get_ringparam = skge_get_ring_param, + .set_ringparam = skge_set_ring_param, + .get_pauseparam = skge_get_pauseparam, + .set_pauseparam = skge_set_pauseparam, + .get_coalesce = skge_get_coalesce, + .set_coalesce = skge_set_coalesce, + .get_strings = skge_get_strings, + .set_phys_id = skge_set_phys_id, + .get_sset_count = skge_get_sset_count, + .get_ethtool_stats = skge_get_ethtool_stats, + .get_link_ksettings = skge_get_link_ksettings, + .set_link_ksettings = skge_set_link_ksettings, +}; + +/* + * Allocate ring elements and chain them together + * One-to-one association of board descriptors with ring elements + */ +static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base) +{ + struct skge_tx_desc *d; + struct skge_element *e; + int i; + + ring->start = kcalloc(ring->count, sizeof(*e), GFP_KERNEL); + if (!ring->start) + return -ENOMEM; + + for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) { + e->desc = d; + if (i == ring->count - 1) { + e->next = ring->start; + d->next_offset = base; + } else { + e->next = e + 1; + d->next_offset = base + (i+1) * sizeof(*d); + } + } + ring->to_use = ring->to_clean = ring->start; + + return 0; +} + +/* Allocate and setup a new buffer for receiving */ +static int skge_rx_setup(struct skge_port *skge, struct skge_element *e, + struct sk_buff *skb, unsigned int bufsize) +{ + struct skge_rx_desc *rd = e->desc; + dma_addr_t map; + + map = dma_map_single(&skge->hw->pdev->dev, skb->data, bufsize, + DMA_FROM_DEVICE); + + if (dma_mapping_error(&skge->hw->pdev->dev, map)) + return -1; + + rd->dma_lo = lower_32_bits(map); + rd->dma_hi = upper_32_bits(map); + e->skb = skb; + rd->csum1_start = ETH_HLEN; + rd->csum2_start = ETH_HLEN; + rd->csum1 = 0; + rd->csum2 = 0; + + wmb(); + + rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize; + dma_unmap_addr_set(e, mapaddr, map); + dma_unmap_len_set(e, maplen, bufsize); + return 0; +} + +/* Resume receiving using existing skb, + * Note: DMA address is not changed by chip. + * MTU not changed while receiver active. + */ +static inline void skge_rx_reuse(struct skge_element *e, unsigned int size) +{ + struct skge_rx_desc *rd = e->desc; + + rd->csum2 = 0; + rd->csum2_start = ETH_HLEN; + + wmb(); + + rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | size; +} + + +/* Free all buffers in receive ring, assumes receiver stopped */ +static void skge_rx_clean(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + struct skge_ring *ring = &skge->rx_ring; + struct skge_element *e; + + e = ring->start; + do { + struct skge_rx_desc *rd = e->desc; + rd->control = 0; + if (e->skb) { + dma_unmap_single(&hw->pdev->dev, + dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), + DMA_FROM_DEVICE); + dev_kfree_skb(e->skb); + e->skb = NULL; + } + } while ((e = e->next) != ring->start); +} + + +/* Allocate buffers for receive ring + * For receive: to_clean is next received frame. 
+ */ +static int skge_rx_fill(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_ring *ring = &skge->rx_ring; + struct skge_element *e; + + e = ring->start; + do { + struct sk_buff *skb; + + skb = __netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN, + GFP_KERNEL); + if (!skb) + return -ENOMEM; + + skb_reserve(skb, NET_IP_ALIGN); + if (skge_rx_setup(skge, e, skb, skge->rx_buf_size) < 0) { + dev_kfree_skb(skb); + return -EIO; + } + } while ((e = e->next) != ring->start); + + ring->to_clean = ring->start; + return 0; +} + +static const char *skge_pause(enum pause_status status) +{ + switch (status) { + case FLOW_STAT_NONE: + return "none"; + case FLOW_STAT_REM_SEND: + return "rx only"; + case FLOW_STAT_LOC_SEND: + return "tx_only"; + case FLOW_STAT_SYMMETRIC: /* Both station may send PAUSE */ + return "both"; + default: + return "indeterminated"; + } +} + + +static void skge_link_up(struct skge_port *skge) +{ + skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), + LED_BLK_OFF|LED_SYNC_OFF|LED_REG_ON); + + netif_carrier_on(skge->netdev); + netif_wake_queue(skge->netdev); + + netif_info(skge, link, skge->netdev, + "Link is up at %d Mbps, %s duplex, flow control %s\n", + skge->speed, + skge->duplex == DUPLEX_FULL ? "full" : "half", + skge_pause(skge->flow_status)); +} + +static void skge_link_down(struct skge_port *skge) +{ + skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_REG_OFF); + netif_carrier_off(skge->netdev); + netif_stop_queue(skge->netdev); + + netif_info(skge, link, skge->netdev, "Link is down\n"); +} + +static void xm_link_down(struct skge_hw *hw, int port) +{ + struct net_device *dev = hw->dev[port]; + struct skge_port *skge = netdev_priv(dev); + + xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE); + + if (netif_carrier_ok(dev)) + skge_link_down(skge); +} + +static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val) +{ + int i; + + xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr); + *val = xm_read16(hw, port, XM_PHY_DATA); + + if (hw->phy_type == SK_PHY_XMAC) + goto ready; + + for (i = 0; i < PHY_RETRIES; i++) { + if (xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_RDY) + goto ready; + udelay(1); + } + + return -ETIMEDOUT; + ready: + *val = xm_read16(hw, port, XM_PHY_DATA); + + return 0; +} + +static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg) +{ + u16 v = 0; + if (__xm_phy_read(hw, port, reg, &v)) + pr_warn("%s: phy read timed out\n", hw->dev[port]->name); + return v; +} + +static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val) +{ + int i; + + xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr); + for (i = 0; i < PHY_RETRIES; i++) { + if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY)) + goto ready; + udelay(1); + } + return -EIO; + + ready: + xm_write16(hw, port, XM_PHY_DATA, val); + for (i = 0; i < PHY_RETRIES; i++) { + if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY)) + return 0; + udelay(1); + } + return -ETIMEDOUT; +} + +static void genesis_init(struct skge_hw *hw) +{ + /* set blink source counter */ + skge_write32(hw, B2_BSC_INI, (SK_BLK_DUR * SK_FACT_53) / 100); + skge_write8(hw, B2_BSC_CTRL, BSC_START); + + /* configure mac arbiter */ + skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR); + + /* configure mac arbiter timeout values */ + skge_write8(hw, B3_MA_TOINI_RX1, SK_MAC_TO_53); + skge_write8(hw, B3_MA_TOINI_RX2, SK_MAC_TO_53); + skge_write8(hw, B3_MA_TOINI_TX1, SK_MAC_TO_53); + skge_write8(hw, B3_MA_TOINI_TX2, SK_MAC_TO_53); + + skge_write8(hw, B3_MA_RCINI_RX1, 0); 
+ skge_write8(hw, B3_MA_RCINI_RX2, 0); + skge_write8(hw, B3_MA_RCINI_TX1, 0); + skge_write8(hw, B3_MA_RCINI_TX2, 0); + + /* configure packet arbiter timeout */ + skge_write16(hw, B3_PA_CTRL, PA_RST_CLR); + skge_write16(hw, B3_PA_TOINI_RX1, SK_PKT_TO_MAX); + skge_write16(hw, B3_PA_TOINI_TX1, SK_PKT_TO_MAX); + skge_write16(hw, B3_PA_TOINI_RX2, SK_PKT_TO_MAX); + skge_write16(hw, B3_PA_TOINI_TX2, SK_PKT_TO_MAX); +} + +static void genesis_reset(struct skge_hw *hw, int port) +{ + static const u8 zero[8] = { 0 }; + u32 reg; + + skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0); + + /* reset the statistics module */ + xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT); + xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE); + xm_write32(hw, port, XM_MODE, 0); /* clear Mode Reg */ + xm_write16(hw, port, XM_TX_CMD, 0); /* reset TX CMD Reg */ + xm_write16(hw, port, XM_RX_CMD, 0); /* reset RX CMD Reg */ + + /* disable Broadcom PHY IRQ */ + if (hw->phy_type == SK_PHY_BCOM) + xm_write16(hw, port, PHY_BCOM_INT_MASK, 0xffff); + + xm_outhash(hw, port, XM_HSM, zero); + + /* Flush TX and RX fifo */ + reg = xm_read32(hw, port, XM_MODE); + xm_write32(hw, port, XM_MODE, reg | XM_MD_FTF); + xm_write32(hw, port, XM_MODE, reg | XM_MD_FRF); +} + +/* Convert mode to MII values */ +static const u16 phy_pause_map[] = { + [FLOW_MODE_NONE] = 0, + [FLOW_MODE_LOC_SEND] = PHY_AN_PAUSE_ASYM, + [FLOW_MODE_SYMMETRIC] = PHY_AN_PAUSE_CAP, + [FLOW_MODE_SYM_OR_REM] = PHY_AN_PAUSE_CAP | PHY_AN_PAUSE_ASYM, +}; + +/* special defines for FIBER (88E1011S only) */ +static const u16 fiber_pause_map[] = { + [FLOW_MODE_NONE] = PHY_X_P_NO_PAUSE, + [FLOW_MODE_LOC_SEND] = PHY_X_P_ASYM_MD, + [FLOW_MODE_SYMMETRIC] = PHY_X_P_SYM_MD, + [FLOW_MODE_SYM_OR_REM] = PHY_X_P_BOTH_MD, +}; + + +/* Check status of Broadcom phy link */ +static void bcom_check_link(struct skge_hw *hw, int port) +{ + struct net_device *dev = hw->dev[port]; + struct skge_port *skge = netdev_priv(dev); + u16 status; + + /* read twice because of latch */ + xm_phy_read(hw, port, PHY_BCOM_STAT); + status = xm_phy_read(hw, port, PHY_BCOM_STAT); + + if ((status & PHY_ST_LSYNC) == 0) { + xm_link_down(hw, port); + return; + } + + if (skge->autoneg == AUTONEG_ENABLE) { + u16 lpa, aux; + + if (!(status & PHY_ST_AN_OVER)) + return; + + lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP); + if (lpa & PHY_B_AN_RF) { + netdev_notice(dev, "remote fault\n"); + return; + } + + aux = xm_phy_read(hw, port, PHY_BCOM_AUX_STAT); + + /* Check Duplex mismatch */ + switch (aux & PHY_B_AS_AN_RES_MSK) { + case PHY_B_RES_1000FD: + skge->duplex = DUPLEX_FULL; + break; + case PHY_B_RES_1000HD: + skge->duplex = DUPLEX_HALF; + break; + default: + netdev_notice(dev, "duplex mismatch\n"); + return; + } + + /* We are using IEEE 802.3z/D5.0 Table 37-4 */ + switch (aux & PHY_B_AS_PAUSE_MSK) { + case PHY_B_AS_PAUSE_MSK: + skge->flow_status = FLOW_STAT_SYMMETRIC; + break; + case PHY_B_AS_PRR: + skge->flow_status = FLOW_STAT_REM_SEND; + break; + case PHY_B_AS_PRT: + skge->flow_status = FLOW_STAT_LOC_SEND; + break; + default: + skge->flow_status = FLOW_STAT_NONE; + } + skge->speed = SPEED_1000; + } + + if (!netif_carrier_ok(dev)) + genesis_link_up(skge); +} + +/* Broadcom 5400 only supports gigabit!
SysKonnect did not put an additional + * Phy on for 100 or 10Mbit operation + */ +static void bcom_phy_init(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + int i; + u16 id1, r, ext, ctl; + + /* magic workaround patterns for Broadcom */ + static const struct { + u16 reg; + u16 val; + } A1hack[] = { + { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, + { 0x17, 0x0013 }, { 0x15, 0x0404 }, { 0x17, 0x8006 }, + { 0x15, 0x0132 }, { 0x17, 0x8006 }, { 0x15, 0x0232 }, + { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 }, + }, C0hack[] = { + { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1204 }, + { 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 }, + }; + + /* read Id from external PHY (all have the same address) */ + id1 = xm_phy_read(hw, port, PHY_XMAC_ID1); + + /* Optimize MDIO transfer by suppressing preamble. */ + r = xm_read16(hw, port, XM_MMU_CMD); + r |= XM_MMU_NO_PRE; + xm_write16(hw, port, XM_MMU_CMD, r); + + switch (id1) { + case PHY_BCOM_ID1_C0: + /* + * Workaround BCOM Errata for the C0 type. + * Write magic patterns to reserved registers. + */ + for (i = 0; i < ARRAY_SIZE(C0hack); i++) + xm_phy_write(hw, port, + C0hack[i].reg, C0hack[i].val); + + break; + case PHY_BCOM_ID1_A1: + /* + * Workaround BCOM Errata for the A1 type. + * Write magic patterns to reserved registers. + */ + for (i = 0; i < ARRAY_SIZE(A1hack); i++) + xm_phy_write(hw, port, + A1hack[i].reg, A1hack[i].val); + break; + } + + /* + * Workaround BCOM Errata (#10523) for all BCom PHYs. + * Disable Power Management after reset. + */ + r = xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL); + r |= PHY_B_AC_DIS_PM; + xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, r); + + /* Dummy read */ + xm_read16(hw, port, XM_ISRC); + + ext = PHY_B_PEC_EN_LTR; /* enable tx led */ + ctl = PHY_CT_SP1000; /* always 1000mbit */ + + if (skge->autoneg == AUTONEG_ENABLE) { + /* + * Workaround BCOM Errata #1 for the C5 type. 
+ * 1000Base-T Link Acquisition Failure in Slave Mode + * Set Repeater/DTE bit 10 of the 1000Base-T Control Register + */ + u16 adv = PHY_B_1000C_RD; + if (skge->advertising & ADVERTISED_1000baseT_Half) + adv |= PHY_B_1000C_AHD; + if (skge->advertising & ADVERTISED_1000baseT_Full) + adv |= PHY_B_1000C_AFD; + xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, adv); + + ctl |= PHY_CT_ANE | PHY_CT_RE_CFG; + } else { + if (skge->duplex == DUPLEX_FULL) + ctl |= PHY_CT_DUP_MD; + /* Force to slave */ + xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, PHY_B_1000C_MSE); + } + + /* Set autonegotiation pause parameters */ + xm_phy_write(hw, port, PHY_BCOM_AUNE_ADV, + phy_pause_map[skge->flow_control] | PHY_AN_CSMA); + + /* Handle Jumbo frames */ + if (hw->dev[port]->mtu > ETH_DATA_LEN) { + xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, + PHY_B_AC_TX_TST | PHY_B_AC_LONG_PACK); + + ext |= PHY_B_PEC_HIGH_LA; + + } + + xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, ext); + xm_phy_write(hw, port, PHY_BCOM_CTRL, ctl); + + /* Use link status change interrupt */ + xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK); +} + +static void xm_phy_init(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + u16 ctrl = 0; + + if (skge->autoneg == AUTONEG_ENABLE) { + if (skge->advertising & ADVERTISED_1000baseT_Half) + ctrl |= PHY_X_AN_HD; + if (skge->advertising & ADVERTISED_1000baseT_Full) + ctrl |= PHY_X_AN_FD; + + ctrl |= fiber_pause_map[skge->flow_control]; + + xm_phy_write(hw, port, PHY_XMAC_AUNE_ADV, ctrl); + + /* Restart Auto-negotiation */ + ctrl = PHY_CT_ANE | PHY_CT_RE_CFG; + } else { + /* Set DuplexMode in Config register */ + if (skge->duplex == DUPLEX_FULL) + ctrl |= PHY_CT_DUP_MD; + /* + * Do NOT enable Auto-negotiation here. This would hold + * the link down because no IDLEs are transmitted + */ + } + + xm_phy_write(hw, port, PHY_XMAC_CTRL, ctrl); + + /* Poll PHY for status changes */ + mod_timer(&skge->link_timer, jiffies + LINK_HZ); +} + +static int xm_check_link(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int port = skge->port; + u16 status; + + /* read twice because of latch */ + xm_phy_read(hw, port, PHY_XMAC_STAT); + status = xm_phy_read(hw, port, PHY_XMAC_STAT); + + if ((status & PHY_ST_LSYNC) == 0) { + xm_link_down(hw, port); + return 0; + } + + if (skge->autoneg == AUTONEG_ENABLE) { + u16 lpa, res; + + if (!(status & PHY_ST_AN_OVER)) + return 0; + + lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP); + if (lpa & PHY_B_AN_RF) { + netdev_notice(dev, "remote fault\n"); + return 0; + } + + res = xm_phy_read(hw, port, PHY_XMAC_RES_ABI); + + /* Check Duplex mismatch */ + switch (res & (PHY_X_RS_HD | PHY_X_RS_FD)) { + case PHY_X_RS_FD: + skge->duplex = DUPLEX_FULL; + break; + case PHY_X_RS_HD: + skge->duplex = DUPLEX_HALF; + break; + default: + netdev_notice(dev, "duplex mismatch\n"); + return 0; + } + + /* We are using IEEE 802.3z/D5.0 Table 37-4 */ + if ((skge->flow_control == FLOW_MODE_SYMMETRIC || + skge->flow_control == FLOW_MODE_SYM_OR_REM) && + (lpa & PHY_X_P_SYM_MD)) + skge->flow_status = FLOW_STAT_SYMMETRIC; + else if (skge->flow_control == FLOW_MODE_SYM_OR_REM && + (lpa & PHY_X_RS_PAUSE) == PHY_X_P_ASYM_MD) + /* Enable PAUSE receive, disable PAUSE transmit */ + skge->flow_status = FLOW_STAT_REM_SEND; + else if (skge->flow_control == FLOW_MODE_LOC_SEND && + (lpa & PHY_X_RS_PAUSE) == PHY_X_P_BOTH_MD) + /* Disable PAUSE receive, enable PAUSE transmit */ + skge->flow_status = FLOW_STAT_LOC_SEND; + else + 
skge->flow_status = FLOW_STAT_NONE; + + skge->speed = SPEED_1000; + } + + if (!netif_carrier_ok(dev)) + genesis_link_up(skge); + return 1; +} + +/* Poll to check for link coming up. + * + * Since internal PHY is wired to a level triggered pin, can't + * get an interrupt when carrier is detected, need to poll for + * link coming up. + */ +static void xm_link_timer(struct timer_list *t) +{ + struct skge_port *skge = from_timer(skge, t, link_timer); + struct net_device *dev = skge->netdev; + struct skge_hw *hw = skge->hw; + int port = skge->port; + int i; + unsigned long flags; + + if (!netif_running(dev)) + return; + + spin_lock_irqsave(&hw->phy_lock, flags); + + /* + * Verify that the link by checking GPIO register three times. + * This pin has the signal from the link_sync pin connected to it. + */ + for (i = 0; i < 3; i++) { + if (xm_read16(hw, port, XM_GP_PORT) & XM_GP_INP_ASS) + goto link_down; + } + + /* Re-enable interrupt to detect link down */ + if (xm_check_link(dev)) { + u16 msk = xm_read16(hw, port, XM_IMSK); + msk &= ~XM_IS_INP_ASS; + xm_write16(hw, port, XM_IMSK, msk); + xm_read16(hw, port, XM_ISRC); + } else { +link_down: + mod_timer(&skge->link_timer, + round_jiffies(jiffies + LINK_HZ)); + } + spin_unlock_irqrestore(&hw->phy_lock, flags); +} + +static void genesis_mac_init(struct skge_hw *hw, int port) +{ + struct net_device *dev = hw->dev[port]; + struct skge_port *skge = netdev_priv(dev); + int jumbo = hw->dev[port]->mtu > ETH_DATA_LEN; + int i; + u32 r; + static const u8 zero[6] = { 0 }; + + for (i = 0; i < 10; i++) { + skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), + MFF_SET_MAC_RST); + if (skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST) + goto reset_ok; + udelay(1); + } + + netdev_warn(dev, "genesis reset failed\n"); + + reset_ok: + /* Unreset the XMAC. */ + skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST); + + /* + * Perform additional initialization for external PHYs, + * namely for the 1000baseTX cards that use the XMAC's + * GMII mode. + */ + if (hw->phy_type != SK_PHY_XMAC) { + /* Take external Phy out of reset */ + r = skge_read32(hw, B2_GP_IO); + if (port == 0) + r |= GP_DIR_0|GP_IO_0; + else + r |= GP_DIR_2|GP_IO_2; + + skge_write32(hw, B2_GP_IO, r); + + /* Enable GMII interface */ + xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD); + } + + + switch (hw->phy_type) { + case SK_PHY_XMAC: + xm_phy_init(skge); + break; + case SK_PHY_BCOM: + bcom_phy_init(skge); + bcom_check_link(hw, port); + } + + /* Set Station Address */ + xm_outaddr(hw, port, XM_SA, dev->dev_addr); + + /* We don't use match addresses so clear */ + for (i = 1; i < 16; i++) + xm_outaddr(hw, port, XM_EXM(i), zero); + + /* Clear MIB counters */ + xm_write16(hw, port, XM_STAT_CMD, + XM_SC_CLR_RXC | XM_SC_CLR_TXC); + /* Clear two times according to Errata #3 */ + xm_write16(hw, port, XM_STAT_CMD, + XM_SC_CLR_RXC | XM_SC_CLR_TXC); + + /* configure Rx High Water Mark (XM_RX_HI_WM) */ + xm_write16(hw, port, XM_RX_HI_WM, 1450); + + /* We don't need the FCS appended to the packet. */ + r = XM_RX_LENERR_OK | XM_RX_STRIP_FCS; + if (jumbo) + r |= XM_RX_BIG_PK_OK; + + if (skge->duplex == DUPLEX_HALF) { + /* + * If in manual half duplex mode the other side might be in + * full duplex mode, so ignore if a carrier extension is not seen + * on frames received + */ + r |= XM_RX_DIS_CEXT; + } + xm_write16(hw, port, XM_RX_CMD, r); + + /* We want short frames padded to 60 bytes. 
*/ + xm_write16(hw, port, XM_TX_CMD, XM_TX_AUTO_PAD); + + /* Increase threshold for jumbo frames on dual port */ + if (hw->ports > 1 && jumbo) + xm_write16(hw, port, XM_TX_THR, 1020); + else + xm_write16(hw, port, XM_TX_THR, 512); + + /* + * Enable the reception of all error frames. This is + * a necessary evil due to the design of the XMAC. The + * XMAC's receive FIFO is only 8K in size, however jumbo + * frames can be up to 9000 bytes in length. When bad + * frame filtering is enabled, the XMAC's RX FIFO operates + * in 'store and forward' mode. For this to work, the + * entire frame has to fit into the FIFO, but that means + * that jumbo frames larger than 8192 bytes will be + * truncated. Disabling all bad frame filtering causes + * the RX FIFO to operate in streaming mode, in which + * case the XMAC will start transferring frames out of the + * RX FIFO as soon as the FIFO threshold is reached. + */ + xm_write32(hw, port, XM_MODE, XM_DEF_MODE); + + + /* + * Initialize the Receive Counter Event Mask (XM_RX_EV_MSK) + * - Enable all bits excepting 'Octets Rx OK Low CntOv' + * and 'Octets Rx OK Hi Cnt Ov'. + */ + xm_write32(hw, port, XM_RX_EV_MSK, XMR_DEF_MSK); + + /* + * Initialize the Transmit Counter Event Mask (XM_TX_EV_MSK) + * - Enable all bits excepting 'Octets Tx OK Low CntOv' + * and 'Octets Tx OK Hi Cnt Ov'. + */ + xm_write32(hw, port, XM_TX_EV_MSK, XMT_DEF_MSK); + + /* Configure MAC arbiter */ + skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR); + + /* configure timeout values */ + skge_write8(hw, B3_MA_TOINI_RX1, 72); + skge_write8(hw, B3_MA_TOINI_RX2, 72); + skge_write8(hw, B3_MA_TOINI_TX1, 72); + skge_write8(hw, B3_MA_TOINI_TX2, 72); + + skge_write8(hw, B3_MA_RCINI_RX1, 0); + skge_write8(hw, B3_MA_RCINI_RX2, 0); + skge_write8(hw, B3_MA_RCINI_TX1, 0); + skge_write8(hw, B3_MA_RCINI_TX2, 0); + + /* Configure Rx MAC FIFO */ + skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_CLR); + skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_TIM_PAT); + skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_ENA_OP_MD); + + /* Configure Tx MAC FIFO */ + skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_CLR); + skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_TX_CTRL_DEF); + skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_ENA_OP_MD); + + if (jumbo) { + /* Enable frame flushing if jumbo frames used */ + skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_FLUSH); + } else { + /* enable timeout timers if normal frames */ + skge_write16(hw, B3_PA_CTRL, + (port == 0) ? PA_ENA_TO_TX1 : PA_ENA_TO_TX2); + } +} + +static void genesis_stop(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + unsigned retries = 1000; + u16 cmd; + + /* Disable Tx and Rx */ + cmd = xm_read16(hw, port, XM_MMU_CMD); + cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX); + xm_write16(hw, port, XM_MMU_CMD, cmd); + + genesis_reset(hw, port); + + /* Clear Tx packet arbiter timeout IRQ */ + skge_write16(hw, B3_PA_CTRL, + port == 0 ?
PA_CLR_TO_TX1 : PA_CLR_TO_TX2); + + /* Reset the MAC */ + skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST); + do { + skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST); + if (!(skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST)) + break; + } while (--retries > 0); + + /* For external PHYs there must be special handling */ + if (hw->phy_type != SK_PHY_XMAC) { + u32 reg = skge_read32(hw, B2_GP_IO); + if (port == 0) { + reg |= GP_DIR_0; + reg &= ~GP_IO_0; + } else { + reg |= GP_DIR_2; + reg &= ~GP_IO_2; + } + skge_write32(hw, B2_GP_IO, reg); + skge_read32(hw, B2_GP_IO); + } + + xm_write16(hw, port, XM_MMU_CMD, + xm_read16(hw, port, XM_MMU_CMD) + & ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX)); + + xm_read16(hw, port, XM_MMU_CMD); +} + + +static void genesis_get_stats(struct skge_port *skge, u64 *data) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + int i; + unsigned long timeout = jiffies + HZ; + + xm_write16(hw, port, + XM_STAT_CMD, XM_SC_SNP_TXC | XM_SC_SNP_RXC); + + /* wait for update to complete */ + while (xm_read16(hw, port, XM_STAT_CMD) + & (XM_SC_SNP_TXC | XM_SC_SNP_RXC)) { + if (time_after(jiffies, timeout)) + break; + udelay(10); + } + + /* special case for 64 bit octet counter */ + data[0] = (u64) xm_read32(hw, port, XM_TXO_OK_HI) << 32 + | xm_read32(hw, port, XM_TXO_OK_LO); + data[1] = (u64) xm_read32(hw, port, XM_RXO_OK_HI) << 32 + | xm_read32(hw, port, XM_RXO_OK_LO); + + for (i = 2; i < ARRAY_SIZE(skge_stats); i++) + data[i] = xm_read32(hw, port, skge_stats[i].xmac_offset); +} + +static void genesis_mac_intr(struct skge_hw *hw, int port) +{ + struct net_device *dev = hw->dev[port]; + struct skge_port *skge = netdev_priv(dev); + u16 status = xm_read16(hw, port, XM_ISRC); + + netif_printk(skge, intr, KERN_DEBUG, skge->netdev, + "mac interrupt status 0x%x\n", status); + + if (hw->phy_type == SK_PHY_XMAC && (status & XM_IS_INP_ASS)) { + xm_link_down(hw, port); + mod_timer(&skge->link_timer, jiffies + 1); + } + + if (status & XM_IS_TXF_UR) { + xm_write32(hw, port, XM_MODE, XM_MD_FTF); + ++dev->stats.tx_fifo_errors; + } +} + +static void genesis_link_up(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + u16 cmd, msk; + u32 mode; + + cmd = xm_read16(hw, port, XM_MMU_CMD); + + /* + * enabling pause frame reception is required for 1000BT + * because the XMAC is not reset if the link is going down + */ + if (skge->flow_status == FLOW_STAT_NONE || + skge->flow_status == FLOW_STAT_LOC_SEND) + /* Disable Pause Frame Reception */ + cmd |= XM_MMU_IGN_PF; + else + /* Enable Pause Frame Reception */ + cmd &= ~XM_MMU_IGN_PF; + + xm_write16(hw, port, XM_MMU_CMD, cmd); + + mode = xm_read32(hw, port, XM_MODE); + if (skge->flow_status == FLOW_STAT_SYMMETRIC || + skge->flow_status == FLOW_STAT_LOC_SEND) { + /* + * Configure Pause Frame Generation + * Use internal and external Pause Frame Generation. + * Sending pause frames is edge triggered. + * Send a Pause frame with the maximum pause time if + * internal or external FIFO full condition occurs. + * Send a zero pause time frame to re-start transmission. + */ + /* XM_PAUSE_DA = '010000C28001' (default) */ + /* XM_MAC_PTIME = 0xffff (maximum) */ + /* remember this value is defined in big endian (!)
*/ + xm_write16(hw, port, XM_MAC_PTIME, 0xffff); + + mode |= XM_PAUSE_MODE; + skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_PAUSE); + } else { + /* + * disable pause frame generation is required for 1000BT + * because the XMAC is not reset if the link is going down + */ + /* Disable Pause Mode in Mode Register */ + mode &= ~XM_PAUSE_MODE; + + skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_DIS_PAUSE); + } + + xm_write32(hw, port, XM_MODE, mode); + + /* Turn on detection of Tx underrun */ + msk = xm_read16(hw, port, XM_IMSK); + msk &= ~XM_IS_TXF_UR; + xm_write16(hw, port, XM_IMSK, msk); + + xm_read16(hw, port, XM_ISRC); + + /* get MMU Command Reg. */ + cmd = xm_read16(hw, port, XM_MMU_CMD); + if (hw->phy_type != SK_PHY_XMAC && skge->duplex == DUPLEX_FULL) + cmd |= XM_MMU_GMII_FD; + + /* + * Workaround BCOM Errata (#10523) for all BCom Phys + * Enable Power Management after link up + */ + if (hw->phy_type == SK_PHY_BCOM) { + xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, + xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL) + & ~PHY_B_AC_DIS_PM); + xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK); + } + + /* enable Rx/Tx */ + xm_write16(hw, port, XM_MMU_CMD, + cmd | XM_MMU_ENA_RX | XM_MMU_ENA_TX); + skge_link_up(skge); +} + + +static inline void bcom_phy_intr(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + u16 isrc; + + isrc = xm_phy_read(hw, port, PHY_BCOM_INT_STAT); + netif_printk(skge, intr, KERN_DEBUG, skge->netdev, + "phy interrupt status 0x%x\n", isrc); + + if (isrc & PHY_B_IS_PSE) + pr_err("%s: uncorrectable pair swap error\n", + hw->dev[port]->name); + + /* Workaround BCom Errata: + * enable and disable loopback mode if "NO HCD" occurs. + */ + if (isrc & PHY_B_IS_NO_HDCL) { + u16 ctrl = xm_phy_read(hw, port, PHY_BCOM_CTRL); + xm_phy_write(hw, port, PHY_BCOM_CTRL, + ctrl | PHY_CT_LOOP); + xm_phy_write(hw, port, PHY_BCOM_CTRL, + ctrl & ~PHY_CT_LOOP); + } + + if (isrc & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE)) + bcom_check_link(hw, port); + +} + +static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val) +{ + int i; + + gma_write16(hw, port, GM_SMI_DATA, val); + gma_write16(hw, port, GM_SMI_CTRL, + GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg)); + for (i = 0; i < PHY_RETRIES; i++) { + udelay(1); + + if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY)) + return 0; + } + + pr_warn("%s: phy write timeout\n", hw->dev[port]->name); + return -EIO; +} + +static int __gm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val) +{ + int i; + + gma_write16(hw, port, GM_SMI_CTRL, + GM_SMI_CT_PHY_AD(hw->phy_addr) + | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD); + + for (i = 0; i < PHY_RETRIES; i++) { + udelay(1); + if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL) + goto ready; + } + + return -ETIMEDOUT; + ready: + *val = gma_read16(hw, port, GM_SMI_DATA); + return 0; +} + +static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg) +{ + u16 v = 0; + if (__gm_phy_read(hw, port, reg, &v)) + pr_warn("%s: phy read timeout\n", hw->dev[port]->name); + return v; +} + +/* Marvell Phy Initialization */ +static void yukon_init(struct skge_hw *hw, int port) +{ + struct skge_port *skge = netdev_priv(hw->dev[port]); + u16 ctrl, ct1000, adv; + + if (skge->autoneg == AUTONEG_ENABLE) { + u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); + + ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK | + PHY_M_EC_MAC_S_MSK); + ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ); + + ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1); + + gm_phy_write(hw, 
port, PHY_MARV_EXT_CTRL, ectrl); + } + + ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL); + if (skge->autoneg == AUTONEG_DISABLE) + ctrl &= ~PHY_CT_ANE; + + ctrl |= PHY_CT_RESET; + gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); + + ctrl = 0; + ct1000 = 0; + adv = PHY_AN_CSMA; + + if (skge->autoneg == AUTONEG_ENABLE) { + if (hw->copper) { + if (skge->advertising & ADVERTISED_1000baseT_Full) + ct1000 |= PHY_M_1000C_AFD; + if (skge->advertising & ADVERTISED_1000baseT_Half) + ct1000 |= PHY_M_1000C_AHD; + if (skge->advertising & ADVERTISED_100baseT_Full) + adv |= PHY_M_AN_100_FD; + if (skge->advertising & ADVERTISED_100baseT_Half) + adv |= PHY_M_AN_100_HD; + if (skge->advertising & ADVERTISED_10baseT_Full) + adv |= PHY_M_AN_10_FD; + if (skge->advertising & ADVERTISED_10baseT_Half) + adv |= PHY_M_AN_10_HD; + + /* Set Flow-control capabilities */ + adv |= phy_pause_map[skge->flow_control]; + } else { + if (skge->advertising & ADVERTISED_1000baseT_Full) + adv |= PHY_M_AN_1000X_AFD; + if (skge->advertising & ADVERTISED_1000baseT_Half) + adv |= PHY_M_AN_1000X_AHD; + + adv |= fiber_pause_map[skge->flow_control]; + } + + /* Restart Auto-negotiation */ + ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG; + } else { + /* forced speed/duplex settings */ + ct1000 = PHY_M_1000C_MSE; + + if (skge->duplex == DUPLEX_FULL) + ctrl |= PHY_CT_DUP_MD; + + switch (skge->speed) { + case SPEED_1000: + ctrl |= PHY_CT_SP1000; + break; + case SPEED_100: + ctrl |= PHY_CT_SP100; + break; + } + + ctrl |= PHY_CT_RESET; + } + + gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000); + + gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv); + gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); + + /* Enable phy interrupt on autonegotiation complete (or link up) */ + if (skge->autoneg == AUTONEG_ENABLE) + gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_MSK); + else + gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK); +} + +static void yukon_reset(struct skge_hw *hw, int port) +{ + gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);/* disable PHY IRQs */ + gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */ + gma_write16(hw, port, GM_MC_ADDR_H2, 0); + gma_write16(hw, port, GM_MC_ADDR_H3, 0); + gma_write16(hw, port, GM_MC_ADDR_H4, 0); + + gma_write16(hw, port, GM_RX_CTRL, + gma_read16(hw, port, GM_RX_CTRL) + | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); +} + +/* Apparently, early versions of Yukon-Lite had wrong chip_id? 
*/ +static int is_yukon_lite_a0(struct skge_hw *hw) +{ + u32 reg; + int ret; + + if (hw->chip_id != CHIP_ID_YUKON) + return 0; + + reg = skge_read32(hw, B2_FAR); + skge_write8(hw, B2_FAR + 3, 0xff); + ret = (skge_read8(hw, B2_FAR + 3) != 0); + skge_write32(hw, B2_FAR, reg); + return ret; +} + +static void yukon_mac_init(struct skge_hw *hw, int port) +{ + struct skge_port *skge = netdev_priv(hw->dev[port]); + int i; + u32 reg; + const u8 *addr = hw->dev[port]->dev_addr; + + /* WA code for COMA mode -- set PHY reset */ + if (hw->chip_id == CHIP_ID_YUKON_LITE && + hw->chip_rev >= CHIP_REV_YU_LITE_A3) { + reg = skge_read32(hw, B2_GP_IO); + reg |= GP_DIR_9 | GP_IO_9; + skge_write32(hw, B2_GP_IO, reg); + } + + /* hard reset */ + skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); + skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET); + + /* WA code for COMA mode -- clear PHY reset */ + if (hw->chip_id == CHIP_ID_YUKON_LITE && + hw->chip_rev >= CHIP_REV_YU_LITE_A3) { + reg = skge_read32(hw, B2_GP_IO); + reg |= GP_DIR_9; + reg &= ~GP_IO_9; + skge_write32(hw, B2_GP_IO, reg); + } + + /* Set hardware config mode */ + reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP | + GPC_ENA_XC | GPC_ANEG_ADV_ALL_M | GPC_ENA_PAUSE; + reg |= hw->copper ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB; + + /* Clear GMC reset */ + skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET); + skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_CLR); + skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR); + + if (skge->autoneg == AUTONEG_DISABLE) { + reg = GM_GPCR_AU_ALL_DIS; + gma_write16(hw, port, GM_GP_CTRL, + gma_read16(hw, port, GM_GP_CTRL) | reg); + + switch (skge->speed) { + case SPEED_1000: + reg &= ~GM_GPCR_SPEED_100; + reg |= GM_GPCR_SPEED_1000; + break; + case SPEED_100: + reg &= ~GM_GPCR_SPEED_1000; + reg |= GM_GPCR_SPEED_100; + break; + case SPEED_10: + reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100); + break; + } + + if (skge->duplex == DUPLEX_FULL) + reg |= GM_GPCR_DUP_FULL; + } else + reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL; + + switch (skge->flow_control) { + case FLOW_MODE_NONE: + skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); + reg |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS; + break; + case FLOW_MODE_LOC_SEND: + /* disable Rx flow-control */ + reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS; + break; + case FLOW_MODE_SYMMETRIC: + case FLOW_MODE_SYM_OR_REM: + /* enable Tx & Rx flow-control */ + break; + } + + gma_write16(hw, port, GM_GP_CTRL, reg); + skge_read16(hw, SK_REG(port, GMAC_IRQ_SRC)); + + yukon_init(hw, port); + + /* MIB clear */ + reg = gma_read16(hw, port, GM_PHY_ADDR); + gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR); + + for (i = 0; i < GM_MIB_CNT_SIZE; i++) + gma_read16(hw, port, GM_MIB_CNT_BASE + 8*i); + gma_write16(hw, port, GM_PHY_ADDR, reg); + + /* transmit control */ + gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF)); + + /* receive control reg: unicast + multicast + no FCS */ + gma_write16(hw, port, GM_RX_CTRL, + GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA); + + /* transmit flow control */ + gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff); + + /* transmit parameter */ + gma_write16(hw, port, GM_TX_PARAM, + TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | + TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) | + TX_IPG_JAM_DATA(TX_IPG_JAM_DEF)); + + /* configure the Serial Mode Register */ + reg = DATA_BLIND_VAL(DATA_BLIND_DEF) + | GM_SMOD_VLAN_ENA + | IPG_DATA_VAL(IPG_DATA_DEF); + + if (hw->dev[port]->mtu > 
ETH_DATA_LEN) + reg |= GM_SMOD_JUMBO_ENA; + + gma_write16(hw, port, GM_SERIAL_MODE, reg); + + /* physical address: used for pause frames */ + gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr); + /* virtual address for data */ + gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr); + + /* enable interrupt mask for counter overflows */ + gma_write16(hw, port, GM_TX_IRQ_MSK, 0); + gma_write16(hw, port, GM_RX_IRQ_MSK, 0); + gma_write16(hw, port, GM_TR_IRQ_MSK, 0); + + /* Initialize Mac Fifo */ + + /* Configure Rx MAC FIFO */ + skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK); + reg = GMF_OPER_ON | GMF_RX_F_FL_ON; + + /* disable Rx GMAC FIFO Flush for YUKON-Lite Rev. A0 only */ + if (is_yukon_lite_a0(hw)) + reg &= ~GMF_RX_F_FL_ON; + + skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR); + skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg); + /* + * because Pause Packet Truncation in GMAC is not working + * we have to increase the Flush Threshold to 64 bytes + * in order to flush pause packets in Rx FIFO on Yukon-1 + */ + skge_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF+1); + + /* Configure Tx MAC FIFO */ + skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR); + skge_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON); +} + +/* Go into power down mode */ +static void yukon_suspend(struct skge_hw *hw, int port) +{ + u16 ctrl; + + ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); + ctrl |= PHY_M_PC_POL_R_DIS; + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); + + ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL); + ctrl |= PHY_CT_RESET; + gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); + + /* switch IEEE compatible power down mode on */ + ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL); + ctrl |= PHY_CT_PDOWN; + gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); +} + +static void yukon_stop(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + + skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0); + yukon_reset(hw, port); + + gma_write16(hw, port, GM_GP_CTRL, + gma_read16(hw, port, GM_GP_CTRL) + & ~(GM_GPCR_TX_ENA|GM_GPCR_RX_ENA)); + gma_read16(hw, port, GM_GP_CTRL); + + yukon_suspend(hw, port); + + /* set GPHY Control reset */ + skge_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); + skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET); +} + +static void yukon_get_stats(struct skge_port *skge, u64 *data) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + int i; + + data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32 + | gma_read32(hw, port, GM_TXO_OK_LO); + data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32 + | gma_read32(hw, port, GM_RXO_OK_LO); + + for (i = 2; i < ARRAY_SIZE(skge_stats); i++) + data[i] = gma_read32(hw, port, + skge_stats[i].gma_offset); +} + +static void yukon_mac_intr(struct skge_hw *hw, int port) +{ + struct net_device *dev = hw->dev[port]; + struct skge_port *skge = netdev_priv(dev); + u8 status = skge_read8(hw, SK_REG(port, GMAC_IRQ_SRC)); + + netif_printk(skge, intr, KERN_DEBUG, skge->netdev, + "mac interrupt status 0x%x\n", status); + + if (status & GM_IS_RX_FF_OR) { + ++dev->stats.rx_fifo_errors; + skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO); + } + + if (status & GM_IS_TX_FF_UR) { + ++dev->stats.tx_fifo_errors; + skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU); + } + +} + +static u16 yukon_speed(const struct skge_hw *hw, u16 aux) +{ + switch (aux & PHY_M_PS_SPEED_MSK) { + case PHY_M_PS_SPEED_1000: + return SPEED_1000; + case PHY_M_PS_SPEED_100: + return SPEED_100; + default: + 
return SPEED_10; + } +} + +static void yukon_link_up(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + u16 reg; + + /* Enable Transmit FIFO Underrun */ + skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK); + + reg = gma_read16(hw, port, GM_GP_CTRL); + if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE) + reg |= GM_GPCR_DUP_FULL; + + /* enable Rx/Tx */ + reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA; + gma_write16(hw, port, GM_GP_CTRL, reg); + + gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK); + skge_link_up(skge); +} + +static void yukon_link_down(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + u16 ctrl; + + ctrl = gma_read16(hw, port, GM_GP_CTRL); + ctrl &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA); + gma_write16(hw, port, GM_GP_CTRL, ctrl); + + if (skge->flow_status == FLOW_STAT_REM_SEND) { + ctrl = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV); + ctrl |= PHY_M_AN_ASP; + /* restore Asymmetric Pause bit */ + gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, ctrl); + } + + skge_link_down(skge); + + yukon_init(hw, port); +} + +static void yukon_phy_intr(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + const char *reason = NULL; + u16 istatus, phystat; + + istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT); + phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT); + + netif_printk(skge, intr, KERN_DEBUG, skge->netdev, + "phy interrupt status 0x%x 0x%x\n", istatus, phystat); + + if (istatus & PHY_M_IS_AN_COMPL) { + if (gm_phy_read(hw, port, PHY_MARV_AUNE_LP) + & PHY_M_AN_RF) { + reason = "remote fault"; + goto failed; + } + + if (gm_phy_read(hw, port, PHY_MARV_1000T_STAT) & PHY_B_1000S_MSF) { + reason = "master/slave fault"; + goto failed; + } + + if (!(phystat & PHY_M_PS_SPDUP_RES)) { + reason = "speed/duplex"; + goto failed; + } + + skge->duplex = (phystat & PHY_M_PS_FULL_DUP) + ? DUPLEX_FULL : DUPLEX_HALF; + skge->speed = yukon_speed(hw, phystat); + + /* We are using IEEE 802.3z/D5.0 Table 37-4 */ + switch (phystat & PHY_M_PS_PAUSE_MSK) { + case PHY_M_PS_PAUSE_MSK: + skge->flow_status = FLOW_STAT_SYMMETRIC; + break; + case PHY_M_PS_RX_P_EN: + skge->flow_status = FLOW_STAT_REM_SEND; + break; + case PHY_M_PS_TX_P_EN: + skge->flow_status = FLOW_STAT_LOC_SEND; + break; + default: + skge->flow_status = FLOW_STAT_NONE; + } + + if (skge->flow_status == FLOW_STAT_NONE || + (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF)) + skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); + else + skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON); + yukon_link_up(skge); + return; + } + + if (istatus & PHY_M_IS_LSP_CHANGE) + skge->speed = yukon_speed(hw, phystat); + + if (istatus & PHY_M_IS_DUP_CHANGE) + skge->duplex = (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF; + if (istatus & PHY_M_IS_LST_CHANGE) { + if (phystat & PHY_M_PS_LINK_UP) + yukon_link_up(skge); + else + yukon_link_down(skge); + } + return; + failed: + pr_err("%s: autonegotiation failed (%s)\n", skge->netdev->name, reason); + + /* XXX restart autonegotiation? 
*/ +} + +static void skge_phy_reset(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + struct net_device *dev = hw->dev[port]; + + netif_stop_queue(skge->netdev); + netif_carrier_off(skge->netdev); + + spin_lock_bh(&hw->phy_lock); + if (is_genesis(hw)) { + genesis_reset(hw, port); + genesis_mac_init(hw, port); + } else { + yukon_reset(hw, port); + yukon_init(hw, port); + } + spin_unlock_bh(&hw->phy_lock); + + skge_set_multicast(dev); +} + +/* Basic MII support */ +static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct mii_ioctl_data *data = if_mii(ifr); + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int err = -EOPNOTSUPP; + + if (!netif_running(dev)) + return -ENODEV; /* Phy still in reset */ + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = hw->phy_addr; + + fallthrough; + case SIOCGMIIREG: { + u16 val = 0; + spin_lock_bh(&hw->phy_lock); + + if (is_genesis(hw)) + err = __xm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val); + else + err = __gm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val); + spin_unlock_bh(&hw->phy_lock); + data->val_out = val; + break; + } + + case SIOCSMIIREG: + spin_lock_bh(&hw->phy_lock); + if (is_genesis(hw)) + err = xm_phy_write(hw, skge->port, data->reg_num & 0x1f, + data->val_in); + else + err = gm_phy_write(hw, skge->port, data->reg_num & 0x1f, + data->val_in); + spin_unlock_bh(&hw->phy_lock); + break; + } + return err; +} + +static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len) +{ + u32 end; + + start /= 8; + len /= 8; + end = start + len - 1; + + skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR); + skge_write32(hw, RB_ADDR(q, RB_START), start); + skge_write32(hw, RB_ADDR(q, RB_WP), start); + skge_write32(hw, RB_ADDR(q, RB_RP), start); + skge_write32(hw, RB_ADDR(q, RB_END), end); + + if (q == Q_R1 || q == Q_R2) { + /* Set thresholds on receive queue's */ + skge_write32(hw, RB_ADDR(q, RB_RX_UTPP), + start + (2*len)/3); + skge_write32(hw, RB_ADDR(q, RB_RX_LTPP), + start + (len/3)); + } else { + /* Enable store & forward on Tx queue's because + * Tx FIFO is only 4K on Genesis and 1K on Yukon + */ + skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD); + } + + skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD); +} + +/* Setup Bus Memory Interface */ +static void skge_qset(struct skge_port *skge, u16 q, + const struct skge_element *e) +{ + struct skge_hw *hw = skge->hw; + u32 watermark = 0x600; + u64 base = skge->dma + (e->desc - skge->mem); + + /* optimization to reduce window on 32bit/33mhz */ + if ((skge_read16(hw, B0_CTST) & (CS_BUS_CLOCK | CS_BUS_SLOT_SZ)) == 0) + watermark /= 2; + + skge_write32(hw, Q_ADDR(q, Q_CSR), CSR_CLR_RESET); + skge_write32(hw, Q_ADDR(q, Q_F), watermark); + skge_write32(hw, Q_ADDR(q, Q_DA_H), (u32)(base >> 32)); + skge_write32(hw, Q_ADDR(q, Q_DA_L), (u32)base); +} + +static int skge_up(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int port = skge->port; + u32 chunk, ram_addr; + size_t rx_size, tx_size; + int err; + + if (!is_valid_ether_addr(dev->dev_addr)) + return -EINVAL; + + netif_info(skge, ifup, skge->netdev, "enabling interface\n"); + + if (dev->mtu > RX_BUF_SIZE) + skge->rx_buf_size = dev->mtu + ETH_HLEN; + else + skge->rx_buf_size = RX_BUF_SIZE; + + + rx_size = skge->rx_ring.count * sizeof(struct skge_rx_desc); + tx_size = skge->tx_ring.count * sizeof(struct skge_tx_desc); + skge->mem_size = tx_size + rx_size; + skge->mem = 
dma_alloc_coherent(&hw->pdev->dev, skge->mem_size, + &skge->dma, GFP_KERNEL); + if (!skge->mem) + return -ENOMEM; + + BUG_ON(skge->dma & 7); + + if (upper_32_bits(skge->dma) != upper_32_bits(skge->dma + skge->mem_size)) { + dev_err(&hw->pdev->dev, "dma_alloc_coherent region crosses 4G boundary\n"); + err = -EINVAL; + goto free_pci_mem; + } + + err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma); + if (err) + goto free_pci_mem; + + err = skge_rx_fill(dev); + if (err) + goto free_rx_ring; + + err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size, + skge->dma + rx_size); + if (err) + goto free_rx_ring; + + if (hw->ports == 1) { + err = request_irq(hw->pdev->irq, skge_intr, IRQF_SHARED, + dev->name, hw); + if (err) { + netdev_err(dev, "Unable to allocate interrupt %d error: %d\n", + hw->pdev->irq, err); + goto free_tx_ring; + } + } + + /* Initialize MAC */ + netif_carrier_off(dev); + spin_lock_bh(&hw->phy_lock); + if (is_genesis(hw)) + genesis_mac_init(hw, port); + else + yukon_mac_init(hw, port); + spin_unlock_bh(&hw->phy_lock); + + /* Configure RAMbuffers - equally between ports and tx/rx */ + chunk = (hw->ram_size - hw->ram_offset) / (hw->ports * 2); + ram_addr = hw->ram_offset + 2 * chunk * port; + + skge_ramset(hw, rxqaddr[port], ram_addr, chunk); + skge_qset(skge, rxqaddr[port], skge->rx_ring.to_clean); + + BUG_ON(skge->tx_ring.to_use != skge->tx_ring.to_clean); + skge_ramset(hw, txqaddr[port], ram_addr+chunk, chunk); + skge_qset(skge, txqaddr[port], skge->tx_ring.to_use); + + /* Start receiver BMU */ + wmb(); + skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F); + skge_led(skge, LED_MODE_ON); + + spin_lock_irq(&hw->hw_lock); + hw->intr_mask |= portmask[port]; + skge_write32(hw, B0_IMSK, hw->intr_mask); + skge_read32(hw, B0_IMSK); + spin_unlock_irq(&hw->hw_lock); + + napi_enable(&skge->napi); + + skge_set_multicast(dev); + + return 0; + + free_tx_ring: + kfree(skge->tx_ring.start); + free_rx_ring: + skge_rx_clean(skge); + kfree(skge->rx_ring.start); + free_pci_mem: + dma_free_coherent(&hw->pdev->dev, skge->mem_size, skge->mem, + skge->dma); + skge->mem = NULL; + + return err; +} + +/* stop receiver */ +static void skge_rx_stop(struct skge_hw *hw, int port) +{ + skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_STOP); + skge_write32(hw, RB_ADDR(port ? Q_R2 : Q_R1, RB_CTRL), + RB_RST_SET|RB_DIS_OP_MD); + skge_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_SET_RESET); +} + +static int skge_down(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int port = skge->port; + + if (!skge->mem) + return 0; + + netif_info(skge, ifdown, skge->netdev, "disabling interface\n"); + + netif_tx_disable(dev); + + if (is_genesis(hw) && hw->phy_type == SK_PHY_XMAC) + del_timer_sync(&skge->link_timer); + + napi_disable(&skge->napi); + netif_carrier_off(dev); + + spin_lock_irq(&hw->hw_lock); + hw->intr_mask &= ~portmask[port]; + skge_write32(hw, B0_IMSK, (hw->ports == 1) ? 
0 : hw->intr_mask); + skge_read32(hw, B0_IMSK); + spin_unlock_irq(&hw->hw_lock); + + if (hw->ports == 1) + free_irq(hw->pdev->irq, hw); + + skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_REG_OFF); + if (is_genesis(hw)) + genesis_stop(skge); + else + yukon_stop(skge); + + /* Stop transmitter */ + skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP); + skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), + RB_RST_SET|RB_DIS_OP_MD); + + + /* Disable Force Sync bit and Enable Alloc bit */ + skge_write8(hw, SK_REG(port, TXA_CTRL), + TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC); + + /* Stop Interval Timer and Limit Counter of Tx Arbiter */ + skge_write32(hw, SK_REG(port, TXA_ITI_INI), 0L); + skge_write32(hw, SK_REG(port, TXA_LIM_INI), 0L); + + /* Reset PCI FIFO */ + skge_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_SET_RESET); + skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET); + + /* Reset the RAM Buffer async Tx queue */ + skge_write8(hw, RB_ADDR(port == 0 ? Q_XA1 : Q_XA2, RB_CTRL), RB_RST_SET); + + skge_rx_stop(hw, port); + + if (is_genesis(hw)) { + skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_SET); + skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_SET); + } else { + skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); + skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET); + } + + skge_led(skge, LED_MODE_OFF); + + netif_tx_lock_bh(dev); + skge_tx_clean(dev); + netif_tx_unlock_bh(dev); + + skge_rx_clean(skge); + + kfree(skge->rx_ring.start); + kfree(skge->tx_ring.start); + dma_free_coherent(&hw->pdev->dev, skge->mem_size, skge->mem, + skge->dma); + skge->mem = NULL; + return 0; +} + +static inline int skge_avail(const struct skge_ring *ring) +{ + smp_mb(); + return ((ring->to_clean > ring->to_use) ? 0 : ring->count) + + (ring->to_clean - ring->to_use) - 1; +} + +static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, + struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + struct skge_element *e; + struct skge_tx_desc *td; + int i; + u32 control, len; + dma_addr_t map; + + if (skb_padto(skb, ETH_ZLEN)) + return NETDEV_TX_OK; + + if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1)) + return NETDEV_TX_BUSY; + + e = skge->tx_ring.to_use; + td = e->desc; + BUG_ON(td->control & BMU_OWN); + e->skb = skb; + len = skb_headlen(skb); + map = dma_map_single(&hw->pdev->dev, skb->data, len, DMA_TO_DEVICE); + if (dma_mapping_error(&hw->pdev->dev, map)) + goto mapping_error; + + dma_unmap_addr_set(e, mapaddr, map); + dma_unmap_len_set(e, maplen, len); + + td->dma_lo = lower_32_bits(map); + td->dma_hi = upper_32_bits(map); + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + const int offset = skb_checksum_start_offset(skb); + + /* This seems backwards, but it is what the sk98lin + * does. Looks like hardware is wrong? + */ + if (ipip_hdr(skb)->protocol == IPPROTO_UDP && + hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON) + control = BMU_TCP_CHECK; + else + control = BMU_UDP_CHECK; + + td->csum_offs = 0; + td->csum_start = offset; + td->csum_write = offset + skb->csum_offset; + } else + control = BMU_CHECK; + + if (!skb_shinfo(skb)->nr_frags) /* single buffer i.e. 
no fragments */ + control |= BMU_EOF | BMU_IRQ_EOF; + else { + struct skge_tx_desc *tf = td; + + control |= BMU_STFWD; + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + map = skb_frag_dma_map(&hw->pdev->dev, frag, 0, + skb_frag_size(frag), DMA_TO_DEVICE); + if (dma_mapping_error(&hw->pdev->dev, map)) + goto mapping_unwind; + + e = e->next; + e->skb = skb; + tf = e->desc; + BUG_ON(tf->control & BMU_OWN); + + tf->dma_lo = lower_32_bits(map); + tf->dma_hi = upper_32_bits(map); + dma_unmap_addr_set(e, mapaddr, map); + dma_unmap_len_set(e, maplen, skb_frag_size(frag)); + + tf->control = BMU_OWN | BMU_SW | control | skb_frag_size(frag); + } + tf->control |= BMU_EOF | BMU_IRQ_EOF; + } + /* Make sure all the descriptors written */ + wmb(); + td->control = BMU_OWN | BMU_SW | BMU_STF | control | len; + wmb(); + + netdev_sent_queue(dev, skb->len); + + skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START); + + netif_printk(skge, tx_queued, KERN_DEBUG, skge->netdev, + "tx queued, slot %td, len %d\n", + e - skge->tx_ring.start, skb->len); + + skge->tx_ring.to_use = e->next; + smp_wmb(); + + if (skge_avail(&skge->tx_ring) <= TX_LOW_WATER) { + netdev_dbg(dev, "transmit queue full\n"); + netif_stop_queue(dev); + } + + return NETDEV_TX_OK; + +mapping_unwind: + e = skge->tx_ring.to_use; + dma_unmap_single(&hw->pdev->dev, dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), DMA_TO_DEVICE); + while (i-- > 0) { + e = e->next; + dma_unmap_page(&hw->pdev->dev, dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), DMA_TO_DEVICE); + } + +mapping_error: + if (net_ratelimit()) + dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + + +/* Free resources associated with this reing element */ +static inline void skge_tx_unmap(struct pci_dev *pdev, struct skge_element *e, + u32 control) +{ + /* skb header vs. 
fragment */ + if (control & BMU_STF) + dma_unmap_single(&pdev->dev, dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), DMA_TO_DEVICE); + else + dma_unmap_page(&pdev->dev, dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), DMA_TO_DEVICE); +} + +/* Free all buffers in transmit ring */ +static void skge_tx_clean(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_element *e; + + for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) { + struct skge_tx_desc *td = e->desc; + + skge_tx_unmap(skge->hw->pdev, e, td->control); + + if (td->control & BMU_EOF) + dev_kfree_skb(e->skb); + td->control = 0; + } + + netdev_reset_queue(dev); + skge->tx_ring.to_clean = e; +} + +static void skge_tx_timeout(struct net_device *dev, unsigned int txqueue) +{ + struct skge_port *skge = netdev_priv(dev); + + netif_printk(skge, timer, KERN_DEBUG, skge->netdev, "tx timeout\n"); + + skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP); + skge_tx_clean(dev); + netif_wake_queue(dev); +} + +static int skge_change_mtu(struct net_device *dev, int new_mtu) +{ + int err; + + if (!netif_running(dev)) { + dev->mtu = new_mtu; + return 0; + } + + skge_down(dev); + + dev->mtu = new_mtu; + + err = skge_up(dev); + if (err) + dev_close(dev); + + return err; +} + +static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 }; + +static void genesis_add_filter(u8 filter[8], const u8 *addr) +{ + u32 crc, bit; + + crc = ether_crc_le(ETH_ALEN, addr); + bit = ~crc & 0x3f; + filter[bit/8] |= 1 << (bit%8); +} + +static void genesis_set_multicast(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int port = skge->port; + struct netdev_hw_addr *ha; + u32 mode; + u8 filter[8]; + + mode = xm_read32(hw, port, XM_MODE); + mode |= XM_MD_ENA_HASH; + if (dev->flags & IFF_PROMISC) + mode |= XM_MD_ENA_PROM; + else + mode &= ~XM_MD_ENA_PROM; + + if (dev->flags & IFF_ALLMULTI) + memset(filter, 0xff, sizeof(filter)); + else { + memset(filter, 0, sizeof(filter)); + + if (skge->flow_status == FLOW_STAT_REM_SEND || + skge->flow_status == FLOW_STAT_SYMMETRIC) + genesis_add_filter(filter, pause_mc_addr); + + netdev_for_each_mc_addr(ha, dev) + genesis_add_filter(filter, ha->addr); + } + + xm_write32(hw, port, XM_MODE, mode); + xm_outhash(hw, port, XM_HSM, filter); +} + +static void yukon_add_filter(u8 filter[8], const u8 *addr) +{ + u32 bit = ether_crc(ETH_ALEN, addr) & 0x3f; + filter[bit/8] |= 1 << (bit%8); +} + +static void yukon_set_multicast(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int port = skge->port; + struct netdev_hw_addr *ha; + int rx_pause = (skge->flow_status == FLOW_STAT_REM_SEND || + skge->flow_status == FLOW_STAT_SYMMETRIC); + u16 reg; + u8 filter[8]; + + memset(filter, 0, sizeof(filter)); + + reg = gma_read16(hw, port, GM_RX_CTRL); + reg |= GM_RXCR_UCF_ENA; + + if (dev->flags & IFF_PROMISC) /* promiscuous */ + reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); + else if (dev->flags & IFF_ALLMULTI) /* all multicast */ + memset(filter, 0xff, sizeof(filter)); + else if (netdev_mc_empty(dev) && !rx_pause)/* no multicast */ + reg &= ~GM_RXCR_MCF_ENA; + else { + reg |= GM_RXCR_MCF_ENA; + + if (rx_pause) + yukon_add_filter(filter, pause_mc_addr); + + netdev_for_each_mc_addr(ha, dev) + yukon_add_filter(filter, ha->addr); + } + + + gma_write16(hw, port, GM_MC_ADDR_H1, + (u16)filter[0] | ((u16)filter[1] << 8)); + gma_write16(hw, port, GM_MC_ADDR_H2, 
+ (u16)filter[2] | ((u16)filter[3] << 8)); + gma_write16(hw, port, GM_MC_ADDR_H3, + (u16)filter[4] | ((u16)filter[5] << 8)); + gma_write16(hw, port, GM_MC_ADDR_H4, + (u16)filter[6] | ((u16)filter[7] << 8)); + + gma_write16(hw, port, GM_RX_CTRL, reg); +} + +static inline u16 phy_length(const struct skge_hw *hw, u32 status) +{ + if (is_genesis(hw)) + return status >> XMR_FS_LEN_SHIFT; + else + return status >> GMR_FS_LEN_SHIFT; +} + +static inline int bad_phy_status(const struct skge_hw *hw, u32 status) +{ + if (is_genesis(hw)) + return (status & (XMR_FS_ERR | XMR_FS_2L_VLAN)) != 0; + else + return (status & GMR_FS_ANY_ERR) || + (status & GMR_FS_RX_OK) == 0; +} + +static void skge_set_multicast(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + + if (is_genesis(skge->hw)) + genesis_set_multicast(dev); + else + yukon_set_multicast(dev); + +} + + +/* Get receive buffer from descriptor. + * Handles copy of small buffers and reallocation failures + */ +static struct sk_buff *skge_rx_get(struct net_device *dev, + struct skge_element *e, + u32 control, u32 status, u16 csum) +{ + struct skge_port *skge = netdev_priv(dev); + struct sk_buff *skb; + u16 len = control & BMU_BBC; + + netif_printk(skge, rx_status, KERN_DEBUG, skge->netdev, + "rx slot %td status 0x%x len %d\n", + e - skge->rx_ring.start, status, len); + + if (len > skge->rx_buf_size) + goto error; + + if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF)) + goto error; + + if (bad_phy_status(skge->hw, status)) + goto error; + + if (phy_length(skge->hw, status) != len) + goto error; + + if (len < RX_COPY_THRESHOLD) { + skb = netdev_alloc_skb_ip_align(dev, len); + if (!skb) + goto resubmit; + + dma_sync_single_for_cpu(&skge->hw->pdev->dev, + dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), + DMA_FROM_DEVICE); + skb_copy_from_linear_data(e->skb, skb->data, len); + dma_sync_single_for_device(&skge->hw->pdev->dev, + dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), + DMA_FROM_DEVICE); + skge_rx_reuse(e, skge->rx_buf_size); + } else { + struct skge_element ee; + struct sk_buff *nskb; + + nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size); + if (!nskb) + goto resubmit; + + ee = *e; + + skb = ee.skb; + prefetch(skb->data); + + if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) { + dev_kfree_skb(nskb); + goto resubmit; + } + + dma_unmap_single(&skge->hw->pdev->dev, + dma_unmap_addr(&ee, mapaddr), + dma_unmap_len(&ee, maplen), DMA_FROM_DEVICE); + } + + skb_put(skb, len); + + if (dev->features & NETIF_F_RXCSUM) { + skb->csum = le16_to_cpu(csum); + skb->ip_summed = CHECKSUM_COMPLETE; + } + + skb->protocol = eth_type_trans(skb, dev); + + return skb; +error: + + netif_printk(skge, rx_err, KERN_DEBUG, skge->netdev, + "rx err, slot %td control 0x%x status 0x%x\n", + e - skge->rx_ring.start, control, status); + + if (is_genesis(skge->hw)) { + if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR)) + dev->stats.rx_length_errors++; + if (status & XMR_FS_FRA_ERR) + dev->stats.rx_frame_errors++; + if (status & XMR_FS_FCS_ERR) + dev->stats.rx_crc_errors++; + } else { + if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE)) + dev->stats.rx_length_errors++; + if (status & GMR_FS_FRAGMENT) + dev->stats.rx_frame_errors++; + if (status & GMR_FS_CRC_ERR) + dev->stats.rx_crc_errors++; + } + +resubmit: + skge_rx_reuse(e, skge->rx_buf_size); + return NULL; +} + +/* Free all buffers in Tx ring which are no longer owned by device */ +static void skge_tx_done(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct 
skge_ring *ring = &skge->tx_ring; + struct skge_element *e; + unsigned int bytes_compl = 0, pkts_compl = 0; + + skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F); + + for (e = ring->to_clean; e != ring->to_use; e = e->next) { + u32 control = ((const struct skge_tx_desc *) e->desc)->control; + + if (control & BMU_OWN) + break; + + skge_tx_unmap(skge->hw->pdev, e, control); + + if (control & BMU_EOF) { + netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev, + "tx done slot %td\n", + e - skge->tx_ring.start); + + pkts_compl++; + bytes_compl += e->skb->len; + + dev_consume_skb_any(e->skb); + } + } + netdev_completed_queue(dev, pkts_compl, bytes_compl); + skge->tx_ring.to_clean = e; + + /* Can run lockless until we need to synchronize to restart queue. */ + smp_mb(); + + if (unlikely(netif_queue_stopped(dev) && + skge_avail(&skge->tx_ring) > TX_LOW_WATER)) { + netif_tx_lock(dev); + if (unlikely(netif_queue_stopped(dev) && + skge_avail(&skge->tx_ring) > TX_LOW_WATER)) { + netif_wake_queue(dev); + + } + netif_tx_unlock(dev); + } +} + +static int skge_poll(struct napi_struct *napi, int budget) +{ + struct skge_port *skge = container_of(napi, struct skge_port, napi); + struct net_device *dev = skge->netdev; + struct skge_hw *hw = skge->hw; + struct skge_ring *ring = &skge->rx_ring; + struct skge_element *e; + int work_done = 0; + + skge_tx_done(dev); + + skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F); + + for (e = ring->to_clean; prefetch(e->next), work_done < budget; e = e->next) { + struct skge_rx_desc *rd = e->desc; + struct sk_buff *skb; + u32 control; + + rmb(); + control = rd->control; + if (control & BMU_OWN) + break; + + skb = skge_rx_get(dev, e, control, rd->status, rd->csum2); + if (likely(skb)) { + napi_gro_receive(napi, skb); + ++work_done; + } + } + ring->to_clean = e; + + /* restart receiver */ + wmb(); + skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START); + + if (work_done < budget && napi_complete_done(napi, work_done)) { + unsigned long flags; + + spin_lock_irqsave(&hw->hw_lock, flags); + hw->intr_mask |= napimask[skge->port]; + skge_write32(hw, B0_IMSK, hw->intr_mask); + skge_read32(hw, B0_IMSK); + spin_unlock_irqrestore(&hw->hw_lock, flags); + } + + return work_done; +} + +/* Parity errors seem to happen when Genesis is connected to a switch + * with no other ports present. Heartbeat error?? + */ +static void skge_mac_parity(struct skge_hw *hw, int port) +{ + struct net_device *dev = hw->dev[port]; + + ++dev->stats.tx_heartbeat_errors; + + if (is_genesis(hw)) + skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), + MFF_CLR_PERR); + else + /* HW-Bug #8: cleared by GMF_CLI_TX_FC instead of GMF_CLI_TX_PE */ + skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), + (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0) + ? 
GMF_CLI_TX_FC : GMF_CLI_TX_PE); +} + +static void skge_mac_intr(struct skge_hw *hw, int port) +{ + if (is_genesis(hw)) + genesis_mac_intr(hw, port); + else + yukon_mac_intr(hw, port); +} + +/* Handle device specific framing and timeout interrupts */ +static void skge_error_irq(struct skge_hw *hw) +{ + struct pci_dev *pdev = hw->pdev; + u32 hwstatus = skge_read32(hw, B0_HWE_ISRC); + + if (is_genesis(hw)) { + /* clear xmac errors */ + if (hwstatus & (IS_NO_STAT_M1|IS_NO_TIST_M1)) + skge_write16(hw, RX_MFF_CTRL1, MFF_CLR_INSTAT); + if (hwstatus & (IS_NO_STAT_M2|IS_NO_TIST_M2)) + skge_write16(hw, RX_MFF_CTRL2, MFF_CLR_INSTAT); + } else { + /* Timestamp (unused) overflow */ + if (hwstatus & IS_IRQ_TIST_OV) + skge_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); + } + + if (hwstatus & IS_RAM_RD_PAR) { + dev_err(&pdev->dev, "Ram read data parity error\n"); + skge_write16(hw, B3_RI_CTRL, RI_CLR_RD_PERR); + } + + if (hwstatus & IS_RAM_WR_PAR) { + dev_err(&pdev->dev, "Ram write data parity error\n"); + skge_write16(hw, B3_RI_CTRL, RI_CLR_WR_PERR); + } + + if (hwstatus & IS_M1_PAR_ERR) + skge_mac_parity(hw, 0); + + if (hwstatus & IS_M2_PAR_ERR) + skge_mac_parity(hw, 1); + + if (hwstatus & IS_R1_PAR_ERR) { + dev_err(&pdev->dev, "%s: receive queue parity error\n", + hw->dev[0]->name); + skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P); + } + + if (hwstatus & IS_R2_PAR_ERR) { + dev_err(&pdev->dev, "%s: receive queue parity error\n", + hw->dev[1]->name); + skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P); + } + + if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) { + u16 pci_status, pci_cmd; + + pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); + pci_read_config_word(pdev, PCI_STATUS, &pci_status); + + dev_err(&pdev->dev, "PCI error cmd=%#x status=%#x\n", + pci_cmd, pci_status); + + /* Write the error bits back to clear them. */ + pci_status &= PCI_STATUS_ERROR_BITS; + skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); + pci_write_config_word(pdev, PCI_COMMAND, + pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY); + pci_write_config_word(pdev, PCI_STATUS, pci_status); + skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); + + /* if error still set then just ignore it */ + hwstatus = skge_read32(hw, B0_HWE_ISRC); + if (hwstatus & IS_IRQ_STAT) { + dev_warn(&hw->pdev->dev, "unable to clear error (so ignoring them)\n"); + hw->intr_mask &= ~IS_HW_ERR; + } + } +} + +/* + * Interrupt from PHY are handled in tasklet (softirq) + * because accessing phy registers requires spin wait which might + * cause excess interrupt latency. 
+ */ +static void skge_extirq(struct tasklet_struct *t) +{ + struct skge_hw *hw = from_tasklet(hw, t, phy_task); + int port; + + for (port = 0; port < hw->ports; port++) { + struct net_device *dev = hw->dev[port]; + + if (netif_running(dev)) { + struct skge_port *skge = netdev_priv(dev); + + spin_lock(&hw->phy_lock); + if (!is_genesis(hw)) + yukon_phy_intr(skge); + else if (hw->phy_type == SK_PHY_BCOM) + bcom_phy_intr(skge); + spin_unlock(&hw->phy_lock); + } + } + + spin_lock_irq(&hw->hw_lock); + hw->intr_mask |= IS_EXT_REG; + skge_write32(hw, B0_IMSK, hw->intr_mask); + skge_read32(hw, B0_IMSK); + spin_unlock_irq(&hw->hw_lock); +} + +static irqreturn_t skge_intr(int irq, void *dev_id) +{ + struct skge_hw *hw = dev_id; + u32 status; + int handled = 0; + + spin_lock(&hw->hw_lock); + /* Reading this register masks IRQ */ + status = skge_read32(hw, B0_SP_ISRC); + if (status == 0 || status == ~0) + goto out; + + handled = 1; + status &= hw->intr_mask; + if (status & IS_EXT_REG) { + hw->intr_mask &= ~IS_EXT_REG; + tasklet_schedule(&hw->phy_task); + } + + if (status & (IS_XA1_F|IS_R1_F)) { + struct skge_port *skge = netdev_priv(hw->dev[0]); + hw->intr_mask &= ~(IS_XA1_F|IS_R1_F); + napi_schedule(&skge->napi); + } + + if (status & IS_PA_TO_TX1) + skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1); + + if (status & IS_PA_TO_RX1) { + ++hw->dev[0]->stats.rx_over_errors; + skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1); + } + + + if (status & IS_MAC1) + skge_mac_intr(hw, 0); + + if (hw->dev[1]) { + struct skge_port *skge = netdev_priv(hw->dev[1]); + + if (status & (IS_XA2_F|IS_R2_F)) { + hw->intr_mask &= ~(IS_XA2_F|IS_R2_F); + napi_schedule(&skge->napi); + } + + if (status & IS_PA_TO_RX2) { + ++hw->dev[1]->stats.rx_over_errors; + skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2); + } + + if (status & IS_PA_TO_TX2) + skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX2); + + if (status & IS_MAC2) + skge_mac_intr(hw, 1); + } + + if (status & IS_HW_ERR) + skge_error_irq(hw); +out: + skge_write32(hw, B0_IMSK, hw->intr_mask); + skge_read32(hw, B0_IMSK); + spin_unlock(&hw->hw_lock); + + return IRQ_RETVAL(handled); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void skge_netpoll(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + + disable_irq(dev->irq); + skge_intr(dev->irq, skge->hw); + enable_irq(dev->irq); +} +#endif + +static int skge_set_mac_address(struct net_device *dev, void *p) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + unsigned port = skge->port; + const struct sockaddr *addr = p; + u16 ctrl; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + + if (!netif_running(dev)) { + memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN); + memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN); + } else { + /* disable Rx */ + spin_lock_bh(&hw->phy_lock); + ctrl = gma_read16(hw, port, GM_GP_CTRL); + gma_write16(hw, port, GM_GP_CTRL, ctrl & ~GM_GPCR_RX_ENA); + + memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN); + memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN); + + if (is_genesis(hw)) + xm_outaddr(hw, port, XM_SA, dev->dev_addr); + else { + gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr); + gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr); + } + + gma_write16(hw, port, GM_GP_CTRL, ctrl); + spin_unlock_bh(&hw->phy_lock); + } + + return 0; +} + +static const struct { + u8 id; + const char *name; +} skge_chips[] = { + { 
CHIP_ID_GENESIS, "Genesis" }, + { CHIP_ID_YUKON, "Yukon" }, + { CHIP_ID_YUKON_LITE, "Yukon-Lite"}, + { CHIP_ID_YUKON_LP, "Yukon-LP"}, +}; + +static const char *skge_board_name(const struct skge_hw *hw) +{ + int i; + static char buf[16]; + + for (i = 0; i < ARRAY_SIZE(skge_chips); i++) + if (skge_chips[i].id == hw->chip_id) + return skge_chips[i].name; + + snprintf(buf, sizeof(buf), "chipid 0x%x", hw->chip_id); + return buf; +} + + +/* + * Setup the board data structure, but don't bring up + * the port(s) + */ +static int skge_reset(struct skge_hw *hw) +{ + u32 reg; + u16 ctst, pci_status; + u8 t8, mac_cfg, pmd_type; + int i; + + ctst = skge_read16(hw, B0_CTST); + + /* do a SW reset */ + skge_write8(hw, B0_CTST, CS_RST_SET); + skge_write8(hw, B0_CTST, CS_RST_CLR); + + /* clear PCI errors, if any */ + skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); + skge_write8(hw, B2_TST_CTRL2, 0); + + pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status); + pci_write_config_word(hw->pdev, PCI_STATUS, + pci_status | PCI_STATUS_ERROR_BITS); + skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); + skge_write8(hw, B0_CTST, CS_MRST_CLR); + + /* restore CLK_RUN bits (for Yukon-Lite) */ + skge_write16(hw, B0_CTST, + ctst & (CS_CLK_RUN_HOT|CS_CLK_RUN_RST|CS_CLK_RUN_ENA)); + + hw->chip_id = skge_read8(hw, B2_CHIP_ID); + hw->phy_type = skge_read8(hw, B2_E_1) & 0xf; + pmd_type = skge_read8(hw, B2_PMD_TYP); + hw->copper = (pmd_type == 'T' || pmd_type == '1'); + + switch (hw->chip_id) { + case CHIP_ID_GENESIS: +#ifdef CONFIG_SKGE_GENESIS + switch (hw->phy_type) { + case SK_PHY_XMAC: + hw->phy_addr = PHY_ADDR_XMAC; + break; + case SK_PHY_BCOM: + hw->phy_addr = PHY_ADDR_BCOM; + break; + default: + dev_err(&hw->pdev->dev, "unsupported phy type 0x%x\n", + hw->phy_type); + return -EOPNOTSUPP; + } + break; +#else + dev_err(&hw->pdev->dev, "Genesis chip detected but not configured\n"); + return -EOPNOTSUPP; +#endif + + case CHIP_ID_YUKON: + case CHIP_ID_YUKON_LITE: + case CHIP_ID_YUKON_LP: + if (hw->phy_type < SK_PHY_MARV_COPPER && pmd_type != 'S') + hw->copper = 1; + + hw->phy_addr = PHY_ADDR_MARV; + break; + + default: + dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n", + hw->chip_id); + return -EOPNOTSUPP; + } + + mac_cfg = skge_read8(hw, B2_MAC_CFG); + hw->ports = (mac_cfg & CFG_SNG_MAC) ? 
1 : 2; + hw->chip_rev = (mac_cfg & CFG_CHIP_R_MSK) >> 4; + + /* read the adapters RAM size */ + t8 = skge_read8(hw, B2_E_0); + if (is_genesis(hw)) { + if (t8 == 3) { + /* special case: 4 x 64k x 36, offset = 0x80000 */ + hw->ram_size = 0x100000; + hw->ram_offset = 0x80000; + } else + hw->ram_size = t8 * 512; + } else if (t8 == 0) + hw->ram_size = 0x20000; + else + hw->ram_size = t8 * 4096; + + hw->intr_mask = IS_HW_ERR; + + /* Use PHY IRQ for all but fiber based Genesis board */ + if (!(is_genesis(hw) && hw->phy_type == SK_PHY_XMAC)) + hw->intr_mask |= IS_EXT_REG; + + if (is_genesis(hw)) + genesis_init(hw); + else { + /* switch power to VCC (WA for VAUX problem) */ + skge_write8(hw, B0_POWER_CTRL, + PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON); + + /* avoid boards with stuck Hardware error bits */ + if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) && + (skge_read32(hw, B0_HWE_ISRC) & IS_IRQ_SENSOR)) { + dev_warn(&hw->pdev->dev, "stuck hardware sensor bit\n"); + hw->intr_mask &= ~IS_HW_ERR; + } + + /* Clear PHY COMA */ + skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); + pci_read_config_dword(hw->pdev, PCI_DEV_REG1, ®); + reg &= ~PCI_PHY_COMA; + pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg); + skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); + + + for (i = 0; i < hw->ports; i++) { + skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET); + skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR); + } + } + + /* turn off hardware timer (unused) */ + skge_write8(hw, B2_TI_CTRL, TIM_STOP); + skge_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ); + skge_write8(hw, B0_LED, LED_STAT_ON); + + /* enable the Tx Arbiters */ + for (i = 0; i < hw->ports; i++) + skge_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB); + + /* Initialize ram interface */ + skge_write16(hw, B3_RI_CTRL, RI_RST_CLR); + + skge_write8(hw, B3_RI_WTO_R1, SK_RI_TO_53); + skge_write8(hw, B3_RI_WTO_XA1, SK_RI_TO_53); + skge_write8(hw, B3_RI_WTO_XS1, SK_RI_TO_53); + skge_write8(hw, B3_RI_RTO_R1, SK_RI_TO_53); + skge_write8(hw, B3_RI_RTO_XA1, SK_RI_TO_53); + skge_write8(hw, B3_RI_RTO_XS1, SK_RI_TO_53); + skge_write8(hw, B3_RI_WTO_R2, SK_RI_TO_53); + skge_write8(hw, B3_RI_WTO_XA2, SK_RI_TO_53); + skge_write8(hw, B3_RI_WTO_XS2, SK_RI_TO_53); + skge_write8(hw, B3_RI_RTO_R2, SK_RI_TO_53); + skge_write8(hw, B3_RI_RTO_XA2, SK_RI_TO_53); + skge_write8(hw, B3_RI_RTO_XS2, SK_RI_TO_53); + + skge_write32(hw, B0_HWE_IMSK, IS_ERR_MSK); + + /* Set interrupt moderation for Transmit only + * Receive interrupts avoided by NAPI + */ + skge_write32(hw, B2_IRQM_MSK, IS_XA1_F|IS_XA2_F); + skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100)); + skge_write32(hw, B2_IRQM_CTRL, TIM_START); + + /* Leave irq disabled until first port is brought up. 
*/ + skge_write32(hw, B0_IMSK, 0); + + for (i = 0; i < hw->ports; i++) { + if (is_genesis(hw)) + genesis_reset(hw, i); + else + yukon_reset(hw, i); + } + + return 0; +} + + +#ifdef CONFIG_SKGE_DEBUG + +static struct dentry *skge_debug; + +static int skge_debug_show(struct seq_file *seq, void *v) +{ + struct net_device *dev = seq->private; + const struct skge_port *skge = netdev_priv(dev); + const struct skge_hw *hw = skge->hw; + const struct skge_element *e; + + if (!netif_running(dev)) + return -ENETDOWN; + + seq_printf(seq, "IRQ src=%x mask=%x\n", skge_read32(hw, B0_ISRC), + skge_read32(hw, B0_IMSK)); + + seq_printf(seq, "Tx Ring: (%d)\n", skge_avail(&skge->tx_ring)); + for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) { + const struct skge_tx_desc *t = e->desc; + seq_printf(seq, "%#x dma=%#x%08x %#x csum=%#x/%x/%x\n", + t->control, t->dma_hi, t->dma_lo, t->status, + t->csum_offs, t->csum_write, t->csum_start); + } + + seq_puts(seq, "\nRx Ring:\n"); + for (e = skge->rx_ring.to_clean; ; e = e->next) { + const struct skge_rx_desc *r = e->desc; + + if (r->control & BMU_OWN) + break; + + seq_printf(seq, "%#x dma=%#x%08x %#x %#x csum=%#x/%x\n", + r->control, r->dma_hi, r->dma_lo, r->status, + r->timestamp, r->csum1, r->csum1_start); + } + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(skge_debug); + +/* + * Use network device events to create/remove/rename + * debugfs file entries + */ +static int skge_device_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct skge_port *skge; + + if (dev->netdev_ops->ndo_open != &skge_up || !skge_debug) + goto done; + + skge = netdev_priv(dev); + switch (event) { + case NETDEV_CHANGENAME: + if (skge->debugfs) + skge->debugfs = debugfs_rename(skge_debug, + skge->debugfs, + skge_debug, dev->name); + break; + + case NETDEV_GOING_DOWN: + debugfs_remove(skge->debugfs); + skge->debugfs = NULL; + break; + + case NETDEV_UP: + skge->debugfs = debugfs_create_file(dev->name, 0444, skge_debug, + dev, &skge_debug_fops); + break; + } + +done: + return NOTIFY_DONE; +} + +static struct notifier_block skge_notifier = { + .notifier_call = skge_device_event, +}; + + +static __init void skge_debug_init(void) +{ + skge_debug = debugfs_create_dir("skge", NULL); + + register_netdevice_notifier(&skge_notifier); +} + +static __exit void skge_debug_cleanup(void) +{ + if (skge_debug) { + unregister_netdevice_notifier(&skge_notifier); + debugfs_remove(skge_debug); + skge_debug = NULL; + } +} + +#else +#define skge_debug_init() +#define skge_debug_cleanup() +#endif + +static const struct net_device_ops skge_netdev_ops = { + .ndo_open = skge_up, + .ndo_stop = skge_down, + .ndo_start_xmit = skge_xmit_frame, + .ndo_do_ioctl = skge_ioctl, + .ndo_get_stats = skge_get_stats, + .ndo_tx_timeout = skge_tx_timeout, + .ndo_change_mtu = skge_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = skge_set_multicast, + .ndo_set_mac_address = skge_set_mac_address, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = skge_netpoll, +#endif +}; + + +/* Initialize network device */ +static struct net_device *skge_devinit(struct skge_hw *hw, int port, + int highmem) +{ + struct skge_port *skge; + struct net_device *dev = alloc_etherdev(sizeof(*skge)); + + if (!dev) + return NULL; + + SET_NETDEV_DEV(dev, &hw->pdev->dev); + dev->netdev_ops = &skge_netdev_ops; + dev->ethtool_ops = &skge_ethtool_ops; + dev->watchdog_timeo = TX_WATCHDOG; + dev->irq = hw->pdev->irq; + + /* MTU 
range: 60 - 9000 */ + dev->min_mtu = ETH_ZLEN; + dev->max_mtu = ETH_JUMBO_MTU; + + if (highmem) + dev->features |= NETIF_F_HIGHDMA; + + skge = netdev_priv(dev); + netif_napi_add(dev, &skge->napi, skge_poll, NAPI_WEIGHT); + skge->netdev = dev; + skge->hw = hw; + skge->msg_enable = netif_msg_init(debug, default_msg); + + skge->tx_ring.count = DEFAULT_TX_RING_SIZE; + skge->rx_ring.count = DEFAULT_RX_RING_SIZE; + + /* Auto speed and flow control */ + skge->autoneg = AUTONEG_ENABLE; + skge->flow_control = FLOW_MODE_SYM_OR_REM; + skge->duplex = -1; + skge->speed = -1; + skge->advertising = skge_supported_modes(hw); + + if (device_can_wakeup(&hw->pdev->dev)) { + skge->wol = wol_supported(hw) & WAKE_MAGIC; + device_set_wakeup_enable(&hw->pdev->dev, skge->wol); + } + + hw->dev[port] = dev; + + skge->port = port; + + /* Only used for Genesis XMAC */ + if (is_genesis(hw)) + timer_setup(&skge->link_timer, xm_link_timer, 0); + else { + dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | + NETIF_F_RXCSUM; + dev->features |= dev->hw_features; + } + + /* read the mac address */ + memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN); + + return dev; +} + +static void skge_show_addr(struct net_device *dev) +{ + const struct skge_port *skge = netdev_priv(dev); + + netif_info(skge, probe, skge->netdev, "addr %pM\n", dev->dev_addr); +} + +static int only_32bit_dma; + +static int skge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *dev, *dev1; + struct skge_hw *hw; + int err, using_dac = 0; + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "cannot enable PCI device\n"); + goto err_out; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + dev_err(&pdev->dev, "cannot obtain PCI resources\n"); + goto err_out_disable_pdev; + } + + pci_set_master(pdev); + + if (!only_32bit_dma && !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { + using_dac = 1; + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); + } else if (!(err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))) { + using_dac = 0; + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); + } + + if (err) { + dev_err(&pdev->dev, "no usable DMA configuration\n"); + goto err_out_free_regions; + } + +#ifdef __BIG_ENDIAN + /* byte swap descriptors in hardware */ + { + u32 reg; + + pci_read_config_dword(pdev, PCI_DEV_REG2, ®); + reg |= PCI_REV_DESC; + pci_write_config_dword(pdev, PCI_DEV_REG2, reg); + } +#endif + + err = -ENOMEM; + /* space for skge@pci:0000:04:00.0 */ + hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:") + + strlen(pci_name(pdev)) + 1, GFP_KERNEL); + if (!hw) + goto err_out_free_regions; + + sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev)); + + hw->pdev = pdev; + spin_lock_init(&hw->hw_lock); + spin_lock_init(&hw->phy_lock); + tasklet_setup(&hw->phy_task, skge_extirq); + + hw->regs = ioremap(pci_resource_start(pdev, 0), 0x4000); + if (!hw->regs) { + dev_err(&pdev->dev, "cannot map device registers\n"); + goto err_out_free_hw; + } + + err = skge_reset(hw); + if (err) + goto err_out_iounmap; + + pr_info("%s addr 0x%llx irq %d chip %s rev %d\n", + DRV_VERSION, + (unsigned long long)pci_resource_start(pdev, 0), pdev->irq, + skge_board_name(hw), hw->chip_rev); + + dev = skge_devinit(hw, 0, using_dac); + if (!dev) { + err = -ENOMEM; + goto err_out_led_off; + } + + /* Some motherboards are broken and has zero in ROM. */ + if (!is_valid_ether_addr(dev->dev_addr)) + dev_warn(&pdev->dev, "bad (zero?) 
ethernet address in rom\n"); + + err = register_netdev(dev); + if (err) { + dev_err(&pdev->dev, "cannot register net device\n"); + goto err_out_free_netdev; + } + + skge_show_addr(dev); + + if (hw->ports > 1) { + dev1 = skge_devinit(hw, 1, using_dac); + if (!dev1) { + err = -ENOMEM; + goto err_out_unregister; + } + + err = register_netdev(dev1); + if (err) { + dev_err(&pdev->dev, "cannot register second net device\n"); + goto err_out_free_dev1; + } + + err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, + hw->irq_name, hw); + if (err) { + dev_err(&pdev->dev, "cannot assign irq %d\n", + pdev->irq); + goto err_out_unregister_dev1; + } + + skge_show_addr(dev1); + } + pci_set_drvdata(pdev, hw); + + return 0; + +err_out_unregister_dev1: + unregister_netdev(dev1); +err_out_free_dev1: + free_netdev(dev1); +err_out_unregister: + unregister_netdev(dev); +err_out_free_netdev: + free_netdev(dev); +err_out_led_off: + skge_write16(hw, B0_LED, LED_STAT_OFF); +err_out_iounmap: + iounmap(hw->regs); +err_out_free_hw: + kfree(hw); +err_out_free_regions: + pci_release_regions(pdev); +err_out_disable_pdev: + pci_disable_device(pdev); +err_out: + return err; +} + +static void skge_remove(struct pci_dev *pdev) +{ + struct skge_hw *hw = pci_get_drvdata(pdev); + struct net_device *dev0, *dev1; + + if (!hw) + return; + + dev1 = hw->dev[1]; + if (dev1) + unregister_netdev(dev1); + dev0 = hw->dev[0]; + unregister_netdev(dev0); + + tasklet_kill(&hw->phy_task); + + spin_lock_irq(&hw->hw_lock); + hw->intr_mask = 0; + + if (hw->ports > 1) { + skge_write32(hw, B0_IMSK, 0); + skge_read32(hw, B0_IMSK); + } + spin_unlock_irq(&hw->hw_lock); + + skge_write16(hw, B0_LED, LED_STAT_OFF); + skge_write8(hw, B0_CTST, CS_RST_SET); + + if (hw->ports > 1) + free_irq(pdev->irq, hw); + pci_release_regions(pdev); + pci_disable_device(pdev); + if (dev1) + free_netdev(dev1); + free_netdev(dev0); + + iounmap(hw->regs); + kfree(hw); +} + +#ifdef CONFIG_PM_SLEEP +static int skge_suspend(struct device *dev) +{ + struct skge_hw *hw = dev_get_drvdata(dev); + int i; + + if (!hw) + return 0; + + for (i = 0; i < hw->ports; i++) { + struct net_device *dev = hw->dev[i]; + struct skge_port *skge = netdev_priv(dev); + + if (netif_running(dev)) + skge_down(dev); + + if (skge->wol) + skge_wol_init(skge); + } + + skge_write32(hw, B0_IMSK, 0); + + return 0; +} + +static int skge_resume(struct device *dev) +{ + struct skge_hw *hw = dev_get_drvdata(dev); + int i, err; + + if (!hw) + return 0; + + err = skge_reset(hw); + if (err) + goto out; + + for (i = 0; i < hw->ports; i++) { + struct net_device *dev = hw->dev[i]; + + if (netif_running(dev)) { + err = skge_up(dev); + + if (err) { + netdev_err(dev, "could not up: %d\n", err); + dev_close(dev); + goto out; + } + } + } +out: + return err; +} + +static SIMPLE_DEV_PM_OPS(skge_pm_ops, skge_suspend, skge_resume); +#define SKGE_PM_OPS (&skge_pm_ops) + +#else + +#define SKGE_PM_OPS NULL +#endif /* CONFIG_PM_SLEEP */ + +static void skge_shutdown(struct pci_dev *pdev) +{ + struct skge_hw *hw = pci_get_drvdata(pdev); + int i; + + if (!hw) + return; + + for (i = 0; i < hw->ports; i++) { + struct net_device *dev = hw->dev[i]; + struct skge_port *skge = netdev_priv(dev); + + if (skge->wol) + skge_wol_init(skge); + } + + pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev)); + pci_set_power_state(pdev, PCI_D3hot); +} + +static struct pci_driver skge_driver = { + .name = DRV_NAME, + .id_table = skge_id_table, + .probe = skge_probe, + .remove = skge_remove, + .shutdown = skge_shutdown, + .driver.pm = SKGE_PM_OPS, +}; + 
+static const struct dmi_system_id skge_32bit_dma_boards[] = { + { + .ident = "Gigabyte nForce boards", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co"), + DMI_MATCH(DMI_BOARD_NAME, "nForce"), + }, + }, + { + .ident = "ASUS P5NSLI", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "P5NSLI") + }, + }, + { + .ident = "FUJITSU SIEMENS A8NE-FM", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "A8NE-FM") + }, + }, + {} +}; + +static int __init skge_init_module(void) +{ + if (dmi_check_system(skge_32bit_dma_boards)) + only_32bit_dma = 1; + skge_debug_init(); + return pci_register_driver(&skge_driver); +} + +static void __exit skge_cleanup_module(void) +{ + pci_unregister_driver(&skge_driver); + skge_debug_cleanup(); +} + +module_init(skge_init_module); +module_exit(skge_cleanup_module); diff --git a/drivers/net/ethernet/marvell/skge.h b/drivers/net/ethernet/marvell/skge.h new file mode 100644 index 000000000..6928abcec --- /dev/null +++ b/drivers/net/ethernet/marvell/skge.h @@ -0,0 +1,2579 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Definitions for the new Marvell Yukon / SysKonnect driver. + */ +#ifndef _SKGE_H +#define _SKGE_H +#include <linux/interrupt.h> + +/* PCI config registers */ +#define PCI_DEV_REG1 0x40 +#define PCI_PHY_COMA 0x8000000 +#define PCI_VIO 0x2000000 + +#define PCI_DEV_REG2 0x44 +#define PCI_VPD_ROM_SZ 7L<<14 /* VPD ROM size 0=256, 1=512, ... */ +#define PCI_REV_DESC 1<<2 /* Reverse Descriptor bytes */ + +enum csr_regs { + B0_RAP = 0x0000, + B0_CTST = 0x0004, + B0_LED = 0x0006, + B0_POWER_CTRL = 0x0007, + B0_ISRC = 0x0008, + B0_IMSK = 0x000c, + B0_HWE_ISRC = 0x0010, + B0_HWE_IMSK = 0x0014, + B0_SP_ISRC = 0x0018, + B0_XM1_IMSK = 0x0020, + B0_XM1_ISRC = 0x0028, + B0_XM1_PHY_ADDR = 0x0030, + B0_XM1_PHY_DATA = 0x0034, + B0_XM2_IMSK = 0x0040, + B0_XM2_ISRC = 0x0048, + B0_XM2_PHY_ADDR = 0x0050, + B0_XM2_PHY_DATA = 0x0054, + B0_R1_CSR = 0x0060, + B0_R2_CSR = 0x0064, + B0_XS1_CSR = 0x0068, + B0_XA1_CSR = 0x006c, + B0_XS2_CSR = 0x0070, + B0_XA2_CSR = 0x0074, + + B2_MAC_1 = 0x0100, + B2_MAC_2 = 0x0108, + B2_MAC_3 = 0x0110, + B2_CONN_TYP = 0x0118, + B2_PMD_TYP = 0x0119, + B2_MAC_CFG = 0x011a, + B2_CHIP_ID = 0x011b, + B2_E_0 = 0x011c, + B2_E_1 = 0x011d, + B2_E_2 = 0x011e, + B2_E_3 = 0x011f, + B2_FAR = 0x0120, + B2_FDP = 0x0124, + B2_LD_CTRL = 0x0128, + B2_LD_TEST = 0x0129, + B2_TI_INI = 0x0130, + B2_TI_VAL = 0x0134, + B2_TI_CTRL = 0x0138, + B2_TI_TEST = 0x0139, + B2_IRQM_INI = 0x0140, + B2_IRQM_VAL = 0x0144, + B2_IRQM_CTRL = 0x0148, + B2_IRQM_TEST = 0x0149, + B2_IRQM_MSK = 0x014c, + B2_IRQM_HWE_MSK = 0x0150, + B2_TST_CTRL1 = 0x0158, + B2_TST_CTRL2 = 0x0159, + B2_GP_IO = 0x015c, + B2_I2C_CTRL = 0x0160, + B2_I2C_DATA = 0x0164, + B2_I2C_IRQ = 0x0168, + B2_I2C_SW = 0x016c, + B2_BSC_INI = 0x0170, + B2_BSC_VAL = 0x0174, + B2_BSC_CTRL = 0x0178, + B2_BSC_STAT = 0x0179, + B2_BSC_TST = 0x017a, + + B3_RAM_ADDR = 0x0180, + B3_RAM_DATA_LO = 0x0184, + B3_RAM_DATA_HI = 0x0188, + B3_RI_WTO_R1 = 0x0190, + B3_RI_WTO_XA1 = 0x0191, + B3_RI_WTO_XS1 = 0x0192, + B3_RI_RTO_R1 = 0x0193, + B3_RI_RTO_XA1 = 0x0194, + B3_RI_RTO_XS1 = 0x0195, + B3_RI_WTO_R2 = 0x0196, + B3_RI_WTO_XA2 = 0x0197, + B3_RI_WTO_XS2 = 0x0198, + B3_RI_RTO_R2 = 0x0199, + B3_RI_RTO_XA2 = 0x019a, + B3_RI_RTO_XS2 = 0x019b, + B3_RI_TO_VAL = 0x019c, + B3_RI_CTRL = 0x01a0, + B3_RI_TEST = 0x01a2, + B3_MA_TOINI_RX1 = 0x01b0, + B3_MA_TOINI_RX2 = 0x01b1, + B3_MA_TOINI_TX1 = 0x01b2, + B3_MA_TOINI_TX2 = 0x01b3, + 
B3_MA_TOVAL_RX1 = 0x01b4, + B3_MA_TOVAL_RX2 = 0x01b5, + B3_MA_TOVAL_TX1 = 0x01b6, + B3_MA_TOVAL_TX2 = 0x01b7, + B3_MA_TO_CTRL = 0x01b8, + B3_MA_TO_TEST = 0x01ba, + B3_MA_RCINI_RX1 = 0x01c0, + B3_MA_RCINI_RX2 = 0x01c1, + B3_MA_RCINI_TX1 = 0x01c2, + B3_MA_RCINI_TX2 = 0x01c3, + B3_MA_RCVAL_RX1 = 0x01c4, + B3_MA_RCVAL_RX2 = 0x01c5, + B3_MA_RCVAL_TX1 = 0x01c6, + B3_MA_RCVAL_TX2 = 0x01c7, + B3_MA_RC_CTRL = 0x01c8, + B3_MA_RC_TEST = 0x01ca, + B3_PA_TOINI_RX1 = 0x01d0, + B3_PA_TOINI_RX2 = 0x01d4, + B3_PA_TOINI_TX1 = 0x01d8, + B3_PA_TOINI_TX2 = 0x01dc, + B3_PA_TOVAL_RX1 = 0x01e0, + B3_PA_TOVAL_RX2 = 0x01e4, + B3_PA_TOVAL_TX1 = 0x01e8, + B3_PA_TOVAL_TX2 = 0x01ec, + B3_PA_CTRL = 0x01f0, + B3_PA_TEST = 0x01f2, +}; + +/* B0_CTST 16 bit Control/Status register */ +enum { + CS_CLK_RUN_HOT = 1<<13,/* CLK_RUN hot m. (YUKON-Lite only) */ + CS_CLK_RUN_RST = 1<<12,/* CLK_RUN reset (YUKON-Lite only) */ + CS_CLK_RUN_ENA = 1<<11,/* CLK_RUN enable (YUKON-Lite only) */ + CS_VAUX_AVAIL = 1<<10,/* VAUX available (YUKON only) */ + CS_BUS_CLOCK = 1<<9, /* Bus Clock 0/1 = 33/66 MHz */ + CS_BUS_SLOT_SZ = 1<<8, /* Slot Size 0/1 = 32/64 bit slot */ + CS_ST_SW_IRQ = 1<<7, /* Set IRQ SW Request */ + CS_CL_SW_IRQ = 1<<6, /* Clear IRQ SW Request */ + CS_STOP_DONE = 1<<5, /* Stop Master is finished */ + CS_STOP_MAST = 1<<4, /* Command Bit to stop the master */ + CS_MRST_CLR = 1<<3, /* Clear Master reset */ + CS_MRST_SET = 1<<2, /* Set Master reset */ + CS_RST_CLR = 1<<1, /* Clear Software reset */ + CS_RST_SET = 1, /* Set Software reset */ + +/* B0_LED 8 Bit LED register */ +/* Bit 7.. 2: reserved */ + LED_STAT_ON = 1<<1, /* Status LED on */ + LED_STAT_OFF = 1, /* Status LED off */ + +/* B0_POWER_CTRL 8 Bit Power Control reg (YUKON only) */ + PC_VAUX_ENA = 1<<7, /* Switch VAUX Enable */ + PC_VAUX_DIS = 1<<6, /* Switch VAUX Disable */ + PC_VCC_ENA = 1<<5, /* Switch VCC Enable */ + PC_VCC_DIS = 1<<4, /* Switch VCC Disable */ + PC_VAUX_ON = 1<<3, /* Switch VAUX On */ + PC_VAUX_OFF = 1<<2, /* Switch VAUX Off */ + PC_VCC_ON = 1<<1, /* Switch VCC On */ + PC_VCC_OFF = 1<<0, /* Switch VCC Off */ +}; + +/* B2_IRQM_MSK 32 bit IRQ Moderation Mask */ +enum { + IS_ALL_MSK = 0xbffffffful, /* All Interrupt bits */ + IS_HW_ERR = 1<<31, /* Interrupt HW Error */ + /* Bit 30: reserved */ + IS_PA_TO_RX1 = 1<<29, /* Packet Arb Timeout Rx1 */ + IS_PA_TO_RX2 = 1<<28, /* Packet Arb Timeout Rx2 */ + IS_PA_TO_TX1 = 1<<27, /* Packet Arb Timeout Tx1 */ + IS_PA_TO_TX2 = 1<<26, /* Packet Arb Timeout Tx2 */ + IS_I2C_READY = 1<<25, /* IRQ on end of I2C Tx */ + IS_IRQ_SW = 1<<24, /* SW forced IRQ */ + IS_EXT_REG = 1<<23, /* IRQ from LM80 or PHY (GENESIS only) */ + /* IRQ from PHY (YUKON only) */ + IS_TIMINT = 1<<22, /* IRQ from Timer */ + IS_MAC1 = 1<<21, /* IRQ from MAC 1 */ + IS_LNK_SYNC_M1 = 1<<20, /* Link Sync Cnt wrap MAC 1 */ + IS_MAC2 = 1<<19, /* IRQ from MAC 2 */ + IS_LNK_SYNC_M2 = 1<<18, /* Link Sync Cnt wrap MAC 2 */ +/* Receive Queue 1 */ + IS_R1_B = 1<<17, /* Q_R1 End of Buffer */ + IS_R1_F = 1<<16, /* Q_R1 End of Frame */ + IS_R1_C = 1<<15, /* Q_R1 Encoding Error */ +/* Receive Queue 2 */ + IS_R2_B = 1<<14, /* Q_R2 End of Buffer */ + IS_R2_F = 1<<13, /* Q_R2 End of Frame */ + IS_R2_C = 1<<12, /* Q_R2 Encoding Error */ +/* Synchronous Transmit Queue 1 */ + IS_XS1_B = 1<<11, /* Q_XS1 End of Buffer */ + IS_XS1_F = 1<<10, /* Q_XS1 End of Frame */ + IS_XS1_C = 1<<9, /* Q_XS1 Encoding Error */ +/* Asynchronous Transmit Queue 1 */ + IS_XA1_B = 1<<8, /* Q_XA1 End of Buffer */ + IS_XA1_F = 1<<7, /* Q_XA1 End of Frame */ + IS_XA1_C = 1<<6, /* Q_XA1 
Encoding Error */ +/* Synchronous Transmit Queue 2 */ + IS_XS2_B = 1<<5, /* Q_XS2 End of Buffer */ + IS_XS2_F = 1<<4, /* Q_XS2 End of Frame */ + IS_XS2_C = 1<<3, /* Q_XS2 Encoding Error */ +/* Asynchronous Transmit Queue 2 */ + IS_XA2_B = 1<<2, /* Q_XA2 End of Buffer */ + IS_XA2_F = 1<<1, /* Q_XA2 End of Frame */ + IS_XA2_C = 1<<0, /* Q_XA2 Encoding Error */ + + IS_TO_PORT1 = IS_PA_TO_RX1 | IS_PA_TO_TX1, + IS_TO_PORT2 = IS_PA_TO_RX2 | IS_PA_TO_TX2, + + IS_PORT_1 = IS_XA1_F| IS_R1_F | IS_TO_PORT1 | IS_MAC1, + IS_PORT_2 = IS_XA2_F| IS_R2_F | IS_TO_PORT2 | IS_MAC2, +}; + + +/* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */ +enum { + IS_IRQ_TIST_OV = 1<<13, /* Time Stamp Timer Overflow (YUKON only) */ + IS_IRQ_SENSOR = 1<<12, /* IRQ from Sensor (YUKON only) */ + IS_IRQ_MST_ERR = 1<<11, /* IRQ master error detected */ + IS_IRQ_STAT = 1<<10, /* IRQ status exception */ + IS_NO_STAT_M1 = 1<<9, /* No Rx Status from MAC 1 */ + IS_NO_STAT_M2 = 1<<8, /* No Rx Status from MAC 2 */ + IS_NO_TIST_M1 = 1<<7, /* No Time Stamp from MAC 1 */ + IS_NO_TIST_M2 = 1<<6, /* No Time Stamp from MAC 2 */ + IS_RAM_RD_PAR = 1<<5, /* RAM Read Parity Error */ + IS_RAM_WR_PAR = 1<<4, /* RAM Write Parity Error */ + IS_M1_PAR_ERR = 1<<3, /* MAC 1 Parity Error */ + IS_M2_PAR_ERR = 1<<2, /* MAC 2 Parity Error */ + IS_R1_PAR_ERR = 1<<1, /* Queue R1 Parity Error */ + IS_R2_PAR_ERR = 1<<0, /* Queue R2 Parity Error */ + + IS_ERR_MSK = IS_IRQ_MST_ERR | IS_IRQ_STAT + | IS_RAM_RD_PAR | IS_RAM_WR_PAR + | IS_M1_PAR_ERR | IS_M2_PAR_ERR + | IS_R1_PAR_ERR | IS_R2_PAR_ERR, +}; + +/* B2_TST_CTRL1 8 bit Test Control Register 1 */ +enum { + TST_FRC_DPERR_MR = 1<<7, /* force DATAPERR on MST RD */ + TST_FRC_DPERR_MW = 1<<6, /* force DATAPERR on MST WR */ + TST_FRC_DPERR_TR = 1<<5, /* force DATAPERR on TRG RD */ + TST_FRC_DPERR_TW = 1<<4, /* force DATAPERR on TRG WR */ + TST_FRC_APERR_M = 1<<3, /* force ADDRPERR on MST */ + TST_FRC_APERR_T = 1<<2, /* force ADDRPERR on TRG */ + TST_CFG_WRITE_ON = 1<<1, /* Enable Config Reg WR */ + TST_CFG_WRITE_OFF= 1<<0, /* Disable Config Reg WR */ +}; + +/* B2_MAC_CFG 8 bit MAC Configuration / Chip Revision */ +enum { + CFG_CHIP_R_MSK = 0xf<<4, /* Bit 7.. 4: Chip Revision */ + /* Bit 3.. 2: reserved */ + CFG_DIS_M2_CLK = 1<<1, /* Disable Clock for 2nd MAC */ + CFG_SNG_MAC = 1<<0, /* MAC Config: 0=2 MACs / 1=1 MAC*/ +}; + +/* B2_CHIP_ID 8 bit Chip Identification Number */ +enum { + CHIP_ID_GENESIS = 0x0a, /* Chip ID for GENESIS */ + CHIP_ID_YUKON = 0xb0, /* Chip ID for YUKON */ + CHIP_ID_YUKON_LITE = 0xb1, /* Chip ID for YUKON-Lite (Rev. A1-A3) */ + CHIP_ID_YUKON_LP = 0xb2, /* Chip ID for YUKON-LP */ + CHIP_ID_YUKON_XL = 0xb3, /* Chip ID for YUKON-2 XL */ + CHIP_ID_YUKON_EC = 0xb6, /* Chip ID for YUKON-2 EC */ + CHIP_ID_YUKON_FE = 0xb7, /* Chip ID for YUKON-2 FE */ + + CHIP_REV_YU_LITE_A1 = 3, /* Chip Rev. for YUKON-Lite A1,A2 */ + CHIP_REV_YU_LITE_A3 = 7, /* Chip Rev. 
for YUKON-Lite A3 */ +}; + +/* B2_TI_CTRL 8 bit Timer control */ +/* B2_IRQM_CTRL 8 bit IRQ Moderation Timer Control */ +enum { + TIM_START = 1<<2, /* Start Timer */ + TIM_STOP = 1<<1, /* Stop Timer */ + TIM_CLR_IRQ = 1<<0, /* Clear Timer IRQ (!IRQM) */ +}; + +/* B2_TI_TEST 8 Bit Timer Test */ +/* B2_IRQM_TEST 8 bit IRQ Moderation Timer Test */ +/* B28_DPT_TST 8 bit Descriptor Poll Timer Test Reg */ +enum { + TIM_T_ON = 1<<2, /* Test mode on */ + TIM_T_OFF = 1<<1, /* Test mode off */ + TIM_T_STEP = 1<<0, /* Test step */ +}; + +/* B2_GP_IO 32 bit General Purpose I/O Register */ +enum { + GP_DIR_9 = 1<<25, /* IO_9 direct, 0=In/1=Out */ + GP_DIR_8 = 1<<24, /* IO_8 direct, 0=In/1=Out */ + GP_DIR_7 = 1<<23, /* IO_7 direct, 0=In/1=Out */ + GP_DIR_6 = 1<<22, /* IO_6 direct, 0=In/1=Out */ + GP_DIR_5 = 1<<21, /* IO_5 direct, 0=In/1=Out */ + GP_DIR_4 = 1<<20, /* IO_4 direct, 0=In/1=Out */ + GP_DIR_3 = 1<<19, /* IO_3 direct, 0=In/1=Out */ + GP_DIR_2 = 1<<18, /* IO_2 direct, 0=In/1=Out */ + GP_DIR_1 = 1<<17, /* IO_1 direct, 0=In/1=Out */ + GP_DIR_0 = 1<<16, /* IO_0 direct, 0=In/1=Out */ + + GP_IO_9 = 1<<9, /* IO_9 pin */ + GP_IO_8 = 1<<8, /* IO_8 pin */ + GP_IO_7 = 1<<7, /* IO_7 pin */ + GP_IO_6 = 1<<6, /* IO_6 pin */ + GP_IO_5 = 1<<5, /* IO_5 pin */ + GP_IO_4 = 1<<4, /* IO_4 pin */ + GP_IO_3 = 1<<3, /* IO_3 pin */ + GP_IO_2 = 1<<2, /* IO_2 pin */ + GP_IO_1 = 1<<1, /* IO_1 pin */ + GP_IO_0 = 1<<0, /* IO_0 pin */ +}; + +/* Descriptor Bit Definition */ +/* TxCtrl Transmit Buffer Control Field */ +/* RxCtrl Receive Buffer Control Field */ +enum { + BMU_OWN = 1<<31, /* OWN bit: 0=host/1=BMU */ + BMU_STF = 1<<30, /* Start of Frame */ + BMU_EOF = 1<<29, /* End of Frame */ + BMU_IRQ_EOB = 1<<28, /* Req "End of Buffer" IRQ */ + BMU_IRQ_EOF = 1<<27, /* Req "End of Frame" IRQ */ + /* TxCtrl specific bits */ + BMU_STFWD = 1<<26, /* (Tx) Store & Forward Frame */ + BMU_NO_FCS = 1<<25, /* (Tx) Disable MAC FCS (CRC) generation */ + BMU_SW = 1<<24, /* (Tx) 1 bit res. for SW use */ + /* RxCtrl specific bits */ + BMU_DEV_0 = 1<<26, /* (Rx) Transfer data to Dev0 */ + BMU_STAT_VAL = 1<<25, /* (Rx) Rx Status Valid */ + BMU_TIST_VAL = 1<<24, /* (Rx) Rx TimeStamp Valid */ + /* Bit 23..16: BMU Check Opcodes */ + BMU_CHECK = 0x55<<16, /* Default BMU check */ + BMU_TCP_CHECK = 0x56<<16, /* Descr with TCP ext */ + BMU_UDP_CHECK = 0x57<<16, /* Descr with UDP ext (YUKON only) */ + BMU_BBC = 0xffffL, /* Bit 15.. 0: Buffer Byte Counter */ +}; + +/* B2_BSC_CTRL 8 bit Blink Source Counter Control */ +enum { + BSC_START = 1<<1, /* Start Blink Source Counter */ + BSC_STOP = 1<<0, /* Stop Blink Source Counter */ +}; + +/* B2_BSC_STAT 8 bit Blink Source Counter Status */ +enum { + BSC_SRC = 1<<0, /* Blink Source, 0=Off / 1=On */ +}; + +/* B2_BSC_TST 16 bit Blink Source Counter Test Reg */ +enum { + BSC_T_ON = 1<<2, /* Test mode on */ + BSC_T_OFF = 1<<1, /* Test mode off */ + BSC_T_STEP = 1<<0, /* Test step */ +}; + +/* B3_RAM_ADDR 32 bit RAM Address, to read or write */ + /* Bit 31..19: reserved */ +#define RAM_ADR_RAN 0x0007ffffL /* Bit 18.. 
0: RAM Address Range */ +/* RAM Interface Registers */ + +/* B3_RI_CTRL 16 bit RAM Iface Control Register */ +enum { + RI_CLR_RD_PERR = 1<<9, /* Clear IRQ RAM Read Parity Err */ + RI_CLR_WR_PERR = 1<<8, /* Clear IRQ RAM Write Parity Err*/ + + RI_RST_CLR = 1<<1, /* Clear RAM Interface Reset */ + RI_RST_SET = 1<<0, /* Set RAM Interface Reset */ +}; + +/* MAC Arbiter Registers */ +/* B3_MA_TO_CTRL 16 bit MAC Arbiter Timeout Ctrl Reg */ +enum { + MA_FOE_ON = 1<<3, /* XMAC Fast Output Enable ON */ + MA_FOE_OFF = 1<<2, /* XMAC Fast Output Enable OFF */ + MA_RST_CLR = 1<<1, /* Clear MAC Arbiter Reset */ + MA_RST_SET = 1<<0, /* Set MAC Arbiter Reset */ + +}; + +/* Timeout values */ +#define SK_MAC_TO_53 72 /* MAC arbiter timeout */ +#define SK_PKT_TO_53 0x2000 /* Packet arbiter timeout */ +#define SK_PKT_TO_MAX 0xffff /* Maximum value */ +#define SK_RI_TO_53 36 /* RAM interface timeout */ + +/* Packet Arbiter Registers */ +/* B3_PA_CTRL 16 bit Packet Arbiter Ctrl Register */ +enum { + PA_CLR_TO_TX2 = 1<<13,/* Clear IRQ Packet Timeout TX2 */ + PA_CLR_TO_TX1 = 1<<12,/* Clear IRQ Packet Timeout TX1 */ + PA_CLR_TO_RX2 = 1<<11,/* Clear IRQ Packet Timeout RX2 */ + PA_CLR_TO_RX1 = 1<<10,/* Clear IRQ Packet Timeout RX1 */ + PA_ENA_TO_TX2 = 1<<9, /* Enable Timeout Timer TX2 */ + PA_DIS_TO_TX2 = 1<<8, /* Disable Timeout Timer TX2 */ + PA_ENA_TO_TX1 = 1<<7, /* Enable Timeout Timer TX1 */ + PA_DIS_TO_TX1 = 1<<6, /* Disable Timeout Timer TX1 */ + PA_ENA_TO_RX2 = 1<<5, /* Enable Timeout Timer RX2 */ + PA_DIS_TO_RX2 = 1<<4, /* Disable Timeout Timer RX2 */ + PA_ENA_TO_RX1 = 1<<3, /* Enable Timeout Timer RX1 */ + PA_DIS_TO_RX1 = 1<<2, /* Disable Timeout Timer RX1 */ + PA_RST_CLR = 1<<1, /* Clear MAC Arbiter Reset */ + PA_RST_SET = 1<<0, /* Set MAC Arbiter Reset */ +}; + +#define PA_ENA_TO_ALL (PA_ENA_TO_RX1 | PA_ENA_TO_RX2 |\ + PA_ENA_TO_TX1 | PA_ENA_TO_TX2) + + +/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */ +/* TXA_ITI_INI 32 bit Tx Arb Interval Timer Init Val */ +/* TXA_ITI_VAL 32 bit Tx Arb Interval Timer Value */ +/* TXA_LIM_INI 32 bit Tx Arb Limit Counter Init Val */ +/* TXA_LIM_VAL 32 bit Tx Arb Limit Counter Value */ + +#define TXA_MAX_VAL 0x00ffffffUL /* Bit 23.. 
0: Max TXA Timer/Cnt Val */ + +/* TXA_CTRL 8 bit Tx Arbiter Control Register */ +enum { + TXA_ENA_FSYNC = 1<<7, /* Enable force of sync Tx queue */ + TXA_DIS_FSYNC = 1<<6, /* Disable force of sync Tx queue */ + TXA_ENA_ALLOC = 1<<5, /* Enable alloc of free bandwidth */ + TXA_DIS_ALLOC = 1<<4, /* Disable alloc of free bandwidth */ + TXA_START_RC = 1<<3, /* Start sync Rate Control */ + TXA_STOP_RC = 1<<2, /* Stop sync Rate Control */ + TXA_ENA_ARB = 1<<1, /* Enable Tx Arbiter */ + TXA_DIS_ARB = 1<<0, /* Disable Tx Arbiter */ +}; + +/* + * Bank 4 - 5 + */ +/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */ +enum { + TXA_ITI_INI = 0x0200,/* 32 bit Tx Arb Interval Timer Init Val*/ + TXA_ITI_VAL = 0x0204,/* 32 bit Tx Arb Interval Timer Value */ + TXA_LIM_INI = 0x0208,/* 32 bit Tx Arb Limit Counter Init Val */ + TXA_LIM_VAL = 0x020c,/* 32 bit Tx Arb Limit Counter Value */ + TXA_CTRL = 0x0210,/* 8 bit Tx Arbiter Control Register */ + TXA_TEST = 0x0211,/* 8 bit Tx Arbiter Test Register */ + TXA_STAT = 0x0212,/* 8 bit Tx Arbiter Status Register */ +}; + + +enum { + B6_EXT_REG = 0x0300,/* External registers (GENESIS only) */ + B7_CFG_SPC = 0x0380,/* copy of the Configuration register */ + B8_RQ1_REGS = 0x0400,/* Receive Queue 1 */ + B8_RQ2_REGS = 0x0480,/* Receive Queue 2 */ + B8_TS1_REGS = 0x0600,/* Transmit sync queue 1 */ + B8_TA1_REGS = 0x0680,/* Transmit async queue 1 */ + B8_TS2_REGS = 0x0700,/* Transmit sync queue 2 */ + B8_TA2_REGS = 0x0780,/* Transmit sync queue 2 */ + B16_RAM_REGS = 0x0800,/* RAM Buffer Registers */ +}; + +/* Queue Register Offsets, use Q_ADDR() to access */ +enum { + B8_Q_REGS = 0x0400, /* base of Queue registers */ + Q_D = 0x00, /* 8*32 bit Current Descriptor */ + Q_DA_L = 0x20, /* 32 bit Current Descriptor Address Low dWord */ + Q_DA_H = 0x24, /* 32 bit Current Descriptor Address High dWord */ + Q_AC_L = 0x28, /* 32 bit Current Address Counter Low dWord */ + Q_AC_H = 0x2c, /* 32 bit Current Address Counter High dWord */ + Q_BC = 0x30, /* 32 bit Current Byte Counter */ + Q_CSR = 0x34, /* 32 bit BMU Control/Status Register */ + Q_F = 0x38, /* 32 bit Flag Register */ + Q_T1 = 0x3c, /* 32 bit Test Register 1 */ + Q_T1_TR = 0x3c, /* 8 bit Test Register 1 Transfer SM */ + Q_T1_WR = 0x3d, /* 8 bit Test Register 1 Write Descriptor SM */ + Q_T1_RD = 0x3e, /* 8 bit Test Register 1 Read Descriptor SM */ + Q_T1_SV = 0x3f, /* 8 bit Test Register 1 Supervisor SM */ + Q_T2 = 0x40, /* 32 bit Test Register 2 */ + Q_T3 = 0x44, /* 32 bit Test Register 3 */ + +}; +#define Q_ADDR(reg, offs) (B8_Q_REGS + (reg) + (offs)) + +/* RAM Buffer Register Offsets */ +enum { + + RB_START= 0x00,/* 32 bit RAM Buffer Start Address */ + RB_END = 0x04,/* 32 bit RAM Buffer End Address */ + RB_WP = 0x08,/* 32 bit RAM Buffer Write Pointer */ + RB_RP = 0x0c,/* 32 bit RAM Buffer Read Pointer */ + RB_RX_UTPP= 0x10,/* 32 bit Rx Upper Threshold, Pause Packet */ + RB_RX_LTPP= 0x14,/* 32 bit Rx Lower Threshold, Pause Packet */ + RB_RX_UTHP= 0x18,/* 32 bit Rx Upper Threshold, High Prio */ + RB_RX_LTHP= 0x1c,/* 32 bit Rx Lower Threshold, High Prio */ + /* 0x10 - 0x1f: reserved at Tx RAM Buffer Registers */ + RB_PC = 0x20,/* 32 bit RAM Buffer Packet Counter */ + RB_LEV = 0x24,/* 32 bit RAM Buffer Level Register */ + RB_CTRL = 0x28,/* 32 bit RAM Buffer Control Register */ + RB_TST1 = 0x29,/* 8 bit RAM Buffer Test Register 1 */ + RB_TST2 = 0x2a,/* 8 bit RAM Buffer Test Register 2 */ +}; + +/* Receive and Transmit Queues */ +enum { + Q_R1 = 0x0000, /* Receive Queue 1 */ + Q_R2 = 0x0080, /* Receive Queue 2 
*/ + Q_XS1 = 0x0200, /* Synchronous Transmit Queue 1 */ + Q_XA1 = 0x0280, /* Asynchronous Transmit Queue 1 */ + Q_XS2 = 0x0300, /* Synchronous Transmit Queue 2 */ + Q_XA2 = 0x0380, /* Asynchronous Transmit Queue 2 */ +}; + +/* Different MAC Types */ +enum { + SK_MAC_XMAC = 0, /* Xaqti XMAC II */ + SK_MAC_GMAC = 1, /* Marvell GMAC */ +}; + +/* Different PHY Types */ +enum { + SK_PHY_XMAC = 0,/* integrated in XMAC II */ + SK_PHY_BCOM = 1,/* Broadcom BCM5400 */ + SK_PHY_LONE = 2,/* Level One LXT1000 [not supported]*/ + SK_PHY_NAT = 3,/* National DP83891 [not supported] */ + SK_PHY_MARV_COPPER= 4,/* Marvell 88E1011S */ + SK_PHY_MARV_FIBER = 5,/* Marvell 88E1011S working on fiber */ +}; + +/* PHY addresses (bits 12..8 of PHY address reg) */ +enum { + PHY_ADDR_XMAC = 0<<8, + PHY_ADDR_BCOM = 1<<8, + +/* GPHY address (bits 15..11 of SMI control reg) */ + PHY_ADDR_MARV = 0, +}; + +#define RB_ADDR(offs, queue) ((u16)B16_RAM_REGS + (u16)(queue) + (offs)) + +/* Receive MAC FIFO, Receive LED, and Link_Sync regs (GENESIS only) */ +enum { + RX_MFF_EA = 0x0c00,/* 32 bit Receive MAC FIFO End Address */ + RX_MFF_WP = 0x0c04,/* 32 bit Receive MAC FIFO Write Pointer */ + + RX_MFF_RP = 0x0c0c,/* 32 bit Receive MAC FIFO Read Pointer */ + RX_MFF_PC = 0x0c10,/* 32 bit Receive MAC FIFO Packet Cnt */ + RX_MFF_LEV = 0x0c14,/* 32 bit Receive MAC FIFO Level */ + RX_MFF_CTRL1 = 0x0c18,/* 16 bit Receive MAC FIFO Control Reg 1*/ + RX_MFF_STAT_TO = 0x0c1a,/* 8 bit Receive MAC Status Timeout */ + RX_MFF_TIST_TO = 0x0c1b,/* 8 bit Receive MAC Time Stamp Timeout */ + RX_MFF_CTRL2 = 0x0c1c,/* 8 bit Receive MAC FIFO Control Reg 2*/ + RX_MFF_TST1 = 0x0c1d,/* 8 bit Receive MAC FIFO Test Reg 1 */ + RX_MFF_TST2 = 0x0c1e,/* 8 bit Receive MAC FIFO Test Reg 2 */ + + RX_LED_INI = 0x0c20,/* 32 bit Receive LED Cnt Init Value */ + RX_LED_VAL = 0x0c24,/* 32 bit Receive LED Cnt Current Value */ + RX_LED_CTRL = 0x0c28,/* 8 bit Receive LED Cnt Control Reg */ + RX_LED_TST = 0x0c29,/* 8 bit Receive LED Cnt Test Register */ + + LNK_SYNC_INI = 0x0c30,/* 32 bit Link Sync Cnt Init Value */ + LNK_SYNC_VAL = 0x0c34,/* 32 bit Link Sync Cnt Current Value */ + LNK_SYNC_CTRL = 0x0c38,/* 8 bit Link Sync Cnt Control Register */ + LNK_SYNC_TST = 0x0c39,/* 8 bit Link Sync Cnt Test Register */ + LNK_LED_REG = 0x0c3c,/* 8 bit Link LED Register */ +}; + +/* Receive and Transmit MAC FIFO Registers (GENESIS only) */ +/* RX_MFF_CTRL1 16 bit Receive MAC FIFO Control Reg 1 */ +enum { + MFF_ENA_RDY_PAT = 1<<13, /* Enable Ready Patch */ + MFF_DIS_RDY_PAT = 1<<12, /* Disable Ready Patch */ + MFF_ENA_TIM_PAT = 1<<11, /* Enable Timing Patch */ + MFF_DIS_TIM_PAT = 1<<10, /* Disable Timing Patch */ + MFF_ENA_ALM_FUL = 1<<9, /* Enable AlmostFull Sign */ + MFF_DIS_ALM_FUL = 1<<8, /* Disable AlmostFull Sign */ + MFF_ENA_PAUSE = 1<<7, /* Enable Pause Signaling */ + MFF_DIS_PAUSE = 1<<6, /* Disable Pause Signaling */ + MFF_ENA_FLUSH = 1<<5, /* Enable Frame Flushing */ + MFF_DIS_FLUSH = 1<<4, /* Disable Frame Flushing */ + MFF_ENA_TIST = 1<<3, /* Enable Time Stamp Gener */ + MFF_DIS_TIST = 1<<2, /* Disable Time Stamp Gener */ + MFF_CLR_INTIST = 1<<1, /* Clear IRQ No Time Stamp */ + MFF_CLR_INSTAT = 1<<0, /* Clear IRQ No Status */ + MFF_RX_CTRL_DEF = MFF_ENA_TIM_PAT, +}; + +/* TX_MFF_CTRL1 16 bit Transmit MAC FIFO Control Reg 1 */ +enum { + MFF_CLR_PERR = 1<<15, /* Clear Parity Error IRQ */ + + MFF_ENA_PKT_REC = 1<<13, /* Enable Packet Recovery */ + MFF_DIS_PKT_REC = 1<<12, /* Disable Packet Recovery */ + + MFF_ENA_W4E = 1<<7, /* Enable Wait for Empty */ + MFF_DIS_W4E = 1<<6, /* 
Disable Wait for Empty */ + + MFF_ENA_LOOPB = 1<<3, /* Enable Loopback */ + MFF_DIS_LOOPB = 1<<2, /* Disable Loopback */ + MFF_CLR_MAC_RST = 1<<1, /* Clear XMAC Reset */ + MFF_SET_MAC_RST = 1<<0, /* Set XMAC Reset */ + + MFF_TX_CTRL_DEF = MFF_ENA_PKT_REC | (u16) MFF_ENA_TIM_PAT | MFF_ENA_FLUSH, +}; + + +/* RX_MFF_TST2 8 bit Receive MAC FIFO Test Register 2 */ +/* TX_MFF_TST2 8 bit Transmit MAC FIFO Test Register 2 */ +enum { + MFF_WSP_T_ON = 1<<6, /* Tx: Write Shadow Ptr TestOn */ + MFF_WSP_T_OFF = 1<<5, /* Tx: Write Shadow Ptr TstOff */ + MFF_WSP_INC = 1<<4, /* Tx: Write Shadow Ptr Increment */ + MFF_PC_DEC = 1<<3, /* Packet Counter Decrement */ + MFF_PC_T_ON = 1<<2, /* Packet Counter Test On */ + MFF_PC_T_OFF = 1<<1, /* Packet Counter Test Off */ + MFF_PC_INC = 1<<0, /* Packet Counter Increment */ +}; + +/* RX_MFF_TST1 8 bit Receive MAC FIFO Test Register 1 */ +/* TX_MFF_TST1 8 bit Transmit MAC FIFO Test Register 1 */ +enum { + MFF_WP_T_ON = 1<<6, /* Write Pointer Test On */ + MFF_WP_T_OFF = 1<<5, /* Write Pointer Test Off */ + MFF_WP_INC = 1<<4, /* Write Pointer Increm */ + + MFF_RP_T_ON = 1<<2, /* Read Pointer Test On */ + MFF_RP_T_OFF = 1<<1, /* Read Pointer Test Off */ + MFF_RP_DEC = 1<<0, /* Read Pointer Decrement */ +}; + +/* RX_MFF_CTRL2 8 bit Receive MAC FIFO Control Reg 2 */ +/* TX_MFF_CTRL2 8 bit Transmit MAC FIFO Control Reg 2 */ +enum { + MFF_ENA_OP_MD = 1<<3, /* Enable Operation Mode */ + MFF_DIS_OP_MD = 1<<2, /* Disable Operation Mode */ + MFF_RST_CLR = 1<<1, /* Clear MAC FIFO Reset */ + MFF_RST_SET = 1<<0, /* Set MAC FIFO Reset */ +}; + + +/* Link LED Counter Registers (GENESIS only) */ + +/* RX_LED_CTRL 8 bit Receive LED Cnt Control Reg */ +/* TX_LED_CTRL 8 bit Transmit LED Cnt Control Reg */ +/* LNK_SYNC_CTRL 8 bit Link Sync Cnt Control Register */ +enum { + LED_START = 1<<2, /* Start Timer */ + LED_STOP = 1<<1, /* Stop Timer */ + LED_STATE = 1<<0, /* Rx/Tx: LED State, 1=LED on */ +}; + +/* RX_LED_TST 8 bit Receive LED Cnt Test Register */ +/* TX_LED_TST 8 bit Transmit LED Cnt Test Register */ +/* LNK_SYNC_TST 8 bit Link Sync Cnt Test Register */ +enum { + LED_T_ON = 1<<2, /* LED Counter Test mode On */ + LED_T_OFF = 1<<1, /* LED Counter Test mode Off */ + LED_T_STEP = 1<<0, /* LED Counter Step */ +}; + +/* LNK_LED_REG 8 bit Link LED Register */ +enum { + LED_BLK_ON = 1<<5, /* Link LED Blinking On */ + LED_BLK_OFF = 1<<4, /* Link LED Blinking Off */ + LED_SYNC_ON = 1<<3, /* Use Sync Wire to switch LED */ + LED_SYNC_OFF = 1<<2, /* Disable Sync Wire Input */ + LED_REG_ON = 1<<1, /* switch LED on */ + LED_REG_OFF = 1<<0, /* switch LED off */ +}; + +/* Receive GMAC FIFO (YUKON) */ +enum { + RX_GMF_EA = 0x0c40,/* 32 bit Rx GMAC FIFO End Address */ + RX_GMF_AF_THR = 0x0c44,/* 32 bit Rx GMAC FIFO Almost Full Thresh. 
*/ + RX_GMF_CTRL_T = 0x0c48,/* 32 bit Rx GMAC FIFO Control/Test */ + RX_GMF_FL_MSK = 0x0c4c,/* 32 bit Rx GMAC FIFO Flush Mask */ + RX_GMF_FL_THR = 0x0c50,/* 32 bit Rx GMAC FIFO Flush Threshold */ + RX_GMF_WP = 0x0c60,/* 32 bit Rx GMAC FIFO Write Pointer */ + RX_GMF_WLEV = 0x0c68,/* 32 bit Rx GMAC FIFO Write Level */ + RX_GMF_RP = 0x0c70,/* 32 bit Rx GMAC FIFO Read Pointer */ + RX_GMF_RLEV = 0x0c78,/* 32 bit Rx GMAC FIFO Read Level */ +}; + + +/* TXA_TEST 8 bit Tx Arbiter Test Register */ +enum { + TXA_INT_T_ON = 1<<5, /* Tx Arb Interval Timer Test On */ + TXA_INT_T_OFF = 1<<4, /* Tx Arb Interval Timer Test Off */ + TXA_INT_T_STEP = 1<<3, /* Tx Arb Interval Timer Step */ + TXA_LIM_T_ON = 1<<2, /* Tx Arb Limit Timer Test On */ + TXA_LIM_T_OFF = 1<<1, /* Tx Arb Limit Timer Test Off */ + TXA_LIM_T_STEP = 1<<0, /* Tx Arb Limit Timer Step */ +}; + +/* TXA_STAT 8 bit Tx Arbiter Status Register */ +enum { + TXA_PRIO_XS = 1<<0, /* sync queue has prio to send */ +}; + + +/* Q_BC 32 bit Current Byte Counter */ + +/* BMU Control Status Registers */ +/* B0_R1_CSR 32 bit BMU Ctrl/Stat Rx Queue 1 */ +/* B0_R2_CSR 32 bit BMU Ctrl/Stat Rx Queue 2 */ +/* B0_XA1_CSR 32 bit BMU Ctrl/Stat Sync Tx Queue 1 */ +/* B0_XS1_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 1 */ +/* B0_XA2_CSR 32 bit BMU Ctrl/Stat Sync Tx Queue 2 */ +/* B0_XS2_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 2 */ +/* Q_CSR 32 bit BMU Control/Status Register */ + +enum { + CSR_SV_IDLE = 1<<24, /* BMU SM Idle */ + + CSR_DESC_CLR = 1<<21, /* Clear Reset for Descr */ + CSR_DESC_SET = 1<<20, /* Set Reset for Descr */ + CSR_FIFO_CLR = 1<<19, /* Clear Reset for FIFO */ + CSR_FIFO_SET = 1<<18, /* Set Reset for FIFO */ + CSR_HPI_RUN = 1<<17, /* Release HPI SM */ + CSR_HPI_RST = 1<<16, /* Reset HPI SM to Idle */ + CSR_SV_RUN = 1<<15, /* Release Supervisor SM */ + CSR_SV_RST = 1<<14, /* Reset Supervisor SM */ + CSR_DREAD_RUN = 1<<13, /* Release Descr Read SM */ + CSR_DREAD_RST = 1<<12, /* Reset Descr Read SM */ + CSR_DWRITE_RUN = 1<<11, /* Release Descr Write SM */ + CSR_DWRITE_RST = 1<<10, /* Reset Descr Write SM */ + CSR_TRANS_RUN = 1<<9, /* Release Transfer SM */ + CSR_TRANS_RST = 1<<8, /* Reset Transfer SM */ + CSR_ENA_POL = 1<<7, /* Enable Descr Polling */ + CSR_DIS_POL = 1<<6, /* Disable Descr Polling */ + CSR_STOP = 1<<5, /* Stop Rx/Tx Queue */ + CSR_START = 1<<4, /* Start Rx/Tx Queue */ + CSR_IRQ_CL_P = 1<<3, /* (Rx) Clear Parity IRQ */ + CSR_IRQ_CL_B = 1<<2, /* Clear EOB IRQ */ + CSR_IRQ_CL_F = 1<<1, /* Clear EOF IRQ */ + CSR_IRQ_CL_C = 1<<0, /* Clear ERR IRQ */ +}; + +#define CSR_SET_RESET (CSR_DESC_SET | CSR_FIFO_SET | CSR_HPI_RST |\ + CSR_SV_RST | CSR_DREAD_RST | CSR_DWRITE_RST |\ + CSR_TRANS_RST) +#define CSR_CLR_RESET (CSR_DESC_CLR | CSR_FIFO_CLR | CSR_HPI_RUN |\ + CSR_SV_RUN | CSR_DREAD_RUN | CSR_DWRITE_RUN |\ + CSR_TRANS_RUN) + +/* Q_F 32 bit Flag Register */ +enum { + F_ALM_FULL = 1<<27, /* Rx FIFO: almost full */ + F_EMPTY = 1<<27, /* Tx FIFO: empty flag */ + F_FIFO_EOF = 1<<26, /* Tag (EOF Flag) bit in FIFO */ + F_WM_REACHED = 1<<25, /* Watermark reached */ + + F_FIFO_LEVEL = 0x1fL<<16, /* Bit 23..16: # of Qwords in FIFO */ + F_WATER_MARK = 0x0007ffL, /* Bit 10.. 
0: Watermark */ +}; + +/* RAM Buffer Register Offsets, use RB_ADDR(Queue, Offs) to access */ +/* RB_START 32 bit RAM Buffer Start Address */ +/* RB_END 32 bit RAM Buffer End Address */ +/* RB_WP 32 bit RAM Buffer Write Pointer */ +/* RB_RP 32 bit RAM Buffer Read Pointer */ +/* RB_RX_UTPP 32 bit Rx Upper Threshold, Pause Pack */ +/* RB_RX_LTPP 32 bit Rx Lower Threshold, Pause Pack */ +/* RB_RX_UTHP 32 bit Rx Upper Threshold, High Prio */ +/* RB_RX_LTHP 32 bit Rx Lower Threshold, High Prio */ +/* RB_PC 32 bit RAM Buffer Packet Counter */ +/* RB_LEV 32 bit RAM Buffer Level Register */ + +#define RB_MSK 0x0007ffff /* Bit 18.. 0: RAM Buffer Pointer Bits */ +/* RB_TST2 8 bit RAM Buffer Test Register 2 */ +/* RB_TST1 8 bit RAM Buffer Test Register 1 */ + +/* RB_CTRL 8 bit RAM Buffer Control Register */ +enum { + RB_ENA_STFWD = 1<<5, /* Enable Store & Forward */ + RB_DIS_STFWD = 1<<4, /* Disable Store & Forward */ + RB_ENA_OP_MD = 1<<3, /* Enable Operation Mode */ + RB_DIS_OP_MD = 1<<2, /* Disable Operation Mode */ + RB_RST_CLR = 1<<1, /* Clear RAM Buf STM Reset */ + RB_RST_SET = 1<<0, /* Set RAM Buf STM Reset */ +}; + +/* Transmit MAC FIFO and Transmit LED Registers (GENESIS only), */ +enum { + TX_MFF_EA = 0x0d00,/* 32 bit Transmit MAC FIFO End Address */ + TX_MFF_WP = 0x0d04,/* 32 bit Transmit MAC FIFO WR Pointer */ + TX_MFF_WSP = 0x0d08,/* 32 bit Transmit MAC FIFO WR Shadow Ptr */ + TX_MFF_RP = 0x0d0c,/* 32 bit Transmit MAC FIFO RD Pointer */ + TX_MFF_PC = 0x0d10,/* 32 bit Transmit MAC FIFO Packet Cnt */ + TX_MFF_LEV = 0x0d14,/* 32 bit Transmit MAC FIFO Level */ + TX_MFF_CTRL1 = 0x0d18,/* 16 bit Transmit MAC FIFO Ctrl Reg 1 */ + TX_MFF_WAF = 0x0d1a,/* 8 bit Transmit MAC Wait after flush */ + + TX_MFF_CTRL2 = 0x0d1c,/* 8 bit Transmit MAC FIFO Ctrl Reg 2 */ + TX_MFF_TST1 = 0x0d1d,/* 8 bit Transmit MAC FIFO Test Reg 1 */ + TX_MFF_TST2 = 0x0d1e,/* 8 bit Transmit MAC FIFO Test Reg 2 */ + + TX_LED_INI = 0x0d20,/* 32 bit Transmit LED Cnt Init Value */ + TX_LED_VAL = 0x0d24,/* 32 bit Transmit LED Cnt Current Val */ + TX_LED_CTRL = 0x0d28,/* 8 bit Transmit LED Cnt Control Reg */ + TX_LED_TST = 0x0d29,/* 8 bit Transmit LED Cnt Test Reg */ +}; + +/* Counter and Timer constants, for a host clock of 62.5 MHz */ +#define SK_XMIT_DUR 0x002faf08UL /* 50 ms */ +#define SK_BLK_DUR 0x01dcd650UL /* 500 ms */ + +#define SK_DPOLL_DEF 0x00ee6b28UL /* 250 ms at 62.5 MHz */ + +#define SK_DPOLL_MAX 0x00ffffffUL /* 268 ms at 62.5 MHz */ + /* 215 ms at 78.12 MHz */ + +#define SK_FACT_62 100 /* is given in percent */ +#define SK_FACT_53 85 /* on GENESIS: 53.12 MHz */ +#define SK_FACT_78 125 /* on YUKON: 78.12 MHz */ + + +/* Transmit GMAC FIFO (YUKON only) */ +enum { + TX_GMF_EA = 0x0d40,/* 32 bit Tx GMAC FIFO End Address */ + TX_GMF_AE_THR = 0x0d44,/* 32 bit Tx GMAC FIFO Almost Empty Thresh.*/ + TX_GMF_CTRL_T = 0x0d48,/* 32 bit Tx GMAC FIFO Control/Test */ + + TX_GMF_WP = 0x0d60,/* 32 bit Tx GMAC FIFO Write Pointer */ + TX_GMF_WSP = 0x0d64,/* 32 bit Tx GMAC FIFO Write Shadow Ptr. 
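(Aside on the counter and timer constants just above: they are raw cycle counts at the quoted host clock, and the SK_FACT_* percentages rescale them for the 53.12 MHz GENESIS and 78.12 MHz YUKON clocks. A minimal, self-contained check of that arithmetic, illustrative only and not part of the patch:)

#include <assert.h>

int main(void)
{
        /* 62,500,000 cycles/s * 50 ms  = 3,125,000  = 0x002faf08 (SK_XMIT_DUR)  */
        assert(62500000UL / 1000 * 50  == 0x002faf08UL);
        /* 62,500,000 cycles/s * 500 ms = 31,250,000 = 0x01dcd650 (SK_BLK_DUR)   */
        assert(62500000UL / 1000 * 500 == 0x01dcd650UL);
        /* 62,500,000 cycles/s * 250 ms = 15,625,000 = 0x00ee6b28 (SK_DPOLL_DEF) */
        assert(62500000UL / 1000 * 250 == 0x00ee6b28UL);
        /* 0x00ffffff cycles is roughly 268 ms at 62.5 MHz and 215 ms at 78.12 MHz */
        return 0;
}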
*/ + TX_GMF_WLEV = 0x0d68,/* 32 bit Tx GMAC FIFO Write Level */ + + TX_GMF_RP = 0x0d70,/* 32 bit Tx GMAC FIFO Read Pointer */ + TX_GMF_RSTP = 0x0d74,/* 32 bit Tx GMAC FIFO Restart Pointer */ + TX_GMF_RLEV = 0x0d78,/* 32 bit Tx GMAC FIFO Read Level */ + + /* Descriptor Poll Timer Registers */ + B28_DPT_INI = 0x0e00,/* 24 bit Descriptor Poll Timer Init Val */ + B28_DPT_VAL = 0x0e04,/* 24 bit Descriptor Poll Timer Curr Val */ + B28_DPT_CTRL = 0x0e08,/* 8 bit Descriptor Poll Timer Ctrl Reg */ + + B28_DPT_TST = 0x0e0a,/* 8 bit Descriptor Poll Timer Test Reg */ + + /* Time Stamp Timer Registers (YUKON only) */ + GMAC_TI_ST_VAL = 0x0e14,/* 32 bit Time Stamp Timer Curr Val */ + GMAC_TI_ST_CTRL = 0x0e18,/* 8 bit Time Stamp Timer Ctrl Reg */ + GMAC_TI_ST_TST = 0x0e1a,/* 8 bit Time Stamp Timer Test Reg */ +}; + + +enum { + LINKLED_OFF = 0x01, + LINKLED_ON = 0x02, + LINKLED_LINKSYNC_OFF = 0x04, + LINKLED_LINKSYNC_ON = 0x08, + LINKLED_BLINK_OFF = 0x10, + LINKLED_BLINK_ON = 0x20, +}; + +/* GMAC and GPHY Control Registers (YUKON only) */ +enum { + GMAC_CTRL = 0x0f00,/* 32 bit GMAC Control Reg */ + GPHY_CTRL = 0x0f04,/* 32 bit GPHY Control Reg */ + GMAC_IRQ_SRC = 0x0f08,/* 8 bit GMAC Interrupt Source Reg */ + GMAC_IRQ_MSK = 0x0f0c,/* 8 bit GMAC Interrupt Mask Reg */ + GMAC_LINK_CTRL = 0x0f10,/* 16 bit Link Control Reg */ + +/* Wake-up Frame Pattern Match Control Registers (YUKON only) */ + + WOL_REG_OFFS = 0x20,/* HW-Bug: Address is + 0x20 against spec. */ + + WOL_CTRL_STAT = 0x0f20,/* 16 bit WOL Control/Status Reg */ + WOL_MATCH_CTL = 0x0f22,/* 8 bit WOL Match Control Reg */ + WOL_MATCH_RES = 0x0f23,/* 8 bit WOL Match Result Reg */ + WOL_MAC_ADDR = 0x0f24,/* 32 bit WOL MAC Address */ + WOL_PATT_RPTR = 0x0f2c,/* 8 bit WOL Pattern Read Pointer */ + +/* WOL Pattern Length Registers (YUKON only) */ + + WOL_PATT_LEN_LO = 0x0f30,/* 32 bit WOL Pattern Length 3..0 */ + WOL_PATT_LEN_HI = 0x0f34,/* 24 bit WOL Pattern Length 6..4 */ + +/* WOL Pattern Counter Registers (YUKON only) */ + + WOL_PATT_CNT_0 = 0x0f38,/* 32 bit WOL Pattern Counter 3..0 */ + WOL_PATT_CNT_4 = 0x0f3c,/* 24 bit WOL Pattern Counter 6..4 */ +}; +#define WOL_REGS(port, x) (x + (port)*0x80) + +enum { + WOL_PATT_RAM_1 = 0x1000,/* WOL Pattern RAM Link 1 */ + WOL_PATT_RAM_2 = 0x1400,/* WOL Pattern RAM Link 2 */ +}; +#define WOL_PATT_RAM_BASE(port) (WOL_PATT_RAM_1 + (port)*0x400) + +enum { + BASE_XMAC_1 = 0x2000,/* XMAC 1 registers */ + BASE_GMAC_1 = 0x2800,/* GMAC 1 registers */ + BASE_XMAC_2 = 0x3000,/* XMAC 2 registers */ + BASE_GMAC_2 = 0x3800,/* GMAC 2 registers */ +}; + +/* + * Receive Frame Status Encoding + */ +enum { + XMR_FS_LEN = 0x3fff<<18, /* Bit 31..18: Rx Frame Length */ + XMR_FS_LEN_SHIFT = 18, + XMR_FS_2L_VLAN = 1<<17, /* Bit 17: tagged wh 2Lev VLAN ID*/ + XMR_FS_1_VLAN = 1<<16, /* Bit 16: tagged wh 1ev VLAN ID*/ + XMR_FS_BC = 1<<15, /* Bit 15: Broadcast Frame */ + XMR_FS_MC = 1<<14, /* Bit 14: Multicast Frame */ + XMR_FS_UC = 1<<13, /* Bit 13: Unicast Frame */ + + XMR_FS_BURST = 1<<11, /* Bit 11: Burst Mode */ + XMR_FS_CEX_ERR = 1<<10, /* Bit 10: Carrier Ext. 
Error */ + XMR_FS_802_3 = 1<<9, /* Bit 9: 802.3 Frame */ + XMR_FS_COL_ERR = 1<<8, /* Bit 8: Collision Error */ + XMR_FS_CAR_ERR = 1<<7, /* Bit 7: Carrier Event Error */ + XMR_FS_LEN_ERR = 1<<6, /* Bit 6: In-Range Length Error */ + XMR_FS_FRA_ERR = 1<<5, /* Bit 5: Framing Error */ + XMR_FS_RUNT = 1<<4, /* Bit 4: Runt Frame */ + XMR_FS_LNG_ERR = 1<<3, /* Bit 3: Giant (Jumbo) Frame */ + XMR_FS_FCS_ERR = 1<<2, /* Bit 2: Frame Check Sequ Err */ + XMR_FS_ERR = 1<<1, /* Bit 1: Frame Error */ + XMR_FS_MCTRL = 1<<0, /* Bit 0: MAC Control Packet */ + +/* + * XMR_FS_ERR will be set if + * XMR_FS_FCS_ERR, XMR_FS_LNG_ERR, XMR_FS_RUNT, + * XMR_FS_FRA_ERR, XMR_FS_LEN_ERR, or XMR_FS_CEX_ERR + * is set. XMR_FS_LNG_ERR and XMR_FS_LEN_ERR will issue + * XMR_FS_ERR unless the corresponding bit in the Receive Command + * Register is set. + */ +}; + +/* +,* XMAC-PHY Registers, indirect addressed over the XMAC + */ +enum { + PHY_XMAC_CTRL = 0x00,/* 16 bit r/w PHY Control Register */ + PHY_XMAC_STAT = 0x01,/* 16 bit r/w PHY Status Register */ + PHY_XMAC_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */ + PHY_XMAC_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */ + PHY_XMAC_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */ + PHY_XMAC_AUNE_LP = 0x05,/* 16 bit r/o Link Partner Abi Reg */ + PHY_XMAC_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */ + PHY_XMAC_NEPG = 0x07,/* 16 bit r/w Next Page Register */ + PHY_XMAC_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */ + + PHY_XMAC_EXT_STAT = 0x0f,/* 16 bit r/o Ext Status Register */ + PHY_XMAC_RES_ABI = 0x10,/* 16 bit r/o PHY Resolved Ability */ +}; +/* + * Broadcom-PHY Registers, indirect addressed over XMAC + */ +enum { + PHY_BCOM_CTRL = 0x00,/* 16 bit r/w PHY Control Register */ + PHY_BCOM_STAT = 0x01,/* 16 bit r/o PHY Status Register */ + PHY_BCOM_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */ + PHY_BCOM_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */ + PHY_BCOM_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */ + PHY_BCOM_AUNE_LP = 0x05,/* 16 bit r/o Link Part Ability Reg */ + PHY_BCOM_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */ + PHY_BCOM_NEPG = 0x07,/* 16 bit r/w Next Page Register */ + PHY_BCOM_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */ + /* Broadcom-specific registers */ + PHY_BCOM_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */ + PHY_BCOM_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */ + PHY_BCOM_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Reg */ + PHY_BCOM_P_EXT_CTRL = 0x10,/* 16 bit r/w PHY Extended Ctrl Reg */ + PHY_BCOM_P_EXT_STAT = 0x11,/* 16 bit r/o PHY Extended Stat Reg */ + PHY_BCOM_RE_CTR = 0x12,/* 16 bit r/w Receive Error Counter */ + PHY_BCOM_FC_CTR = 0x13,/* 16 bit r/w False Carrier Sense Cnt */ + PHY_BCOM_RNO_CTR = 0x14,/* 16 bit r/w Receiver NOT_OK Cnt */ + + PHY_BCOM_AUX_CTRL = 0x18,/* 16 bit r/w Auxiliary Control Reg */ + PHY_BCOM_AUX_STAT = 0x19,/* 16 bit r/o Auxiliary Stat Summary */ + PHY_BCOM_INT_STAT = 0x1a,/* 16 bit r/o Interrupt Status Reg */ + PHY_BCOM_INT_MASK = 0x1b,/* 16 bit r/w Interrupt Mask Reg */ +}; + +/* + * Marvel-PHY Registers, indirect addressed over GMAC + */ +enum { + PHY_MARV_CTRL = 0x00,/* 16 bit r/w PHY Control Register */ + PHY_MARV_STAT = 0x01,/* 16 bit r/o PHY Status Register */ + PHY_MARV_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */ + PHY_MARV_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */ + PHY_MARV_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. 
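(Aside on the Receive Frame Status encoding above: a minimal sketch, assuming the XMR_FS_* enums and the kernel integer types are in scope, of how a driver might decode that status word. It leans on XMR_FS_ERR being the hardware's summary of the individual error bits, as the comment in that enum explains; it is not taken from the driver itself.)

/* Extract the frame length and report whether the frame was received
 * without error; 'status' is the 32-bit Rx frame status word. */
static inline int xmac_rx_status_ok(u32 status, unsigned int *len)
{
        *len = (status & XMR_FS_LEN) >> XMR_FS_LEN_SHIFT;

        /* XMR_FS_ERR summarizes the FCS, giant, runt, framing,
         * in-range length and carrier extension error bits. */
        return !(status & XMR_FS_ERR);
}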
Advertisement */ + PHY_MARV_AUNE_LP = 0x05,/* 16 bit r/o Link Part Ability Reg */ + PHY_MARV_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */ + PHY_MARV_NEPG = 0x07,/* 16 bit r/w Next Page Register */ + PHY_MARV_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */ + /* Marvel-specific registers */ + PHY_MARV_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */ + PHY_MARV_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */ + PHY_MARV_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Reg */ + PHY_MARV_PHY_CTRL = 0x10,/* 16 bit r/w PHY Specific Ctrl Reg */ + PHY_MARV_PHY_STAT = 0x11,/* 16 bit r/o PHY Specific Stat Reg */ + PHY_MARV_INT_MASK = 0x12,/* 16 bit r/w Interrupt Mask Reg */ + PHY_MARV_INT_STAT = 0x13,/* 16 bit r/o Interrupt Status Reg */ + PHY_MARV_EXT_CTRL = 0x14,/* 16 bit r/w Ext. PHY Specific Ctrl */ + PHY_MARV_RXE_CNT = 0x15,/* 16 bit r/w Receive Error Counter */ + PHY_MARV_EXT_ADR = 0x16,/* 16 bit r/w Ext. Ad. for Cable Diag. */ + PHY_MARV_PORT_IRQ = 0x17,/* 16 bit r/o Port 0 IRQ (88E1111 only) */ + PHY_MARV_LED_CTRL = 0x18,/* 16 bit r/w LED Control Reg */ + PHY_MARV_LED_OVER = 0x19,/* 16 bit r/w Manual LED Override Reg */ + PHY_MARV_EXT_CTRL_2 = 0x1a,/* 16 bit r/w Ext. PHY Specific Ctrl 2 */ + PHY_MARV_EXT_P_STAT = 0x1b,/* 16 bit r/w Ext. PHY Spec. Stat Reg */ + PHY_MARV_CABLE_DIAG = 0x1c,/* 16 bit r/o Cable Diagnostic Reg */ + PHY_MARV_PAGE_ADDR = 0x1d,/* 16 bit r/w Extended Page Address Reg */ + PHY_MARV_PAGE_DATA = 0x1e,/* 16 bit r/w Extended Page Data Reg */ + +/* for 10/100 Fast Ethernet PHY (88E3082 only) */ + PHY_MARV_FE_LED_PAR = 0x16,/* 16 bit r/w LED Parallel Select Reg. */ + PHY_MARV_FE_LED_SER = 0x17,/* 16 bit r/w LED Stream Select S. LED */ + PHY_MARV_FE_VCT_TX = 0x1a,/* 16 bit r/w VCT Reg. for TXP/N Pins */ + PHY_MARV_FE_VCT_RX = 0x1b,/* 16 bit r/o VCT Reg. for RXP/N Pins */ + PHY_MARV_FE_SPEC_2 = 0x1c,/* 16 bit r/w Specific Control Reg. 2 */ +}; + +enum { + PHY_CT_RESET = 1<<15, /* Bit 15: (sc) clear all PHY related regs */ + PHY_CT_LOOP = 1<<14, /* Bit 14: enable Loopback over PHY */ + PHY_CT_SPS_LSB = 1<<13, /* Bit 13: Speed select, lower bit */ + PHY_CT_ANE = 1<<12, /* Bit 12: Auto-Negotiation Enabled */ + PHY_CT_PDOWN = 1<<11, /* Bit 11: Power Down Mode */ + PHY_CT_ISOL = 1<<10, /* Bit 10: Isolate Mode */ + PHY_CT_RE_CFG = 1<<9, /* Bit 9: (sc) Restart Auto-Negotiation */ + PHY_CT_DUP_MD = 1<<8, /* Bit 8: Duplex Mode */ + PHY_CT_COL_TST = 1<<7, /* Bit 7: Collision Test enabled */ + PHY_CT_SPS_MSB = 1<<6, /* Bit 6: Speed select, upper bit */ +}; + +enum { + PHY_CT_SP1000 = PHY_CT_SPS_MSB, /* enable speed of 1000 Mbps */ + PHY_CT_SP100 = PHY_CT_SPS_LSB, /* enable speed of 100 Mbps */ + PHY_CT_SP10 = 0, /* enable speed of 10 Mbps */ +}; + +enum { + PHY_ST_EXT_ST = 1<<8, /* Bit 8: Extended Status Present */ + + PHY_ST_PRE_SUP = 1<<6, /* Bit 6: Preamble Suppression */ + PHY_ST_AN_OVER = 1<<5, /* Bit 5: Auto-Negotiation Over */ + PHY_ST_REM_FLT = 1<<4, /* Bit 4: Remote Fault Condition Occurred */ + PHY_ST_AN_CAP = 1<<3, /* Bit 3: Auto-Negotiation Capability */ + PHY_ST_LSYNC = 1<<2, /* Bit 2: Link Synchronized */ + PHY_ST_JAB_DET = 1<<1, /* Bit 1: Jabber Detected */ + PHY_ST_EXT_REG = 1<<0, /* Bit 0: Extended Register available */ +}; + +enum { + PHY_I1_OUI_MSK = 0x3f<<10, /* Bit 15..10: Organization Unique ID */ + PHY_I1_MOD_NUM = 0x3f<<4, /* Bit 9.. 4: Model Number */ + PHY_I1_REV_MSK = 0xf, /* Bit 3.. 
0: Revision Number */ +}; + +/* different Broadcom PHY Ids */ +enum { + PHY_BCOM_ID1_A1 = 0x6041, + PHY_BCOM_ID1_B2 = 0x6043, + PHY_BCOM_ID1_C0 = 0x6044, + PHY_BCOM_ID1_C5 = 0x6047, +}; + +/* different Marvell PHY Ids */ +enum { + PHY_MARV_ID0_VAL= 0x0141, /* Marvell Unique Identifier */ + PHY_MARV_ID1_B0 = 0x0C23, /* Yukon (PHY 88E1011) */ + PHY_MARV_ID1_B2 = 0x0C25, /* Yukon-Plus (PHY 88E1011) */ + PHY_MARV_ID1_C2 = 0x0CC2, /* Yukon-EC (PHY 88E1111) */ + PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */ +}; + +/* Advertisement register bits */ +enum { + PHY_AN_NXT_PG = 1<<15, /* Bit 15: Request Next Page */ + PHY_AN_ACK = 1<<14, /* Bit 14: (ro) Acknowledge Received */ + PHY_AN_RF = 1<<13, /* Bit 13: Remote Fault Bits */ + + PHY_AN_PAUSE_ASYM = 1<<11,/* Bit 11: Try for asymmetric */ + PHY_AN_PAUSE_CAP = 1<<10, /* Bit 10: Try for pause */ + PHY_AN_100BASE4 = 1<<9, /* Bit 9: Try for 100mbps 4k packets */ + PHY_AN_100FULL = 1<<8, /* Bit 8: Try for 100mbps full-duplex */ + PHY_AN_100HALF = 1<<7, /* Bit 7: Try for 100mbps half-duplex */ + PHY_AN_10FULL = 1<<6, /* Bit 6: Try for 10mbps full-duplex */ + PHY_AN_10HALF = 1<<5, /* Bit 5: Try for 10mbps half-duplex */ + PHY_AN_CSMA = 1<<0, /* Bit 0: Only selector supported */ + PHY_AN_SEL = 0x1f, /* Bit 4..0: Selector Field, 00001=Ethernet*/ + PHY_AN_FULL = PHY_AN_100FULL | PHY_AN_10FULL | PHY_AN_CSMA, + PHY_AN_ALL = PHY_AN_10HALF | PHY_AN_10FULL | + PHY_AN_100HALF | PHY_AN_100FULL, +}; + +/* Xmac Specific */ +enum { + PHY_X_AN_NXT_PG = 1<<15, /* Bit 15: Request Next Page */ + PHY_X_AN_ACK = 1<<14, /* Bit 14: (ro) Acknowledge Received */ + PHY_X_AN_RFB = 3<<12,/* Bit 13..12: Remote Fault Bits */ + + PHY_X_AN_PAUSE = 3<<7,/* Bit 8.. 7: Pause Bits */ + PHY_X_AN_HD = 1<<6, /* Bit 6: Half Duplex */ + PHY_X_AN_FD = 1<<5, /* Bit 5: Full Duplex */ +}; + +/* Pause Bits (PHY_X_AN_PAUSE and PHY_X_RS_PAUSE) encoding */ +enum { + PHY_X_P_NO_PAUSE= 0<<7,/* Bit 8..7: no Pause Mode */ + PHY_X_P_SYM_MD = 1<<7, /* Bit 8..7: symmetric Pause Mode */ + PHY_X_P_ASYM_MD = 2<<7,/* Bit 8..7: asymmetric Pause Mode */ + PHY_X_P_BOTH_MD = 3<<7,/* Bit 8..7: both Pause Mode */ +}; + + +/***** PHY_XMAC_EXT_STAT 16 bit r/w Extended Status Register *****/ +enum { + PHY_X_EX_FD = 1<<15, /* Bit 15: Device Supports Full Duplex */ + PHY_X_EX_HD = 1<<14, /* Bit 14: Device Supports Half Duplex */ +}; + +/***** PHY_XMAC_RES_ABI 16 bit r/o PHY Resolved Ability *****/ +enum { + PHY_X_RS_PAUSE = 3<<7, /* Bit 8..7: selected Pause Mode */ + PHY_X_RS_HD = 1<<6, /* Bit 6: Half Duplex Mode selected */ + PHY_X_RS_FD = 1<<5, /* Bit 5: Full Duplex Mode selected */ + PHY_X_RS_ABLMIS = 1<<4, /* Bit 4: duplex or pause cap mismatch */ + PHY_X_RS_PAUMIS = 1<<3, /* Bit 3: pause capability mismatch */ +}; + +/* Remote Fault Bits (PHY_X_AN_RFB) encoding */ +enum { + X_RFB_OK = 0<<12,/* Bit 13..12 No errors, Link OK */ + X_RFB_LF = 1<<12,/* Bit 13..12 Link Failure */ + X_RFB_OFF = 2<<12,/* Bit 13..12 Offline */ + X_RFB_AN_ERR = 3<<12,/* Bit 13..12 Auto-Negotiation Error */ +}; + +/* Broadcom-Specific */ +/***** PHY_BCOM_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/ +enum { + PHY_B_1000C_TEST = 7<<13,/* Bit 15..13: Test Modes */ + PHY_B_1000C_MSE = 1<<12, /* Bit 12: Master/Slave Enable */ + PHY_B_1000C_MSC = 1<<11, /* Bit 11: M/S Configuration */ + PHY_B_1000C_RD = 1<<10, /* Bit 10: Repeater/DTE */ + PHY_B_1000C_AFD = 1<<9, /* Bit 9: Advertise Full Duplex */ + PHY_B_1000C_AHD = 1<<8, /* Bit 8: Advertise Half Duplex */ +}; + +/***** PHY_BCOM_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/ +/***** 
PHY_MARV_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/ +enum { + PHY_B_1000S_MSF = 1<<15, /* Bit 15: Master/Slave Fault */ + PHY_B_1000S_MSR = 1<<14, /* Bit 14: Master/Slave Result */ + PHY_B_1000S_LRS = 1<<13, /* Bit 13: Local Receiver Status */ + PHY_B_1000S_RRS = 1<<12, /* Bit 12: Remote Receiver Status */ + PHY_B_1000S_LP_FD = 1<<11, /* Bit 11: Link Partner can FD */ + PHY_B_1000S_LP_HD = 1<<10, /* Bit 10: Link Partner can HD */ + /* Bit 9..8: reserved */ + PHY_B_1000S_IEC = 0xff, /* Bit 7..0: Idle Error Count */ +}; + +/***** PHY_BCOM_EXT_STAT 16 bit r/o Extended Status Register *****/ +enum { + PHY_B_ES_X_FD_CAP = 1<<15, /* Bit 15: 1000Base-X FD capable */ + PHY_B_ES_X_HD_CAP = 1<<14, /* Bit 14: 1000Base-X HD capable */ + PHY_B_ES_T_FD_CAP = 1<<13, /* Bit 13: 1000Base-T FD capable */ + PHY_B_ES_T_HD_CAP = 1<<12, /* Bit 12: 1000Base-T HD capable */ +}; + +/***** PHY_BCOM_P_EXT_CTRL 16 bit r/w PHY Extended Control Reg *****/ +enum { + PHY_B_PEC_MAC_PHY = 1<<15, /* Bit 15: 10BIT/GMI-Interface */ + PHY_B_PEC_DIS_CROSS = 1<<14, /* Bit 14: Disable MDI Crossover */ + PHY_B_PEC_TX_DIS = 1<<13, /* Bit 13: Tx output Disabled */ + PHY_B_PEC_INT_DIS = 1<<12, /* Bit 12: Interrupts Disabled */ + PHY_B_PEC_F_INT = 1<<11, /* Bit 11: Force Interrupt */ + PHY_B_PEC_BY_45 = 1<<10, /* Bit 10: Bypass 4B5B-Decoder */ + PHY_B_PEC_BY_SCR = 1<<9, /* Bit 9: Bypass Scrambler */ + PHY_B_PEC_BY_MLT3 = 1<<8, /* Bit 8: Bypass MLT3 Encoder */ + PHY_B_PEC_BY_RXA = 1<<7, /* Bit 7: Bypass Rx Alignm. */ + PHY_B_PEC_RES_SCR = 1<<6, /* Bit 6: Reset Scrambler */ + PHY_B_PEC_EN_LTR = 1<<5, /* Bit 5: Ena LED Traffic Mode */ + PHY_B_PEC_LED_ON = 1<<4, /* Bit 4: Force LED's on */ + PHY_B_PEC_LED_OFF = 1<<3, /* Bit 3: Force LED's off */ + PHY_B_PEC_EX_IPG = 1<<2, /* Bit 2: Extend Tx IPG Mode */ + PHY_B_PEC_3_LED = 1<<1, /* Bit 1: Three Link LED mode */ + PHY_B_PEC_HIGH_LA = 1<<0, /* Bit 0: GMII FIFO Elasticy */ +}; + +/***** PHY_BCOM_P_EXT_STAT 16 bit r/o PHY Extended Status Reg *****/ +enum { + PHY_B_PES_CROSS_STAT = 1<<13, /* Bit 13: MDI Crossover Status */ + PHY_B_PES_INT_STAT = 1<<12, /* Bit 12: Interrupt Status */ + PHY_B_PES_RRS = 1<<11, /* Bit 11: Remote Receiver Stat. */ + PHY_B_PES_LRS = 1<<10, /* Bit 10: Local Receiver Stat. 
*/ + PHY_B_PES_LOCKED = 1<<9, /* Bit 9: Locked */ + PHY_B_PES_LS = 1<<8, /* Bit 8: Link Status */ + PHY_B_PES_RF = 1<<7, /* Bit 7: Remote Fault */ + PHY_B_PES_CE_ER = 1<<6, /* Bit 6: Carrier Ext Error */ + PHY_B_PES_BAD_SSD = 1<<5, /* Bit 5: Bad SSD */ + PHY_B_PES_BAD_ESD = 1<<4, /* Bit 4: Bad ESD */ + PHY_B_PES_RX_ER = 1<<3, /* Bit 3: Receive Error */ + PHY_B_PES_TX_ER = 1<<2, /* Bit 2: Transmit Error */ + PHY_B_PES_LOCK_ER = 1<<1, /* Bit 1: Lock Error */ + PHY_B_PES_MLT3_ER = 1<<0, /* Bit 0: MLT3 code Error */ +}; + +/* PHY_BCOM_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement *****/ +/* PHY_BCOM_AUNE_LP 16 bit r/o Link Partner Ability Reg *****/ +enum { + PHY_B_AN_RF = 1<<13, /* Bit 13: Remote Fault */ + + PHY_B_AN_ASP = 1<<11, /* Bit 11: Asymmetric Pause */ + PHY_B_AN_PC = 1<<10, /* Bit 10: Pause Capable */ +}; + + +/***** PHY_BCOM_FC_CTR 16 bit r/w False Carrier Counter *****/ +enum { + PHY_B_FC_CTR = 0xff, /* Bit 7..0: False Carrier Counter */ + +/***** PHY_BCOM_RNO_CTR 16 bit r/w Receive NOT_OK Counter *****/ + PHY_B_RC_LOC_MSK = 0xff00, /* Bit 15..8: Local Rx NOT_OK cnt */ + PHY_B_RC_REM_MSK = 0x00ff, /* Bit 7..0: Remote Rx NOT_OK cnt */ + +/***** PHY_BCOM_AUX_CTRL 16 bit r/w Auxiliary Control Reg *****/ + PHY_B_AC_L_SQE = 1<<15, /* Bit 15: Low Squelch */ + PHY_B_AC_LONG_PACK = 1<<14, /* Bit 14: Rx Long Packets */ + PHY_B_AC_ER_CTRL = 3<<12,/* Bit 13..12: Edgerate Control */ + /* Bit 11: reserved */ + PHY_B_AC_TX_TST = 1<<10, /* Bit 10: Tx test bit, always 1 */ + /* Bit 9.. 8: reserved */ + PHY_B_AC_DIS_PRF = 1<<7, /* Bit 7: dis part resp filter */ + /* Bit 6: reserved */ + PHY_B_AC_DIS_PM = 1<<5, /* Bit 5: dis power management */ + /* Bit 4: reserved */ + PHY_B_AC_DIAG = 1<<3, /* Bit 3: Diagnostic Mode */ +}; + +/***** PHY_BCOM_AUX_STAT 16 bit r/o Auxiliary Status Reg *****/ +enum { + PHY_B_AS_AN_C = 1<<15, /* Bit 15: AutoNeg complete */ + PHY_B_AS_AN_CA = 1<<14, /* Bit 14: AN Complete Ack */ + PHY_B_AS_ANACK_D = 1<<13, /* Bit 13: AN Ack Detect */ + PHY_B_AS_ANAB_D = 1<<12, /* Bit 12: AN Ability Detect */ + PHY_B_AS_NPW = 1<<11, /* Bit 11: AN Next Page Wait */ + PHY_B_AS_AN_RES_MSK = 7<<8,/* Bit 10..8: AN HDC */ + PHY_B_AS_PDF = 1<<7, /* Bit 7: Parallel Detect. Fault */ + PHY_B_AS_RF = 1<<6, /* Bit 6: Remote Fault */ + PHY_B_AS_ANP_R = 1<<5, /* Bit 5: AN Page Received */ + PHY_B_AS_LP_ANAB = 1<<4, /* Bit 4: LP AN Ability */ + PHY_B_AS_LP_NPAB = 1<<3, /* Bit 3: LP Next Page Ability */ + PHY_B_AS_LS = 1<<2, /* Bit 2: Link Status */ + PHY_B_AS_PRR = 1<<1, /* Bit 1: Pause Resolution-Rx */ + PHY_B_AS_PRT = 1<<0, /* Bit 0: Pause Resolution-Tx */ +}; +#define PHY_B_AS_PAUSE_MSK (PHY_B_AS_PRR | PHY_B_AS_PRT) + +/***** PHY_BCOM_INT_STAT 16 bit r/o Interrupt Status Reg *****/ +/***** PHY_BCOM_INT_MASK 16 bit r/w Interrupt Mask Reg *****/ +enum { + PHY_B_IS_PSE = 1<<14, /* Bit 14: Pair Swap Error */ + PHY_B_IS_MDXI_SC = 1<<13, /* Bit 13: MDIX Status Change */ + PHY_B_IS_HCT = 1<<12, /* Bit 12: counter above 32k */ + PHY_B_IS_LCT = 1<<11, /* Bit 11: counter above 128 */ + PHY_B_IS_AN_PR = 1<<10, /* Bit 10: Page Received */ + PHY_B_IS_NO_HDCL = 1<<9, /* Bit 9: No HCD Link */ + PHY_B_IS_NO_HDC = 1<<8, /* Bit 8: No HCD */ + PHY_B_IS_NEG_USHDC = 1<<7, /* Bit 7: Negotiated Unsup. 
HCD */ + PHY_B_IS_SCR_S_ER = 1<<6, /* Bit 6: Scrambler Sync Error */ + PHY_B_IS_RRS_CHANGE = 1<<5, /* Bit 5: Remote Rx Stat Change */ + PHY_B_IS_LRS_CHANGE = 1<<4, /* Bit 4: Local Rx Stat Change */ + PHY_B_IS_DUP_CHANGE = 1<<3, /* Bit 3: Duplex Mode Change */ + PHY_B_IS_LSP_CHANGE = 1<<2, /* Bit 2: Link Speed Change */ + PHY_B_IS_LST_CHANGE = 1<<1, /* Bit 1: Link Status Changed */ + PHY_B_IS_CRC_ER = 1<<0, /* Bit 0: CRC Error */ +}; +#define PHY_B_DEF_MSK \ + (~(PHY_B_IS_PSE | PHY_B_IS_AN_PR | PHY_B_IS_DUP_CHANGE | \ + PHY_B_IS_LSP_CHANGE | PHY_B_IS_LST_CHANGE)) + +/* Pause Bits (PHY_B_AN_ASP and PHY_B_AN_PC) encoding */ +enum { + PHY_B_P_NO_PAUSE = 0<<10,/* Bit 11..10: no Pause Mode */ + PHY_B_P_SYM_MD = 1<<10, /* Bit 11..10: symmetric Pause Mode */ + PHY_B_P_ASYM_MD = 2<<10,/* Bit 11..10: asymmetric Pause Mode */ + PHY_B_P_BOTH_MD = 3<<10,/* Bit 11..10: both Pause Mode */ +}; +/* + * Resolved Duplex mode and Capabilities (Aux Status Summary Reg) + */ +enum { + PHY_B_RES_1000FD = 7<<8,/* Bit 10..8: 1000Base-T Full Dup. */ + PHY_B_RES_1000HD = 6<<8,/* Bit 10..8: 1000Base-T Half Dup. */ +}; + +/** Marvell-Specific */ +enum { + PHY_M_AN_NXT_PG = 1<<15, /* Request Next Page */ + PHY_M_AN_ACK = 1<<14, /* (ro) Acknowledge Received */ + PHY_M_AN_RF = 1<<13, /* Remote Fault */ + + PHY_M_AN_ASP = 1<<11, /* Asymmetric Pause */ + PHY_M_AN_PC = 1<<10, /* MAC Pause implemented */ + PHY_M_AN_100_T4 = 1<<9, /* Not cap. 100Base-T4 (always 0) */ + PHY_M_AN_100_FD = 1<<8, /* Advertise 100Base-TX Full Duplex */ + PHY_M_AN_100_HD = 1<<7, /* Advertise 100Base-TX Half Duplex */ + PHY_M_AN_10_FD = 1<<6, /* Advertise 10Base-TX Full Duplex */ + PHY_M_AN_10_HD = 1<<5, /* Advertise 10Base-TX Half Duplex */ + PHY_M_AN_SEL_MSK =0x1f<<4, /* Bit 4.. 0: Selector Field Mask */ +}; + +/* special defines for FIBER (88E1011S only) */ +enum { + PHY_M_AN_ASP_X = 1<<8, /* Asymmetric Pause */ + PHY_M_AN_PC_X = 1<<7, /* MAC Pause implemented */ + PHY_M_AN_1000X_AHD = 1<<6, /* Advertise 10000Base-X Half Duplex */ + PHY_M_AN_1000X_AFD = 1<<5, /* Advertise 10000Base-X Full Duplex */ +}; + +/* Pause Bits (PHY_M_AN_ASP_X and PHY_M_AN_PC_X) encoding */ +enum { + PHY_M_P_NO_PAUSE_X = 0<<7,/* Bit 8.. 7: no Pause Mode */ + PHY_M_P_SYM_MD_X = 1<<7, /* Bit 8.. 7: symmetric Pause Mode */ + PHY_M_P_ASYM_MD_X = 2<<7,/* Bit 8.. 7: asymmetric Pause Mode */ + PHY_M_P_BOTH_MD_X = 3<<7,/* Bit 8.. 7: both Pause Mode */ +}; + +/***** PHY_MARV_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/ +enum { + PHY_M_1000C_TEST= 7<<13,/* Bit 15..13: Test Modes */ + PHY_M_1000C_MSE = 1<<12, /* Manual Master/Slave Enable */ + PHY_M_1000C_MSC = 1<<11, /* M/S Configuration (1=Master) */ + PHY_M_1000C_MPD = 1<<10, /* Multi-Port Device */ + PHY_M_1000C_AFD = 1<<9, /* Advertise Full Duplex */ + PHY_M_1000C_AHD = 1<<8, /* Advertise Half Duplex */ +}; + +/***** PHY_MARV_PHY_CTRL 16 bit r/w PHY Specific Ctrl Reg *****/ +enum { + PHY_M_PC_TX_FFD_MSK = 3<<14,/* Bit 15..14: Tx FIFO Depth Mask */ + PHY_M_PC_RX_FFD_MSK = 3<<12,/* Bit 13..12: Rx FIFO Depth Mask */ + PHY_M_PC_ASS_CRS_TX = 1<<11, /* Assert CRS on Transmit */ + PHY_M_PC_FL_GOOD = 1<<10, /* Force Link Good */ + PHY_M_PC_EN_DET_MSK = 3<<8,/* Bit 9.. 8: Energy Detect Mask */ + PHY_M_PC_ENA_EXT_D = 1<<7, /* Enable Ext. Distance (10BT) */ + PHY_M_PC_MDIX_MSK = 3<<5,/* Bit 6.. 5: MDI/MDIX Config. 
Mask */ + PHY_M_PC_DIS_125CLK = 1<<4, /* Disable 125 CLK */ + PHY_M_PC_MAC_POW_UP = 1<<3, /* MAC Power up */ + PHY_M_PC_SQE_T_ENA = 1<<2, /* SQE Test Enabled */ + PHY_M_PC_POL_R_DIS = 1<<1, /* Polarity Reversal Disabled */ + PHY_M_PC_DIS_JABBER = 1<<0, /* Disable Jabber */ +}; + +enum { + PHY_M_PC_EN_DET = 2<<8, /* Energy Detect (Mode 1) */ + PHY_M_PC_EN_DET_PLUS = 3<<8, /* Energy Detect Plus (Mode 2) */ +}; + +enum { + PHY_M_PC_MAN_MDI = 0, /* 00 = Manual MDI configuration */ + PHY_M_PC_MAN_MDIX = 1, /* 01 = Manual MDIX configuration */ + PHY_M_PC_ENA_AUTO = 3, /* 11 = Enable Automatic Crossover */ +}; + +/* for 10/100 Fast Ethernet PHY (88E3082 only) */ +enum { + PHY_M_PC_ENA_DTE_DT = 1<<15, /* Enable Data Terminal Equ. (DTE) Detect */ + PHY_M_PC_ENA_ENE_DT = 1<<14, /* Enable Energy Detect (sense & pulse) */ + PHY_M_PC_DIS_NLP_CK = 1<<13, /* Disable Normal Link Puls (NLP) Check */ + PHY_M_PC_ENA_LIP_NP = 1<<12, /* Enable Link Partner Next Page Reg. */ + PHY_M_PC_DIS_NLP_GN = 1<<11, /* Disable Normal Link Puls Generation */ + + PHY_M_PC_DIS_SCRAMB = 1<<9, /* Disable Scrambler */ + PHY_M_PC_DIS_FEFI = 1<<8, /* Disable Far End Fault Indic. (FEFI) */ + + PHY_M_PC_SH_TP_SEL = 1<<6, /* Shielded Twisted Pair Select */ + PHY_M_PC_RX_FD_MSK = 3<<2,/* Bit 3.. 2: Rx FIFO Depth Mask */ +}; + +/***** PHY_MARV_PHY_STAT 16 bit r/o PHY Specific Status Reg *****/ +enum { + PHY_M_PS_SPEED_MSK = 3<<14, /* Bit 15..14: Speed Mask */ + PHY_M_PS_SPEED_1000 = 1<<15, /* 10 = 1000 Mbps */ + PHY_M_PS_SPEED_100 = 1<<14, /* 01 = 100 Mbps */ + PHY_M_PS_SPEED_10 = 0, /* 00 = 10 Mbps */ + PHY_M_PS_FULL_DUP = 1<<13, /* Full Duplex */ + PHY_M_PS_PAGE_REC = 1<<12, /* Page Received */ + PHY_M_PS_SPDUP_RES = 1<<11, /* Speed & Duplex Resolved */ + PHY_M_PS_LINK_UP = 1<<10, /* Link Up */ + PHY_M_PS_CABLE_MSK = 7<<7, /* Bit 9.. 7: Cable Length Mask */ + PHY_M_PS_MDI_X_STAT = 1<<6, /* MDI Crossover Stat (1=MDIX) */ + PHY_M_PS_DOWNS_STAT = 1<<5, /* Downshift Status (1=downsh.) */ + PHY_M_PS_ENDET_STAT = 1<<4, /* Energy Detect Status (1=act) */ + PHY_M_PS_TX_P_EN = 1<<3, /* Tx Pause Enabled */ + PHY_M_PS_RX_P_EN = 1<<2, /* Rx Pause Enabled */ + PHY_M_PS_POL_REV = 1<<1, /* Polarity Reversed */ + PHY_M_PS_JABBER = 1<<0, /* Jabber */ +}; + +#define PHY_M_PS_PAUSE_MSK (PHY_M_PS_TX_P_EN | PHY_M_PS_RX_P_EN) + +/* for 10/100 Fast Ethernet PHY (88E3082 only) */ +enum { + PHY_M_PS_DTE_DETECT = 1<<15, /* Data Terminal Equipment (DTE) Detected */ + PHY_M_PS_RES_SPEED = 1<<14, /* Resolved Speed (1=100 Mbps, 0=10 Mbps */ +}; + +enum { + PHY_M_IS_AN_ERROR = 1<<15, /* Auto-Negotiation Error */ + PHY_M_IS_LSP_CHANGE = 1<<14, /* Link Speed Changed */ + PHY_M_IS_DUP_CHANGE = 1<<13, /* Duplex Mode Changed */ + PHY_M_IS_AN_PR = 1<<12, /* Page Received */ + PHY_M_IS_AN_COMPL = 1<<11, /* Auto-Negotiation Completed */ + PHY_M_IS_LST_CHANGE = 1<<10, /* Link Status Changed */ + PHY_M_IS_SYMB_ERROR = 1<<9, /* Symbol Error */ + PHY_M_IS_FALSE_CARR = 1<<8, /* False Carrier */ + PHY_M_IS_FIFO_ERROR = 1<<7, /* FIFO Overflow/Underrun Error */ + PHY_M_IS_MDI_CHANGE = 1<<6, /* MDI Crossover Changed */ + PHY_M_IS_DOWNSH_DET = 1<<5, /* Downshift Detected */ + PHY_M_IS_END_CHANGE = 1<<4, /* Energy Detect Changed */ + + PHY_M_IS_DTE_CHANGE = 1<<2, /* DTE Power Det. 
Status Changed */ + PHY_M_IS_POL_CHANGE = 1<<1, /* Polarity Changed */ + PHY_M_IS_JABBER = 1<<0, /* Jabber */ + + PHY_M_IS_DEF_MSK = PHY_M_IS_AN_ERROR | PHY_M_IS_LSP_CHANGE | + PHY_M_IS_LST_CHANGE | PHY_M_IS_FIFO_ERROR, + + PHY_M_IS_AN_MSK = PHY_M_IS_AN_ERROR | PHY_M_IS_AN_COMPL, +}; + +/***** PHY_MARV_EXT_CTRL 16 bit r/w Ext. PHY Specific Ctrl *****/ +enum { + PHY_M_EC_ENA_BC_EXT = 1<<15, /* Enable Block Carr. Ext. (88E1111 only) */ + PHY_M_EC_ENA_LIN_LB = 1<<14, /* Enable Line Loopback (88E1111 only) */ + + PHY_M_EC_DIS_LINK_P = 1<<12, /* Disable Link Pulses (88E1111 only) */ + PHY_M_EC_M_DSC_MSK = 3<<10, /* Bit 11..10: Master Downshift Counter */ + /* (88E1011 only) */ + PHY_M_EC_S_DSC_MSK = 3<<8, /* Bit 9.. 8: Slave Downshift Counter */ + /* (88E1011 only) */ + PHY_M_EC_M_DSC_MSK2 = 7<<9, /* Bit 11.. 9: Master Downshift Counter */ + /* (88E1111 only) */ + PHY_M_EC_DOWN_S_ENA = 1<<8, /* Downshift Enable (88E1111 only) */ + /* !!! Errata in spec. (1 = disable) */ + PHY_M_EC_RX_TIM_CT = 1<<7, /* RGMII Rx Timing Control*/ + PHY_M_EC_MAC_S_MSK = 7<<4, /* Bit 6.. 4: Def. MAC interface speed */ + PHY_M_EC_FIB_AN_ENA = 1<<3, /* Fiber Auto-Neg. Enable (88E1011S only) */ + PHY_M_EC_DTE_D_ENA = 1<<2, /* DTE Detect Enable (88E1111 only) */ + PHY_M_EC_TX_TIM_CT = 1<<1, /* RGMII Tx Timing Control */ + PHY_M_EC_TRANS_DIS = 1<<0, /* Transmitter Disable (88E1111 only) */}; + +#define PHY_M_EC_M_DSC(x) ((u16)(x)<<10) /* 00=1x; 01=2x; 10=3x; 11=4x */ +#define PHY_M_EC_S_DSC(x) ((u16)(x)<<8) /* 00=dis; 01=1x; 10=2x; 11=3x */ +#define PHY_M_EC_MAC_S(x) ((u16)(x)<<4) /* 01X=0; 110=2.5; 111=25 (MHz) */ + +#define PHY_M_EC_M_DSC_2(x) ((u16)(x)<<9) /* 000=1x; 001=2x; 010=3x; 011=4x */ + /* 100=5x; 101=6x; 110=7x; 111=8x */ +enum { + MAC_TX_CLK_0_MHZ = 2, + MAC_TX_CLK_2_5_MHZ = 6, + MAC_TX_CLK_25_MHZ = 7, +}; + +/***** PHY_MARV_LED_CTRL 16 bit r/w LED Control Reg *****/ +enum { + PHY_M_LEDC_DIS_LED = 1<<15, /* Disable LED */ + PHY_M_LEDC_PULS_MSK = 7<<12,/* Bit 14..12: Pulse Stretch Mask */ + PHY_M_LEDC_F_INT = 1<<11, /* Force Interrupt */ + PHY_M_LEDC_BL_R_MSK = 7<<8,/* Bit 10.. 8: Blink Rate Mask */ + PHY_M_LEDC_DP_C_LSB = 1<<7, /* Duplex Control (LSB, 88E1111 only) */ + PHY_M_LEDC_TX_C_LSB = 1<<6, /* Tx Control (LSB, 88E1111 only) */ + PHY_M_LEDC_LK_C_MSK = 7<<3,/* Bit 5.. 3: Link Control Mask */ + /* (88E1111 only) */ +}; +#define PHY_M_LED_PULS_DUR(x) (((u16)(x)<<12) & PHY_M_LEDC_PULS_MSK) +#define PHY_M_LED_BLINK_RT(x) (((u16)(x)<<8) & PHY_M_LEDC_BL_R_MSK) + +enum { + PHY_M_LEDC_LINK_MSK = 3<<3, /* Bit 4.. 
3: Link Control Mask */ + /* (88E1011 only) */ + PHY_M_LEDC_DP_CTRL = 1<<2, /* Duplex Control */ + PHY_M_LEDC_DP_C_MSB = 1<<2, /* Duplex Control (MSB, 88E1111 only) */ + PHY_M_LEDC_RX_CTRL = 1<<1, /* Rx Activity / Link */ + PHY_M_LEDC_TX_CTRL = 1<<0, /* Tx Activity / Link */ + PHY_M_LEDC_TX_C_MSB = 1<<0, /* Tx Control (MSB, 88E1111 only) */ +}; + +enum { + PULS_NO_STR = 0, /* no pulse stretching */ + PULS_21MS = 1, /* 21 ms to 42 ms */ + PULS_42MS = 2, /* 42 ms to 84 ms */ + PULS_84MS = 3, /* 84 ms to 170 ms */ + PULS_170MS = 4, /* 170 ms to 340 ms */ + PULS_340MS = 5, /* 340 ms to 670 ms */ + PULS_670MS = 6, /* 670 ms to 1.3 s */ + PULS_1300MS = 7, /* 1.3 s to 2.7 s */ +}; + + +enum { + BLINK_42MS = 0, /* 42 ms */ + BLINK_84MS = 1, /* 84 ms */ + BLINK_170MS = 2, /* 170 ms */ + BLINK_340MS = 3, /* 340 ms */ + BLINK_670MS = 4, /* 670 ms */ +}; + +/***** PHY_MARV_LED_OVER 16 bit r/w Manual LED Override Reg *****/ +#define PHY_M_LED_MO_SGMII(x) ((x)<<14) /* Bit 15..14: SGMII AN Timer */ + /* Bit 13..12: reserved */ +#define PHY_M_LED_MO_DUP(x) ((x)<<10) /* Bit 11..10: Duplex */ +#define PHY_M_LED_MO_10(x) ((x)<<8) /* Bit 9.. 8: Link 10 */ +#define PHY_M_LED_MO_100(x) ((x)<<6) /* Bit 7.. 6: Link 100 */ +#define PHY_M_LED_MO_1000(x) ((x)<<4) /* Bit 5.. 4: Link 1000 */ +#define PHY_M_LED_MO_RX(x) ((x)<<2) /* Bit 3.. 2: Rx */ +#define PHY_M_LED_MO_TX(x) ((x)<<0) /* Bit 1.. 0: Tx */ + +enum { + MO_LED_NORM = 0, + MO_LED_BLINK = 1, + MO_LED_OFF = 2, + MO_LED_ON = 3, +}; + +/***** PHY_MARV_EXT_CTRL_2 16 bit r/w Ext. PHY Specific Ctrl 2 *****/ +enum { + PHY_M_EC2_FI_IMPED = 1<<6, /* Fiber Input Impedance */ + PHY_M_EC2_FO_IMPED = 1<<5, /* Fiber Output Impedance */ + PHY_M_EC2_FO_M_CLK = 1<<4, /* Fiber Mode Clock Enable */ + PHY_M_EC2_FO_BOOST = 1<<3, /* Fiber Output Boost */ + PHY_M_EC2_FO_AM_MSK = 7, /* Bit 2.. 0: Fiber Output Amplitude */ +}; + +/***** PHY_MARV_EXT_P_STAT 16 bit r/w Ext. PHY Specific Status *****/ +enum { + PHY_M_FC_AUTO_SEL = 1<<15, /* Fiber/Copper Auto Sel. Dis. */ + PHY_M_FC_AN_REG_ACC = 1<<14, /* Fiber/Copper AN Reg. Access */ + PHY_M_FC_RESOLUTION = 1<<13, /* Fiber/Copper Resolution */ + PHY_M_SER_IF_AN_BP = 1<<12, /* Ser. IF AN Bypass Enable */ + PHY_M_SER_IF_BP_ST = 1<<11, /* Ser. IF AN Bypass Status */ + PHY_M_IRQ_POLARITY = 1<<10, /* IRQ polarity */ + PHY_M_DIS_AUT_MED = 1<<9, /* Disable Aut. Medium Reg. Selection */ + /* (88E1111 only) */ + /* Bit 9.. 4: reserved (88E1011 only) */ + PHY_M_UNDOC1 = 1<<7, /* undocumented bit !! */ + PHY_M_DTE_POW_STAT = 1<<4, /* DTE Power Status (88E1111 only) */ + PHY_M_MODE_MASK = 0xf, /* Bit 3.. 0: copy of HWCFG MODE[3:0] */ +}; + +/***** PHY_MARV_CABLE_DIAG 16 bit r/o Cable Diagnostic Reg *****/ +enum { + PHY_M_CABD_ENA_TEST = 1<<15, /* Enable Test (Page 0) */ + PHY_M_CABD_DIS_WAIT = 1<<15, /* Disable Waiting Period (Page 1) */ + /* (88E1111 only) */ + PHY_M_CABD_STAT_MSK = 3<<13, /* Bit 14..13: Status Mask */ + PHY_M_CABD_AMPL_MSK = 0x1f<<8, /* Bit 12.. 8: Amplitude Mask */ + /* (88E1111 only) */ + PHY_M_CABD_DIST_MSK = 0xff, /* Bit 7.. 0: Distance Mask */ +}; + +/* values for Cable Diagnostic Status (11=fail; 00=OK; 10=open; 01=short) */ +enum { + CABD_STAT_NORMAL= 0, + CABD_STAT_SHORT = 1, + CABD_STAT_OPEN = 2, + CABD_STAT_FAIL = 3, +}; + +/* for 10/100 Fast Ethernet PHY (88E3082 only) */ +/***** PHY_MARV_FE_LED_PAR 16 bit r/w LED Parallel Select Reg. *****/ + /* Bit 15..12: reserved (used internally) */ +enum { + PHY_M_FELP_LED2_MSK = 0xf<<8, /* Bit 11.. 8: LED2 Mask (LINK) */ + PHY_M_FELP_LED1_MSK = 0xf<<4, /* Bit 7.. 
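(Aside on the PHY_M_LED_MO_*() override macros and MO_LED_* values above: a sketch, not taken from the driver, of forcing all Marvell PHY LEDs off through the manual override register. struct skge_hw and gm_phy_write() are assumed to be the driver's hardware context and PHY write helper.)

static void marv_led_override_off(struct skge_hw *hw, int port)
{
        u16 over = PHY_M_LED_MO_DUP(MO_LED_OFF)  |
                   PHY_M_LED_MO_10(MO_LED_OFF)   |
                   PHY_M_LED_MO_100(MO_LED_OFF)  |
                   PHY_M_LED_MO_1000(MO_LED_OFF) |
                   PHY_M_LED_MO_RX(MO_LED_OFF)   |
                   PHY_M_LED_MO_TX(MO_LED_OFF);

        gm_phy_write(hw, port, PHY_MARV_LED_OVER, over);
}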
4: LED1 Mask (ACT) */ + PHY_M_FELP_LED0_MSK = 0xf, /* Bit 3.. 0: LED0 Mask (SPEED) */ +}; + +#define PHY_M_FELP_LED2_CTRL(x) (((x)<<8) & PHY_M_FELP_LED2_MSK) +#define PHY_M_FELP_LED1_CTRL(x) (((x)<<4) & PHY_M_FELP_LED1_MSK) +#define PHY_M_FELP_LED0_CTRL(x) (((x)<<0) & PHY_M_FELP_LED0_MSK) + +enum { + LED_PAR_CTRL_COLX = 0x00, + LED_PAR_CTRL_ERROR = 0x01, + LED_PAR_CTRL_DUPLEX = 0x02, + LED_PAR_CTRL_DP_COL = 0x03, + LED_PAR_CTRL_SPEED = 0x04, + LED_PAR_CTRL_LINK = 0x05, + LED_PAR_CTRL_TX = 0x06, + LED_PAR_CTRL_RX = 0x07, + LED_PAR_CTRL_ACT = 0x08, + LED_PAR_CTRL_LNK_RX = 0x09, + LED_PAR_CTRL_LNK_AC = 0x0a, + LED_PAR_CTRL_ACT_BL = 0x0b, + LED_PAR_CTRL_TX_BL = 0x0c, + LED_PAR_CTRL_RX_BL = 0x0d, + LED_PAR_CTRL_COL_BL = 0x0e, + LED_PAR_CTRL_INACT = 0x0f +}; + +/*****,PHY_MARV_FE_SPEC_2 16 bit r/w Specific Control Reg. 2 *****/ +enum { + PHY_M_FESC_DIS_WAIT = 1<<2, /* Disable TDR Waiting Period */ + PHY_M_FESC_ENA_MCLK = 1<<1, /* Enable MAC Rx Clock in sleep mode */ + PHY_M_FESC_SEL_CL_A = 1<<0, /* Select Class A driver (100B-TX) */ +}; + + +/***** PHY_MARV_PHY_CTRL (page 3) 16 bit r/w LED Control Reg. *****/ +enum { + PHY_M_LEDC_LOS_MSK = 0xf<<12, /* Bit 15..12: LOS LED Ctrl. Mask */ + PHY_M_LEDC_INIT_MSK = 0xf<<8, /* Bit 11.. 8: INIT LED Ctrl. Mask */ + PHY_M_LEDC_STA1_MSK = 0xf<<4, /* Bit 7.. 4: STAT1 LED Ctrl. Mask */ + PHY_M_LEDC_STA0_MSK = 0xf, /* Bit 3.. 0: STAT0 LED Ctrl. Mask */ +}; + +#define PHY_M_LEDC_LOS_CTRL(x) (((x)<<12) & PHY_M_LEDC_LOS_MSK) +#define PHY_M_LEDC_INIT_CTRL(x) (((x)<<8) & PHY_M_LEDC_INIT_MSK) +#define PHY_M_LEDC_STA1_CTRL(x) (((x)<<4) & PHY_M_LEDC_STA1_MSK) +#define PHY_M_LEDC_STA0_CTRL(x) (((x)<<0) & PHY_M_LEDC_STA0_MSK) + +/* GMAC registers */ +/* Port Registers */ +enum { + GM_GP_STAT = 0x0000, /* 16 bit r/o General Purpose Status */ + GM_GP_CTRL = 0x0004, /* 16 bit r/w General Purpose Control */ + GM_TX_CTRL = 0x0008, /* 16 bit r/w Transmit Control Reg. */ + GM_RX_CTRL = 0x000c, /* 16 bit r/w Receive Control Reg. */ + GM_TX_FLOW_CTRL = 0x0010, /* 16 bit r/w Transmit Flow-Control */ + GM_TX_PARAM = 0x0014, /* 16 bit r/w Transmit Parameter Reg. */ + GM_SERIAL_MODE = 0x0018, /* 16 bit r/w Serial Mode Register */ +/* Source Address Registers */ + GM_SRC_ADDR_1L = 0x001c, /* 16 bit r/w Source Address 1 (low) */ + GM_SRC_ADDR_1M = 0x0020, /* 16 bit r/w Source Address 1 (middle) */ + GM_SRC_ADDR_1H = 0x0024, /* 16 bit r/w Source Address 1 (high) */ + GM_SRC_ADDR_2L = 0x0028, /* 16 bit r/w Source Address 2 (low) */ + GM_SRC_ADDR_2M = 0x002c, /* 16 bit r/w Source Address 2 (middle) */ + GM_SRC_ADDR_2H = 0x0030, /* 16 bit r/w Source Address 2 (high) */ + +/* Multicast Address Hash Registers */ + GM_MC_ADDR_H1 = 0x0034, /* 16 bit r/w Multicast Address Hash 1 */ + GM_MC_ADDR_H2 = 0x0038, /* 16 bit r/w Multicast Address Hash 2 */ + GM_MC_ADDR_H3 = 0x003c, /* 16 bit r/w Multicast Address Hash 3 */ + GM_MC_ADDR_H4 = 0x0040, /* 16 bit r/w Multicast Address Hash 4 */ + +/* Interrupt Source Registers */ + GM_TX_IRQ_SRC = 0x0044, /* 16 bit r/o Tx Overflow IRQ Source */ + GM_RX_IRQ_SRC = 0x0048, /* 16 bit r/o Rx Overflow IRQ Source */ + GM_TR_IRQ_SRC = 0x004c, /* 16 bit r/o Tx/Rx Over. IRQ Source */ + +/* Interrupt Mask Registers */ + GM_TX_IRQ_MSK = 0x0050, /* 16 bit r/w Tx Overflow IRQ Mask */ + GM_RX_IRQ_MSK = 0x0054, /* 16 bit r/w Rx Overflow IRQ Mask */ + GM_TR_IRQ_MSK = 0x0058, /* 16 bit r/w Tx/Rx Over. 
IRQ Mask */ + +/* Serial Management Interface (SMI) Registers */ + GM_SMI_CTRL = 0x0080, /* 16 bit r/w SMI Control Register */ + GM_SMI_DATA = 0x0084, /* 16 bit r/w SMI Data Register */ + GM_PHY_ADDR = 0x0088, /* 16 bit r/w GPHY Address Register */ +}; + +/* MIB Counters */ +#define GM_MIB_CNT_BASE 0x0100 /* Base Address of MIB Counters */ +#define GM_MIB_CNT_SIZE 44 /* Number of MIB Counters */ + +/* + * MIB Counters base address definitions (low word) - + * use offset 4 for access to high word (32 bit r/o) + */ +enum { + GM_RXF_UC_OK = GM_MIB_CNT_BASE + 0, /* Unicast Frames Received OK */ + GM_RXF_BC_OK = GM_MIB_CNT_BASE + 8, /* Broadcast Frames Received OK */ + GM_RXF_MPAUSE = GM_MIB_CNT_BASE + 16, /* Pause MAC Ctrl Frames Received */ + GM_RXF_MC_OK = GM_MIB_CNT_BASE + 24, /* Multicast Frames Received OK */ + GM_RXF_FCS_ERR = GM_MIB_CNT_BASE + 32, /* Rx Frame Check Seq. Error */ + /* GM_MIB_CNT_BASE + 40: reserved */ + GM_RXO_OK_LO = GM_MIB_CNT_BASE + 48, /* Octets Received OK Low */ + GM_RXO_OK_HI = GM_MIB_CNT_BASE + 56, /* Octets Received OK High */ + GM_RXO_ERR_LO = GM_MIB_CNT_BASE + 64, /* Octets Received Invalid Low */ + GM_RXO_ERR_HI = GM_MIB_CNT_BASE + 72, /* Octets Received Invalid High */ + GM_RXF_SHT = GM_MIB_CNT_BASE + 80, /* Frames <64 Byte Received OK */ + GM_RXE_FRAG = GM_MIB_CNT_BASE + 88, /* Frames <64 Byte Received with FCS Err */ + GM_RXF_64B = GM_MIB_CNT_BASE + 96, /* 64 Byte Rx Frame */ + GM_RXF_127B = GM_MIB_CNT_BASE + 104, /* 65-127 Byte Rx Frame */ + GM_RXF_255B = GM_MIB_CNT_BASE + 112, /* 128-255 Byte Rx Frame */ + GM_RXF_511B = GM_MIB_CNT_BASE + 120, /* 256-511 Byte Rx Frame */ + GM_RXF_1023B = GM_MIB_CNT_BASE + 128, /* 512-1023 Byte Rx Frame */ + GM_RXF_1518B = GM_MIB_CNT_BASE + 136, /* 1024-1518 Byte Rx Frame */ + GM_RXF_MAX_SZ = GM_MIB_CNT_BASE + 144, /* 1519-MaxSize Byte Rx Frame */ + GM_RXF_LNG_ERR = GM_MIB_CNT_BASE + 152, /* Rx Frame too Long Error */ + GM_RXF_JAB_PKT = GM_MIB_CNT_BASE + 160, /* Rx Jabber Packet Frame */ + /* GM_MIB_CNT_BASE + 168: reserved */ + GM_RXE_FIFO_OV = GM_MIB_CNT_BASE + 176, /* Rx FIFO overflow Event */ + /* GM_MIB_CNT_BASE + 184: reserved */ + GM_TXF_UC_OK = GM_MIB_CNT_BASE + 192, /* Unicast Frames Xmitted OK */ + GM_TXF_BC_OK = GM_MIB_CNT_BASE + 200, /* Broadcast Frames Xmitted OK */ + GM_TXF_MPAUSE = GM_MIB_CNT_BASE + 208, /* Pause MAC Ctrl Frames Xmitted */ + GM_TXF_MC_OK = GM_MIB_CNT_BASE + 216, /* Multicast Frames Xmitted OK */ + GM_TXO_OK_LO = GM_MIB_CNT_BASE + 224, /* Octets Transmitted OK Low */ + GM_TXO_OK_HI = GM_MIB_CNT_BASE + 232, /* Octets Transmitted OK High */ + GM_TXF_64B = GM_MIB_CNT_BASE + 240, /* 64 Byte Tx Frame */ + GM_TXF_127B = GM_MIB_CNT_BASE + 248, /* 65-127 Byte Tx Frame */ + GM_TXF_255B = GM_MIB_CNT_BASE + 256, /* 128-255 Byte Tx Frame */ + GM_TXF_511B = GM_MIB_CNT_BASE + 264, /* 256-511 Byte Tx Frame */ + GM_TXF_1023B = GM_MIB_CNT_BASE + 272, /* 512-1023 Byte Tx Frame */ + GM_TXF_1518B = GM_MIB_CNT_BASE + 280, /* 1024-1518 Byte Tx Frame */ + GM_TXF_MAX_SZ = GM_MIB_CNT_BASE + 288, /* 1519-MaxSize Byte Tx Frame */ + + GM_TXF_COL = GM_MIB_CNT_BASE + 304, /* Tx Collision */ + GM_TXF_LAT_COL = GM_MIB_CNT_BASE + 312, /* Tx Late Collision */ + GM_TXF_ABO_COL = GM_MIB_CNT_BASE + 320, /* Tx aborted due to Exces. Col. 
*/ + GM_TXF_MUL_COL = GM_MIB_CNT_BASE + 328, /* Tx Multiple Collision */ + GM_TXF_SNG_COL = GM_MIB_CNT_BASE + 336, /* Tx Single Collision */ + GM_TXE_FIFO_UR = GM_MIB_CNT_BASE + 344, /* Tx FIFO Underrun Event */ +}; + +/* GMAC Bit Definitions */ +/* GM_GP_STAT 16 bit r/o General Purpose Status Register */ +enum { + GM_GPSR_SPEED = 1<<15, /* Bit 15: Port Speed (1 = 100 Mbps) */ + GM_GPSR_DUPLEX = 1<<14, /* Bit 14: Duplex Mode (1 = Full) */ + GM_GPSR_FC_TX_DIS = 1<<13, /* Bit 13: Tx Flow-Control Mode Disabled */ + GM_GPSR_LINK_UP = 1<<12, /* Bit 12: Link Up Status */ + GM_GPSR_PAUSE = 1<<11, /* Bit 11: Pause State */ + GM_GPSR_TX_ACTIVE = 1<<10, /* Bit 10: Tx in Progress */ + GM_GPSR_EXC_COL = 1<<9, /* Bit 9: Excessive Collisions Occurred */ + GM_GPSR_LAT_COL = 1<<8, /* Bit 8: Late Collisions Occurred */ + + GM_GPSR_PHY_ST_CH = 1<<5, /* Bit 5: PHY Status Change */ + GM_GPSR_GIG_SPEED = 1<<4, /* Bit 4: Gigabit Speed (1 = 1000 Mbps) */ + GM_GPSR_PART_MODE = 1<<3, /* Bit 3: Partition mode */ + GM_GPSR_FC_RX_DIS = 1<<2, /* Bit 2: Rx Flow-Control Mode Disabled */ + GM_GPSR_PROM_EN = 1<<1, /* Bit 1: Promiscuous Mode Enabled */ +}; + +/* GM_GP_CTRL 16 bit r/w General Purpose Control Register */ +enum { + GM_GPCR_PROM_ENA = 1<<14, /* Bit 14: Enable Promiscuous Mode */ + GM_GPCR_FC_TX_DIS = 1<<13, /* Bit 13: Disable Tx Flow-Control Mode */ + GM_GPCR_TX_ENA = 1<<12, /* Bit 12: Enable Transmit */ + GM_GPCR_RX_ENA = 1<<11, /* Bit 11: Enable Receive */ + GM_GPCR_BURST_ENA = 1<<10, /* Bit 10: Enable Burst Mode */ + GM_GPCR_LOOP_ENA = 1<<9, /* Bit 9: Enable MAC Loopback Mode */ + GM_GPCR_PART_ENA = 1<<8, /* Bit 8: Enable Partition Mode */ + GM_GPCR_GIGS_ENA = 1<<7, /* Bit 7: Gigabit Speed (1000 Mbps) */ + GM_GPCR_FL_PASS = 1<<6, /* Bit 6: Force Link Pass */ + GM_GPCR_DUP_FULL = 1<<5, /* Bit 5: Full Duplex Mode */ + GM_GPCR_FC_RX_DIS = 1<<4, /* Bit 4: Disable Rx Flow-Control Mode */ + GM_GPCR_SPEED_100 = 1<<3, /* Bit 3: Port Speed 100 Mbps */ + GM_GPCR_AU_DUP_DIS = 1<<2, /* Bit 2: Disable Auto-Update Duplex */ + GM_GPCR_AU_FCT_DIS = 1<<1, /* Bit 1: Disable Auto-Update Flow-C. 
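(Aside on the MIB counter block above: its comment notes that each counter's upper 32 bits sit at offset +4 from the listed low-word address. A sketch of a 64-bit read following that rule; gma_read32() and struct skge_hw are assumed driver helpers, not defined in this header excerpt.)

/* Read one MIB counter as a 64-bit value, given its low-word offset
 * (e.g. GM_RXF_UC_OK); the high word is at the documented +4 offset. */
static u64 gmac_read_mib64(struct skge_hw *hw, int port, int reg)
{
        return (u64)gma_read32(hw, port, reg) |
               ((u64)gma_read32(hw, port, reg + 4) << 32);
}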
*/ + GM_GPCR_AU_SPD_DIS = 1<<0, /* Bit 0: Disable Auto-Update Speed */ +}; + +#define GM_GPCR_SPEED_1000 (GM_GPCR_GIGS_ENA | GM_GPCR_SPEED_100) +#define GM_GPCR_AU_ALL_DIS (GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS|GM_GPCR_AU_SPD_DIS) + +/* GM_TX_CTRL 16 bit r/w Transmit Control Register */ +enum { + GM_TXCR_FORCE_JAM = 1<<15, /* Bit 15: Force Jam / Flow-Control */ + GM_TXCR_CRC_DIS = 1<<14, /* Bit 14: Disable insertion of CRC */ + GM_TXCR_PAD_DIS = 1<<13, /* Bit 13: Disable padding of packets */ + GM_TXCR_COL_THR_MSK = 7<<10, /* Bit 12..10: Collision Threshold */ +}; + +#define TX_COL_THR(x) (((x)<<10) & GM_TXCR_COL_THR_MSK) +#define TX_COL_DEF 0x04 /* late collision after 64 byte */ + +/* GM_RX_CTRL 16 bit r/w Receive Control Register */ +enum { + GM_RXCR_UCF_ENA = 1<<15, /* Bit 15: Enable Unicast filtering */ + GM_RXCR_MCF_ENA = 1<<14, /* Bit 14: Enable Multicast filtering */ + GM_RXCR_CRC_DIS = 1<<13, /* Bit 13: Remove 4-byte CRC */ + GM_RXCR_PASS_FC = 1<<12, /* Bit 12: Pass FC packets to FIFO */ +}; + +/* GM_TX_PARAM 16 bit r/w Transmit Parameter Register */ +enum { + GM_TXPA_JAMLEN_MSK = 0x03<<14, /* Bit 15..14: Jam Length */ + GM_TXPA_JAMIPG_MSK = 0x1f<<9, /* Bit 13..9: Jam IPG */ + GM_TXPA_JAMDAT_MSK = 0x1f<<4, /* Bit 8..4: IPG Jam to Data */ + + TX_JAM_LEN_DEF = 0x03, + TX_JAM_IPG_DEF = 0x0b, + TX_IPG_JAM_DEF = 0x1c, +}; + +#define TX_JAM_LEN_VAL(x) (((x)<<14) & GM_TXPA_JAMLEN_MSK) +#define TX_JAM_IPG_VAL(x) (((x)<<9) & GM_TXPA_JAMIPG_MSK) +#define TX_IPG_JAM_DATA(x) (((x)<<4) & GM_TXPA_JAMDAT_MSK) + + +/* GM_SERIAL_MODE 16 bit r/w Serial Mode Register */ +enum { + GM_SMOD_DATABL_MSK = 0x1f<<11, /* Bit 15..11: Data Blinder (r/o) */ + GM_SMOD_LIMIT_4 = 1<<10, /* Bit 10: 4 consecutive Tx trials */ + GM_SMOD_VLAN_ENA = 1<<9, /* Bit 9: Enable VLAN (Max. Frame Len) */ + GM_SMOD_JUMBO_ENA = 1<<8, /* Bit 8: Enable Jumbo (Max. Frame Len) */ + GM_SMOD_IPG_MSK = 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */ +}; + +#define DATA_BLIND_VAL(x) (((x)<<11) & GM_SMOD_DATABL_MSK) +#define DATA_BLIND_DEF 0x04 + +#define IPG_DATA_VAL(x) (x & GM_SMOD_IPG_MSK) +#define IPG_DATA_DEF 0x1e + +/* GM_SMI_CTRL 16 bit r/w SMI Control Register */ +enum { + GM_SMI_CT_PHY_A_MSK = 0x1f<<11, /* Bit 15..11: PHY Device Address */ + GM_SMI_CT_REG_A_MSK = 0x1f<<6, /* Bit 10.. 
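(Aside on the GM_GP_CTRL, GM_TX_PARAM and GM_SERIAL_MODE helper macros above: a sketch of composing register values from them, here for a forced 1000 Mbps full-duplex setup with the default jam and IPG parameters. gma_write16() and struct skge_hw are assumed driver helpers, and this is not the driver's actual init sequence.)

static void gmac_param_example(struct skge_hw *hw, int port)
{
        /* forced 1000 Mbps, full duplex, all auto-updates disabled */
        u16 gpcr = GM_GPCR_SPEED_1000 | GM_GPCR_DUP_FULL | GM_GPCR_AU_ALL_DIS;

        /* default jam length, jam IPG and IPG-to-data values */
        u16 txpa = TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
                   TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
                   TX_IPG_JAM_DATA(TX_IPG_JAM_DEF);

        /* default data blinder and inter-packet gap */
        u16 smod = DATA_BLIND_VAL(DATA_BLIND_DEF) | IPG_DATA_VAL(IPG_DATA_DEF);

        gma_write16(hw, port, GM_GP_CTRL, gpcr);
        gma_write16(hw, port, GM_TX_PARAM, txpa);
        gma_write16(hw, port, GM_SERIAL_MODE, smod);
}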
6: PHY Register Address */ + GM_SMI_CT_OP_RD = 1<<5, /* Bit 5: OpCode Read (0=Write)*/ + GM_SMI_CT_RD_VAL = 1<<4, /* Bit 4: Read Valid (Read completed) */ + GM_SMI_CT_BUSY = 1<<3, /* Bit 3: Busy (Operation in progress) */ +}; + +#define GM_SMI_CT_PHY_AD(x) (((x)<<11) & GM_SMI_CT_PHY_A_MSK) +#define GM_SMI_CT_REG_AD(x) (((x)<<6) & GM_SMI_CT_REG_A_MSK) + +/* GM_PHY_ADDR 16 bit r/w GPHY Address Register */ +enum { + GM_PAR_MIB_CLR = 1<<5, /* Bit 5: Set MIB Clear Counter Mode */ + GM_PAR_MIB_TST = 1<<4, /* Bit 4: MIB Load Counter (Test Mode) */ +}; + +/* Receive Frame Status Encoding */ +enum { + GMR_FS_LEN = 0xffff<<16, /* Bit 31..16: Rx Frame Length */ + GMR_FS_LEN_SHIFT = 16, + GMR_FS_VLAN = 1<<13, /* Bit 13: VLAN Packet */ + GMR_FS_JABBER = 1<<12, /* Bit 12: Jabber Packet */ + GMR_FS_UN_SIZE = 1<<11, /* Bit 11: Undersize Packet */ + GMR_FS_MC = 1<<10, /* Bit 10: Multicast Packet */ + GMR_FS_BC = 1<<9, /* Bit 9: Broadcast Packet */ + GMR_FS_RX_OK = 1<<8, /* Bit 8: Receive OK (Good Packet) */ + GMR_FS_GOOD_FC = 1<<7, /* Bit 7: Good Flow-Control Packet */ + GMR_FS_BAD_FC = 1<<6, /* Bit 6: Bad Flow-Control Packet */ + GMR_FS_MII_ERR = 1<<5, /* Bit 5: MII Error */ + GMR_FS_LONG_ERR = 1<<4, /* Bit 4: Too Long Packet */ + GMR_FS_FRAGMENT = 1<<3, /* Bit 3: Fragment */ + + GMR_FS_CRC_ERR = 1<<1, /* Bit 1: CRC Error */ + GMR_FS_RX_FF_OV = 1<<0, /* Bit 0: Rx FIFO Overflow */ + +/* + * GMR_FS_ANY_ERR (analogous to XMR_FS_ANY_ERR) + */ + GMR_FS_ANY_ERR = GMR_FS_CRC_ERR | GMR_FS_LONG_ERR | + GMR_FS_MII_ERR | GMR_FS_BAD_FC | GMR_FS_GOOD_FC | + GMR_FS_JABBER, +/* Rx GMAC FIFO Flush Mask (default) */ + RX_FF_FL_DEF_MSK = GMR_FS_CRC_ERR | GMR_FS_RX_FF_OV |GMR_FS_MII_ERR | + GMR_FS_BAD_FC | GMR_FS_UN_SIZE | GMR_FS_JABBER, +}; + +/* RX_GMF_CTRL_T 32 bit Rx GMAC FIFO Control/Test */ +enum { + GMF_WP_TST_ON = 1<<14, /* Write Pointer Test On */ + GMF_WP_TST_OFF = 1<<13, /* Write Pointer Test Off */ + GMF_WP_STEP = 1<<12, /* Write Pointer Step/Increment */ + + GMF_RP_TST_ON = 1<<10, /* Read Pointer Test On */ + GMF_RP_TST_OFF = 1<<9, /* Read Pointer Test Off */ + GMF_RP_STEP = 1<<8, /* Read Pointer Step/Increment */ + GMF_RX_F_FL_ON = 1<<7, /* Rx FIFO Flush Mode On */ + GMF_RX_F_FL_OFF = 1<<6, /* Rx FIFO Flush Mode Off */ + GMF_CLI_RX_FO = 1<<5, /* Clear IRQ Rx FIFO Overrun */ + GMF_CLI_RX_FC = 1<<4, /* Clear IRQ Rx Frame Complete */ + GMF_OPER_ON = 1<<3, /* Operational Mode On */ + GMF_OPER_OFF = 1<<2, /* Operational Mode Off */ + GMF_RST_CLR = 1<<1, /* Clear GMAC FIFO Reset */ + GMF_RST_SET = 1<<0, /* Set GMAC FIFO Reset */ + + RX_GMF_FL_THR_DEF = 0xa, /* flush threshold (default) */ +}; + + +/* TX_GMF_CTRL_T 32 bit Tx GMAC FIFO Control/Test */ +enum { + GMF_WSP_TST_ON = 1<<18, /* Write Shadow Pointer Test On */ + GMF_WSP_TST_OFF = 1<<17, /* Write Shadow Pointer Test Off */ + GMF_WSP_STEP = 1<<16, /* Write Shadow Pointer Step/Increment */ + + GMF_CLI_TX_FU = 1<<6, /* Clear IRQ Tx FIFO Underrun */ + GMF_CLI_TX_FC = 1<<5, /* Clear IRQ Tx Frame Complete */ + GMF_CLI_TX_PE = 1<<4, /* Clear IRQ Tx Parity Error */ +}; + +/* GMAC_TI_ST_CTRL 8 bit Time Stamp Timer Ctrl Reg (YUKON only) */ +enum { + GMT_ST_START = 1<<2, /* Start Time Stamp Timer */ + GMT_ST_STOP = 1<<1, /* Stop Time Stamp Timer */ + GMT_ST_CLR_IRQ = 1<<0, /* Clear Time Stamp Timer IRQ */ +}; + +/* GMAC_CTRL 32 bit GMAC Control Reg (YUKON only) */ +enum { + GMC_H_BURST_ON = 1<<7, /* Half Duplex Burst Mode On */ + GMC_H_BURST_OFF = 1<<6, /* Half Duplex Burst Mode Off */ + GMC_F_LOOPB_ON = 1<<5, /* FIFO Loopback On */ + GMC_F_LOOPB_OFF = 1<<4, /* FIFO 
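(Aside on the SMI control bits above: they describe the GPHY register read handshake, i.e. write the PHY and register address with the read opcode, wait for GM_SMI_CT_RD_VAL, then fetch GM_SMI_DATA. A sketch of that flow, with gma_read16()/gma_write16() and struct skge_hw assumed; a real implementation would bound the poll loop with a timeout.)

static u16 gphy_read_sketch(struct skge_hw *hw, int port, int phy_reg)
{
        gma_write16(hw, port, GM_SMI_CTRL,
                    GM_SMI_CT_PHY_AD(PHY_ADDR_MARV) |
                    GM_SMI_CT_REG_AD(phy_reg) | GM_SMI_CT_OP_RD);

        /* poll until the read result is flagged as valid */
        while (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL))
                ;       /* illustration only: no timeout handling */

        return gma_read16(hw, port, GM_SMI_DATA);
}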
Loopback Off */ + GMC_PAUSE_ON = 1<<3, /* Pause On */ + GMC_PAUSE_OFF = 1<<2, /* Pause Off */ + GMC_RST_CLR = 1<<1, /* Clear GMAC Reset */ + GMC_RST_SET = 1<<0, /* Set GMAC Reset */ +}; + +/* GPHY_CTRL 32 bit GPHY Control Reg (YUKON only) */ +enum { + GPC_SEL_BDT = 1<<28, /* Select Bi-Dir. Transfer for MDC/MDIO */ + GPC_INT_POL_HI = 1<<27, /* IRQ Polarity is Active HIGH */ + GPC_75_OHM = 1<<26, /* Use 75 Ohm Termination instead of 50 */ + GPC_DIS_FC = 1<<25, /* Disable Automatic Fiber/Copper Detection */ + GPC_DIS_SLEEP = 1<<24, /* Disable Energy Detect */ + GPC_HWCFG_M_3 = 1<<23, /* HWCFG_MODE[3] */ + GPC_HWCFG_M_2 = 1<<22, /* HWCFG_MODE[2] */ + GPC_HWCFG_M_1 = 1<<21, /* HWCFG_MODE[1] */ + GPC_HWCFG_M_0 = 1<<20, /* HWCFG_MODE[0] */ + GPC_ANEG_0 = 1<<19, /* ANEG[0] */ + GPC_ENA_XC = 1<<18, /* Enable MDI crossover */ + GPC_DIS_125 = 1<<17, /* Disable 125 MHz clock */ + GPC_ANEG_3 = 1<<16, /* ANEG[3] */ + GPC_ANEG_2 = 1<<15, /* ANEG[2] */ + GPC_ANEG_1 = 1<<14, /* ANEG[1] */ + GPC_ENA_PAUSE = 1<<13, /* Enable Pause (SYM_OR_REM) */ + GPC_PHYADDR_4 = 1<<12, /* Bit 4 of Phy Addr */ + GPC_PHYADDR_3 = 1<<11, /* Bit 3 of Phy Addr */ + GPC_PHYADDR_2 = 1<<10, /* Bit 2 of Phy Addr */ + GPC_PHYADDR_1 = 1<<9, /* Bit 1 of Phy Addr */ + GPC_PHYADDR_0 = 1<<8, /* Bit 0 of Phy Addr */ + /* Bits 7..2: reserved */ + GPC_RST_CLR = 1<<1, /* Clear GPHY Reset */ + GPC_RST_SET = 1<<0, /* Set GPHY Reset */ +}; + +#define GPC_HWCFG_GMII_COP (GPC_HWCFG_M_3|GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0) +#define GPC_HWCFG_GMII_FIB (GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0) +#define GPC_ANEG_ADV_ALL_M (GPC_ANEG_3 | GPC_ANEG_2 | GPC_ANEG_1 | GPC_ANEG_0) + +/* forced speed and duplex mode (don't mix with other ANEG bits) */ +#define GPC_FRC10MBIT_HALF 0 +#define GPC_FRC10MBIT_FULL GPC_ANEG_0 +#define GPC_FRC100MBIT_HALF GPC_ANEG_1 +#define GPC_FRC100MBIT_FULL (GPC_ANEG_0 | GPC_ANEG_1) + +/* auto-negotiation with limited advertised speeds */ +/* mix only with master/slave settings (for copper) */ +#define GPC_ADV_1000_HALF GPC_ANEG_2 +#define GPC_ADV_1000_FULL GPC_ANEG_3 +#define GPC_ADV_ALL (GPC_ANEG_2 | GPC_ANEG_3) + +/* master/slave settings */ +/* only for copper with 1000 Mbps */ +#define GPC_FORCE_MASTER 0 +#define GPC_FORCE_SLAVE GPC_ANEG_0 +#define GPC_PREF_MASTER GPC_ANEG_1 +#define GPC_PREF_SLAVE (GPC_ANEG_1 | GPC_ANEG_0) + +/* GMAC_IRQ_SRC 8 bit GMAC Interrupt Source Reg (YUKON only) */ +/* GMAC_IRQ_MSK 8 bit GMAC Interrupt Mask Reg (YUKON only) */ +enum { + GM_IS_TX_CO_OV = 1<<5, /* Transmit Counter Overflow IRQ */ + GM_IS_RX_CO_OV = 1<<4, /* Receive Counter Overflow IRQ */ + GM_IS_TX_FF_UR = 1<<3, /* Transmit FIFO Underrun */ + GM_IS_TX_COMPL = 1<<2, /* Frame Transmission Complete */ + GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */ + GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */ + +#define GMAC_DEF_MSK (GM_IS_RX_FF_OR | GM_IS_TX_FF_UR) + +/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */ + /* Bits 15.. 
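(Aside on the GPHY control bits above, which also illustrate the _RST_SET/_RST_CLR convention used throughout this header: a sketch of configuring the GPHY for copper GMII with all speeds advertised, pulsing the reset by writing the value first with GPC_RST_SET and then with GPC_RST_CLR. skge_write32() and SK_REG() are assumed to be the driver's 32-bit register accessor and per-port offset helper; this is not the driver's literal init code.)

static void gphy_config_sketch(struct skge_hw *hw, int port)
{
        u32 ctrl = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP |
                   GPC_ENA_XC | GPC_ANEG_ADV_ALL_M | GPC_ENA_PAUSE |
                   GPC_HWCFG_GMII_COP;          /* copper GMII hardware config */

        skge_write32(hw, SK_REG(port, GPHY_CTRL), ctrl | GPC_RST_SET);
        skge_write32(hw, SK_REG(port, GPHY_CTRL), ctrl | GPC_RST_CLR);
}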
2: reserved */ + GMLC_RST_CLR = 1<<1, /* Clear GMAC Link Reset */ + GMLC_RST_SET = 1<<0, /* Set GMAC Link Reset */ + + +/* WOL_CTRL_STAT 16 bit WOL Control/Status Reg */ + WOL_CTL_LINK_CHG_OCC = 1<<15, + WOL_CTL_MAGIC_PKT_OCC = 1<<14, + WOL_CTL_PATTERN_OCC = 1<<13, + WOL_CTL_CLEAR_RESULT = 1<<12, + WOL_CTL_ENA_PME_ON_LINK_CHG = 1<<11, + WOL_CTL_DIS_PME_ON_LINK_CHG = 1<<10, + WOL_CTL_ENA_PME_ON_MAGIC_PKT = 1<<9, + WOL_CTL_DIS_PME_ON_MAGIC_PKT = 1<<8, + WOL_CTL_ENA_PME_ON_PATTERN = 1<<7, + WOL_CTL_DIS_PME_ON_PATTERN = 1<<6, + WOL_CTL_ENA_LINK_CHG_UNIT = 1<<5, + WOL_CTL_DIS_LINK_CHG_UNIT = 1<<4, + WOL_CTL_ENA_MAGIC_PKT_UNIT = 1<<3, + WOL_CTL_DIS_MAGIC_PKT_UNIT = 1<<2, + WOL_CTL_ENA_PATTERN_UNIT = 1<<1, + WOL_CTL_DIS_PATTERN_UNIT = 1<<0, +}; + +#define WOL_CTL_DEFAULT \ + (WOL_CTL_DIS_PME_ON_LINK_CHG | \ + WOL_CTL_DIS_PME_ON_PATTERN | \ + WOL_CTL_DIS_PME_ON_MAGIC_PKT | \ + WOL_CTL_DIS_LINK_CHG_UNIT | \ + WOL_CTL_DIS_PATTERN_UNIT | \ + WOL_CTL_DIS_MAGIC_PKT_UNIT) + +/* WOL_MATCH_CTL 8 bit WOL Match Control Reg */ +#define WOL_CTL_PATT_ENA(x) (1 << (x)) + + +/* XMAC II registers */ +enum { + XM_MMU_CMD = 0x0000, /* 16 bit r/w MMU Command Register */ + XM_POFF = 0x0008, /* 32 bit r/w Packet Offset Register */ + XM_BURST = 0x000c, /* 32 bit r/w Burst Register for half duplex*/ + XM_1L_VLAN_TAG = 0x0010, /* 16 bit r/w One Level VLAN Tag ID */ + XM_2L_VLAN_TAG = 0x0014, /* 16 bit r/w Two Level VLAN Tag ID */ + XM_TX_CMD = 0x0020, /* 16 bit r/w Transmit Command Register */ + XM_TX_RT_LIM = 0x0024, /* 16 bit r/w Transmit Retry Limit Register */ + XM_TX_STIME = 0x0028, /* 16 bit r/w Transmit Slottime Register */ + XM_TX_IPG = 0x002c, /* 16 bit r/w Transmit Inter Packet Gap */ + XM_RX_CMD = 0x0030, /* 16 bit r/w Receive Command Register */ + XM_PHY_ADDR = 0x0034, /* 16 bit r/w PHY Address Register */ + XM_PHY_DATA = 0x0038, /* 16 bit r/w PHY Data Register */ + XM_GP_PORT = 0x0040, /* 32 bit r/w General Purpose Port Register */ + XM_IMSK = 0x0044, /* 16 bit r/w Interrupt Mask Register */ + XM_ISRC = 0x0048, /* 16 bit r/o Interrupt Status Register */ + XM_HW_CFG = 0x004c, /* 16 bit r/w Hardware Config Register */ + XM_TX_LO_WM = 0x0060, /* 16 bit r/w Tx FIFO Low Water Mark */ + XM_TX_HI_WM = 0x0062, /* 16 bit r/w Tx FIFO High Water Mark */ + XM_TX_THR = 0x0064, /* 16 bit r/w Tx Request Threshold */ + XM_HT_THR = 0x0066, /* 16 bit r/w Host Request Threshold */ + XM_PAUSE_DA = 0x0068, /* NA reg r/w Pause Destination Address */ + XM_CTL_PARA = 0x0070, /* 32 bit r/w Control Parameter Register */ + XM_MAC_OPCODE = 0x0074, /* 16 bit r/w Opcode for MAC control frames */ + XM_MAC_PTIME = 0x0076, /* 16 bit r/w Pause time for MAC ctrl frames*/ + XM_TX_STAT = 0x0078, /* 32 bit r/o Tx Status LIFO Register */ + + XM_EXM_START = 0x0080, /* r/w Start Address of the EXM Regs */ +#define XM_EXM(reg) (XM_EXM_START + ((reg) << 3)) +}; + +enum { + XM_SRC_CHK = 0x0100, /* NA reg r/w Source Check Address Register */ + XM_SA = 0x0108, /* NA reg r/w Station Address Register */ + XM_HSM = 0x0110, /* 64 bit r/w Hash Match Address Registers */ + XM_RX_LO_WM = 0x0118, /* 16 bit r/w Receive Low Water Mark */ + XM_RX_HI_WM = 0x011a, /* 16 bit r/w Receive High Water Mark */ + XM_RX_THR = 0x011c, /* 32 bit r/w Receive Request Threshold */ + XM_DEV_ID = 0x0120, /* 32 bit r/o Device ID Register */ + XM_MODE = 0x0124, /* 32 bit r/w Mode Register */ + XM_LSA = 0x0128, /* NA reg r/o Last Source Register */ + XM_TS_READ = 0x0130, /* 32 bit r/o Time Stamp Read Register */ + XM_TS_LOAD = 0x0134, /* 32 bit r/o Time Stamp Load Value */ + 
XM_STAT_CMD = 0x0200, /* 16 bit r/w Statistics Command Register */ + XM_RX_CNT_EV = 0x0204, /* 32 bit r/o Rx Counter Event Register */ + XM_TX_CNT_EV = 0x0208, /* 32 bit r/o Tx Counter Event Register */ + XM_RX_EV_MSK = 0x020c, /* 32 bit r/w Rx Counter Event Mask */ + XM_TX_EV_MSK = 0x0210, /* 32 bit r/w Tx Counter Event Mask */ + XM_TXF_OK = 0x0280, /* 32 bit r/o Frames Transmitted OK Conuter */ + XM_TXO_OK_HI = 0x0284, /* 32 bit r/o Octets Transmitted OK High Cnt*/ + XM_TXO_OK_LO = 0x0288, /* 32 bit r/o Octets Transmitted OK Low Cnt */ + XM_TXF_BC_OK = 0x028c, /* 32 bit r/o Broadcast Frames Xmitted OK */ + XM_TXF_MC_OK = 0x0290, /* 32 bit r/o Multicast Frames Xmitted OK */ + XM_TXF_UC_OK = 0x0294, /* 32 bit r/o Unicast Frames Xmitted OK */ + XM_TXF_LONG = 0x0298, /* 32 bit r/o Tx Long Frame Counter */ + XM_TXE_BURST = 0x029c, /* 32 bit r/o Tx Burst Event Counter */ + XM_TXF_MPAUSE = 0x02a0, /* 32 bit r/o Tx Pause MAC Ctrl Frame Cnt */ + XM_TXF_MCTRL = 0x02a4, /* 32 bit r/o Tx MAC Ctrl Frame Counter */ + XM_TXF_SNG_COL = 0x02a8, /* 32 bit r/o Tx Single Collision Counter */ + XM_TXF_MUL_COL = 0x02ac, /* 32 bit r/o Tx Multiple Collision Counter */ + XM_TXF_ABO_COL = 0x02b0, /* 32 bit r/o Tx aborted due to Exces. Col. */ + XM_TXF_LAT_COL = 0x02b4, /* 32 bit r/o Tx Late Collision Counter */ + XM_TXF_DEF = 0x02b8, /* 32 bit r/o Tx Deferred Frame Counter */ + XM_TXF_EX_DEF = 0x02bc, /* 32 bit r/o Tx Excessive Deferall Counter */ + XM_TXE_FIFO_UR = 0x02c0, /* 32 bit r/o Tx FIFO Underrun Event Cnt */ + XM_TXE_CS_ERR = 0x02c4, /* 32 bit r/o Tx Carrier Sense Error Cnt */ + XM_TXP_UTIL = 0x02c8, /* 32 bit r/o Tx Utilization in % */ + XM_TXF_64B = 0x02d0, /* 32 bit r/o 64 Byte Tx Frame Counter */ + XM_TXF_127B = 0x02d4, /* 32 bit r/o 65-127 Byte Tx Frame Counter */ + XM_TXF_255B = 0x02d8, /* 32 bit r/o 128-255 Byte Tx Frame Counter */ + XM_TXF_511B = 0x02dc, /* 32 bit r/o 256-511 Byte Tx Frame Counter */ + XM_TXF_1023B = 0x02e0, /* 32 bit r/o 512-1023 Byte Tx Frame Counter*/ + XM_TXF_MAX_SZ = 0x02e4, /* 32 bit r/o 1024-MaxSize Byte Tx Frame Cnt*/ + XM_RXF_OK = 0x0300, /* 32 bit r/o Frames Received OK */ + XM_RXO_OK_HI = 0x0304, /* 32 bit r/o Octets Received OK High Cnt */ + XM_RXO_OK_LO = 0x0308, /* 32 bit r/o Octets Received OK Low Counter*/ + XM_RXF_BC_OK = 0x030c, /* 32 bit r/o Broadcast Frames Received OK */ + XM_RXF_MC_OK = 0x0310, /* 32 bit r/o Multicast Frames Received OK */ + XM_RXF_UC_OK = 0x0314, /* 32 bit r/o Unicast Frames Received OK */ + XM_RXF_MPAUSE = 0x0318, /* 32 bit r/o Rx Pause MAC Ctrl Frame Cnt */ + XM_RXF_MCTRL = 0x031c, /* 32 bit r/o Rx MAC Ctrl Frame Counter */ + XM_RXF_INV_MP = 0x0320, /* 32 bit r/o Rx invalid Pause Frame Cnt */ + XM_RXF_INV_MOC = 0x0324, /* 32 bit r/o Rx Frames with inv. 
MAC Opcode*/ + XM_RXE_BURST = 0x0328, /* 32 bit r/o Rx Burst Event Counter */ + XM_RXE_FMISS = 0x032c, /* 32 bit r/o Rx Missed Frames Event Cnt */ + XM_RXF_FRA_ERR = 0x0330, /* 32 bit r/o Rx Framing Error Counter */ + XM_RXE_FIFO_OV = 0x0334, /* 32 bit r/o Rx FIFO overflow Event Cnt */ + XM_RXF_JAB_PKT = 0x0338, /* 32 bit r/o Rx Jabber Packet Frame Cnt */ + XM_RXE_CAR_ERR = 0x033c, /* 32 bit r/o Rx Carrier Event Error Cnt */ + XM_RXF_LEN_ERR = 0x0340, /* 32 bit r/o Rx in Range Length Error */ + XM_RXE_SYM_ERR = 0x0344, /* 32 bit r/o Rx Symbol Error Counter */ + XM_RXE_SHT_ERR = 0x0348, /* 32 bit r/o Rx Short Event Error Cnt */ + XM_RXE_RUNT = 0x034c, /* 32 bit r/o Rx Runt Event Counter */ + XM_RXF_LNG_ERR = 0x0350, /* 32 bit r/o Rx Frame too Long Error Cnt */ + XM_RXF_FCS_ERR = 0x0354, /* 32 bit r/o Rx Frame Check Seq. Error Cnt */ + XM_RXF_CEX_ERR = 0x035c, /* 32 bit r/o Rx Carrier Ext Error Frame Cnt*/ + XM_RXP_UTIL = 0x0360, /* 32 bit r/o Rx Utilization in % */ + XM_RXF_64B = 0x0368, /* 32 bit r/o 64 Byte Rx Frame Counter */ + XM_RXF_127B = 0x036c, /* 32 bit r/o 65-127 Byte Rx Frame Counter */ + XM_RXF_255B = 0x0370, /* 32 bit r/o 128-255 Byte Rx Frame Counter */ + XM_RXF_511B = 0x0374, /* 32 bit r/o 256-511 Byte Rx Frame Counter */ + XM_RXF_1023B = 0x0378, /* 32 bit r/o 512-1023 Byte Rx Frame Counter*/ + XM_RXF_MAX_SZ = 0x037c, /* 32 bit r/o 1024-MaxSize Byte Rx Frame Cnt*/ +}; + +/* XM_MMU_CMD 16 bit r/w MMU Command Register */ +enum { + XM_MMU_PHY_RDY = 1<<12, /* Bit 12: PHY Read Ready */ + XM_MMU_PHY_BUSY = 1<<11, /* Bit 11: PHY Busy */ + XM_MMU_IGN_PF = 1<<10, /* Bit 10: Ignore Pause Frame */ + XM_MMU_MAC_LB = 1<<9, /* Bit 9: Enable MAC Loopback */ + XM_MMU_FRC_COL = 1<<7, /* Bit 7: Force Collision */ + XM_MMU_SIM_COL = 1<<6, /* Bit 6: Simulate Collision */ + XM_MMU_NO_PRE = 1<<5, /* Bit 5: No MDIO Preamble */ + XM_MMU_GMII_FD = 1<<4, /* Bit 4: GMII uses Full Duplex */ + XM_MMU_RAT_CTRL = 1<<3, /* Bit 3: Enable Rate Control */ + XM_MMU_GMII_LOOP= 1<<2, /* Bit 2: PHY is in Loopback Mode */ + XM_MMU_ENA_RX = 1<<1, /* Bit 1: Enable Receiver */ + XM_MMU_ENA_TX = 1<<0, /* Bit 0: Enable Transmitter */ +}; + + +/* XM_TX_CMD 16 bit r/w Transmit Command Register */ +enum { + XM_TX_BK2BK = 1<<6, /* Bit 6: Ignor Carrier Sense (Tx Bk2Bk)*/ + XM_TX_ENC_BYP = 1<<5, /* Bit 5: Set Encoder in Bypass Mode */ + XM_TX_SAM_LINE = 1<<4, /* Bit 4: (sc) Start utilization calculation */ + XM_TX_NO_GIG_MD = 1<<3, /* Bit 3: Disable Carrier Extension */ + XM_TX_NO_PRE = 1<<2, /* Bit 2: Disable Preamble Generation */ + XM_TX_NO_CRC = 1<<1, /* Bit 1: Disable CRC Generation */ + XM_TX_AUTO_PAD = 1<<0, /* Bit 0: Enable Automatic Padding */ +}; + +/* XM_TX_RT_LIM 16 bit r/w Transmit Retry Limit Register */ +#define XM_RT_LIM_MSK 0x1f /* Bit 4..0: Tx Retry Limit */ + + +/* XM_TX_STIME 16 bit r/w Transmit Slottime Register */ +#define XM_STIME_MSK 0x7f /* Bit 6..0: Tx Slottime bits */ + + +/* XM_TX_IPG 16 bit r/w Transmit Inter Packet Gap */ +#define XM_IPG_MSK 0xff /* Bit 7..0: IPG value bits */ + + +/* XM_RX_CMD 16 bit r/w Receive Command Register */ +enum { + XM_RX_LENERR_OK = 1<<8, /* Bit 8 don't set Rx Err bit for */ + /* inrange error packets */ + XM_RX_BIG_PK_OK = 1<<7, /* Bit 7 don't set Rx Err bit for */ + /* jumbo packets */ + XM_RX_IPG_CAP = 1<<6, /* Bit 6 repl. 
type field with IPG */ + XM_RX_TP_MD = 1<<5, /* Bit 5: Enable transparent Mode */ + XM_RX_STRIP_FCS = 1<<4, /* Bit 4: Enable FCS Stripping */ + XM_RX_SELF_RX = 1<<3, /* Bit 3: Enable Rx of own packets */ + XM_RX_SAM_LINE = 1<<2, /* Bit 2: (sc) Start utilization calculation */ + XM_RX_STRIP_PAD = 1<<1, /* Bit 1: Strip pad bytes of Rx frames */ + XM_RX_DIS_CEXT = 1<<0, /* Bit 0: Disable carrier ext. check */ +}; + + +/* XM_GP_PORT 32 bit r/w General Purpose Port Register */ +enum { + XM_GP_ANIP = 1<<6, /* Bit 6: (ro) Auto-Neg. in progress */ + XM_GP_FRC_INT = 1<<5, /* Bit 5: (sc) Force Interrupt */ + XM_GP_RES_MAC = 1<<3, /* Bit 3: (sc) Reset MAC and FIFOs */ + XM_GP_RES_STAT = 1<<2, /* Bit 2: (sc) Reset the statistics module */ + XM_GP_INP_ASS = 1<<0, /* Bit 0: (ro) GP Input Pin asserted */ +}; + + +/* XM_IMSK 16 bit r/w Interrupt Mask Register */ +/* XM_ISRC 16 bit r/o Interrupt Status Register */ +enum { + XM_IS_LNK_AE = 1<<14, /* Bit 14: Link Asynchronous Event */ + XM_IS_TX_ABORT = 1<<13, /* Bit 13: Transmit Abort, late Col. etc */ + XM_IS_FRC_INT = 1<<12, /* Bit 12: Force INT bit set in GP */ + XM_IS_INP_ASS = 1<<11, /* Bit 11: Input Asserted, GP bit 0 set */ + XM_IS_LIPA_RC = 1<<10, /* Bit 10: Link Partner requests config */ + XM_IS_RX_PAGE = 1<<9, /* Bit 9: Page Received */ + XM_IS_TX_PAGE = 1<<8, /* Bit 8: Next Page Loaded for Transmit */ + XM_IS_AND = 1<<7, /* Bit 7: Auto-Negotiation Done */ + XM_IS_TSC_OV = 1<<6, /* Bit 6: Time Stamp Counter Overflow */ + XM_IS_RXC_OV = 1<<5, /* Bit 5: Rx Counter Event Overflow */ + XM_IS_TXC_OV = 1<<4, /* Bit 4: Tx Counter Event Overflow */ + XM_IS_RXF_OV = 1<<3, /* Bit 3: Receive FIFO Overflow */ + XM_IS_TXF_UR = 1<<2, /* Bit 2: Transmit FIFO Underrun */ + XM_IS_TX_COMP = 1<<1, /* Bit 1: Frame Tx Complete */ + XM_IS_RX_COMP = 1<<0, /* Bit 0: Frame Rx Complete */ + + XM_IMSK_DISABLE = 0xffff, +}; + +/* XM_HW_CFG 16 bit r/w Hardware Config Register */ +enum { + XM_HW_GEN_EOP = 1<<3, /* Bit 3: generate End of Packet pulse */ + XM_HW_COM4SIG = 1<<2, /* Bit 2: use Comma Detect for Sig. Det.*/ + XM_HW_GMII_MD = 1<<0, /* Bit 0: GMII Interface selected */ +}; + + +/* XM_TX_LO_WM 16 bit r/w Tx FIFO Low Water Mark */ +/* XM_TX_HI_WM 16 bit r/w Tx FIFO High Water Mark */ +#define XM_TX_WM_MSK 0x01ff /* Bit 9.. 0 Tx FIFO Watermark bits */ + +/* XM_TX_THR 16 bit r/w Tx Request Threshold */ +/* XM_HT_THR 16 bit r/w Host Request Threshold */ +/* XM_RX_THR 16 bit r/w Rx Request Threshold */ +#define XM_THR_MSK 0x03ff /* Bit 10.. 0 Rx/Tx Request Threshold bits */ + + +/* XM_TX_STAT 32 bit r/o Tx Status LIFO Register */ +enum { + XM_ST_VALID = (1UL<<31), /* Bit 31: Status Valid */ + XM_ST_BYTE_CNT = (0x3fffL<<17), /* Bit 30..17: Tx frame Length */ + XM_ST_RETRY_CNT = (0x1fL<<12), /* Bit 16..12: Retry Count */ + XM_ST_EX_COL = 1<<11, /* Bit 11: Excessive Collisions */ + XM_ST_EX_DEF = 1<<10, /* Bit 10: Excessive Deferral */ + XM_ST_BURST = 1<<9, /* Bit 9: p. 
xmitted in burst md*/ + XM_ST_DEFER = 1<<8, /* Bit 8: packet was defered */ + XM_ST_BC = 1<<7, /* Bit 7: Broadcast packet */ + XM_ST_MC = 1<<6, /* Bit 6: Multicast packet */ + XM_ST_UC = 1<<5, /* Bit 5: Unicast packet */ + XM_ST_TX_UR = 1<<4, /* Bit 4: FIFO Underrun occurred */ + XM_ST_CS_ERR = 1<<3, /* Bit 3: Carrier Sense Error */ + XM_ST_LAT_COL = 1<<2, /* Bit 2: Late Collision Error */ + XM_ST_MUL_COL = 1<<1, /* Bit 1: Multiple Collisions */ + XM_ST_SGN_COL = 1<<0, /* Bit 0: Single Collision */ +}; + +/* XM_RX_LO_WM 16 bit r/w Receive Low Water Mark */ +/* XM_RX_HI_WM 16 bit r/w Receive High Water Mark */ +#define XM_RX_WM_MSK 0x03ff /* Bit 11.. 0: Rx FIFO Watermark bits */ + + +/* XM_DEV_ID 32 bit r/o Device ID Register */ +#define XM_DEV_OUI (0x00ffffffUL<<8) /* Bit 31..8: Device OUI */ +#define XM_DEV_REV (0x07L << 5) /* Bit 7..5: Chip Rev Num */ + + +/* XM_MODE 32 bit r/w Mode Register */ +enum { + XM_MD_ENA_REJ = 1<<26, /* Bit 26: Enable Frame Reject */ + XM_MD_SPOE_E = 1<<25, /* Bit 25: Send Pause on Edge */ + /* extern generated */ + XM_MD_TX_REP = 1<<24, /* Bit 24: Transmit Repeater Mode */ + XM_MD_SPOFF_I = 1<<23, /* Bit 23: Send Pause on FIFO full */ + /* intern generated */ + XM_MD_LE_STW = 1<<22, /* Bit 22: Rx Stat Word in Little Endian */ + XM_MD_TX_CONT = 1<<21, /* Bit 21: Send Continuous */ + XM_MD_TX_PAUSE = 1<<20, /* Bit 20: (sc) Send Pause Frame */ + XM_MD_ATS = 1<<19, /* Bit 19: Append Time Stamp */ + XM_MD_SPOL_I = 1<<18, /* Bit 18: Send Pause on Low */ + /* intern generated */ + XM_MD_SPOH_I = 1<<17, /* Bit 17: Send Pause on High */ + /* intern generated */ + XM_MD_CAP = 1<<16, /* Bit 16: Check Address Pair */ + XM_MD_ENA_HASH = 1<<15, /* Bit 15: Enable Hashing */ + XM_MD_CSA = 1<<14, /* Bit 14: Check Station Address */ + XM_MD_CAA = 1<<13, /* Bit 13: Check Address Array */ + XM_MD_RX_MCTRL = 1<<12, /* Bit 12: Rx MAC Control Frame */ + XM_MD_RX_RUNT = 1<<11, /* Bit 11: Rx Runt Frames */ + XM_MD_RX_IRLE = 1<<10, /* Bit 10: Rx in Range Len Err Frame */ + XM_MD_RX_LONG = 1<<9, /* Bit 9: Rx Long Frame */ + XM_MD_RX_CRCE = 1<<8, /* Bit 8: Rx CRC Error Frame */ + XM_MD_RX_ERR = 1<<7, /* Bit 7: Rx Error Frame */ + XM_MD_DIS_UC = 1<<6, /* Bit 6: Disable Rx Unicast */ + XM_MD_DIS_MC = 1<<5, /* Bit 5: Disable Rx Multicast */ + XM_MD_DIS_BC = 1<<4, /* Bit 4: Disable Rx Broadcast */ + XM_MD_ENA_PROM = 1<<3, /* Bit 3: Enable Promiscuous */ + XM_MD_ENA_BE = 1<<2, /* Bit 2: Enable Big Endian */ + XM_MD_FTF = 1<<1, /* Bit 1: (sc) Flush Tx FIFO */ + XM_MD_FRF = 1<<0, /* Bit 0: (sc) Flush Rx FIFO */ +}; + +#define XM_PAUSE_MODE (XM_MD_SPOE_E | XM_MD_SPOL_I | XM_MD_SPOH_I) +#define XM_DEF_MODE (XM_MD_RX_RUNT | XM_MD_RX_IRLE | XM_MD_RX_LONG |\ + XM_MD_RX_CRCE | XM_MD_RX_ERR | XM_MD_CSA) + +/* XM_STAT_CMD 16 bit r/w Statistics Command Register */ +enum { + XM_SC_SNP_RXC = 1<<5, /* Bit 5: (sc) Snap Rx Counters */ + XM_SC_SNP_TXC = 1<<4, /* Bit 4: (sc) Snap Tx Counters */ + XM_SC_CP_RXC = 1<<3, /* Bit 3: Copy Rx Counters Continuously */ + XM_SC_CP_TXC = 1<<2, /* Bit 2: Copy Tx Counters Continuously */ + XM_SC_CLR_RXC = 1<<1, /* Bit 1: (sc) Clear Rx Counters */ + XM_SC_CLR_TXC = 1<<0, /* Bit 0: (sc) Clear Tx Counters */ +}; + + +/* XM_RX_CNT_EV 32 bit r/o Rx Counter Event Register */ +/* XM_RX_EV_MSK 32 bit r/w Rx Counter Event Mask */ +enum { + XMR_MAX_SZ_OV = 1<<31, /* Bit 31: 1024-MaxSize Rx Cnt Ov*/ + XMR_1023B_OV = 1<<30, /* Bit 30: 512-1023Byte Rx Cnt Ov*/ + XMR_511B_OV = 1<<29, /* Bit 29: 256-511 Byte Rx Cnt Ov*/ + XMR_255B_OV = 1<<28, /* Bit 28: 128-255 Byte Rx Cnt Ov*/ + 
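+#if 0
+/* Illustrative sketch, not part of this driver: how one of the XMAC Rx
+ * statistics counters (e.g. XM_RXF_OK, defined earlier in this file) could
+ * be read.  It assumes the xm_write16()/xm_read32() accessors defined
+ * further down in this header, and that XM_SC_SNP_RXC latches the Rx
+ * counters as its comment suggests.
+ */
+static u32 example_xm_rx_frames_ok(const struct skge_hw *hw, int port)
+{
+	/* snapshot the Rx counters, then read "Frames Received OK" */
+	xm_write16(hw, port, XM_STAT_CMD, XM_SC_SNP_RXC);
+	return xm_read32(hw, port, XM_RXF_OK);
+}
+#endif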
XMR_127B_OV = 1<<27, /* Bit 27: 65-127 Byte Rx Cnt Ov */ + XMR_64B_OV = 1<<26, /* Bit 26: 64 Byte Rx Cnt Ov */ + XMR_UTIL_OV = 1<<25, /* Bit 25: Rx Util Cnt Overflow */ + XMR_UTIL_UR = 1<<24, /* Bit 24: Rx Util Cnt Underrun */ + XMR_CEX_ERR_OV = 1<<23, /* Bit 23: CEXT Err Cnt Ov */ + XMR_FCS_ERR_OV = 1<<21, /* Bit 21: Rx FCS Error Cnt Ov */ + XMR_LNG_ERR_OV = 1<<20, /* Bit 20: Rx too Long Err Cnt Ov*/ + XMR_RUNT_OV = 1<<19, /* Bit 19: Runt Event Cnt Ov */ + XMR_SHT_ERR_OV = 1<<18, /* Bit 18: Rx Short Ev Err Cnt Ov*/ + XMR_SYM_ERR_OV = 1<<17, /* Bit 17: Rx Sym Err Cnt Ov */ + XMR_CAR_ERR_OV = 1<<15, /* Bit 15: Rx Carr Ev Err Cnt Ov */ + XMR_JAB_PKT_OV = 1<<14, /* Bit 14: Rx Jabb Packet Cnt Ov */ + XMR_FIFO_OV = 1<<13, /* Bit 13: Rx FIFO Ov Ev Cnt Ov */ + XMR_FRA_ERR_OV = 1<<12, /* Bit 12: Rx Framing Err Cnt Ov */ + XMR_FMISS_OV = 1<<11, /* Bit 11: Rx Missed Ev Cnt Ov */ + XMR_BURST = 1<<10, /* Bit 10: Rx Burst Event Cnt Ov */ + XMR_INV_MOC = 1<<9, /* Bit 9: Rx with inv. MAC OC Ov*/ + XMR_INV_MP = 1<<8, /* Bit 8: Rx inv Pause Frame Ov */ + XMR_MCTRL_OV = 1<<7, /* Bit 7: Rx MAC Ctrl-F Cnt Ov */ + XMR_MPAUSE_OV = 1<<6, /* Bit 6: Rx Pause MAC Ctrl-F Ov*/ + XMR_UC_OK_OV = 1<<5, /* Bit 5: Rx Unicast Frame CntOv*/ + XMR_MC_OK_OV = 1<<4, /* Bit 4: Rx Multicast Cnt Ov */ + XMR_BC_OK_OV = 1<<3, /* Bit 3: Rx Broadcast Cnt Ov */ + XMR_OK_LO_OV = 1<<2, /* Bit 2: Octets Rx OK Low CntOv*/ + XMR_OK_HI_OV = 1<<1, /* Bit 1: Octets Rx OK Hi Cnt Ov*/ + XMR_OK_OV = 1<<0, /* Bit 0: Frames Received Ok Ov */ +}; + +#define XMR_DEF_MSK (XMR_OK_LO_OV | XMR_OK_HI_OV) + +/* XM_TX_CNT_EV 32 bit r/o Tx Counter Event Register */ +/* XM_TX_EV_MSK 32 bit r/w Tx Counter Event Mask */ +enum { + XMT_MAX_SZ_OV = 1<<25, /* Bit 25: 1024-MaxSize Tx Cnt Ov*/ + XMT_1023B_OV = 1<<24, /* Bit 24: 512-1023Byte Tx Cnt Ov*/ + XMT_511B_OV = 1<<23, /* Bit 23: 256-511 Byte Tx Cnt Ov*/ + XMT_255B_OV = 1<<22, /* Bit 22: 128-255 Byte Tx Cnt Ov*/ + XMT_127B_OV = 1<<21, /* Bit 21: 65-127 Byte Tx Cnt Ov */ + XMT_64B_OV = 1<<20, /* Bit 20: 64 Byte Tx Cnt Ov */ + XMT_UTIL_OV = 1<<19, /* Bit 19: Tx Util Cnt Overflow */ + XMT_UTIL_UR = 1<<18, /* Bit 18: Tx Util Cnt Underrun */ + XMT_CS_ERR_OV = 1<<17, /* Bit 17: Tx Carr Sen Err Cnt Ov*/ + XMT_FIFO_UR_OV = 1<<16, /* Bit 16: Tx FIFO Ur Ev Cnt Ov */ + XMT_EX_DEF_OV = 1<<15, /* Bit 15: Tx Ex Deferall Cnt Ov */ + XMT_DEF = 1<<14, /* Bit 14: Tx Deferred Cnt Ov */ + XMT_LAT_COL_OV = 1<<13, /* Bit 13: Tx Late Col Cnt Ov */ + XMT_ABO_COL_OV = 1<<12, /* Bit 12: Tx abo dueto Ex Col Ov*/ + XMT_MUL_COL_OV = 1<<11, /* Bit 11: Tx Mult Col Cnt Ov */ + XMT_SNG_COL = 1<<10, /* Bit 10: Tx Single Col Cnt Ov */ + XMT_MCTRL_OV = 1<<9, /* Bit 9: Tx MAC Ctrl Counter Ov*/ + XMT_MPAUSE = 1<<8, /* Bit 8: Tx Pause MAC Ctrl-F Ov*/ + XMT_BURST = 1<<7, /* Bit 7: Tx Burst Event Cnt Ov */ + XMT_LONG = 1<<6, /* Bit 6: Tx Long Frame Cnt Ov */ + XMT_UC_OK_OV = 1<<5, /* Bit 5: Tx Unicast Cnt Ov */ + XMT_MC_OK_OV = 1<<4, /* Bit 4: Tx Multicast Cnt Ov */ + XMT_BC_OK_OV = 1<<3, /* Bit 3: Tx Broadcast Cnt Ov */ + XMT_OK_LO_OV = 1<<2, /* Bit 2: Octets Tx OK Low CntOv*/ + XMT_OK_HI_OV = 1<<1, /* Bit 1: Octets Tx OK Hi Cnt Ov*/ + XMT_OK_OV = 1<<0, /* Bit 0: Frames Tx Ok Ov */ +}; + +#define XMT_DEF_MSK (XMT_OK_LO_OV | XMT_OK_HI_OV) + +struct skge_rx_desc { + u32 control; + u32 next_offset; + u32 dma_lo; + u32 dma_hi; + u32 status; + u32 timestamp; + u16 csum2; + u16 csum1; + u16 csum2_start; + u16 csum1_start; +}; + +struct skge_tx_desc { + u32 control; + u32 next_offset; + u32 dma_lo; + u32 dma_hi; + u32 status; + u32 csum_offs; + u16 
csum_write; + u16 csum_start; + u32 rsvd; +}; + +struct skge_element { + struct skge_element *next; + void *desc; + struct sk_buff *skb; + DEFINE_DMA_UNMAP_ADDR(mapaddr); + DEFINE_DMA_UNMAP_LEN(maplen); +}; + +struct skge_ring { + struct skge_element *to_clean; + struct skge_element *to_use; + struct skge_element *start; + unsigned long count; +}; + + +struct skge_hw { + void __iomem *regs; + struct pci_dev *pdev; + spinlock_t hw_lock; + u32 intr_mask; + struct net_device *dev[2]; + + u8 chip_id; + u8 chip_rev; + u8 copper; + u8 ports; + u8 phy_type; + + u32 ram_size; + u32 ram_offset; + u16 phy_addr; + spinlock_t phy_lock; + struct tasklet_struct phy_task; + + char irq_name[]; /* skge@pci:000:04:00.0 */ +}; + +enum pause_control { + FLOW_MODE_NONE = 1, /* No Flow-Control */ + FLOW_MODE_LOC_SEND = 2, /* Local station sends PAUSE */ + FLOW_MODE_SYMMETRIC = 3, /* Both stations may send PAUSE */ + FLOW_MODE_SYM_OR_REM = 4, /* Both stations may send PAUSE or + * just the remote station may send PAUSE + */ +}; + +enum pause_status { + FLOW_STAT_INDETERMINATED=0, /* indeterminated */ + FLOW_STAT_NONE, /* No Flow Control */ + FLOW_STAT_REM_SEND, /* Remote Station sends PAUSE */ + FLOW_STAT_LOC_SEND, /* Local station sends PAUSE */ + FLOW_STAT_SYMMETRIC, /* Both station may send PAUSE */ +}; + + +struct skge_port { + struct skge_hw *hw; + struct net_device *netdev; + struct napi_struct napi; + int port; + u32 msg_enable; + + struct skge_ring tx_ring; + + struct skge_ring rx_ring ____cacheline_aligned_in_smp; + unsigned int rx_buf_size; + + struct timer_list link_timer; + enum pause_control flow_control; + enum pause_status flow_status; + u8 blink_on; + u8 wol; + u8 autoneg; /* AUTONEG_ENABLE, AUTONEG_DISABLE */ + u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */ + u16 speed; /* SPEED_1000, SPEED_100, ... */ + u32 advertising; + + void *mem; /* PCI memory for rings */ + dma_addr_t dma; + unsigned long mem_size; +#ifdef CONFIG_SKGE_DEBUG + struct dentry *debugfs; +#endif +}; + + +/* Register accessor for memory mapped device */ +static inline u32 skge_read32(const struct skge_hw *hw, int reg) +{ + return readl(hw->regs + reg); +} + +static inline u16 skge_read16(const struct skge_hw *hw, int reg) +{ + return readw(hw->regs + reg); +} + +static inline u8 skge_read8(const struct skge_hw *hw, int reg) +{ + return readb(hw->regs + reg); +} + +static inline void skge_write32(const struct skge_hw *hw, int reg, u32 val) +{ + writel(val, hw->regs + reg); +} + +static inline void skge_write16(const struct skge_hw *hw, int reg, u16 val) +{ + writew(val, hw->regs + reg); +} + +static inline void skge_write8(const struct skge_hw *hw, int reg, u8 val) +{ + writeb(val, hw->regs + reg); +} + +/* MAC Related Registers inside the device. 
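+ * The per-port copies of these registers sit 0x80 bytes apart; SK_REG(port, reg)
+ * below encodes that stride as ((port) << 7) + (reg), so for example
+ * SK_REG(1, GMAC_CTRL) selects port 1's copy of the GMAC control register.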
*/ +#define SK_REG(port,reg) (((port)<<7)+(u16)(reg)) +#define SK_XMAC_REG(port, reg) \ + ((BASE_XMAC_1 + (port) * (BASE_XMAC_2 - BASE_XMAC_1)) | (reg) << 1) + +static inline u32 xm_read32(const struct skge_hw *hw, int port, int reg) +{ + u32 v; + v = skge_read16(hw, SK_XMAC_REG(port, reg)); + v |= (u32)skge_read16(hw, SK_XMAC_REG(port, reg+2)) << 16; + return v; +} + +static inline u16 xm_read16(const struct skge_hw *hw, int port, int reg) +{ + return skge_read16(hw, SK_XMAC_REG(port,reg)); +} + +static inline void xm_write32(const struct skge_hw *hw, int port, int r, u32 v) +{ + skge_write16(hw, SK_XMAC_REG(port,r), v & 0xffff); + skge_write16(hw, SK_XMAC_REG(port,r+2), v >> 16); +} + +static inline void xm_write16(const struct skge_hw *hw, int port, int r, u16 v) +{ + skge_write16(hw, SK_XMAC_REG(port,r), v); +} + +static inline void xm_outhash(const struct skge_hw *hw, int port, int reg, + const u8 *hash) +{ + xm_write16(hw, port, reg, (u16)hash[0] | ((u16)hash[1] << 8)); + xm_write16(hw, port, reg+2, (u16)hash[2] | ((u16)hash[3] << 8)); + xm_write16(hw, port, reg+4, (u16)hash[4] | ((u16)hash[5] << 8)); + xm_write16(hw, port, reg+6, (u16)hash[6] | ((u16)hash[7] << 8)); +} + +static inline void xm_outaddr(const struct skge_hw *hw, int port, int reg, + const u8 *addr) +{ + xm_write16(hw, port, reg, (u16)addr[0] | ((u16)addr[1] << 8)); + xm_write16(hw, port, reg+2, (u16)addr[2] | ((u16)addr[3] << 8)); + xm_write16(hw, port, reg+4, (u16)addr[4] | ((u16)addr[5] << 8)); +} + +#define SK_GMAC_REG(port,reg) \ + (BASE_GMAC_1 + (port) * (BASE_GMAC_2-BASE_GMAC_1) + (reg)) + +static inline u16 gma_read16(const struct skge_hw *hw, int port, int reg) +{ + return skge_read16(hw, SK_GMAC_REG(port,reg)); +} + +static inline u32 gma_read32(const struct skge_hw *hw, int port, int reg) +{ + return (u32) skge_read16(hw, SK_GMAC_REG(port,reg)) + | ((u32)skge_read16(hw, SK_GMAC_REG(port,reg+4)) << 16); +} + +static inline void gma_write16(const struct skge_hw *hw, int port, int r, u16 v) +{ + skge_write16(hw, SK_GMAC_REG(port,r), v); +} + +static inline void gma_set_addr(struct skge_hw *hw, int port, int reg, + const u8 *addr) +{ + gma_write16(hw, port, reg, (u16) addr[0] | ((u16) addr[1] << 8)); + gma_write16(hw, port, reg+4,(u16) addr[2] | ((u16) addr[3] << 8)); + gma_write16(hw, port, reg+8,(u16) addr[4] | ((u16) addr[5] << 8)); +} + +#endif diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c new file mode 100644 index 000000000..25981a7a4 --- /dev/null +++ b/drivers/net/ethernet/marvell/sky2.c @@ -0,0 +1,5297 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * New driver for Marvell Yukon 2 chipset. + * Based on earlier sk98lin, and skge driver. + * + * This driver intentionally does not support all the features + * of the original driver such as link fail-over and link management because + * those should be done at higher levels. 
+ * + * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org> + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/crc32.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/dma-mapping.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/ip.h> +#include <linux/slab.h> +#include <net/ip.h> +#include <linux/tcp.h> +#include <linux/in.h> +#include <linux/delay.h> +#include <linux/workqueue.h> +#include <linux/if_vlan.h> +#include <linux/prefetch.h> +#include <linux/debugfs.h> +#include <linux/mii.h> +#include <linux/of_device.h> +#include <linux/of_net.h> +#include <linux/dmi.h> + +#include <asm/irq.h> + +#include "sky2.h" + +#define DRV_NAME "sky2" +#define DRV_VERSION "1.30" + +/* + * The Yukon II chipset takes 64 bit command blocks (called list elements) + * that are organized into three (receive, transmit, status) different rings + * similar to Tigon3. + */ + +#define RX_LE_SIZE 1024 +#define RX_LE_BYTES (RX_LE_SIZE*sizeof(struct sky2_rx_le)) +#define RX_MAX_PENDING (RX_LE_SIZE/6 - 2) +#define RX_DEF_PENDING RX_MAX_PENDING + +/* This is the worst case number of transmit list elements for a single skb: + VLAN:GSO + CKSUM + Data + skb_frags * DMA */ +#define MAX_SKB_TX_LE (2 + (sizeof(dma_addr_t)/sizeof(u32))*(MAX_SKB_FRAGS+1)) +#define TX_MIN_PENDING (MAX_SKB_TX_LE+1) +#define TX_MAX_PENDING 1024 +#define TX_DEF_PENDING 63 + +#define TX_WATCHDOG (5 * HZ) +#define NAPI_WEIGHT 64 +#define PHY_RETRIES 1000 + +#define SKY2_EEPROM_MAGIC 0x9955aabb + +#define RING_NEXT(x, s) (((x)+1) & ((s)-1)) + +static const u32 default_msg = + NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK + | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR + | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN; + +static int debug = -1; /* defaults above */ +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +static int copybreak __read_mostly = 128; +module_param(copybreak, int, 0); +MODULE_PARM_DESC(copybreak, "Receive copy threshold"); + +static int disable_msi = -1; +module_param(disable_msi, int, 0); +MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)"); + +static int legacy_pme = 0; +module_param(legacy_pme, int, 0); +MODULE_PARM_DESC(legacy_pme, "Legacy power management"); + +static const struct pci_device_id sky2_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, /* SK-9Sxx */ + { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */ + { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E01) }, /* SK-9E21M */ + { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) }, /* DGE-560T */ + { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) }, /* DGE-550SX */ + { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B02) }, /* DGE-560SX */ + { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B03) }, /* DGE-550T */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) }, /* 88E8021 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) }, /* 88E8022 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) }, /* 88E8061 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4343) }, /* 88E8062 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4344) }, /* 88E8021 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4345) }, /* 88E8022 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4346) }, /* 88E8061 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4347) }, /* 88E8062 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4350) }, /* 88E8035 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4351) }, /* 88E8036 */ + { 
PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4352) }, /* 88E8038 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4353) }, /* 88E8039 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4354) }, /* 88E8040 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4355) }, /* 88E8040T */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4356) }, /* 88EC033 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4357) }, /* 88E8042 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x435A) }, /* 88E8048 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4360) }, /* 88E8052 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) }, /* 88E8050 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) }, /* 88E8053 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) }, /* 88E8055 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) }, /* 88E8056 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4365) }, /* 88E8070 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4369) }, /* 88EC042 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436A) }, /* 88E8058 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436B) }, /* 88E8071 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436C) }, /* 88E8072 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436D) }, /* 88E8055 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4382) }, /* 88E8079 */ + { 0 } +}; + +MODULE_DEVICE_TABLE(pci, sky2_id_table); + +/* Avoid conditionals by using array */ +static const unsigned txqaddr[] = { Q_XA1, Q_XA2 }; +static const unsigned rxqaddr[] = { Q_R1, Q_R2 }; +static const u32 portirq_msk[] = { Y2_IS_PORT_1, Y2_IS_PORT_2 }; + +static void sky2_set_multicast(struct net_device *dev); +static irqreturn_t sky2_intr(int irq, void *dev_id); + +/* Access to PHY via serial interconnect */ +static int gm_phy_write(struct sky2_hw *hw, unsigned port, u16 reg, u16 val) +{ + int i; + + gma_write16(hw, port, GM_SMI_DATA, val); + gma_write16(hw, port, GM_SMI_CTRL, + GM_SMI_CT_PHY_AD(PHY_ADDR_MARV) | GM_SMI_CT_REG_AD(reg)); + + for (i = 0; i < PHY_RETRIES; i++) { + u16 ctrl = gma_read16(hw, port, GM_SMI_CTRL); + if (ctrl == 0xffff) + goto io_error; + + if (!(ctrl & GM_SMI_CT_BUSY)) + return 0; + + udelay(10); + } + + dev_warn(&hw->pdev->dev, "%s: phy write timeout\n", hw->dev[port]->name); + return -ETIMEDOUT; + +io_error: + dev_err(&hw->pdev->dev, "%s: phy I/O error\n", hw->dev[port]->name); + return -EIO; +} + +static int __gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg, u16 *val) +{ + int i; + + gma_write16(hw, port, GM_SMI_CTRL, GM_SMI_CT_PHY_AD(PHY_ADDR_MARV) + | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD); + + for (i = 0; i < PHY_RETRIES; i++) { + u16 ctrl = gma_read16(hw, port, GM_SMI_CTRL); + if (ctrl == 0xffff) + goto io_error; + + if (ctrl & GM_SMI_CT_RD_VAL) { + *val = gma_read16(hw, port, GM_SMI_DATA); + return 0; + } + + udelay(10); + } + + dev_warn(&hw->pdev->dev, "%s: phy read timeout\n", hw->dev[port]->name); + return -ETIMEDOUT; +io_error: + dev_err(&hw->pdev->dev, "%s: phy I/O error\n", hw->dev[port]->name); + return -EIO; +} + +static inline u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg) +{ + u16 v = 0; + __gm_phy_read(hw, port, reg, &v); + return v; +} + + +static void sky2_power_on(struct sky2_hw *hw) +{ + /* switch power to VCC (WA for VAUX problem) */ + sky2_write8(hw, 
B0_POWER_CTRL, + PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON); + + /* disable Core Clock Division, */ + sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS); + + if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1) + /* enable bits are inverted */ + sky2_write8(hw, B2_Y2_CLK_GATE, + Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS | + Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS | + Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS); + else + sky2_write8(hw, B2_Y2_CLK_GATE, 0); + + if (hw->flags & SKY2_HW_ADV_POWER_CTL) { + u32 reg; + + sky2_pci_write32(hw, PCI_DEV_REG3, 0); + + reg = sky2_pci_read32(hw, PCI_DEV_REG4); + /* set all bits to 0 except bits 15..12 and 8 */ + reg &= P_ASPM_CONTROL_MSK; + sky2_pci_write32(hw, PCI_DEV_REG4, reg); + + reg = sky2_pci_read32(hw, PCI_DEV_REG5); + /* set all bits to 0 except bits 28 & 27 */ + reg &= P_CTL_TIM_VMAIN_AV_MSK; + sky2_pci_write32(hw, PCI_DEV_REG5, reg); + + sky2_pci_write32(hw, PCI_CFG_REG_1, 0); + + sky2_write16(hw, B0_CTST, Y2_HW_WOL_ON); + + /* Enable workaround for dev 4.107 on Yukon-Ultra & Extreme */ + reg = sky2_read32(hw, B2_GP_IO); + reg |= GLB_GPIO_STAT_RACE_DIS; + sky2_write32(hw, B2_GP_IO, reg); + + sky2_read32(hw, B2_GP_IO); + } + + /* Turn on "driver loaded" LED */ + sky2_write16(hw, B0_CTST, Y2_LED_STAT_ON); +} + +static void sky2_power_aux(struct sky2_hw *hw) +{ + if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1) + sky2_write8(hw, B2_Y2_CLK_GATE, 0); + else + /* enable bits are inverted */ + sky2_write8(hw, B2_Y2_CLK_GATE, + Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS | + Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS | + Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS); + + /* switch power to VAUX if supported and PME from D3cold */ + if ( (sky2_read32(hw, B0_CTST) & Y2_VAUX_AVAIL) && + pci_pme_capable(hw->pdev, PCI_D3cold)) + sky2_write8(hw, B0_POWER_CTRL, + (PC_VAUX_ENA | PC_VCC_ENA | + PC_VAUX_ON | PC_VCC_OFF)); + + /* turn off "driver loaded LED" */ + sky2_write16(hw, B0_CTST, Y2_LED_STAT_OFF); +} + +static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port) +{ + u16 reg; + + /* disable all GMAC IRQ's */ + sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0); + + gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */ + gma_write16(hw, port, GM_MC_ADDR_H2, 0); + gma_write16(hw, port, GM_MC_ADDR_H3, 0); + gma_write16(hw, port, GM_MC_ADDR_H4, 0); + + reg = gma_read16(hw, port, GM_RX_CTRL); + reg |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA; + gma_write16(hw, port, GM_RX_CTRL, reg); +} + +/* flow control to advertise bits */ +static const u16 copper_fc_adv[] = { + [FC_NONE] = 0, + [FC_TX] = PHY_M_AN_ASP, + [FC_RX] = PHY_M_AN_PC, + [FC_BOTH] = PHY_M_AN_PC | PHY_M_AN_ASP, +}; + +/* flow control to advertise bits when using 1000BaseX */ +static const u16 fiber_fc_adv[] = { + [FC_NONE] = PHY_M_P_NO_PAUSE_X, + [FC_TX] = PHY_M_P_ASYM_MD_X, + [FC_RX] = PHY_M_P_SYM_MD_X, + [FC_BOTH] = PHY_M_P_BOTH_MD_X, +}; + +/* flow control to GMA disable bits */ +static const u16 gm_fc_disable[] = { + [FC_NONE] = GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS, + [FC_TX] = GM_GPCR_FC_RX_DIS, + [FC_RX] = GM_GPCR_FC_TX_DIS, + [FC_BOTH] = 0, +}; + + +static void sky2_phy_init(struct sky2_hw *hw, unsigned port) +{ + struct sky2_port *sky2 = netdev_priv(hw->dev[port]); + u16 ctrl, ct1000, adv, pg, ledctrl, ledover, reg; + + if ( (sky2->flags & SKY2_FLAG_AUTO_SPEED) && + !(hw->flags & SKY2_HW_NEWER_PHY)) { + u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); + + ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK | + PHY_M_EC_MAC_S_MSK); + ectrl 
|= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ); + + /* on PHY 88E1040 Rev.D0 (and newer) downshift control changed */ + if (hw->chip_id == CHIP_ID_YUKON_EC) + /* set downshift counter to 3x and enable downshift */ + ectrl |= PHY_M_EC_DSC_2(2) | PHY_M_EC_DOWN_S_ENA; + else + /* set master & slave downshift counter to 1x */ + ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1); + + gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl); + } + + ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); + if (sky2_is_copper(hw)) { + if (!(hw->flags & SKY2_HW_GIGABIT)) { + /* enable automatic crossover */ + ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1; + + if (hw->chip_id == CHIP_ID_YUKON_FE_P && + hw->chip_rev == CHIP_REV_YU_FE2_A0) { + u16 spec; + + /* Enable Class A driver for FE+ A0 */ + spec = gm_phy_read(hw, port, PHY_MARV_FE_SPEC_2); + spec |= PHY_M_FESC_SEL_CL_A; + gm_phy_write(hw, port, PHY_MARV_FE_SPEC_2, spec); + } + } else { + /* disable energy detect */ + ctrl &= ~PHY_M_PC_EN_DET_MSK; + + /* enable automatic crossover */ + ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO); + + /* downshift on PHY 88E1112 and 88E1149 is changed */ + if ( (sky2->flags & SKY2_FLAG_AUTO_SPEED) && + (hw->flags & SKY2_HW_NEWER_PHY)) { + /* set downshift counter to 3x and enable downshift */ + ctrl &= ~PHY_M_PC_DSC_MSK; + ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA; + } + } + } else { + /* workaround for deviation #4.88 (CRC errors) */ + /* disable Automatic Crossover */ + + ctrl &= ~PHY_M_PC_MDIX_MSK; + } + + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); + + /* special setup for PHY 88E1112 Fiber */ + if (hw->chip_id == CHIP_ID_YUKON_XL && (hw->flags & SKY2_HW_FIBRE_PHY)) { + pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); + + /* Fiber: select 1000BASE-X only mode MAC Specific Ctrl Reg. */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2); + ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); + ctrl &= ~PHY_M_MAC_MD_MSK; + ctrl |= PHY_M_MAC_MODE_SEL(PHY_M_MAC_MD_1000BX); + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); + + if (hw->pmd_type == 'P') { + /* select page 1 to access Fiber registers */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 1); + + /* for SFP-module set SIGDET polarity to low */ + ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); + ctrl |= PHY_M_FIB_SIGD_POL; + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); + } + + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); + } + + ctrl = PHY_CT_RESET; + ct1000 = 0; + adv = PHY_AN_CSMA; + reg = 0; + + if (sky2->flags & SKY2_FLAG_AUTO_SPEED) { + if (sky2_is_copper(hw)) { + if (sky2->advertising & ADVERTISED_1000baseT_Full) + ct1000 |= PHY_M_1000C_AFD; + if (sky2->advertising & ADVERTISED_1000baseT_Half) + ct1000 |= PHY_M_1000C_AHD; + if (sky2->advertising & ADVERTISED_100baseT_Full) + adv |= PHY_M_AN_100_FD; + if (sky2->advertising & ADVERTISED_100baseT_Half) + adv |= PHY_M_AN_100_HD; + if (sky2->advertising & ADVERTISED_10baseT_Full) + adv |= PHY_M_AN_10_FD; + if (sky2->advertising & ADVERTISED_10baseT_Half) + adv |= PHY_M_AN_10_HD; + + } else { /* special defines for FIBER (88E1040S only) */ + if (sky2->advertising & ADVERTISED_1000baseT_Full) + adv |= PHY_M_AN_1000X_AFD; + if (sky2->advertising & ADVERTISED_1000baseT_Half) + adv |= PHY_M_AN_1000X_AHD; + } + + /* Restart Auto-negotiation */ + ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG; + } else { + /* forced speed/duplex settings */ + ct1000 = PHY_M_1000C_MSE; + + /* Disable auto update for duplex flow control and duplex */ + reg |= GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_SPD_DIS; + + switch (sky2->speed) { + case SPEED_1000: + ctrl |= 
PHY_CT_SP1000; + reg |= GM_GPCR_SPEED_1000; + break; + case SPEED_100: + ctrl |= PHY_CT_SP100; + reg |= GM_GPCR_SPEED_100; + break; + } + + if (sky2->duplex == DUPLEX_FULL) { + reg |= GM_GPCR_DUP_FULL; + ctrl |= PHY_CT_DUP_MD; + } else if (sky2->speed < SPEED_1000) + sky2->flow_mode = FC_NONE; + } + + if (sky2->flags & SKY2_FLAG_AUTO_PAUSE) { + if (sky2_is_copper(hw)) + adv |= copper_fc_adv[sky2->flow_mode]; + else + adv |= fiber_fc_adv[sky2->flow_mode]; + } else { + reg |= GM_GPCR_AU_FCT_DIS; + reg |= gm_fc_disable[sky2->flow_mode]; + + /* Forward pause packets to GMAC? */ + if (sky2->flow_mode & FC_RX) + sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON); + else + sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); + } + + gma_write16(hw, port, GM_GP_CTRL, reg); + + if (hw->flags & SKY2_HW_GIGABIT) + gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000); + + gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv); + gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); + + /* Setup Phy LED's */ + ledctrl = PHY_M_LED_PULS_DUR(PULS_170MS); + ledover = 0; + + switch (hw->chip_id) { + case CHIP_ID_YUKON_FE: + /* on 88E3082 these bits are at 11..9 (shifted left) */ + ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) << 1; + + ctrl = gm_phy_read(hw, port, PHY_MARV_FE_LED_PAR); + + /* delete ACT LED control bits */ + ctrl &= ~PHY_M_FELP_LED1_MSK; + /* change ACT LED control to blink mode */ + ctrl |= PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_ACT_BL); + gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl); + break; + + case CHIP_ID_YUKON_FE_P: + /* Enable Link Partner Next Page */ + ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); + ctrl |= PHY_M_PC_ENA_LIP_NP; + + /* disable Energy Detect and enable scrambler */ + ctrl &= ~(PHY_M_PC_ENA_ENE_DT | PHY_M_PC_DIS_SCRAMB); + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); + + /* set LED2 -> ACT, LED1 -> LINK, LED0 -> SPEED */ + ctrl = PHY_M_FELP_LED2_CTRL(LED_PAR_CTRL_ACT_BL) | + PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_LINK) | + PHY_M_FELP_LED0_CTRL(LED_PAR_CTRL_SPEED); + + gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl); + break; + + case CHIP_ID_YUKON_XL: + pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); + + /* select page 3 to access LED control register */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3); + + /* set LED Function Control register */ + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, + (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */ + PHY_M_LEDC_INIT_CTRL(7) | /* 10 Mbps */ + PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */ + PHY_M_LEDC_STA0_CTRL(7))); /* 1000 Mbps */ + + /* set Polarity Control register */ + gm_phy_write(hw, port, PHY_MARV_PHY_STAT, + (PHY_M_POLC_LS1_P_MIX(4) | + PHY_M_POLC_IS0_P_MIX(4) | + PHY_M_POLC_LOS_CTRL(2) | + PHY_M_POLC_INIT_CTRL(2) | + PHY_M_POLC_STA1_CTRL(2) | + PHY_M_POLC_STA0_CTRL(2))); + + /* restore page register */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); + break; + + case CHIP_ID_YUKON_EC_U: + case CHIP_ID_YUKON_EX: + case CHIP_ID_YUKON_SUPR: + pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); + + /* select page 3 to access LED control register */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3); + + /* set LED Function Control register */ + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, + (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */ + PHY_M_LEDC_INIT_CTRL(8) | /* 10 Mbps */ + PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */ + PHY_M_LEDC_STA0_CTRL(7)));/* 1000 Mbps */ + + /* set Blink Rate in LED Timer Control Register */ + gm_phy_write(hw, port, PHY_MARV_INT_MASK, + ledctrl | PHY_M_LED_BLINK_RT(BLINK_84MS)); + /* restore page register */ + gm_phy_write(hw, port, 
PHY_MARV_EXT_ADR, pg); + break; + + default: + /* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */ + ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL; + + /* turn off the Rx LED (LED_RX) */ + ledover |= PHY_M_LED_MO_RX(MO_LED_OFF); + } + + if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_UL_2) { + /* apply fixes in PHY AFE */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 255); + + /* increase differential signal amplitude in 10BASE-T */ + gm_phy_write(hw, port, 0x18, 0xaa99); + gm_phy_write(hw, port, 0x17, 0x2011); + + if (hw->chip_id == CHIP_ID_YUKON_EC_U) { + /* fix for IEEE A/B Symmetry failure in 1000BASE-T */ + gm_phy_write(hw, port, 0x18, 0xa204); + gm_phy_write(hw, port, 0x17, 0x2002); + } + + /* set page register to 0 */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0); + } else if (hw->chip_id == CHIP_ID_YUKON_FE_P && + hw->chip_rev == CHIP_REV_YU_FE2_A0) { + /* apply workaround for integrated resistors calibration */ + gm_phy_write(hw, port, PHY_MARV_PAGE_ADDR, 17); + gm_phy_write(hw, port, PHY_MARV_PAGE_DATA, 0x3f60); + } else if (hw->chip_id == CHIP_ID_YUKON_OPT && hw->chip_rev == 0) { + /* apply fixes in PHY AFE */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00ff); + + /* apply RDAC termination workaround */ + gm_phy_write(hw, port, 24, 0x2800); + gm_phy_write(hw, port, 23, 0x2001); + + /* set page register back to 0 */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0); + } else if (hw->chip_id != CHIP_ID_YUKON_EX && + hw->chip_id < CHIP_ID_YUKON_SUPR) { + /* no effect on Yukon-XL */ + gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl); + + if (!(sky2->flags & SKY2_FLAG_AUTO_SPEED) || + sky2->speed == SPEED_100) { + /* turn on 100 Mbps LED (LED_LINK100) */ + ledover |= PHY_M_LED_MO_100(MO_LED_ON); + } + + if (ledover) + gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover); + + } else if (hw->chip_id == CHIP_ID_YUKON_PRM && + (sky2_read8(hw, B2_MAC_CFG) & 0xf) == 0x7) { + int i; + /* This a phy register setup workaround copied from vendor driver. 
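+ * (Each table entry below is applied by writing its value to PHY register 17
+ * and its register number, with bit 13 set, to PHY register 16, once page
+ * 0x00ff has been selected via PHY_MARV_EXT_ADR.)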
*/ + static const struct { + u16 reg, val; + } eee_afe[] = { + { 0x156, 0x58ce }, + { 0x153, 0x99eb }, + { 0x141, 0x8064 }, + /* { 0x155, 0x130b },*/ + { 0x000, 0x0000 }, + { 0x151, 0x8433 }, + { 0x14b, 0x8c44 }, + { 0x14c, 0x0f90 }, + { 0x14f, 0x39aa }, + /* { 0x154, 0x2f39 },*/ + { 0x14d, 0xba33 }, + { 0x144, 0x0048 }, + { 0x152, 0x2010 }, + /* { 0x158, 0x1223 },*/ + { 0x140, 0x4444 }, + { 0x154, 0x2f3b }, + { 0x158, 0xb203 }, + { 0x157, 0x2029 }, + }; + + /* Start Workaround for OptimaEEE Rev.Z0 */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00fb); + + gm_phy_write(hw, port, 1, 0x4099); + gm_phy_write(hw, port, 3, 0x1120); + gm_phy_write(hw, port, 11, 0x113c); + gm_phy_write(hw, port, 14, 0x8100); + gm_phy_write(hw, port, 15, 0x112a); + gm_phy_write(hw, port, 17, 0x1008); + + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00fc); + gm_phy_write(hw, port, 1, 0x20b0); + + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00ff); + + for (i = 0; i < ARRAY_SIZE(eee_afe); i++) { + /* apply AFE settings */ + gm_phy_write(hw, port, 17, eee_afe[i].val); + gm_phy_write(hw, port, 16, eee_afe[i].reg | 1u<<13); + } + + /* End Workaround for OptimaEEE */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0); + + /* Enable 10Base-Te (EEE) */ + if (hw->chip_id >= CHIP_ID_YUKON_PRM) { + reg = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); + gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, + reg | PHY_M_10B_TE_ENABLE); + } + } + + /* Enable phy interrupt on auto-negotiation complete (or link up) */ + if (sky2->flags & SKY2_FLAG_AUTO_SPEED) + gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL); + else + gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK); +} + +static const u32 phy_power[] = { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD }; +static const u32 coma_mode[] = { PCI_Y2_PHY1_COMA, PCI_Y2_PHY2_COMA }; + +static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port) +{ + u32 reg1; + + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); + reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); + reg1 &= ~phy_power[port]; + + if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1) + reg1 |= coma_mode[port]; + + sky2_pci_write32(hw, PCI_DEV_REG1, reg1); + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); + sky2_pci_read32(hw, PCI_DEV_REG1); + + if (hw->chip_id == CHIP_ID_YUKON_FE) + gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_ANE); + else if (hw->flags & SKY2_HW_ADV_POWER_CTL) + sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR); +} + +static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port) +{ + u32 reg1; + u16 ctrl; + + /* release GPHY Control reset */ + sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR); + + /* release GMAC reset */ + sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR); + + if (hw->flags & SKY2_HW_NEWER_PHY) { + /* select page 2 to access MAC control register */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2); + + ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); + /* allow GMII Power Down */ + ctrl &= ~PHY_M_MAC_GMIF_PUP; + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); + + /* set page register back to 0 */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0); + } + + /* setup General Purpose Control Register */ + gma_write16(hw, port, GM_GP_CTRL, + GM_GPCR_FL_PASS | GM_GPCR_SPEED_100 | + GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS | + GM_GPCR_AU_SPD_DIS); + + if (hw->chip_id != CHIP_ID_YUKON_EC) { + if (hw->chip_id == CHIP_ID_YUKON_EC_U) { + /* select page 2 to access MAC control register */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2); + + ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); + /* 
enable Power Down */ + ctrl |= PHY_M_PC_POW_D_ENA; + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); + + /* set page register back to 0 */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0); + } + + /* set IEEE compatible Power Down Mode (dev. #4.99) */ + gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_PDOWN); + } + + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); + reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); + reg1 |= phy_power[port]; /* set PHY to PowerDown/COMA Mode */ + sky2_pci_write32(hw, PCI_DEV_REG1, reg1); + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); +} + +/* configure IPG according to used link speed */ +static void sky2_set_ipg(struct sky2_port *sky2) +{ + u16 reg; + + reg = gma_read16(sky2->hw, sky2->port, GM_SERIAL_MODE); + reg &= ~GM_SMOD_IPG_MSK; + if (sky2->speed > SPEED_100) + reg |= IPG_DATA_VAL(IPG_DATA_DEF_1000); + else + reg |= IPG_DATA_VAL(IPG_DATA_DEF_10_100); + gma_write16(sky2->hw, sky2->port, GM_SERIAL_MODE, reg); +} + +/* Enable Rx/Tx */ +static void sky2_enable_rx_tx(struct sky2_port *sky2) +{ + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + u16 reg; + + reg = gma_read16(hw, port, GM_GP_CTRL); + reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA; + gma_write16(hw, port, GM_GP_CTRL, reg); +} + +/* Force a renegotiation */ +static void sky2_phy_reinit(struct sky2_port *sky2) +{ + spin_lock_bh(&sky2->phy_lock); + sky2_phy_init(sky2->hw, sky2->port); + sky2_enable_rx_tx(sky2); + spin_unlock_bh(&sky2->phy_lock); +} + +/* Put device in state to listen for Wake On Lan */ +static void sky2_wol_init(struct sky2_port *sky2) +{ + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + enum flow_control save_mode; + u16 ctrl; + + /* Bring hardware out of reset */ + sky2_write16(hw, B0_CTST, CS_RST_CLR); + sky2_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR); + + sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR); + sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR); + + /* Force to 10/100 + * sky2_reset will re-enable on resume + */ + save_mode = sky2->flow_mode; + ctrl = sky2->advertising; + + sky2->advertising &= ~(ADVERTISED_1000baseT_Half|ADVERTISED_1000baseT_Full); + sky2->flow_mode = FC_NONE; + + spin_lock_bh(&sky2->phy_lock); + sky2_phy_power_up(hw, port); + sky2_phy_init(hw, port); + spin_unlock_bh(&sky2->phy_lock); + + sky2->flow_mode = save_mode; + sky2->advertising = ctrl; + + /* Set GMAC to no flow control and auto update for speed/duplex */ + gma_write16(hw, port, GM_GP_CTRL, + GM_GPCR_FC_TX_DIS|GM_GPCR_TX_ENA|GM_GPCR_RX_ENA| + GM_GPCR_DUP_FULL|GM_GPCR_FC_RX_DIS|GM_GPCR_AU_FCT_DIS); + + /* Set WOL address */ + memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR), + sky2->netdev->dev_addr, ETH_ALEN); + + /* Turn on appropriate WOL control bits */ + sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT); + ctrl = 0; + if (sky2->wol & WAKE_PHY) + ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT; + else + ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT; + + if (sky2->wol & WAKE_MAGIC) + ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT; + else + ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT; + + ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT; + sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl); + + /* Disable PiG firmware */ + sky2_write16(hw, B0_CTST, Y2_HW_WOL_OFF); + + /* Needed by some broken BIOSes, use PCI rather than PCI-e for WOL */ + if (legacy_pme) { + u32 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); + reg1 |= PCI_Y2_PME_LEGACY; + 
sky2_pci_write32(hw, PCI_DEV_REG1, reg1); + } + + /* block receiver */ + sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); + sky2_read32(hw, B0_CTST); +} + +static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port) +{ + struct net_device *dev = hw->dev[port]; + + if ( (hw->chip_id == CHIP_ID_YUKON_EX && + hw->chip_rev != CHIP_REV_YU_EX_A0) || + hw->chip_id >= CHIP_ID_YUKON_FE_P) { + /* Yukon-Extreme B0 and further Extreme devices */ + sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA); + } else if (dev->mtu > ETH_DATA_LEN) { + /* set Tx GMAC FIFO Almost Empty Threshold */ + sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR), + (ECU_JUMBO_WM << 16) | ECU_AE_THR); + + sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_DIS); + } else + sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA); +} + +static void sky2_mac_init(struct sky2_hw *hw, unsigned port) +{ + struct sky2_port *sky2 = netdev_priv(hw->dev[port]); + u16 reg; + u32 rx_reg; + int i; + const u8 *addr = hw->dev[port]->dev_addr; + + sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); + sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR); + + sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR); + + if (hw->chip_id == CHIP_ID_YUKON_XL && + hw->chip_rev == CHIP_REV_YU_XL_A0 && + port == 1) { + /* WA DEV_472 -- looks like crossed wires on port 2 */ + /* clear GMAC 1 Control reset */ + sky2_write8(hw, SK_REG(0, GMAC_CTRL), GMC_RST_CLR); + do { + sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_SET); + sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_CLR); + } while (gm_phy_read(hw, 1, PHY_MARV_ID0) != PHY_MARV_ID0_VAL || + gm_phy_read(hw, 1, PHY_MARV_ID1) != PHY_MARV_ID1_Y2 || + gm_phy_read(hw, 1, PHY_MARV_INT_MASK) != 0); + } + + sky2_read16(hw, SK_REG(port, GMAC_IRQ_SRC)); + + /* Enable Transmit FIFO Underrun */ + sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK); + + spin_lock_bh(&sky2->phy_lock); + sky2_phy_power_up(hw, port); + sky2_phy_init(hw, port); + spin_unlock_bh(&sky2->phy_lock); + + /* MIB clear */ + reg = gma_read16(hw, port, GM_PHY_ADDR); + gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR); + + for (i = GM_MIB_CNT_BASE; i <= GM_MIB_CNT_END; i += 4) + gma_read16(hw, port, i); + gma_write16(hw, port, GM_PHY_ADDR, reg); + + /* transmit control */ + gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF)); + + /* receive control reg: unicast + multicast + no FCS */ + gma_write16(hw, port, GM_RX_CTRL, + GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA); + + /* transmit flow control */ + gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff); + + /* transmit parameter */ + gma_write16(hw, port, GM_TX_PARAM, + TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | + TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) | + TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | + TX_BACK_OFF_LIM(TX_BOF_LIM_DEF)); + + /* serial mode register */ + reg = DATA_BLIND_VAL(DATA_BLIND_DEF) | + GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF_1000); + + if (hw->dev[port]->mtu > ETH_DATA_LEN) + reg |= GM_SMOD_JUMBO_ENA; + + if (hw->chip_id == CHIP_ID_YUKON_EC_U && + hw->chip_rev == CHIP_REV_YU_EC_U_B1) + reg |= GM_NEW_FLOW_CTRL; + + gma_write16(hw, port, GM_SERIAL_MODE, reg); + + /* virtual address for data */ + gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr); + + /* physical address: used for pause frames */ + gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr); + + /* ignore counter overflows */ + gma_write16(hw, port, GM_TX_IRQ_MSK, 0); + gma_write16(hw, port, GM_RX_IRQ_MSK, 0); + gma_write16(hw, port, GM_TR_IRQ_MSK, 0); + + /* Configure Rx MAC FIFO */ + sky2_write8(hw, 
SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR); + rx_reg = GMF_OPER_ON | GMF_RX_F_FL_ON; + if (hw->chip_id == CHIP_ID_YUKON_EX || + hw->chip_id == CHIP_ID_YUKON_FE_P) + rx_reg |= GMF_RX_OVER_ON; + + sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), rx_reg); + + if (hw->chip_id == CHIP_ID_YUKON_XL) { + /* Hardware errata - clear flush mask */ + sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), 0); + } else { + /* Flush Rx MAC FIFO on any flow control or error */ + sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR); + } + + /* Set threshold to 0xa (64 bytes) + 1 to workaround pause bug */ + reg = RX_GMF_FL_THR_DEF + 1; + /* Another magic mystery workaround from sk98lin */ + if (hw->chip_id == CHIP_ID_YUKON_FE_P && + hw->chip_rev == CHIP_REV_YU_FE2_A0) + reg = 0x178; + sky2_write16(hw, SK_REG(port, RX_GMF_FL_THR), reg); + + /* Configure Tx MAC FIFO */ + sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR); + sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON); + + /* On chips without ram buffer, pause is controlled by MAC level */ + if (!(hw->flags & SKY2_HW_RAM_BUFFER)) { + /* Pause threshold is scaled by 8 in bytes */ + if (hw->chip_id == CHIP_ID_YUKON_FE_P && + hw->chip_rev == CHIP_REV_YU_FE2_A0) + reg = 1568 / 8; + else + reg = 1024 / 8; + sky2_write16(hw, SK_REG(port, RX_GMF_UP_THR), reg); + sky2_write16(hw, SK_REG(port, RX_GMF_LP_THR), 768 / 8); + + sky2_set_tx_stfwd(hw, port); + } + + if (hw->chip_id == CHIP_ID_YUKON_FE_P && + hw->chip_rev == CHIP_REV_YU_FE2_A0) { + /* disable dynamic watermark */ + reg = sky2_read16(hw, SK_REG(port, TX_GMF_EA)); + reg &= ~TX_DYN_WM_ENA; + sky2_write16(hw, SK_REG(port, TX_GMF_EA), reg); + } +} + +/* Assign Ram Buffer allocation to queue */ +static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space) +{ + u32 end; + + /* convert from K bytes to qwords used for hw register */ + start *= 1024/8; + space *= 1024/8; + end = start + space - 1; + + sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR); + sky2_write32(hw, RB_ADDR(q, RB_START), start); + sky2_write32(hw, RB_ADDR(q, RB_END), end); + sky2_write32(hw, RB_ADDR(q, RB_WP), start); + sky2_write32(hw, RB_ADDR(q, RB_RP), start); + + if (q == Q_R1 || q == Q_R2) { + u32 tp = space - space/4; + + /* On receive queue's set the thresholds + * give receiver priority when > 3/4 full + * send pause when down to 2K + */ + sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp); + sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2); + + tp = space - 8192/8; + sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp); + sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4); + } else { + /* Enable store & forward on Tx queue's because + * Tx FIFO is only 1K on Yukon + */ + sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD); + } + + sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD); + sky2_read8(hw, RB_ADDR(q, RB_CTRL)); +} + +/* Setup Bus Memory Interface */ +static void sky2_qset(struct sky2_hw *hw, u16 q) +{ + sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_RESET); + sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_OPER_INIT); + sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_FIFO_OP_ON); + sky2_write32(hw, Q_ADDR(q, Q_WM), BMU_WM_DEFAULT); +} + +/* Setup prefetch unit registers. 
This is the interface between + * hardware and driver list elements + */ +static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr, + dma_addr_t addr, u32 last) +{ + sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_SET); + sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_CLR); + sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_HI), upper_32_bits(addr)); + sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_LO), lower_32_bits(addr)); + sky2_write16(hw, Y2_QADDR(qaddr, PREF_UNIT_LAST_IDX), last); + sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_OP_ON); + + sky2_read32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL)); +} + +static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2, u16 *slot) +{ + struct sky2_tx_le *le = sky2->tx_le + *slot; + + *slot = RING_NEXT(*slot, sky2->tx_ring_size); + le->ctrl = 0; + return le; +} + +static void tx_init(struct sky2_port *sky2) +{ + struct sky2_tx_le *le; + + sky2->tx_prod = sky2->tx_cons = 0; + sky2->tx_tcpsum = 0; + sky2->tx_last_mss = 0; + netdev_reset_queue(sky2->netdev); + + le = get_tx_le(sky2, &sky2->tx_prod); + le->addr = 0; + le->opcode = OP_ADDR64 | HW_OWNER; + sky2->tx_last_upper = 0; +} + +/* Update chip's next pointer */ +static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx) +{ + /* Make sure write' to descriptors are complete before we tell hardware */ + wmb(); + sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx); +} + + +static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2) +{ + struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put; + sky2->rx_put = RING_NEXT(sky2->rx_put, RX_LE_SIZE); + le->ctrl = 0; + return le; +} + +static unsigned sky2_get_rx_threshold(struct sky2_port *sky2) +{ + unsigned size; + + /* Space needed for frame data + headers rounded up */ + size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8); + + /* Stopping point for hardware truncation */ + return (size - 8) / sizeof(u32); +} + +static unsigned sky2_get_rx_data_size(struct sky2_port *sky2) +{ + struct rx_ring_info *re; + unsigned size; + + /* Space needed for frame data + headers rounded up */ + size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8); + + sky2->rx_nfrags = size >> PAGE_SHIFT; + BUG_ON(sky2->rx_nfrags > ARRAY_SIZE(re->frag_addr)); + + /* Compute residue after pages */ + size -= sky2->rx_nfrags << PAGE_SHIFT; + + /* Optimize to handle small packets and headers */ + if (size < copybreak) + size = copybreak; + if (size < ETH_HLEN) + size = ETH_HLEN; + + return size; +} + +/* Build description to hardware for one receive segment */ +static void sky2_rx_add(struct sky2_port *sky2, u8 op, + dma_addr_t map, unsigned len) +{ + struct sky2_rx_le *le; + + if (sizeof(dma_addr_t) > sizeof(u32)) { + le = sky2_next_rx(sky2); + le->addr = cpu_to_le32(upper_32_bits(map)); + le->opcode = OP_ADDR64 | HW_OWNER; + } + + le = sky2_next_rx(sky2); + le->addr = cpu_to_le32(lower_32_bits(map)); + le->length = cpu_to_le16(len); + le->opcode = op | HW_OWNER; +} + +/* Build description to hardware for one possibly fragmented skb */ +static void sky2_rx_submit(struct sky2_port *sky2, + const struct rx_ring_info *re) +{ + int i; + + sky2_rx_add(sky2, OP_PACKET, re->data_addr, sky2->rx_data_size); + + for (i = 0; i < skb_shinfo(re->skb)->nr_frags; i++) + sky2_rx_add(sky2, OP_BUFFER, re->frag_addr[i], PAGE_SIZE); +} + + +static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re, + unsigned size) +{ + struct sk_buff *skb = re->skb; + int i; + + re->data_addr = 
dma_map_single(&pdev->dev, skb->data, size, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, re->data_addr)) + goto mapping_error; + + dma_unmap_len_set(re, data_size, size); + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + re->frag_addr[i] = skb_frag_dma_map(&pdev->dev, frag, 0, + skb_frag_size(frag), + DMA_FROM_DEVICE); + + if (dma_mapping_error(&pdev->dev, re->frag_addr[i])) + goto map_page_error; + } + return 0; + +map_page_error: + while (--i >= 0) { + dma_unmap_page(&pdev->dev, re->frag_addr[i], + skb_frag_size(&skb_shinfo(skb)->frags[i]), + DMA_FROM_DEVICE); + } + + dma_unmap_single(&pdev->dev, re->data_addr, + dma_unmap_len(re, data_size), DMA_FROM_DEVICE); + +mapping_error: + if (net_ratelimit()) + dev_warn(&pdev->dev, "%s: rx mapping error\n", + skb->dev->name); + return -EIO; +} + +static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re) +{ + struct sk_buff *skb = re->skb; + int i; + + dma_unmap_single(&pdev->dev, re->data_addr, + dma_unmap_len(re, data_size), DMA_FROM_DEVICE); + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) + dma_unmap_page(&pdev->dev, re->frag_addr[i], + skb_frag_size(&skb_shinfo(skb)->frags[i]), + DMA_FROM_DEVICE); +} + +/* Tell chip where to start receive checksum. + * Actually has two checksums, but set both same to avoid possible byte + * order problems. + */ +static void rx_set_checksum(struct sky2_port *sky2) +{ + struct sky2_rx_le *le = sky2_next_rx(sky2); + + le->addr = cpu_to_le32((ETH_HLEN << 16) | ETH_HLEN); + le->ctrl = 0; + le->opcode = OP_TCPSTART | HW_OWNER; + + sky2_write32(sky2->hw, + Q_ADDR(rxqaddr[sky2->port], Q_CSR), + (sky2->netdev->features & NETIF_F_RXCSUM) + ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); +} + +/* Enable/disable receive hash calculation (RSS) */ +static void rx_set_rss(struct net_device *dev, netdev_features_t features) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + int i, nkeys = 4; + + /* Supports IPv6 and other modes */ + if (hw->flags & SKY2_HW_NEW_LE) { + nkeys = 10; + sky2_write32(hw, SK_REG(sky2->port, RSS_CFG), HASH_ALL); + } + + /* Program RSS initial values */ + if (features & NETIF_F_RXHASH) { + u32 rss_key[10]; + + netdev_rss_key_fill(rss_key, sizeof(rss_key)); + for (i = 0; i < nkeys; i++) + sky2_write32(hw, SK_REG(sky2->port, RSS_KEY + i * 4), + rss_key[i]); + + /* Need to turn on (undocumented) flag to make hashing work */ + sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), + RX_STFW_ENA); + + sky2_write32(hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR), + BMU_ENA_RX_RSS_HASH); + } else + sky2_write32(hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR), + BMU_DIS_RX_RSS_HASH); +} + +/* + * The RX Stop command will not work for Yukon-2 if the BMU does not + * reach the end of packet and since we can't make sure that we have + * incoming data, we must reset the BMU while it is not doing a DMA + * transfer. Since it is possible that the RX path is still active, + * the RX RAM buffer will be stopped first, so any possible incoming + * data will not trigger a DMA. After the RAM buffer is stopped, the + * BMU is polled until any DMA in progress is ended and only then it + * will be reset. 
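+ * (The code below does exactly that: disable the RAM buffer receive
+ * queue, poll until its levels settle, then reset the BMU and the
+ * prefetch unit.)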
+ */ +static void sky2_rx_stop(struct sky2_port *sky2) +{ + struct sky2_hw *hw = sky2->hw; + unsigned rxq = rxqaddr[sky2->port]; + int i; + + /* disable the RAM Buffer receive queue */ + sky2_write8(hw, RB_ADDR(rxq, RB_CTRL), RB_DIS_OP_MD); + + for (i = 0; i < 0xffff; i++) + if (sky2_read8(hw, RB_ADDR(rxq, Q_RSL)) + == sky2_read8(hw, RB_ADDR(rxq, Q_RL))) + goto stopped; + + netdev_warn(sky2->netdev, "receiver stop failed\n"); +stopped: + sky2_write32(hw, Q_ADDR(rxq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST); + + /* reset the Rx prefetch unit */ + sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET); +} + +/* Clean out receive buffer area, assumes receiver hardware stopped */ +static void sky2_rx_clean(struct sky2_port *sky2) +{ + unsigned i; + + if (sky2->rx_le) + memset(sky2->rx_le, 0, RX_LE_BYTES); + + for (i = 0; i < sky2->rx_pending; i++) { + struct rx_ring_info *re = sky2->rx_ring + i; + + if (re->skb) { + sky2_rx_unmap_skb(sky2->hw->pdev, re); + kfree_skb(re->skb); + re->skb = NULL; + } + } +} + +/* Basic MII support */ +static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct mii_ioctl_data *data = if_mii(ifr); + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + int err = -EOPNOTSUPP; + + if (!netif_running(dev)) + return -ENODEV; /* Phy still in reset */ + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = PHY_ADDR_MARV; + + fallthrough; + case SIOCGMIIREG: { + u16 val = 0; + + spin_lock_bh(&sky2->phy_lock); + err = __gm_phy_read(hw, sky2->port, data->reg_num & 0x1f, &val); + spin_unlock_bh(&sky2->phy_lock); + + data->val_out = val; + break; + } + + case SIOCSMIIREG: + spin_lock_bh(&sky2->phy_lock); + err = gm_phy_write(hw, sky2->port, data->reg_num & 0x1f, + data->val_in); + spin_unlock_bh(&sky2->phy_lock); + break; + } + return err; +} + +#define SKY2_VLAN_OFFLOADS (NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO) + +static void sky2_vlan_mode(struct net_device *dev, netdev_features_t features) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + u16 port = sky2->port; + + if (features & NETIF_F_HW_VLAN_CTAG_RX) + sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), + RX_VLAN_STRIP_ON); + else + sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), + RX_VLAN_STRIP_OFF); + + if (features & NETIF_F_HW_VLAN_CTAG_TX) { + sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), + TX_VLAN_TAG_ON); + + dev->vlan_features |= SKY2_VLAN_OFFLOADS; + } else { + sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), + TX_VLAN_TAG_OFF); + + /* Can't do transmit offload of vlan without hw vlan */ + dev->vlan_features &= ~SKY2_VLAN_OFFLOADS; + } +} + +/* Amount of required worst case padding in rx buffer */ +static inline unsigned sky2_rx_pad(const struct sky2_hw *hw) +{ + return (hw->flags & SKY2_HW_RAM_BUFFER) ? 8 : 2; +} + +/* + * Allocate an skb for receiving. If the MTU is large enough + * make the skb non-linear with a fragment list of pages. + */ +static struct sk_buff *sky2_rx_alloc(struct sky2_port *sky2, gfp_t gfp) +{ + struct sk_buff *skb; + int i; + + skb = __netdev_alloc_skb(sky2->netdev, + sky2->rx_data_size + sky2_rx_pad(sky2->hw), + gfp); + if (!skb) + goto nomem; + + if (sky2->hw->flags & SKY2_HW_RAM_BUFFER) { + unsigned char *start; + /* + * Workaround for a bug in FIFO that cause hang + * if the FIFO if the receive buffer is not 64 byte aligned. + * The buffer returned from netdev_alloc_skb is + * aligned except if slab debugging is enabled. 
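+ * Here the data pointer is simply rounded up to the next 8 byte boundary.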
+ */ + start = PTR_ALIGN(skb->data, 8); + skb_reserve(skb, start - skb->data); + } else + skb_reserve(skb, NET_IP_ALIGN); + + for (i = 0; i < sky2->rx_nfrags; i++) { + struct page *page = alloc_page(gfp); + + if (!page) + goto free_partial; + skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE); + } + + return skb; +free_partial: + kfree_skb(skb); +nomem: + return NULL; +} + +static inline void sky2_rx_update(struct sky2_port *sky2, unsigned rxq) +{ + sky2_put_idx(sky2->hw, rxq, sky2->rx_put); +} + +static int sky2_alloc_rx_skbs(struct sky2_port *sky2) +{ + struct sky2_hw *hw = sky2->hw; + unsigned i; + + sky2->rx_data_size = sky2_get_rx_data_size(sky2); + + /* Fill Rx ring */ + for (i = 0; i < sky2->rx_pending; i++) { + struct rx_ring_info *re = sky2->rx_ring + i; + + re->skb = sky2_rx_alloc(sky2, GFP_KERNEL); + if (!re->skb) + return -ENOMEM; + + if (sky2_rx_map_skb(hw->pdev, re, sky2->rx_data_size)) { + dev_kfree_skb(re->skb); + re->skb = NULL; + return -ENOMEM; + } + } + return 0; +} + +/* + * Setup receiver buffer pool. + * Normal case this ends up creating one list element for skb + * in the receive ring. Worst case if using large MTU and each + * allocation falls on a different 64 bit region, that results + * in 6 list elements per ring entry. + * One element is used for checksum enable/disable, and one + * extra to avoid wrap. + */ +static void sky2_rx_start(struct sky2_port *sky2) +{ + struct sky2_hw *hw = sky2->hw; + struct rx_ring_info *re; + unsigned rxq = rxqaddr[sky2->port]; + unsigned i, thresh; + + sky2->rx_put = sky2->rx_next = 0; + sky2_qset(hw, rxq); + + /* On PCI express lowering the watermark gives better performance */ + if (pci_is_pcie(hw->pdev)) + sky2_write32(hw, Q_ADDR(rxq, Q_WM), BMU_WM_PEX); + + /* These chips have no ram buffer? + * MAC Rx RAM Read is controlled by hardware */ + if (hw->chip_id == CHIP_ID_YUKON_EC_U && + hw->chip_rev > CHIP_REV_YU_EC_U_A0) + sky2_write32(hw, Q_ADDR(rxq, Q_TEST), F_M_RX_RAM_DIS); + + sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1); + + if (!(hw->flags & SKY2_HW_NEW_LE)) + rx_set_checksum(sky2); + + if (!(hw->flags & SKY2_HW_RSS_BROKEN)) + rx_set_rss(sky2->netdev, sky2->netdev->features); + + /* submit Rx ring */ + for (i = 0; i < sky2->rx_pending; i++) { + re = sky2->rx_ring + i; + sky2_rx_submit(sky2, re); + } + + /* + * The receiver hangs if it receives frames larger than the + * packet buffer. As a workaround, truncate oversize frames, but + * the register is limited to 9 bits, so if you do frames > 2052 + * you better get the MTU right! + */ + thresh = sky2_get_rx_threshold(sky2); + if (thresh > 0x1ff) + sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_OFF); + else { + sky2_write16(hw, SK_REG(sky2->port, RX_GMF_TR_THR), thresh); + sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_ON); + } + + /* Tell chip about available buffers */ + sky2_rx_update(sky2, rxq); + + if (hw->chip_id == CHIP_ID_YUKON_EX || + hw->chip_id == CHIP_ID_YUKON_SUPR) { + /* + * Disable flushing of non ASF packets; + * must be done after initializing the BMUs; + * drivers without ASF support should do this too, otherwise + * it may happen that they cannot run on ASF devices; + * remember that the MAC FIFO isn't reset during initialization. 
+ */ + sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_MACSEC_FLUSH_OFF); + } + + if (hw->chip_id >= CHIP_ID_YUKON_SUPR) { + /* Enable RX Home Address & Routing Header checksum fix */ + sky2_write16(hw, SK_REG(sky2->port, RX_GMF_FL_CTRL), + RX_IPV6_SA_MOB_ENA | RX_IPV6_DA_MOB_ENA); + + /* Enable TX Home Address & Routing Header checksum fix */ + sky2_write32(hw, Q_ADDR(txqaddr[sky2->port], Q_TEST), + TBMU_TEST_HOME_ADD_FIX_EN | TBMU_TEST_ROUTING_ADD_FIX_EN); + } +} + +static int sky2_alloc_buffers(struct sky2_port *sky2) +{ + struct sky2_hw *hw = sky2->hw; + + /* must be power of 2 */ + sky2->tx_le = dma_alloc_coherent(&hw->pdev->dev, + sky2->tx_ring_size * sizeof(struct sky2_tx_le), + &sky2->tx_le_map, GFP_KERNEL); + if (!sky2->tx_le) + goto nomem; + + sky2->tx_ring = kcalloc(sky2->tx_ring_size, sizeof(struct tx_ring_info), + GFP_KERNEL); + if (!sky2->tx_ring) + goto nomem; + + sky2->rx_le = dma_alloc_coherent(&hw->pdev->dev, RX_LE_BYTES, + &sky2->rx_le_map, GFP_KERNEL); + if (!sky2->rx_le) + goto nomem; + + sky2->rx_ring = kcalloc(sky2->rx_pending, sizeof(struct rx_ring_info), + GFP_KERNEL); + if (!sky2->rx_ring) + goto nomem; + + return sky2_alloc_rx_skbs(sky2); +nomem: + return -ENOMEM; +} + +static void sky2_free_buffers(struct sky2_port *sky2) +{ + struct sky2_hw *hw = sky2->hw; + + sky2_rx_clean(sky2); + + if (sky2->rx_le) { + dma_free_coherent(&hw->pdev->dev, RX_LE_BYTES, sky2->rx_le, + sky2->rx_le_map); + sky2->rx_le = NULL; + } + if (sky2->tx_le) { + dma_free_coherent(&hw->pdev->dev, + sky2->tx_ring_size * sizeof(struct sky2_tx_le), + sky2->tx_le, sky2->tx_le_map); + sky2->tx_le = NULL; + } + kfree(sky2->tx_ring); + kfree(sky2->rx_ring); + + sky2->tx_ring = NULL; + sky2->rx_ring = NULL; +} + +static void sky2_hw_up(struct sky2_port *sky2) +{ + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + u32 ramsize; + int cap; + struct net_device *otherdev = hw->dev[sky2->port^1]; + + tx_init(sky2); + + /* + * On dual port PCI-X card, there is an problem where status + * can be received out of order due to split transactions + */ + if (otherdev && netif_running(otherdev) && + (cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PCIX))) { + u16 cmd; + + cmd = sky2_pci_read16(hw, cap + PCI_X_CMD); + cmd &= ~PCI_X_CMD_MAX_SPLIT; + sky2_pci_write16(hw, cap + PCI_X_CMD, cmd); + } + + sky2_mac_init(hw, port); + + /* Register is number of 4K blocks on internal RAM buffer. */ + ramsize = sky2_read8(hw, B2_E_0) * 4; + if (ramsize > 0) { + u32 rxspace; + + netdev_dbg(sky2->netdev, "ram buffer %dK\n", ramsize); + if (ramsize < 16) + rxspace = ramsize / 2; + else + rxspace = 8 + (2*(ramsize - 16))/3; + + sky2_ramset(hw, rxqaddr[port], 0, rxspace); + sky2_ramset(hw, txqaddr[port], rxspace, ramsize - rxspace); + + /* Make sure SyncQ is disabled */ + sky2_write8(hw, RB_ADDR(port == 0 ? 
Q_XS1 : Q_XS2, RB_CTRL), + RB_RST_SET); + } + + sky2_qset(hw, txqaddr[port]); + + /* This is copied from sk98lin 10.0.5.3; no one tells me about erratta's */ + if (hw->chip_id == CHIP_ID_YUKON_EX && hw->chip_rev == CHIP_REV_YU_EX_B0) + sky2_write32(hw, Q_ADDR(txqaddr[port], Q_TEST), F_TX_CHK_AUTO_OFF); + + /* Set almost empty threshold */ + if (hw->chip_id == CHIP_ID_YUKON_EC_U && + hw->chip_rev == CHIP_REV_YU_EC_U_A0) + sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), ECU_TXFF_LEV); + + sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, + sky2->tx_ring_size - 1); + + sky2_vlan_mode(sky2->netdev, sky2->netdev->features); + netdev_update_features(sky2->netdev); + + sky2_rx_start(sky2); +} + +/* Setup device IRQ and enable napi to process */ +static int sky2_setup_irq(struct sky2_hw *hw, const char *name) +{ + struct pci_dev *pdev = hw->pdev; + int err; + + err = request_irq(pdev->irq, sky2_intr, + (hw->flags & SKY2_HW_USE_MSI) ? 0 : IRQF_SHARED, + name, hw); + if (err) + dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq); + else { + hw->flags |= SKY2_HW_IRQ_SETUP; + + napi_enable(&hw->napi); + sky2_write32(hw, B0_IMSK, Y2_IS_BASE); + sky2_read32(hw, B0_IMSK); + } + + return err; +} + + +/* Bring up network interface. */ +static int sky2_open(struct net_device *dev) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + u32 imask; + int err; + + netif_carrier_off(dev); + + err = sky2_alloc_buffers(sky2); + if (err) + goto err_out; + + /* With single port, IRQ is setup when device is brought up */ + if (hw->ports == 1 && (err = sky2_setup_irq(hw, dev->name))) + goto err_out; + + sky2_hw_up(sky2); + + /* Enable interrupts from phy/mac for port */ + imask = sky2_read32(hw, B0_IMSK); + + if (hw->chip_id == CHIP_ID_YUKON_OPT || + hw->chip_id == CHIP_ID_YUKON_PRM || + hw->chip_id == CHIP_ID_YUKON_OP_2) + imask |= Y2_IS_PHY_QLNK; /* enable PHY Quick Link */ + + imask |= portirq_msk[port]; + sky2_write32(hw, B0_IMSK, imask); + sky2_read32(hw, B0_IMSK); + + netif_info(sky2, ifup, dev, "enabling interface\n"); + + return 0; + +err_out: + sky2_free_buffers(sky2); + return err; +} + +/* Modular subtraction in ring */ +static inline int tx_inuse(const struct sky2_port *sky2) +{ + return (sky2->tx_prod - sky2->tx_cons) & (sky2->tx_ring_size - 1); +} + +/* Number of list elements available for next tx */ +static inline int tx_avail(const struct sky2_port *sky2) +{ + return sky2->tx_pending - tx_inuse(sky2); +} + +/* Estimate of number of transmit list elements required */ +static unsigned tx_le_req(const struct sk_buff *skb) +{ + unsigned count; + + count = (skb_shinfo(skb)->nr_frags + 1) + * (sizeof(dma_addr_t) / sizeof(u32)); + + if (skb_is_gso(skb)) + ++count; + else if (sizeof(dma_addr_t) == sizeof(u32)) + ++count; /* possible vlan */ + + if (skb->ip_summed == CHECKSUM_PARTIAL) + ++count; + + return count; +} + +static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re) +{ + if (re->flags & TX_MAP_SINGLE) + dma_unmap_single(&pdev->dev, dma_unmap_addr(re, mapaddr), + dma_unmap_len(re, maplen), DMA_TO_DEVICE); + else if (re->flags & TX_MAP_PAGE) + dma_unmap_page(&pdev->dev, dma_unmap_addr(re, mapaddr), + dma_unmap_len(re, maplen), DMA_TO_DEVICE); + re->flags = 0; +} + +/* + * Put one packet in ring for transmit. + * A single packet can generate multiple list elements, and + * the number of ring elements will probably be less than the number + * of list elements used. 
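+ * A packet may need an OP_ADDR64, OP_MSS/OP_LRGLEN, OP_VLAN and/or
+ * OP_TCPLISW element before its OP_PACKET element, plus one OP_BUFFER
+ * element (and possibly another OP_ADDR64) for each page fragment.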
+ */ +static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb, + struct net_device *dev) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + struct sky2_tx_le *le = NULL; + struct tx_ring_info *re; + unsigned i, len; + dma_addr_t mapping; + u32 upper; + u16 slot; + u16 mss; + u8 ctrl; + + if (unlikely(tx_avail(sky2) < tx_le_req(skb))) + return NETDEV_TX_BUSY; + + len = skb_headlen(skb); + mapping = dma_map_single(&hw->pdev->dev, skb->data, len, + DMA_TO_DEVICE); + + if (dma_mapping_error(&hw->pdev->dev, mapping)) + goto mapping_error; + + slot = sky2->tx_prod; + netif_printk(sky2, tx_queued, KERN_DEBUG, dev, + "tx queued, slot %u, len %d\n", slot, skb->len); + + /* Send high bits if needed */ + upper = upper_32_bits(mapping); + if (upper != sky2->tx_last_upper) { + le = get_tx_le(sky2, &slot); + le->addr = cpu_to_le32(upper); + sky2->tx_last_upper = upper; + le->opcode = OP_ADDR64 | HW_OWNER; + } + + /* Check for TCP Segmentation Offload */ + mss = skb_shinfo(skb)->gso_size; + if (mss != 0) { + + if (!(hw->flags & SKY2_HW_NEW_LE)) + mss += ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb); + + if (mss != sky2->tx_last_mss) { + le = get_tx_le(sky2, &slot); + le->addr = cpu_to_le32(mss); + + if (hw->flags & SKY2_HW_NEW_LE) + le->opcode = OP_MSS | HW_OWNER; + else + le->opcode = OP_LRGLEN | HW_OWNER; + sky2->tx_last_mss = mss; + } + } + + ctrl = 0; + + /* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */ + if (skb_vlan_tag_present(skb)) { + if (!le) { + le = get_tx_le(sky2, &slot); + le->addr = 0; + le->opcode = OP_VLAN|HW_OWNER; + } else + le->opcode |= OP_VLAN; + le->length = cpu_to_be16(skb_vlan_tag_get(skb)); + ctrl |= INS_VLAN; + } + + /* Handle TCP checksum offload */ + if (skb->ip_summed == CHECKSUM_PARTIAL) { + /* On Yukon EX (some versions) encoding change. */ + if (hw->flags & SKY2_HW_AUTO_TX_SUM) + ctrl |= CALSUM; /* auto checksum */ + else { + const unsigned offset = skb_transport_offset(skb); + u32 tcpsum; + + tcpsum = offset << 16; /* sum start */ + tcpsum |= offset + skb->csum_offset; /* sum write */ + + ctrl |= CALSUM | WR_SUM | INIT_SUM | LOCK_SUM; + if (ip_hdr(skb)->protocol == IPPROTO_UDP) + ctrl |= UDPTCP; + + if (tcpsum != sky2->tx_tcpsum) { + sky2->tx_tcpsum = tcpsum; + + le = get_tx_le(sky2, &slot); + le->addr = cpu_to_le32(tcpsum); + le->length = 0; /* initial checksum value */ + le->ctrl = 1; /* one packet */ + le->opcode = OP_TCPLISW | HW_OWNER; + } + } + } + + re = sky2->tx_ring + slot; + re->flags = TX_MAP_SINGLE; + dma_unmap_addr_set(re, mapaddr, mapping); + dma_unmap_len_set(re, maplen, len); + + le = get_tx_le(sky2, &slot); + le->addr = cpu_to_le32(lower_32_bits(mapping)); + le->length = cpu_to_le16(len); + le->ctrl = ctrl; + le->opcode = mss ? 
(OP_LARGESEND | HW_OWNER) : (OP_PACKET | HW_OWNER); + + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + mapping = skb_frag_dma_map(&hw->pdev->dev, frag, 0, + skb_frag_size(frag), DMA_TO_DEVICE); + + if (dma_mapping_error(&hw->pdev->dev, mapping)) + goto mapping_unwind; + + upper = upper_32_bits(mapping); + if (upper != sky2->tx_last_upper) { + le = get_tx_le(sky2, &slot); + le->addr = cpu_to_le32(upper); + sky2->tx_last_upper = upper; + le->opcode = OP_ADDR64 | HW_OWNER; + } + + re = sky2->tx_ring + slot; + re->flags = TX_MAP_PAGE; + dma_unmap_addr_set(re, mapaddr, mapping); + dma_unmap_len_set(re, maplen, skb_frag_size(frag)); + + le = get_tx_le(sky2, &slot); + le->addr = cpu_to_le32(lower_32_bits(mapping)); + le->length = cpu_to_le16(skb_frag_size(frag)); + le->ctrl = ctrl; + le->opcode = OP_BUFFER | HW_OWNER; + } + + re->skb = skb; + le->ctrl |= EOP; + + sky2->tx_prod = slot; + + if (tx_avail(sky2) <= MAX_SKB_TX_LE) + netif_stop_queue(dev); + + netdev_sent_queue(dev, skb->len); + sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod); + + return NETDEV_TX_OK; + +mapping_unwind: + for (i = sky2->tx_prod; i != slot; i = RING_NEXT(i, sky2->tx_ring_size)) { + re = sky2->tx_ring + i; + + sky2_tx_unmap(hw->pdev, re); + } + +mapping_error: + if (net_ratelimit()) + dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +/* + * Free ring elements from starting at tx_cons until "done" + * + * NB: + * 1. The hardware will tell us about partial completion of multi-part + * buffers so make sure not to free skb to early. + * 2. This may run in parallel start_xmit because the it only + * looks at the tail of the queue of FIFO (tx_cons), not + * the head (tx_prod) + */ +static void sky2_tx_complete(struct sky2_port *sky2, u16 done) +{ + struct net_device *dev = sky2->netdev; + u16 idx; + unsigned int bytes_compl = 0, pkts_compl = 0; + + BUG_ON(done >= sky2->tx_ring_size); + + for (idx = sky2->tx_cons; idx != done; + idx = RING_NEXT(idx, sky2->tx_ring_size)) { + struct tx_ring_info *re = sky2->tx_ring + idx; + struct sk_buff *skb = re->skb; + + sky2_tx_unmap(sky2->hw->pdev, re); + + if (skb) { + netif_printk(sky2, tx_done, KERN_DEBUG, dev, + "tx done %u\n", idx); + + pkts_compl++; + bytes_compl += skb->len; + + re->skb = NULL; + dev_kfree_skb_any(skb); + + sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size); + } + } + + sky2->tx_cons = idx; + smp_mb(); + + netdev_completed_queue(dev, pkts_compl, bytes_compl); + + u64_stats_update_begin(&sky2->tx_stats.syncp); + sky2->tx_stats.packets += pkts_compl; + sky2->tx_stats.bytes += bytes_compl; + u64_stats_update_end(&sky2->tx_stats.syncp); +} + +static void sky2_tx_reset(struct sky2_hw *hw, unsigned port) +{ + /* Disable Force Sync bit and Enable Alloc bit */ + sky2_write8(hw, SK_REG(port, TXA_CTRL), + TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC); + + /* Stop Interval Timer and Limit Counter of Tx Arbiter */ + sky2_write32(hw, SK_REG(port, TXA_ITI_INI), 0L); + sky2_write32(hw, SK_REG(port, TXA_LIM_INI), 0L); + + /* Reset the PCI FIFO of the async Tx queue */ + sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), + BMU_RST_SET | BMU_FIFO_RST); + + /* Reset the Tx prefetch units */ + sky2_write32(hw, Y2_QADDR(txqaddr[port], PREF_UNIT_CTRL), + PREF_UNIT_RST_SET); + + sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET); + sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET); + + sky2_read32(hw, B0_CTST); +} + +static void 
sky2_hw_down(struct sky2_port *sky2) +{ + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + u16 ctrl; + + /* Force flow control off */ + sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); + + /* Stop transmitter */ + sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_STOP); + sky2_read32(hw, Q_ADDR(txqaddr[port], Q_CSR)); + + sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), + RB_RST_SET | RB_DIS_OP_MD); + + ctrl = gma_read16(hw, port, GM_GP_CTRL); + ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA); + gma_write16(hw, port, GM_GP_CTRL, ctrl); + + sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); + + /* Workaround shared GMAC reset */ + if (!(hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0 && + port == 0 && hw->dev[1] && netif_running(hw->dev[1]))) + sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET); + + sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); + + /* Force any delayed status interrupt and NAPI */ + sky2_write32(hw, STAT_LEV_TIMER_CNT, 0); + sky2_write32(hw, STAT_TX_TIMER_CNT, 0); + sky2_write32(hw, STAT_ISR_TIMER_CNT, 0); + sky2_read8(hw, STAT_ISR_TIMER_CTRL); + + sky2_rx_stop(sky2); + + spin_lock_bh(&sky2->phy_lock); + sky2_phy_power_down(hw, port); + spin_unlock_bh(&sky2->phy_lock); + + sky2_tx_reset(hw, port); + + /* Free any pending frames stuck in HW queue */ + sky2_tx_complete(sky2, sky2->tx_prod); +} + +/* Network shutdown */ +static int sky2_close(struct net_device *dev) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + + /* Never really got started! */ + if (!sky2->tx_le) + return 0; + + netif_info(sky2, ifdown, dev, "disabling interface\n"); + + if (hw->ports == 1) { + sky2_write32(hw, B0_IMSK, 0); + sky2_read32(hw, B0_IMSK); + + napi_disable(&hw->napi); + free_irq(hw->pdev->irq, hw); + hw->flags &= ~SKY2_HW_IRQ_SETUP; + } else { + u32 imask; + + /* Disable port IRQ */ + imask = sky2_read32(hw, B0_IMSK); + imask &= ~portirq_msk[sky2->port]; + sky2_write32(hw, B0_IMSK, imask); + sky2_read32(hw, B0_IMSK); + + synchronize_irq(hw->pdev->irq); + napi_synchronize(&hw->napi); + } + + sky2_hw_down(sky2); + + sky2_free_buffers(sky2); + + return 0; +} + +static u16 sky2_phy_speed(const struct sky2_hw *hw, u16 aux) +{ + if (hw->flags & SKY2_HW_FIBRE_PHY) + return SPEED_1000; + + if (!(hw->flags & SKY2_HW_GIGABIT)) { + if (aux & PHY_M_PS_SPEED_100) + return SPEED_100; + else + return SPEED_10; + } + + switch (aux & PHY_M_PS_SPEED_MSK) { + case PHY_M_PS_SPEED_1000: + return SPEED_1000; + case PHY_M_PS_SPEED_100: + return SPEED_100; + default: + return SPEED_10; + } +} + +static void sky2_link_up(struct sky2_port *sky2) +{ + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + static const char *fc_name[] = { + [FC_NONE] = "none", + [FC_TX] = "tx", + [FC_RX] = "rx", + [FC_BOTH] = "both", + }; + + sky2_set_ipg(sky2); + + sky2_enable_rx_tx(sky2); + + gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK); + + netif_carrier_on(sky2->netdev); + + mod_timer(&hw->watchdog_timer, jiffies + 1); + + /* Turn on link LED */ + sky2_write8(hw, SK_REG(port, LNK_LED_REG), + LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF); + + netif_info(sky2, link, sky2->netdev, + "Link is up at %d Mbps, %s duplex, flow control %s\n", + sky2->speed, + sky2->duplex == DUPLEX_FULL ? 
"full" : "half", + fc_name[sky2->flow_status]); +} + +static void sky2_link_down(struct sky2_port *sky2) +{ + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + u16 reg; + + gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0); + + reg = gma_read16(hw, port, GM_GP_CTRL); + reg &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA); + gma_write16(hw, port, GM_GP_CTRL, reg); + + netif_carrier_off(sky2->netdev); + + /* Turn off link LED */ + sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF); + + netif_info(sky2, link, sky2->netdev, "Link is down\n"); + + sky2_phy_init(hw, port); +} + +static enum flow_control sky2_flow(int rx, int tx) +{ + if (rx) + return tx ? FC_BOTH : FC_RX; + else + return tx ? FC_TX : FC_NONE; +} + +static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux) +{ + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + u16 advert, lpa; + + advert = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV); + lpa = gm_phy_read(hw, port, PHY_MARV_AUNE_LP); + if (lpa & PHY_M_AN_RF) { + netdev_err(sky2->netdev, "remote fault\n"); + return -1; + } + + if (!(aux & PHY_M_PS_SPDUP_RES)) { + netdev_err(sky2->netdev, "speed/duplex mismatch\n"); + return -1; + } + + sky2->speed = sky2_phy_speed(hw, aux); + sky2->duplex = (aux & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF; + + /* Since the pause result bits seem to in different positions on + * different chips. look at registers. + */ + if (hw->flags & SKY2_HW_FIBRE_PHY) { + /* Shift for bits in fiber PHY */ + advert &= ~(ADVERTISE_PAUSE_CAP|ADVERTISE_PAUSE_ASYM); + lpa &= ~(LPA_PAUSE_CAP|LPA_PAUSE_ASYM); + + if (advert & ADVERTISE_1000XPAUSE) + advert |= ADVERTISE_PAUSE_CAP; + if (advert & ADVERTISE_1000XPSE_ASYM) + advert |= ADVERTISE_PAUSE_ASYM; + if (lpa & LPA_1000XPAUSE) + lpa |= LPA_PAUSE_CAP; + if (lpa & LPA_1000XPAUSE_ASYM) + lpa |= LPA_PAUSE_ASYM; + } + + sky2->flow_status = FC_NONE; + if (advert & ADVERTISE_PAUSE_CAP) { + if (lpa & LPA_PAUSE_CAP) + sky2->flow_status = FC_BOTH; + else if (advert & ADVERTISE_PAUSE_ASYM) + sky2->flow_status = FC_RX; + } else if (advert & ADVERTISE_PAUSE_ASYM) { + if ((lpa & LPA_PAUSE_CAP) && (lpa & LPA_PAUSE_ASYM)) + sky2->flow_status = FC_TX; + } + + if (sky2->duplex == DUPLEX_HALF && sky2->speed < SPEED_1000 && + !(hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX)) + sky2->flow_status = FC_NONE; + + if (sky2->flow_status & FC_TX) + sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON); + else + sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); + + return 0; +} + +/* Interrupt from PHY */ +static void sky2_phy_intr(struct sky2_hw *hw, unsigned port) +{ + struct net_device *dev = hw->dev[port]; + struct sky2_port *sky2 = netdev_priv(dev); + u16 istatus, phystat; + + if (!netif_running(dev)) + return; + + spin_lock(&sky2->phy_lock); + istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT); + phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT); + + netif_info(sky2, intr, sky2->netdev, "phy interrupt status 0x%x 0x%x\n", + istatus, phystat); + + if (istatus & PHY_M_IS_AN_COMPL) { + if (sky2_autoneg_done(sky2, phystat) == 0 && + !netif_carrier_ok(dev)) + sky2_link_up(sky2); + goto out; + } + + if (istatus & PHY_M_IS_LSP_CHANGE) + sky2->speed = sky2_phy_speed(hw, phystat); + + if (istatus & PHY_M_IS_DUP_CHANGE) + sky2->duplex = + (phystat & PHY_M_PS_FULL_DUP) ? 
DUPLEX_FULL : DUPLEX_HALF; + + if (istatus & PHY_M_IS_LST_CHANGE) { + if (phystat & PHY_M_PS_LINK_UP) + sky2_link_up(sky2); + else + sky2_link_down(sky2); + } +out: + spin_unlock(&sky2->phy_lock); +} + +/* Special quick link interrupt (Yukon-2 Optima only) */ +static void sky2_qlink_intr(struct sky2_hw *hw) +{ + struct sky2_port *sky2 = netdev_priv(hw->dev[0]); + u32 imask; + u16 phy; + + /* disable irq */ + imask = sky2_read32(hw, B0_IMSK); + imask &= ~Y2_IS_PHY_QLNK; + sky2_write32(hw, B0_IMSK, imask); + + /* reset PHY Link Detect */ + phy = sky2_pci_read16(hw, PSM_CONFIG_REG4); + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); + sky2_pci_write16(hw, PSM_CONFIG_REG4, phy | 1); + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); + + sky2_link_up(sky2); +} + +/* Transmit timeout is only called if we are running, carrier is up + * and tx queue is full (stopped). + */ +static void sky2_tx_timeout(struct net_device *dev, unsigned int txqueue) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + + netif_err(sky2, timer, dev, "tx timeout\n"); + + netdev_printk(KERN_DEBUG, dev, "transmit ring %u .. %u report=%u done=%u\n", + sky2->tx_cons, sky2->tx_prod, + sky2_read16(hw, sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX), + sky2_read16(hw, Q_ADDR(txqaddr[sky2->port], Q_DONE))); + + /* can't restart safely under softirq */ + schedule_work(&hw->restart_work); +} + +static int sky2_change_mtu(struct net_device *dev, int new_mtu) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + int err; + u16 ctl, mode; + u32 imask; + + if (!netif_running(dev)) { + dev->mtu = new_mtu; + netdev_update_features(dev); + return 0; + } + + imask = sky2_read32(hw, B0_IMSK); + sky2_write32(hw, B0_IMSK, 0); + sky2_read32(hw, B0_IMSK); + + netif_trans_update(dev); /* prevent tx timeout */ + napi_disable(&hw->napi); + netif_tx_disable(dev); + + synchronize_irq(hw->pdev->irq); + + if (!(hw->flags & SKY2_HW_RAM_BUFFER)) + sky2_set_tx_stfwd(hw, port); + + ctl = gma_read16(hw, port, GM_GP_CTRL); + gma_write16(hw, port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA); + sky2_rx_stop(sky2); + sky2_rx_clean(sky2); + + dev->mtu = new_mtu; + netdev_update_features(dev); + + mode = DATA_BLIND_VAL(DATA_BLIND_DEF) | GM_SMOD_VLAN_ENA; + if (sky2->speed > SPEED_100) + mode |= IPG_DATA_VAL(IPG_DATA_DEF_1000); + else + mode |= IPG_DATA_VAL(IPG_DATA_DEF_10_100); + + if (dev->mtu > ETH_DATA_LEN) + mode |= GM_SMOD_JUMBO_ENA; + + gma_write16(hw, port, GM_SERIAL_MODE, mode); + + sky2_write8(hw, RB_ADDR(rxqaddr[port], RB_CTRL), RB_ENA_OP_MD); + + err = sky2_alloc_rx_skbs(sky2); + if (!err) + sky2_rx_start(sky2); + else + sky2_rx_clean(sky2); + sky2_write32(hw, B0_IMSK, imask); + + sky2_read32(hw, B0_Y2_SP_LISR); + napi_enable(&hw->napi); + + if (err) + dev_close(dev); + else { + gma_write16(hw, port, GM_GP_CTRL, ctl); + + netif_wake_queue(dev); + } + + return err; +} + +static inline bool needs_copy(const struct rx_ring_info *re, + unsigned length) +{ +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + /* Some architectures need the IP header to be aligned */ + if (!IS_ALIGNED(re->data_addr + ETH_HLEN, sizeof(u32))) + return true; +#endif + return length < copybreak; +} + +/* For small just reuse existing skb for next receive */ +static struct sk_buff *receive_copy(struct sky2_port *sky2, + const struct rx_ring_info *re, + unsigned length) +{ + struct sk_buff *skb; + + skb = netdev_alloc_skb_ip_align(sky2->netdev, length); + if (likely(skb)) { + 
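+		/* Small frame: sync the mapped buffer for CPU access, copy the
+		 * data and its checksum/hash/vlan metadata into the new skb,
+		 * then hand the buffer back to the device for reuse. */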
dma_sync_single_for_cpu(&sky2->hw->pdev->dev, re->data_addr, + length, DMA_FROM_DEVICE); + skb_copy_from_linear_data(re->skb, skb->data, length); + skb->ip_summed = re->skb->ip_summed; + skb->csum = re->skb->csum; + skb_copy_hash(skb, re->skb); + __vlan_hwaccel_copy_tag(skb, re->skb); + + dma_sync_single_for_device(&sky2->hw->pdev->dev, + re->data_addr, length, + DMA_FROM_DEVICE); + __vlan_hwaccel_clear_tag(re->skb); + skb_clear_hash(re->skb); + re->skb->ip_summed = CHECKSUM_NONE; + skb_put(skb, length); + } + return skb; +} + +/* Adjust length of skb with fragments to match received data */ +static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space, + unsigned int length) +{ + int i, num_frags; + unsigned int size; + + /* put header into skb */ + size = min(length, hdr_space); + skb->tail += size; + skb->len += size; + length -= size; + + num_frags = skb_shinfo(skb)->nr_frags; + for (i = 0; i < num_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + if (length == 0) { + /* don't need this page */ + __skb_frag_unref(frag); + --skb_shinfo(skb)->nr_frags; + } else { + size = min(length, (unsigned) PAGE_SIZE); + + skb_frag_size_set(frag, size); + skb->data_len += size; + skb->truesize += PAGE_SIZE; + skb->len += size; + length -= size; + } + } +} + +/* Normal packet - take skb from ring element and put in a new one */ +static struct sk_buff *receive_new(struct sky2_port *sky2, + struct rx_ring_info *re, + unsigned int length) +{ + struct sk_buff *skb; + struct rx_ring_info nre; + unsigned hdr_space = sky2->rx_data_size; + + nre.skb = sky2_rx_alloc(sky2, GFP_ATOMIC); + if (unlikely(!nre.skb)) + goto nobuf; + + if (sky2_rx_map_skb(sky2->hw->pdev, &nre, hdr_space)) + goto nomap; + + skb = re->skb; + sky2_rx_unmap_skb(sky2->hw->pdev, re); + prefetch(skb->data); + *re = nre; + + if (skb_shinfo(skb)->nr_frags) + skb_put_frags(skb, hdr_space, length); + else + skb_put(skb, length); + return skb; + +nomap: + dev_kfree_skb(nre.skb); +nobuf: + return NULL; +} + +/* + * Receive one packet. + * For larger packets, get new buffer. + */ +static struct sk_buff *sky2_receive(struct net_device *dev, + u16 length, u32 status) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct rx_ring_info *re = sky2->rx_ring + sky2->rx_next; + struct sk_buff *skb = NULL; + u16 count = (status & GMR_FS_LEN) >> 16; + + netif_printk(sky2, rx_status, KERN_DEBUG, dev, + "rx slot %u status 0x%x len %d\n", + sky2->rx_next, status, length); + + sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending; + prefetch(sky2->rx_ring + sky2->rx_next); + + if (skb_vlan_tag_present(re->skb)) + count -= VLAN_HLEN; /* Account for vlan tag */ + + /* This chip has hardware problems that generates bogus status. + * So do only marginal checking and expect higher level protocols + * to handle crap frames. 
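+ * (On Yukon FE+ A0 the reported length can be unreliable, so a length
+ * mismatch alone is not treated as an error on that chip.)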
+ */
+	if (sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
+	    sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0 &&
+	    length != count)
+		goto okay;
+
+	if (status & GMR_FS_ANY_ERR)
+		goto error;
+
+	if (!(status & GMR_FS_RX_OK))
+		goto resubmit;
+
+	/* if length reported by DMA does not match PHY, packet was truncated */
+	if (length != count)
+		goto error;
+
+okay:
+	if (needs_copy(re, length))
+		skb = receive_copy(sky2, re, length);
+	else
+		skb = receive_new(sky2, re, length);
+
+	dev->stats.rx_dropped += (skb == NULL);
+
+resubmit:
+	sky2_rx_submit(sky2, re);
+
+	return skb;
+
+error:
+	++dev->stats.rx_errors;
+
+	if (net_ratelimit())
+		netif_info(sky2, rx_err, dev,
+			   "rx error, status 0x%x length %d\n", status, length);
+
+	goto resubmit;
+}
+
+/* Transmit complete */
+static inline void sky2_tx_done(struct net_device *dev, u16 last)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+
+	if (netif_running(dev)) {
+		sky2_tx_complete(sky2, last);
+
+		/* Wake unless it's detached, and called e.g. from sky2_close() */
+		if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
+			netif_wake_queue(dev);
+	}
+}
+
+static inline void sky2_skb_rx(const struct sky2_port *sky2,
+			       struct sk_buff *skb)
+{
+	if (skb->ip_summed == CHECKSUM_NONE)
+		netif_receive_skb(skb);
+	else
+		napi_gro_receive(&sky2->hw->napi, skb);
+}
+
+static inline void sky2_rx_done(struct sky2_hw *hw, unsigned port,
+				unsigned packets, unsigned bytes)
+{
+	struct net_device *dev = hw->dev[port];
+	struct sky2_port *sky2 = netdev_priv(dev);
+
+	if (packets == 0)
+		return;
+
+	u64_stats_update_begin(&sky2->rx_stats.syncp);
+	sky2->rx_stats.packets += packets;
+	sky2->rx_stats.bytes += bytes;
+	u64_stats_update_end(&sky2->rx_stats.syncp);
+
+	sky2->last_rx = jiffies;
+	sky2_rx_update(netdev_priv(dev), rxqaddr[port]);
+}
+
+static void sky2_rx_checksum(struct sky2_port *sky2, u32 status)
+{
+	/* If this happens, the driver is assuming the wrong status
+	 * format for this chip type.
+	 */
+	BUG_ON(sky2->hw->flags & SKY2_HW_NEW_LE);
+
+	/* Both checksum counters are programmed to start at
+	 * the same offset, so unless there is a problem they
+	 * should match. This failure is an early indication that
+	 * hardware receive checksumming won't work. 
+ */ + if (likely((u16)(status >> 16) == (u16)status)) { + struct sk_buff *skb = sky2->rx_ring[sky2->rx_next].skb; + skb->ip_summed = CHECKSUM_COMPLETE; + skb->csum = le16_to_cpu(status); + } else { + dev_notice(&sky2->hw->pdev->dev, + "%s: receive checksum problem (status = %#x)\n", + sky2->netdev->name, status); + + /* Disable checksum offload + * It will be reenabled on next ndo_set_features, but if it's + * really broken, will get disabled again + */ + sky2->netdev->features &= ~NETIF_F_RXCSUM; + sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR), + BMU_DIS_RX_CHKSUM); + } +} + +static void sky2_rx_tag(struct sky2_port *sky2, u16 length) +{ + struct sk_buff *skb; + + skb = sky2->rx_ring[sky2->rx_next].skb; + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(length)); +} + +static void sky2_rx_hash(struct sky2_port *sky2, u32 status) +{ + struct sk_buff *skb; + + skb = sky2->rx_ring[sky2->rx_next].skb; + skb_set_hash(skb, le32_to_cpu(status), PKT_HASH_TYPE_L3); +} + +/* Process status response ring */ +static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx) +{ + int work_done = 0; + unsigned int total_bytes[2] = { 0 }; + unsigned int total_packets[2] = { 0 }; + + if (to_do <= 0) + return work_done; + + rmb(); + do { + struct sky2_port *sky2; + struct sky2_status_le *le = hw->st_le + hw->st_idx; + unsigned port; + struct net_device *dev; + struct sk_buff *skb; + u32 status; + u16 length; + u8 opcode = le->opcode; + + if (!(opcode & HW_OWNER)) + break; + + hw->st_idx = RING_NEXT(hw->st_idx, hw->st_size); + + port = le->css & CSS_LINK_BIT; + dev = hw->dev[port]; + sky2 = netdev_priv(dev); + length = le16_to_cpu(le->length); + status = le32_to_cpu(le->status); + + le->opcode = 0; + switch (opcode & ~HW_OWNER) { + case OP_RXSTAT: + total_packets[port]++; + total_bytes[port] += length; + + skb = sky2_receive(dev, length, status); + if (!skb) + break; + + /* This chip reports checksum status differently */ + if (hw->flags & SKY2_HW_NEW_LE) { + if ((dev->features & NETIF_F_RXCSUM) && + (le->css & (CSS_ISIPV4 | CSS_ISIPV6)) && + (le->css & CSS_TCPUDPCSOK)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb->ip_summed = CHECKSUM_NONE; + } + + skb->protocol = eth_type_trans(skb, dev); + sky2_skb_rx(sky2, skb); + + /* Stop after net poll weight */ + if (++work_done >= to_do) + goto exit_loop; + break; + + case OP_RXVLAN: + sky2_rx_tag(sky2, length); + break; + + case OP_RXCHKSVLAN: + sky2_rx_tag(sky2, length); + fallthrough; + case OP_RXCHKS: + if (likely(dev->features & NETIF_F_RXCSUM)) + sky2_rx_checksum(sky2, status); + break; + + case OP_RSS_HASH: + sky2_rx_hash(sky2, status); + break; + + case OP_TXINDEXLE: + /* TX index reports status for both ports */ + sky2_tx_done(hw->dev[0], status & 0xfff); + if (hw->dev[1]) + sky2_tx_done(hw->dev[1], + ((status >> 24) & 0xff) + | (u16)(length & 0xf) << 8); + break; + + default: + if (net_ratelimit()) + pr_warn("unknown status opcode 0x%x\n", opcode); + } + } while (hw->st_idx != idx); + + /* Fully processed status ring so clear irq */ + sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ); + +exit_loop: + sky2_rx_done(hw, 0, total_packets[0], total_bytes[0]); + sky2_rx_done(hw, 1, total_packets[1], total_bytes[1]); + + return work_done; +} + +static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status) +{ + struct net_device *dev = hw->dev[port]; + + if (net_ratelimit()) + netdev_info(dev, "hw error interrupt status 0x%x\n", status); + + if (status & Y2_IS_PAR_RD1) { + if (net_ratelimit()) + netdev_err(dev, "ram data 
read parity error\n"); + /* Clear IRQ */ + sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR); + } + + if (status & Y2_IS_PAR_WR1) { + if (net_ratelimit()) + netdev_err(dev, "ram data write parity error\n"); + + sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR); + } + + if (status & Y2_IS_PAR_MAC1) { + if (net_ratelimit()) + netdev_err(dev, "MAC parity error\n"); + sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE); + } + + if (status & Y2_IS_PAR_RX1) { + if (net_ratelimit()) + netdev_err(dev, "RX parity error\n"); + sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR); + } + + if (status & Y2_IS_TCP_TXA1) { + if (net_ratelimit()) + netdev_err(dev, "TCP segmentation error\n"); + sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP); + } +} + +static void sky2_hw_intr(struct sky2_hw *hw) +{ + struct pci_dev *pdev = hw->pdev; + u32 status = sky2_read32(hw, B0_HWE_ISRC); + u32 hwmsk = sky2_read32(hw, B0_HWE_IMSK); + + status &= hwmsk; + + if (status & Y2_IS_TIST_OV) + sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); + + if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) { + u16 pci_err; + + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); + pci_err = sky2_pci_read16(hw, PCI_STATUS); + if (net_ratelimit()) + dev_err(&pdev->dev, "PCI hardware error (0x%x)\n", + pci_err); + + sky2_pci_write16(hw, PCI_STATUS, + pci_err | PCI_STATUS_ERROR_BITS); + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); + } + + if (status & Y2_IS_PCI_EXP) { + /* PCI-Express uncorrectable Error occurred */ + u32 err; + + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); + err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS); + sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS, + 0xfffffffful); + if (net_ratelimit()) + dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err); + + sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS); + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); + } + + if (status & Y2_HWE_L1_MASK) + sky2_hw_error(hw, 0, status); + status >>= 8; + if (status & Y2_HWE_L1_MASK) + sky2_hw_error(hw, 1, status); +} + +static void sky2_mac_intr(struct sky2_hw *hw, unsigned port) +{ + struct net_device *dev = hw->dev[port]; + struct sky2_port *sky2 = netdev_priv(dev); + u8 status = sky2_read8(hw, SK_REG(port, GMAC_IRQ_SRC)); + + netif_info(sky2, intr, dev, "mac interrupt status 0x%x\n", status); + + if (status & GM_IS_RX_CO_OV) + gma_read16(hw, port, GM_RX_IRQ_SRC); + + if (status & GM_IS_TX_CO_OV) + gma_read16(hw, port, GM_TX_IRQ_SRC); + + if (status & GM_IS_RX_FF_OR) { + ++dev->stats.rx_fifo_errors; + sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO); + } + + if (status & GM_IS_TX_FF_UR) { + ++dev->stats.tx_fifo_errors; + sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU); + } +} + +/* This should never happen it is a bug. 
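+ * sky2_le_error() logs the queue and the prefetch unit's get/put
+ * indices, then clears the descriptor check interrupt.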
*/ +static void sky2_le_error(struct sky2_hw *hw, unsigned port, u16 q) +{ + struct net_device *dev = hw->dev[port]; + u16 idx = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX)); + + dev_err(&hw->pdev->dev, "%s: descriptor error q=%#x get=%u put=%u\n", + dev->name, (unsigned) q, (unsigned) idx, + (unsigned) sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX))); + + sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_IRQ_CHK); +} + +static int sky2_rx_hung(struct net_device *dev) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + unsigned rxq = rxqaddr[port]; + u32 mac_rp = sky2_read32(hw, SK_REG(port, RX_GMF_RP)); + u8 mac_lev = sky2_read8(hw, SK_REG(port, RX_GMF_RLEV)); + u8 fifo_rp = sky2_read8(hw, Q_ADDR(rxq, Q_RP)); + u8 fifo_lev = sky2_read8(hw, Q_ADDR(rxq, Q_RL)); + + /* If idle and MAC or PCI is stuck */ + if (sky2->check.last == sky2->last_rx && + ((mac_rp == sky2->check.mac_rp && + mac_lev != 0 && mac_lev >= sky2->check.mac_lev) || + /* Check if the PCI RX hang */ + (fifo_rp == sky2->check.fifo_rp && + fifo_lev != 0 && fifo_lev >= sky2->check.fifo_lev))) { + netdev_printk(KERN_DEBUG, dev, + "hung mac %d:%d fifo %d (%d:%d)\n", + mac_lev, mac_rp, fifo_lev, + fifo_rp, sky2_read8(hw, Q_ADDR(rxq, Q_WP))); + return 1; + } else { + sky2->check.last = sky2->last_rx; + sky2->check.mac_rp = mac_rp; + sky2->check.mac_lev = mac_lev; + sky2->check.fifo_rp = fifo_rp; + sky2->check.fifo_lev = fifo_lev; + return 0; + } +} + +static void sky2_watchdog(struct timer_list *t) +{ + struct sky2_hw *hw = from_timer(hw, t, watchdog_timer); + + /* Check for lost IRQ once a second */ + if (sky2_read32(hw, B0_ISRC)) { + napi_schedule(&hw->napi); + } else { + int i, active = 0; + + for (i = 0; i < hw->ports; i++) { + struct net_device *dev = hw->dev[i]; + if (!netif_running(dev)) + continue; + ++active; + + /* For chips with Rx FIFO, check if stuck */ + if ((hw->flags & SKY2_HW_RAM_BUFFER) && + sky2_rx_hung(dev)) { + netdev_info(dev, "receiver hang detected\n"); + schedule_work(&hw->restart_work); + return; + } + } + + if (active == 0) + return; + } + + mod_timer(&hw->watchdog_timer, round_jiffies(jiffies + HZ)); +} + +/* Hardware/software error handling */ +static void sky2_err_intr(struct sky2_hw *hw, u32 status) +{ + if (net_ratelimit()) + dev_warn(&hw->pdev->dev, "error interrupt status=%#x\n", status); + + if (status & Y2_IS_HW_ERR) + sky2_hw_intr(hw); + + if (status & Y2_IS_IRQ_MAC1) + sky2_mac_intr(hw, 0); + + if (status & Y2_IS_IRQ_MAC2) + sky2_mac_intr(hw, 1); + + if (status & Y2_IS_CHK_RX1) + sky2_le_error(hw, 0, Q_R1); + + if (status & Y2_IS_CHK_RX2) + sky2_le_error(hw, 1, Q_R2); + + if (status & Y2_IS_CHK_TXA1) + sky2_le_error(hw, 0, Q_XA1); + + if (status & Y2_IS_CHK_TXA2) + sky2_le_error(hw, 1, Q_XA2); +} + +static int sky2_poll(struct napi_struct *napi, int work_limit) +{ + struct sky2_hw *hw = container_of(napi, struct sky2_hw, napi); + u32 status = sky2_read32(hw, B0_Y2_SP_EISR); + int work_done = 0; + u16 idx; + + if (unlikely(status & Y2_IS_ERROR)) + sky2_err_intr(hw, status); + + if (status & Y2_IS_IRQ_PHY1) + sky2_phy_intr(hw, 0); + + if (status & Y2_IS_IRQ_PHY2) + sky2_phy_intr(hw, 1); + + if (status & Y2_IS_PHY_QLNK) + sky2_qlink_intr(hw); + + while ((idx = sky2_read16(hw, STAT_PUT_IDX)) != hw->st_idx) { + work_done += sky2_status_intr(hw, work_limit - work_done, idx); + + if (work_done >= work_limit) + goto done; + } + + napi_complete_done(napi, work_done); + sky2_read32(hw, B0_Y2_SP_LISR); +done: + + return work_done; +} + +static 
irqreturn_t sky2_intr(int irq, void *dev_id) +{ + struct sky2_hw *hw = dev_id; + u32 status; + + /* Reading this mask interrupts as side effect */ + status = sky2_read32(hw, B0_Y2_SP_ISRC2); + if (status == 0 || status == ~0) { + sky2_write32(hw, B0_Y2_SP_ICR, 2); + return IRQ_NONE; + } + + prefetch(&hw->st_le[hw->st_idx]); + + napi_schedule(&hw->napi); + + return IRQ_HANDLED; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void sky2_netpoll(struct net_device *dev) +{ + struct sky2_port *sky2 = netdev_priv(dev); + + napi_schedule(&sky2->hw->napi); +} +#endif + +/* Chip internal frequency for clock calculations */ +static u32 sky2_mhz(const struct sky2_hw *hw) +{ + switch (hw->chip_id) { + case CHIP_ID_YUKON_EC: + case CHIP_ID_YUKON_EC_U: + case CHIP_ID_YUKON_EX: + case CHIP_ID_YUKON_SUPR: + case CHIP_ID_YUKON_UL_2: + case CHIP_ID_YUKON_OPT: + case CHIP_ID_YUKON_PRM: + case CHIP_ID_YUKON_OP_2: + return 125; + + case CHIP_ID_YUKON_FE: + return 100; + + case CHIP_ID_YUKON_FE_P: + return 50; + + case CHIP_ID_YUKON_XL: + return 156; + + default: + BUG(); + } +} + +static inline u32 sky2_us2clk(const struct sky2_hw *hw, u32 us) +{ + return sky2_mhz(hw) * us; +} + +static inline u32 sky2_clk2us(const struct sky2_hw *hw, u32 clk) +{ + return clk / sky2_mhz(hw); +} + + +static int sky2_init(struct sky2_hw *hw) +{ + u8 t8; + + /* Enable all clocks and check for bad PCI access */ + sky2_pci_write32(hw, PCI_DEV_REG3, 0); + + sky2_write8(hw, B0_CTST, CS_RST_CLR); + + hw->chip_id = sky2_read8(hw, B2_CHIP_ID); + hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4; + + switch (hw->chip_id) { + case CHIP_ID_YUKON_XL: + hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY; + if (hw->chip_rev < CHIP_REV_YU_XL_A2) + hw->flags |= SKY2_HW_RSS_BROKEN; + break; + + case CHIP_ID_YUKON_EC_U: + hw->flags = SKY2_HW_GIGABIT + | SKY2_HW_NEWER_PHY + | SKY2_HW_ADV_POWER_CTL; + break; + + case CHIP_ID_YUKON_EX: + hw->flags = SKY2_HW_GIGABIT + | SKY2_HW_NEWER_PHY + | SKY2_HW_NEW_LE + | SKY2_HW_ADV_POWER_CTL + | SKY2_HW_RSS_CHKSUM; + + /* New transmit checksum */ + if (hw->chip_rev != CHIP_REV_YU_EX_B0) + hw->flags |= SKY2_HW_AUTO_TX_SUM; + break; + + case CHIP_ID_YUKON_EC: + /* This rev is really old, and requires untested workarounds */ + if (hw->chip_rev == CHIP_REV_YU_EC_A1) { + dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n"); + return -EOPNOTSUPP; + } + hw->flags = SKY2_HW_GIGABIT | SKY2_HW_RSS_BROKEN; + break; + + case CHIP_ID_YUKON_FE: + hw->flags = SKY2_HW_RSS_BROKEN; + break; + + case CHIP_ID_YUKON_FE_P: + hw->flags = SKY2_HW_NEWER_PHY + | SKY2_HW_NEW_LE + | SKY2_HW_AUTO_TX_SUM + | SKY2_HW_ADV_POWER_CTL; + + /* The workaround for status conflicts VLAN tag detection. 
*/ + if (hw->chip_rev == CHIP_REV_YU_FE2_A0) + hw->flags |= SKY2_HW_VLAN_BROKEN | SKY2_HW_RSS_CHKSUM; + break; + + case CHIP_ID_YUKON_SUPR: + hw->flags = SKY2_HW_GIGABIT + | SKY2_HW_NEWER_PHY + | SKY2_HW_NEW_LE + | SKY2_HW_AUTO_TX_SUM + | SKY2_HW_ADV_POWER_CTL; + + if (hw->chip_rev == CHIP_REV_YU_SU_A0) + hw->flags |= SKY2_HW_RSS_CHKSUM; + break; + + case CHIP_ID_YUKON_UL_2: + hw->flags = SKY2_HW_GIGABIT + | SKY2_HW_ADV_POWER_CTL; + break; + + case CHIP_ID_YUKON_OPT: + case CHIP_ID_YUKON_PRM: + case CHIP_ID_YUKON_OP_2: + hw->flags = SKY2_HW_GIGABIT + | SKY2_HW_NEW_LE + | SKY2_HW_ADV_POWER_CTL; + break; + + default: + dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n", + hw->chip_id); + return -EOPNOTSUPP; + } + + hw->pmd_type = sky2_read8(hw, B2_PMD_TYP); + if (hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P') + hw->flags |= SKY2_HW_FIBRE_PHY; + + hw->ports = 1; + t8 = sky2_read8(hw, B2_Y2_HW_RES); + if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) { + if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC)) + ++hw->ports; + } + + if (sky2_read8(hw, B2_E_0)) + hw->flags |= SKY2_HW_RAM_BUFFER; + + return 0; +} + +static void sky2_reset(struct sky2_hw *hw) +{ + struct pci_dev *pdev = hw->pdev; + u16 status; + int i; + u32 hwe_mask = Y2_HWE_ALL_MASK; + + /* disable ASF */ + if (hw->chip_id == CHIP_ID_YUKON_EX + || hw->chip_id == CHIP_ID_YUKON_SUPR) { + sky2_write32(hw, CPU_WDOG, 0); + status = sky2_read16(hw, HCU_CCSR); + status &= ~(HCU_CCSR_AHB_RST | HCU_CCSR_CPU_RST_MODE | + HCU_CCSR_UC_STATE_MSK); + /* + * CPU clock divider shouldn't be used because + * - ASF firmware may malfunction + * - Yukon-Supreme: Parallel FLASH doesn't support divided clocks + */ + status &= ~HCU_CCSR_CPU_CLK_DIVIDE_MSK; + sky2_write16(hw, HCU_CCSR, status); + sky2_write32(hw, CPU_WDOG, 0); + } else + sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET); + sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE); + + /* do a SW reset */ + sky2_write8(hw, B0_CTST, CS_RST_SET); + sky2_write8(hw, B0_CTST, CS_RST_CLR); + + /* allow writes to PCI config */ + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); + + /* clear PCI errors, if any */ + status = sky2_pci_read16(hw, PCI_STATUS); + status |= PCI_STATUS_ERROR_BITS; + sky2_pci_write16(hw, PCI_STATUS, status); + + sky2_write8(hw, B0_CTST, CS_MRST_CLR); + + if (pci_is_pcie(pdev)) { + sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS, + 0xfffffffful); + + /* If error bit is stuck on ignore it */ + if (sky2_read32(hw, B0_HWE_ISRC) & Y2_IS_PCI_EXP) + dev_info(&pdev->dev, "ignoring stuck error report bit\n"); + else + hwe_mask |= Y2_IS_PCI_EXP; + } + + sky2_power_on(hw); + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); + + for (i = 0; i < hw->ports; i++) { + sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET); + sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR); + + if (hw->chip_id == CHIP_ID_YUKON_EX || + hw->chip_id == CHIP_ID_YUKON_SUPR) + sky2_write16(hw, SK_REG(i, GMAC_CTRL), + GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON + | GMC_BYP_RETR_ON); + + } + + if (hw->chip_id == CHIP_ID_YUKON_SUPR && hw->chip_rev > CHIP_REV_YU_SU_B0) { + /* enable MACSec clock gating */ + sky2_pci_write32(hw, PCI_DEV_REG3, P_CLK_MACSEC_DIS); + } + + if (hw->chip_id == CHIP_ID_YUKON_OPT || + hw->chip_id == CHIP_ID_YUKON_PRM || + hw->chip_id == CHIP_ID_YUKON_OP_2) { + u16 reg; + + if (hw->chip_id == CHIP_ID_YUKON_OPT && hw->chip_rev == 0) { + /* disable PCI-E PHY power down (set PHY reg 0x80, bit 7 */ + sky2_write32(hw, Y2_PEX_PHY_DATA, (0x80UL << 16) | (1 << 7)); + + /* 
set PHY Link Detect Timer to 1.1 second (11x 100ms) */ + reg = 10; + + /* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */ + sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16)); + } else { + /* set PHY Link Detect Timer to 0.4 second (4x 100ms) */ + reg = 3; + } + + reg <<= PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE; + reg |= PSM_CONFIG_REG4_RST_PHY_LINK_DETECT; + + /* reset PHY Link Detect */ + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); + sky2_pci_write16(hw, PSM_CONFIG_REG4, reg); + + /* check if PSMv2 was running before */ + reg = sky2_pci_read16(hw, PSM_CONFIG_REG3); + if (reg & PCI_EXP_LNKCTL_ASPMC) + /* restore the PCIe Link Control register */ + sky2_pci_write16(hw, pdev->pcie_cap + PCI_EXP_LNKCTL, + reg); + + if (hw->chip_id == CHIP_ID_YUKON_PRM && + hw->chip_rev == CHIP_REV_YU_PRM_A0) { + /* change PHY Interrupt polarity to low active */ + reg = sky2_read16(hw, GPHY_CTRL); + sky2_write16(hw, GPHY_CTRL, reg | GPC_INTPOL); + + /* adapt HW for low active PHY Interrupt */ + reg = sky2_read16(hw, Y2_CFG_SPC + PCI_LDO_CTRL); + sky2_write16(hw, Y2_CFG_SPC + PCI_LDO_CTRL, reg | PHY_M_UNDOC1); + } + + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); + + /* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */ + sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16)); + } + + /* Clear I2C IRQ noise */ + sky2_write32(hw, B2_I2C_IRQ, 1); + + /* turn off hardware timer (unused) */ + sky2_write8(hw, B2_TI_CTRL, TIM_STOP); + sky2_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ); + + /* Turn off descriptor polling */ + sky2_write32(hw, B28_DPT_CTRL, DPT_STOP); + + /* Turn off receive timestamp */ + sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_STOP); + sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); + + /* enable the Tx Arbiters */ + for (i = 0; i < hw->ports; i++) + sky2_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB); + + /* Initialize ram interface */ + for (i = 0; i < hw->ports; i++) { + sky2_write8(hw, RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR); + + sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R1), SK_RI_TO_53); + sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA1), SK_RI_TO_53); + sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS1), SK_RI_TO_53); + sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R1), SK_RI_TO_53); + sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA1), SK_RI_TO_53); + sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS1), SK_RI_TO_53); + sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R2), SK_RI_TO_53); + sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA2), SK_RI_TO_53); + sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS2), SK_RI_TO_53); + sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R2), SK_RI_TO_53); + sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA2), SK_RI_TO_53); + sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS2), SK_RI_TO_53); + } + + sky2_write32(hw, B0_HWE_IMSK, hwe_mask); + + for (i = 0; i < hw->ports; i++) + sky2_gmac_reset(hw, i); + + memset(hw->st_le, 0, hw->st_size * sizeof(struct sky2_status_le)); + hw->st_idx = 0; + + sky2_write32(hw, STAT_CTRL, SC_STAT_RST_SET); + sky2_write32(hw, STAT_CTRL, SC_STAT_RST_CLR); + + sky2_write32(hw, STAT_LIST_ADDR_LO, hw->st_dma); + sky2_write32(hw, STAT_LIST_ADDR_HI, (u64) hw->st_dma >> 32); + + /* Set the list last index */ + sky2_write16(hw, STAT_LAST_IDX, hw->st_size - 1); + + sky2_write16(hw, STAT_TX_IDX_TH, 10); + sky2_write8(hw, STAT_FIFO_WM, 16); + + /* set Status-FIFO ISR watermark */ + if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0) + sky2_write8(hw, STAT_FIFO_ISR_WM, 4); + else + sky2_write8(hw, STAT_FIFO_ISR_WM, 16); + + sky2_write32(hw, STAT_TX_TIMER_INI, 
sky2_us2clk(hw, 1000)); + sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 20)); + sky2_write32(hw, STAT_LEV_TIMER_INI, sky2_us2clk(hw, 100)); + + /* enable status unit */ + sky2_write32(hw, STAT_CTRL, SC_STAT_OP_ON); + + sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START); + sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START); + sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START); +} + +/* Take device down (offline). + * Equivalent to doing dev_stop() but this does not + * inform upper layers of the transition. + */ +static void sky2_detach(struct net_device *dev) +{ + if (netif_running(dev)) { + netif_tx_lock(dev); + netif_device_detach(dev); /* stop txq */ + netif_tx_unlock(dev); + sky2_close(dev); + } +} + +/* Bring device back after doing sky2_detach */ +static int sky2_reattach(struct net_device *dev) +{ + int err = 0; + + if (netif_running(dev)) { + err = sky2_open(dev); + if (err) { + netdev_info(dev, "could not restart %d\n", err); + dev_close(dev); + } else { + netif_device_attach(dev); + sky2_set_multicast(dev); + } + } + + return err; +} + +static void sky2_all_down(struct sky2_hw *hw) +{ + int i; + + if (hw->flags & SKY2_HW_IRQ_SETUP) { + sky2_write32(hw, B0_IMSK, 0); + sky2_read32(hw, B0_IMSK); + + synchronize_irq(hw->pdev->irq); + napi_disable(&hw->napi); + } + + for (i = 0; i < hw->ports; i++) { + struct net_device *dev = hw->dev[i]; + struct sky2_port *sky2 = netdev_priv(dev); + + if (!netif_running(dev)) + continue; + + netif_carrier_off(dev); + netif_tx_disable(dev); + sky2_hw_down(sky2); + } +} + +static void sky2_all_up(struct sky2_hw *hw) +{ + u32 imask = Y2_IS_BASE; + int i; + + for (i = 0; i < hw->ports; i++) { + struct net_device *dev = hw->dev[i]; + struct sky2_port *sky2 = netdev_priv(dev); + + if (!netif_running(dev)) + continue; + + sky2_hw_up(sky2); + sky2_set_multicast(dev); + imask |= portirq_msk[i]; + netif_wake_queue(dev); + } + + if (hw->flags & SKY2_HW_IRQ_SETUP) { + sky2_write32(hw, B0_IMSK, imask); + sky2_read32(hw, B0_IMSK); + sky2_read32(hw, B0_Y2_SP_LISR); + napi_enable(&hw->napi); + } +} + +static void sky2_restart(struct work_struct *work) +{ + struct sky2_hw *hw = container_of(work, struct sky2_hw, restart_work); + + rtnl_lock(); + + sky2_all_down(hw); + sky2_reset(hw); + sky2_all_up(hw); + + rtnl_unlock(); +} + +static inline u8 sky2_wol_supported(const struct sky2_hw *hw) +{ + return sky2_is_copper(hw) ? 
(WAKE_PHY | WAKE_MAGIC) : 0; +} + +static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + const struct sky2_port *sky2 = netdev_priv(dev); + + wol->supported = sky2_wol_supported(sky2->hw); + wol->wolopts = sky2->wol; +} + +static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + bool enable_wakeup = false; + int i; + + if ((wol->wolopts & ~sky2_wol_supported(sky2->hw)) || + !device_can_wakeup(&hw->pdev->dev)) + return -EOPNOTSUPP; + + sky2->wol = wol->wolopts; + + for (i = 0; i < hw->ports; i++) { + struct net_device *dev = hw->dev[i]; + struct sky2_port *sky2 = netdev_priv(dev); + + if (sky2->wol) + enable_wakeup = true; + } + device_set_wakeup_enable(&hw->pdev->dev, enable_wakeup); + + return 0; +} + +static u32 sky2_supported_modes(const struct sky2_hw *hw) +{ + if (sky2_is_copper(hw)) { + u32 modes = SUPPORTED_10baseT_Half + | SUPPORTED_10baseT_Full + | SUPPORTED_100baseT_Half + | SUPPORTED_100baseT_Full; + + if (hw->flags & SKY2_HW_GIGABIT) + modes |= SUPPORTED_1000baseT_Half + | SUPPORTED_1000baseT_Full; + return modes; + } else + return SUPPORTED_1000baseT_Half + | SUPPORTED_1000baseT_Full; +} + +static int sky2_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + u32 supported, advertising; + + supported = sky2_supported_modes(hw); + cmd->base.phy_address = PHY_ADDR_MARV; + if (sky2_is_copper(hw)) { + cmd->base.port = PORT_TP; + cmd->base.speed = sky2->speed; + supported |= SUPPORTED_Autoneg | SUPPORTED_TP; + } else { + cmd->base.speed = SPEED_1000; + cmd->base.port = PORT_FIBRE; + supported |= SUPPORTED_Autoneg | SUPPORTED_FIBRE; + } + + advertising = sky2->advertising; + cmd->base.autoneg = (sky2->flags & SKY2_FLAG_AUTO_SPEED) + ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; + cmd->base.duplex = sky2->duplex; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + + return 0; +} + +static int sky2_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) +{ + struct sky2_port *sky2 = netdev_priv(dev); + const struct sky2_hw *hw = sky2->hw; + u32 supported = sky2_supported_modes(hw); + u32 new_advertising; + + ethtool_convert_link_mode_to_legacy_u32(&new_advertising, + cmd->link_modes.advertising); + + if (cmd->base.autoneg == AUTONEG_ENABLE) { + if (new_advertising & ~supported) + return -EINVAL; + + if (sky2_is_copper(hw)) + sky2->advertising = new_advertising | + ADVERTISED_TP | + ADVERTISED_Autoneg; + else + sky2->advertising = new_advertising | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg; + + sky2->flags |= SKY2_FLAG_AUTO_SPEED; + sky2->duplex = -1; + sky2->speed = -1; + } else { + u32 setting; + u32 speed = cmd->base.speed; + + switch (speed) { + case SPEED_1000: + if (cmd->base.duplex == DUPLEX_FULL) + setting = SUPPORTED_1000baseT_Full; + else if (cmd->base.duplex == DUPLEX_HALF) + setting = SUPPORTED_1000baseT_Half; + else + return -EINVAL; + break; + case SPEED_100: + if (cmd->base.duplex == DUPLEX_FULL) + setting = SUPPORTED_100baseT_Full; + else if (cmd->base.duplex == DUPLEX_HALF) + setting = SUPPORTED_100baseT_Half; + else + return -EINVAL; + break; + + case SPEED_10: + if (cmd->base.duplex == DUPLEX_FULL) + setting = SUPPORTED_10baseT_Full; + else if (cmd->base.duplex == DUPLEX_HALF) + setting = SUPPORTED_10baseT_Half; + else + return -EINVAL; + break; + default: + return -EINVAL; + } + + if ((setting & supported) == 0) + return -EINVAL; + + sky2->speed = speed; + sky2->duplex = cmd->base.duplex; + sky2->flags &= ~SKY2_FLAG_AUTO_SPEED; + } + + if (netif_running(dev)) { + sky2_phy_reinit(sky2); + sky2_set_multicast(dev); + } + + return 0; +} + +static void sky2_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct sky2_port *sky2 = netdev_priv(dev); + + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(sky2->hw->pdev), + sizeof(info->bus_info)); +} + +static const struct sky2_stat { + char name[ETH_GSTRING_LEN]; + u16 offset; +} sky2_stats[] = { + { "tx_bytes", GM_TXO_OK_HI }, + { "rx_bytes", GM_RXO_OK_HI }, + { "tx_broadcast", GM_TXF_BC_OK }, + { "rx_broadcast", GM_RXF_BC_OK }, + { "tx_multicast", GM_TXF_MC_OK }, + { "rx_multicast", GM_RXF_MC_OK }, + { "tx_unicast", GM_TXF_UC_OK }, + { "rx_unicast", GM_RXF_UC_OK }, + { "tx_mac_pause", GM_TXF_MPAUSE }, + { "rx_mac_pause", GM_RXF_MPAUSE }, + { "collisions", GM_TXF_COL }, + { "late_collision",GM_TXF_LAT_COL }, + { "aborted", GM_TXF_ABO_COL }, + { "single_collisions", GM_TXF_SNG_COL }, + { "multi_collisions", GM_TXF_MUL_COL }, + + { "rx_short", GM_RXF_SHT }, + { "rx_runt", GM_RXE_FRAG }, + { "rx_64_byte_packets", GM_RXF_64B }, + { "rx_65_to_127_byte_packets", GM_RXF_127B }, + { "rx_128_to_255_byte_packets", GM_RXF_255B }, + { "rx_256_to_511_byte_packets", GM_RXF_511B }, + { "rx_512_to_1023_byte_packets", GM_RXF_1023B }, + { "rx_1024_to_1518_byte_packets", GM_RXF_1518B }, + { "rx_1518_to_max_byte_packets", GM_RXF_MAX_SZ }, + { "rx_too_long", GM_RXF_LNG_ERR }, + { "rx_fifo_overflow", GM_RXE_FIFO_OV }, + { "rx_jabber", GM_RXF_JAB_PKT }, + { "rx_fcs_error", GM_RXF_FCS_ERR }, + + { "tx_64_byte_packets", 
GM_TXF_64B }, + { "tx_65_to_127_byte_packets", GM_TXF_127B }, + { "tx_128_to_255_byte_packets", GM_TXF_255B }, + { "tx_256_to_511_byte_packets", GM_TXF_511B }, + { "tx_512_to_1023_byte_packets", GM_TXF_1023B }, + { "tx_1024_to_1518_byte_packets", GM_TXF_1518B }, + { "tx_1519_to_max_byte_packets", GM_TXF_MAX_SZ }, + { "tx_fifo_underrun", GM_TXE_FIFO_UR }, +}; + +static u32 sky2_get_msglevel(struct net_device *netdev) +{ + struct sky2_port *sky2 = netdev_priv(netdev); + return sky2->msg_enable; +} + +static int sky2_nway_reset(struct net_device *dev) +{ + struct sky2_port *sky2 = netdev_priv(dev); + + if (!netif_running(dev) || !(sky2->flags & SKY2_FLAG_AUTO_SPEED)) + return -EINVAL; + + sky2_phy_reinit(sky2); + sky2_set_multicast(dev); + + return 0; +} + +static void sky2_phy_stats(struct sky2_port *sky2, u64 * data, unsigned count) +{ + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + int i; + + data[0] = get_stats64(hw, port, GM_TXO_OK_LO); + data[1] = get_stats64(hw, port, GM_RXO_OK_LO); + + for (i = 2; i < count; i++) + data[i] = get_stats32(hw, port, sky2_stats[i].offset); +} + +static void sky2_set_msglevel(struct net_device *netdev, u32 value) +{ + struct sky2_port *sky2 = netdev_priv(netdev); + sky2->msg_enable = value; +} + +static int sky2_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(sky2_stats); + default: + return -EOPNOTSUPP; + } +} + +static void sky2_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 * data) +{ + struct sky2_port *sky2 = netdev_priv(dev); + + sky2_phy_stats(sky2, data, ARRAY_SIZE(sky2_stats)); +} + +static void sky2_get_strings(struct net_device *dev, u32 stringset, u8 * data) +{ + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(sky2_stats); i++) + memcpy(data + i * ETH_GSTRING_LEN, + sky2_stats[i].name, ETH_GSTRING_LEN); + break; + } +} + +static int sky2_set_mac_address(struct net_device *dev, void *p) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + const struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + memcpy_toio(hw->regs + B2_MAC_1 + port * 8, + dev->dev_addr, ETH_ALEN); + memcpy_toio(hw->regs + B2_MAC_2 + port * 8, + dev->dev_addr, ETH_ALEN); + + /* virtual address for data */ + gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr); + + /* physical address: used for pause frames */ + gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr); + + return 0; +} + +static inline void sky2_add_filter(u8 filter[8], const u8 *addr) +{ + u32 bit; + + bit = ether_crc(ETH_ALEN, addr) & 63; + filter[bit >> 3] |= 1 << (bit & 7); +} + +static void sky2_set_multicast(struct net_device *dev) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + struct netdev_hw_addr *ha; + u16 reg; + u8 filter[8]; + int rx_pause; + static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 }; + + rx_pause = (sky2->flow_status == FC_RX || sky2->flow_status == FC_BOTH); + memset(filter, 0, sizeof(filter)); + + reg = gma_read16(hw, port, GM_RX_CTRL); + reg |= GM_RXCR_UCF_ENA; + + if (dev->flags & IFF_PROMISC) /* promiscuous */ + reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); + else if (dev->flags & IFF_ALLMULTI) + memset(filter, 0xff, sizeof(filter)); + else if (netdev_mc_empty(dev) && !rx_pause) + reg &= 
~GM_RXCR_MCF_ENA; + else { + reg |= GM_RXCR_MCF_ENA; + + if (rx_pause) + sky2_add_filter(filter, pause_mc_addr); + + netdev_for_each_mc_addr(ha, dev) + sky2_add_filter(filter, ha->addr); + } + + gma_write16(hw, port, GM_MC_ADDR_H1, + (u16) filter[0] | ((u16) filter[1] << 8)); + gma_write16(hw, port, GM_MC_ADDR_H2, + (u16) filter[2] | ((u16) filter[3] << 8)); + gma_write16(hw, port, GM_MC_ADDR_H3, + (u16) filter[4] | ((u16) filter[5] << 8)); + gma_write16(hw, port, GM_MC_ADDR_H4, + (u16) filter[6] | ((u16) filter[7] << 8)); + + gma_write16(hw, port, GM_RX_CTRL, reg); +} + +static void sky2_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + unsigned int start; + u64 _bytes, _packets; + + do { + start = u64_stats_fetch_begin_irq(&sky2->rx_stats.syncp); + _bytes = sky2->rx_stats.bytes; + _packets = sky2->rx_stats.packets; + } while (u64_stats_fetch_retry_irq(&sky2->rx_stats.syncp, start)); + + stats->rx_packets = _packets; + stats->rx_bytes = _bytes; + + do { + start = u64_stats_fetch_begin_irq(&sky2->tx_stats.syncp); + _bytes = sky2->tx_stats.bytes; + _packets = sky2->tx_stats.packets; + } while (u64_stats_fetch_retry_irq(&sky2->tx_stats.syncp, start)); + + stats->tx_packets = _packets; + stats->tx_bytes = _bytes; + + stats->multicast = get_stats32(hw, port, GM_RXF_MC_OK) + + get_stats32(hw, port, GM_RXF_BC_OK); + + stats->collisions = get_stats32(hw, port, GM_TXF_COL); + + stats->rx_length_errors = get_stats32(hw, port, GM_RXF_LNG_ERR); + stats->rx_crc_errors = get_stats32(hw, port, GM_RXF_FCS_ERR); + stats->rx_frame_errors = get_stats32(hw, port, GM_RXF_SHT) + + get_stats32(hw, port, GM_RXE_FRAG); + stats->rx_over_errors = get_stats32(hw, port, GM_RXE_FIFO_OV); + + stats->rx_dropped = dev->stats.rx_dropped; + stats->rx_fifo_errors = dev->stats.rx_fifo_errors; + stats->tx_fifo_errors = dev->stats.tx_fifo_errors; +} + +/* Can have one global because blinking is controlled by + * ethtool and that is always under RTNL mutex + */ +static void sky2_led(struct sky2_port *sky2, enum led_mode mode) +{ + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + + spin_lock_bh(&sky2->phy_lock); + if (hw->chip_id == CHIP_ID_YUKON_EC_U || + hw->chip_id == CHIP_ID_YUKON_EX || + hw->chip_id == CHIP_ID_YUKON_SUPR) { + u16 pg; + pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3); + + switch (mode) { + case MO_LED_OFF: + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, + PHY_M_LEDC_LOS_CTRL(8) | + PHY_M_LEDC_INIT_CTRL(8) | + PHY_M_LEDC_STA1_CTRL(8) | + PHY_M_LEDC_STA0_CTRL(8)); + break; + case MO_LED_ON: + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, + PHY_M_LEDC_LOS_CTRL(9) | + PHY_M_LEDC_INIT_CTRL(9) | + PHY_M_LEDC_STA1_CTRL(9) | + PHY_M_LEDC_STA0_CTRL(9)); + break; + case MO_LED_BLINK: + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, + PHY_M_LEDC_LOS_CTRL(0xa) | + PHY_M_LEDC_INIT_CTRL(0xa) | + PHY_M_LEDC_STA1_CTRL(0xa) | + PHY_M_LEDC_STA0_CTRL(0xa)); + break; + case MO_LED_NORM: + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, + PHY_M_LEDC_LOS_CTRL(1) | + PHY_M_LEDC_INIT_CTRL(8) | + PHY_M_LEDC_STA1_CTRL(7) | + PHY_M_LEDC_STA0_CTRL(7)); + } + + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); + } else + gm_phy_write(hw, port, PHY_MARV_LED_OVER, + PHY_M_LED_MO_DUP(mode) | + PHY_M_LED_MO_10(mode) | + PHY_M_LED_MO_100(mode) | + PHY_M_LED_MO_1000(mode) | + PHY_M_LED_MO_RX(mode) | + PHY_M_LED_MO_TX(mode)); + + spin_unlock_bh(&sky2->phy_lock); +} + +/* blink 
LED's for finding board */ +static int sky2_set_phys_id(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + struct sky2_port *sky2 = netdev_priv(dev); + + switch (state) { + case ETHTOOL_ID_ACTIVE: + return 1; /* cycle on/off once per second */ + case ETHTOOL_ID_INACTIVE: + sky2_led(sky2, MO_LED_NORM); + break; + case ETHTOOL_ID_ON: + sky2_led(sky2, MO_LED_ON); + break; + case ETHTOOL_ID_OFF: + sky2_led(sky2, MO_LED_OFF); + break; + } + + return 0; +} + +static void sky2_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *ecmd) +{ + struct sky2_port *sky2 = netdev_priv(dev); + + switch (sky2->flow_mode) { + case FC_NONE: + ecmd->tx_pause = ecmd->rx_pause = 0; + break; + case FC_TX: + ecmd->tx_pause = 1, ecmd->rx_pause = 0; + break; + case FC_RX: + ecmd->tx_pause = 0, ecmd->rx_pause = 1; + break; + case FC_BOTH: + ecmd->tx_pause = ecmd->rx_pause = 1; + } + + ecmd->autoneg = (sky2->flags & SKY2_FLAG_AUTO_PAUSE) + ? AUTONEG_ENABLE : AUTONEG_DISABLE; +} + +static int sky2_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *ecmd) +{ + struct sky2_port *sky2 = netdev_priv(dev); + + if (ecmd->autoneg == AUTONEG_ENABLE) + sky2->flags |= SKY2_FLAG_AUTO_PAUSE; + else + sky2->flags &= ~SKY2_FLAG_AUTO_PAUSE; + + sky2->flow_mode = sky2_flow(ecmd->rx_pause, ecmd->tx_pause); + + if (netif_running(dev)) + sky2_phy_reinit(sky2); + + return 0; +} + +static int sky2_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ecmd) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + + if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_STOP) + ecmd->tx_coalesce_usecs = 0; + else { + u32 clks = sky2_read32(hw, STAT_TX_TIMER_INI); + ecmd->tx_coalesce_usecs = sky2_clk2us(hw, clks); + } + ecmd->tx_max_coalesced_frames = sky2_read16(hw, STAT_TX_IDX_TH); + + if (sky2_read8(hw, STAT_LEV_TIMER_CTRL) == TIM_STOP) + ecmd->rx_coalesce_usecs = 0; + else { + u32 clks = sky2_read32(hw, STAT_LEV_TIMER_INI); + ecmd->rx_coalesce_usecs = sky2_clk2us(hw, clks); + } + ecmd->rx_max_coalesced_frames = sky2_read8(hw, STAT_FIFO_WM); + + if (sky2_read8(hw, STAT_ISR_TIMER_CTRL) == TIM_STOP) + ecmd->rx_coalesce_usecs_irq = 0; + else { + u32 clks = sky2_read32(hw, STAT_ISR_TIMER_INI); + ecmd->rx_coalesce_usecs_irq = sky2_clk2us(hw, clks); + } + + ecmd->rx_max_coalesced_frames_irq = sky2_read8(hw, STAT_FIFO_ISR_WM); + + return 0; +} + +/* Note: this affect both ports */ +static int sky2_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ecmd) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + const u32 tmax = sky2_clk2us(hw, 0x0ffffff); + + if (ecmd->tx_coalesce_usecs > tmax || + ecmd->rx_coalesce_usecs > tmax || + ecmd->rx_coalesce_usecs_irq > tmax) + return -EINVAL; + + if (ecmd->tx_max_coalesced_frames >= sky2->tx_ring_size-1) + return -EINVAL; + if (ecmd->rx_max_coalesced_frames > RX_MAX_PENDING) + return -EINVAL; + if (ecmd->rx_max_coalesced_frames_irq > RX_MAX_PENDING) + return -EINVAL; + + if (ecmd->tx_coalesce_usecs == 0) + sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP); + else { + sky2_write32(hw, STAT_TX_TIMER_INI, + sky2_us2clk(hw, ecmd->tx_coalesce_usecs)); + sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START); + } + sky2_write16(hw, STAT_TX_IDX_TH, ecmd->tx_max_coalesced_frames); + + if (ecmd->rx_coalesce_usecs == 0) + sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_STOP); + else { + sky2_write32(hw, STAT_LEV_TIMER_INI, + sky2_us2clk(hw, ecmd->rx_coalesce_usecs)); + sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START); + } + 
sky2_write8(hw, STAT_FIFO_WM, ecmd->rx_max_coalesced_frames); + + if (ecmd->rx_coalesce_usecs_irq == 0) + sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_STOP); + else { + sky2_write32(hw, STAT_ISR_TIMER_INI, + sky2_us2clk(hw, ecmd->rx_coalesce_usecs_irq)); + sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START); + } + sky2_write8(hw, STAT_FIFO_ISR_WM, ecmd->rx_max_coalesced_frames_irq); + return 0; +} + +/* + * Hardware is limited to min of 128 and max of 2048 for ring size + * and rounded up to next power of two + * to avoid division in modulus calclation + */ +static unsigned long roundup_ring_size(unsigned long pending) +{ + return max(128ul, roundup_pow_of_two(pending+1)); +} + +static void sky2_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) +{ + struct sky2_port *sky2 = netdev_priv(dev); + + ering->rx_max_pending = RX_MAX_PENDING; + ering->tx_max_pending = TX_MAX_PENDING; + + ering->rx_pending = sky2->rx_pending; + ering->tx_pending = sky2->tx_pending; +} + +static int sky2_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) +{ + struct sky2_port *sky2 = netdev_priv(dev); + + if (ering->rx_pending > RX_MAX_PENDING || + ering->rx_pending < 8 || + ering->tx_pending < TX_MIN_PENDING || + ering->tx_pending > TX_MAX_PENDING) + return -EINVAL; + + sky2_detach(dev); + + sky2->rx_pending = ering->rx_pending; + sky2->tx_pending = ering->tx_pending; + sky2->tx_ring_size = roundup_ring_size(sky2->tx_pending); + + return sky2_reattach(dev); +} + +static int sky2_get_regs_len(struct net_device *dev) +{ + return 0x4000; +} + +static int sky2_reg_access_ok(struct sky2_hw *hw, unsigned int b) +{ + /* This complicated switch statement is to make sure and + * only access regions that are unreserved. + * Some blocks are only valid on dual port cards. + */ + switch (b) { + /* second port */ + case 5: /* Tx Arbiter 2 */ + case 9: /* RX2 */ + case 14 ... 15: /* TX2 */ + case 17: case 19: /* Ram Buffer 2 */ + case 22 ... 23: /* Tx Ram Buffer 2 */ + case 25: /* Rx MAC Fifo 1 */ + case 27: /* Tx MAC Fifo 2 */ + case 31: /* GPHY 2 */ + case 40 ... 47: /* Pattern Ram 2 */ + case 52: case 54: /* TCP Segmentation 2 */ + case 112 ... 116: /* GMAC 2 */ + return hw->ports > 1; + + case 0: /* Control */ + case 2: /* Mac address */ + case 4: /* Tx Arbiter 1 */ + case 7: /* PCI express reg */ + case 8: /* RX1 */ + case 12 ... 13: /* TX1 */ + case 16: case 18:/* Rx Ram Buffer 1 */ + case 20 ... 21: /* Tx Ram Buffer 1 */ + case 24: /* Rx MAC Fifo 1 */ + case 26: /* Tx MAC Fifo 1 */ + case 28 ... 29: /* Descriptor and status unit */ + case 30: /* GPHY 1*/ + case 32 ... 39: /* Pattern Ram 1 */ + case 48: case 50: /* TCP Segmentation 1 */ + case 56 ... 60: /* PCI space */ + case 80 ... 
84: /* GMAC 1 */ + return 1; + + default: + return 0; + } +} + +/* + * Returns copy of control register region + * Note: ethtool_get_regs always provides full size (16k) buffer + */ +static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *p) +{ + const struct sky2_port *sky2 = netdev_priv(dev); + const void __iomem *io = sky2->hw->regs; + unsigned int b; + + regs->version = 1; + + for (b = 0; b < 128; b++) { + /* skip poisonous diagnostic ram region in block 3 */ + if (b == 3) + memcpy_fromio(p + 0x10, io + 0x10, 128 - 0x10); + else if (sky2_reg_access_ok(sky2->hw, b)) + memcpy_fromio(p, io, 128); + else + memset(p, 0, 128); + + p += 128; + io += 128; + } +} + +static int sky2_get_eeprom_len(struct net_device *dev) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + u16 reg2; + + reg2 = sky2_pci_read16(hw, PCI_DEV_REG2); + return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8); +} + +static int sky2_vpd_wait(const struct sky2_hw *hw, int cap, u16 busy) +{ + unsigned long start = jiffies; + + while ( (sky2_pci_read16(hw, cap + PCI_VPD_ADDR) & PCI_VPD_ADDR_F) == busy) { + /* Can take up to 10.6 ms for write */ + if (time_after(jiffies, start + HZ/4)) { + dev_err(&hw->pdev->dev, "VPD cycle timed out\n"); + return -ETIMEDOUT; + } + msleep(1); + } + + return 0; +} + +static int sky2_vpd_read(struct sky2_hw *hw, int cap, void *data, + u16 offset, size_t length) +{ + int rc = 0; + + while (length > 0) { + u32 val; + + sky2_pci_write16(hw, cap + PCI_VPD_ADDR, offset); + rc = sky2_vpd_wait(hw, cap, 0); + if (rc) + break; + + val = sky2_pci_read32(hw, cap + PCI_VPD_DATA); + + memcpy(data, &val, min(sizeof(val), length)); + offset += sizeof(u32); + data += sizeof(u32); + length -= sizeof(u32); + } + + return rc; +} + +static int sky2_vpd_write(struct sky2_hw *hw, int cap, const void *data, + u16 offset, unsigned int length) +{ + unsigned int i; + int rc = 0; + + for (i = 0; i < length; i += sizeof(u32)) { + u32 val = *(u32 *)(data + i); + + sky2_pci_write32(hw, cap + PCI_VPD_DATA, val); + sky2_pci_write32(hw, cap + PCI_VPD_ADDR, offset | PCI_VPD_ADDR_F); + + rc = sky2_vpd_wait(hw, cap, PCI_VPD_ADDR_F); + if (rc) + break; + } + return rc; +} + +static int sky2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 *data) +{ + struct sky2_port *sky2 = netdev_priv(dev); + int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD); + + if (!cap) + return -EINVAL; + + eeprom->magic = SKY2_EEPROM_MAGIC; + + return sky2_vpd_read(sky2->hw, cap, data, eeprom->offset, eeprom->len); +} + +static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 *data) +{ + struct sky2_port *sky2 = netdev_priv(dev); + int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD); + + if (!cap) + return -EINVAL; + + if (eeprom->magic != SKY2_EEPROM_MAGIC) + return -EINVAL; + + /* Partial writes not supported */ + if ((eeprom->offset & 3) || (eeprom->len & 3)) + return -EINVAL; + + return sky2_vpd_write(sky2->hw, cap, data, eeprom->offset, eeprom->len); +} + +static netdev_features_t sky2_fix_features(struct net_device *dev, + netdev_features_t features) +{ + const struct sky2_port *sky2 = netdev_priv(dev); + const struct sky2_hw *hw = sky2->hw; + + /* In order to do Jumbo packets on these chips, need to turn off the + * transmit store/forward. Therefore checksum offload won't work. 
+ */ + if (dev->mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_EC_U) { + netdev_info(dev, "checksum offload not possible with jumbo frames\n"); + features &= ~(NETIF_F_TSO | NETIF_F_SG | NETIF_F_CSUM_MASK); + } + + /* Some hardware requires receive checksum for RSS to work. */ + if ( (features & NETIF_F_RXHASH) && + !(features & NETIF_F_RXCSUM) && + (sky2->hw->flags & SKY2_HW_RSS_CHKSUM)) { + netdev_info(dev, "receive hashing forces receive checksum\n"); + features |= NETIF_F_RXCSUM; + } + + return features; +} + +static int sky2_set_features(struct net_device *dev, netdev_features_t features) +{ + struct sky2_port *sky2 = netdev_priv(dev); + netdev_features_t changed = dev->features ^ features; + + if ((changed & NETIF_F_RXCSUM) && + !(sky2->hw->flags & SKY2_HW_NEW_LE)) { + sky2_write32(sky2->hw, + Q_ADDR(rxqaddr[sky2->port], Q_CSR), + (features & NETIF_F_RXCSUM) + ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); + } + + if (changed & NETIF_F_RXHASH) + rx_set_rss(dev, features); + + if (changed & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX)) + sky2_vlan_mode(dev, features); + + return 0; +} + +static const struct ethtool_ops sky2_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES | + ETHTOOL_COALESCE_RX_USECS_IRQ | + ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ, + .get_drvinfo = sky2_get_drvinfo, + .get_wol = sky2_get_wol, + .set_wol = sky2_set_wol, + .get_msglevel = sky2_get_msglevel, + .set_msglevel = sky2_set_msglevel, + .nway_reset = sky2_nway_reset, + .get_regs_len = sky2_get_regs_len, + .get_regs = sky2_get_regs, + .get_link = ethtool_op_get_link, + .get_eeprom_len = sky2_get_eeprom_len, + .get_eeprom = sky2_get_eeprom, + .set_eeprom = sky2_set_eeprom, + .get_strings = sky2_get_strings, + .get_coalesce = sky2_get_coalesce, + .set_coalesce = sky2_set_coalesce, + .get_ringparam = sky2_get_ringparam, + .set_ringparam = sky2_set_ringparam, + .get_pauseparam = sky2_get_pauseparam, + .set_pauseparam = sky2_set_pauseparam, + .set_phys_id = sky2_set_phys_id, + .get_sset_count = sky2_get_sset_count, + .get_ethtool_stats = sky2_get_ethtool_stats, + .get_link_ksettings = sky2_get_link_ksettings, + .set_link_ksettings = sky2_set_link_ksettings, +}; + +#ifdef CONFIG_SKY2_DEBUG + +static struct dentry *sky2_debug; + + +/* + * Read and parse the first part of Vital Product Data + */ +#define VPD_SIZE 128 +#define VPD_MAGIC 0x82 + +static const struct vpd_tag { + char tag[2]; + char *label; +} vpd_tags[] = { + { "PN", "Part Number" }, + { "EC", "Engineering Level" }, + { "MN", "Manufacturer" }, + { "SN", "Serial Number" }, + { "YA", "Asset Tag" }, + { "VL", "First Error Log Message" }, + { "VF", "Second Error Log Message" }, + { "VB", "Boot Agent ROM Configuration" }, + { "VE", "EFI UNDI Configuration" }, +}; + +static void sky2_show_vpd(struct seq_file *seq, struct sky2_hw *hw) +{ + size_t vpd_size; + loff_t offs; + u8 len; + unsigned char *buf; + u16 reg2; + + reg2 = sky2_pci_read16(hw, PCI_DEV_REG2); + vpd_size = 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8); + + seq_printf(seq, "%s Product Data\n", pci_name(hw->pdev)); + buf = kmalloc(vpd_size, GFP_KERNEL); + if (!buf) { + seq_puts(seq, "no memory!\n"); + return; + } + + if (pci_read_vpd(hw->pdev, 0, vpd_size, buf) < 0) { + seq_puts(seq, "VPD read failed\n"); + goto out; + } + + if (buf[0] != VPD_MAGIC) { + seq_printf(seq, "VPD tag mismatch: %#x\n", buf[0]); + goto out; + } + len = buf[1]; + if (len == 0 || len > vpd_size - 4) { + seq_printf(seq, "Invalid id length: %d\n", len); + goto out; + } + + 
seq_printf(seq, "%.*s\n", len, buf + 3); + offs = len + 3; + + while (offs < vpd_size - 4) { + int i; + + if (!memcmp("RW", buf + offs, 2)) /* end marker */ + break; + len = buf[offs + 2]; + if (offs + len + 3 >= vpd_size) + break; + + for (i = 0; i < ARRAY_SIZE(vpd_tags); i++) { + if (!memcmp(vpd_tags[i].tag, buf + offs, 2)) { + seq_printf(seq, " %s: %.*s\n", + vpd_tags[i].label, len, buf + offs + 3); + break; + } + } + offs += len + 3; + } +out: + kfree(buf); +} + +static int sky2_debug_show(struct seq_file *seq, void *v) +{ + struct net_device *dev = seq->private; + const struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + unsigned idx, last; + int sop; + + sky2_show_vpd(seq, hw); + + seq_printf(seq, "\nIRQ src=%x mask=%x control=%x\n", + sky2_read32(hw, B0_ISRC), + sky2_read32(hw, B0_IMSK), + sky2_read32(hw, B0_Y2_SP_ICR)); + + if (!netif_running(dev)) { + seq_puts(seq, "network not running\n"); + return 0; + } + + napi_disable(&hw->napi); + last = sky2_read16(hw, STAT_PUT_IDX); + + seq_printf(seq, "Status ring %u\n", hw->st_size); + if (hw->st_idx == last) + seq_puts(seq, "Status ring (empty)\n"); + else { + seq_puts(seq, "Status ring\n"); + for (idx = hw->st_idx; idx != last && idx < hw->st_size; + idx = RING_NEXT(idx, hw->st_size)) { + const struct sky2_status_le *le = hw->st_le + idx; + seq_printf(seq, "[%d] %#x %d %#x\n", + idx, le->opcode, le->length, le->status); + } + seq_puts(seq, "\n"); + } + + seq_printf(seq, "Tx ring pending=%u...%u report=%d done=%d\n", + sky2->tx_cons, sky2->tx_prod, + sky2_read16(hw, port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX), + sky2_read16(hw, Q_ADDR(txqaddr[port], Q_DONE))); + + /* Dump contents of tx ring */ + sop = 1; + for (idx = sky2->tx_next; idx != sky2->tx_prod && idx < sky2->tx_ring_size; + idx = RING_NEXT(idx, sky2->tx_ring_size)) { + const struct sky2_tx_le *le = sky2->tx_le + idx; + u32 a = le32_to_cpu(le->addr); + + if (sop) + seq_printf(seq, "%u:", idx); + sop = 0; + + switch (le->opcode & ~HW_OWNER) { + case OP_ADDR64: + seq_printf(seq, " %#x:", a); + break; + case OP_LRGLEN: + seq_printf(seq, " mtu=%d", a); + break; + case OP_VLAN: + seq_printf(seq, " vlan=%d", be16_to_cpu(le->length)); + break; + case OP_TCPLISW: + seq_printf(seq, " csum=%#x", a); + break; + case OP_LARGESEND: + seq_printf(seq, " tso=%#x(%d)", a, le16_to_cpu(le->length)); + break; + case OP_PACKET: + seq_printf(seq, " %#x(%d)", a, le16_to_cpu(le->length)); + break; + case OP_BUFFER: + seq_printf(seq, " frag=%#x(%d)", a, le16_to_cpu(le->length)); + break; + default: + seq_printf(seq, " op=%#x,%#x(%d)", le->opcode, + a, le16_to_cpu(le->length)); + } + + if (le->ctrl & EOP) { + seq_putc(seq, '\n'); + sop = 1; + } + } + + seq_printf(seq, "\nRx ring hw get=%d put=%d last=%d\n", + sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_GET_IDX)), + sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_PUT_IDX)), + sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_LAST_IDX))); + + sky2_read32(hw, B0_Y2_SP_LISR); + napi_enable(&hw->napi); + return 0; +} +DEFINE_SHOW_ATTRIBUTE(sky2_debug); + +/* + * Use network device events to create/remove/rename + * debugfs file entries + */ +static int sky2_device_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct sky2_port *sky2 = netdev_priv(dev); + + if (dev->netdev_ops->ndo_open != sky2_open || !sky2_debug) + return NOTIFY_DONE; + + switch (event) { + case NETDEV_CHANGENAME: + if (sky2->debugfs) { + 
sky2->debugfs = debugfs_rename(sky2_debug, sky2->debugfs, + sky2_debug, dev->name); + } + break; + + case NETDEV_GOING_DOWN: + if (sky2->debugfs) { + netdev_printk(KERN_DEBUG, dev, "remove debugfs\n"); + debugfs_remove(sky2->debugfs); + sky2->debugfs = NULL; + } + break; + + case NETDEV_UP: + sky2->debugfs = debugfs_create_file(dev->name, 0444, + sky2_debug, dev, + &sky2_debug_fops); + if (IS_ERR(sky2->debugfs)) + sky2->debugfs = NULL; + } + + return NOTIFY_DONE; +} + +static struct notifier_block sky2_notifier = { + .notifier_call = sky2_device_event, +}; + + +static __init void sky2_debug_init(void) +{ + struct dentry *ent; + + ent = debugfs_create_dir("sky2", NULL); + if (!ent || IS_ERR(ent)) + return; + + sky2_debug = ent; + register_netdevice_notifier(&sky2_notifier); +} + +static __exit void sky2_debug_cleanup(void) +{ + if (sky2_debug) { + unregister_netdevice_notifier(&sky2_notifier); + debugfs_remove(sky2_debug); + sky2_debug = NULL; + } +} + +#else +#define sky2_debug_init() +#define sky2_debug_cleanup() +#endif + +/* Two copies of network device operations to handle special case of + not allowing netpoll on second port */ +static const struct net_device_ops sky2_netdev_ops[2] = { + { + .ndo_open = sky2_open, + .ndo_stop = sky2_close, + .ndo_start_xmit = sky2_xmit_frame, + .ndo_do_ioctl = sky2_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = sky2_set_mac_address, + .ndo_set_rx_mode = sky2_set_multicast, + .ndo_change_mtu = sky2_change_mtu, + .ndo_fix_features = sky2_fix_features, + .ndo_set_features = sky2_set_features, + .ndo_tx_timeout = sky2_tx_timeout, + .ndo_get_stats64 = sky2_get_stats, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = sky2_netpoll, +#endif + }, + { + .ndo_open = sky2_open, + .ndo_stop = sky2_close, + .ndo_start_xmit = sky2_xmit_frame, + .ndo_do_ioctl = sky2_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = sky2_set_mac_address, + .ndo_set_rx_mode = sky2_set_multicast, + .ndo_change_mtu = sky2_change_mtu, + .ndo_fix_features = sky2_fix_features, + .ndo_set_features = sky2_set_features, + .ndo_tx_timeout = sky2_tx_timeout, + .ndo_get_stats64 = sky2_get_stats, + }, +}; + +/* Initialize network device */ +static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port, + int highmem, int wol) +{ + struct sky2_port *sky2; + struct net_device *dev = alloc_etherdev(sizeof(*sky2)); + const void *iap; + + if (!dev) + return NULL; + + SET_NETDEV_DEV(dev, &hw->pdev->dev); + dev->irq = hw->pdev->irq; + dev->ethtool_ops = &sky2_ethtool_ops; + dev->watchdog_timeo = TX_WATCHDOG; + dev->netdev_ops = &sky2_netdev_ops[port]; + + sky2 = netdev_priv(dev); + sky2->netdev = dev; + sky2->hw = hw; + sky2->msg_enable = netif_msg_init(debug, default_msg); + + u64_stats_init(&sky2->tx_stats.syncp); + u64_stats_init(&sky2->rx_stats.syncp); + + /* Auto speed and flow control */ + sky2->flags = SKY2_FLAG_AUTO_SPEED | SKY2_FLAG_AUTO_PAUSE; + if (hw->chip_id != CHIP_ID_YUKON_XL) + dev->hw_features |= NETIF_F_RXCSUM; + + sky2->flow_mode = FC_BOTH; + + sky2->duplex = -1; + sky2->speed = -1; + sky2->advertising = sky2_supported_modes(hw); + sky2->wol = wol; + + spin_lock_init(&sky2->phy_lock); + + sky2->tx_pending = TX_DEF_PENDING; + sky2->tx_ring_size = roundup_ring_size(TX_DEF_PENDING); + sky2->rx_pending = RX_DEF_PENDING; + + hw->dev[port] = dev; + + sky2->port = port; + + dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO; + + if (highmem) + dev->features |= NETIF_F_HIGHDMA; + + /* Enable receive hashing 
unless hardware is known broken */ + if (!(hw->flags & SKY2_HW_RSS_BROKEN)) + dev->hw_features |= NETIF_F_RXHASH; + + if (!(hw->flags & SKY2_HW_VLAN_BROKEN)) { + dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; + dev->vlan_features |= SKY2_VLAN_OFFLOADS; + } + + dev->features |= dev->hw_features; + + /* MTU range: 60 - 1500 or 9000 */ + dev->min_mtu = ETH_ZLEN; + if (hw->chip_id == CHIP_ID_YUKON_FE || + hw->chip_id == CHIP_ID_YUKON_FE_P) + dev->max_mtu = ETH_DATA_LEN; + else + dev->max_mtu = ETH_JUMBO_MTU; + + /* try to get mac address in the following order: + * 1) from device tree data + * 2) from internal registers set by bootloader + */ + iap = of_get_mac_address(hw->pdev->dev.of_node); + if (!IS_ERR(iap)) + ether_addr_copy(dev->dev_addr, iap); + else + memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, + ETH_ALEN); + + /* if the address is invalid, use a random value */ + if (!is_valid_ether_addr(dev->dev_addr)) { + struct sockaddr sa = { AF_UNSPEC }; + + netdev_warn(dev, + "Invalid MAC address, defaulting to random\n"); + eth_hw_addr_random(dev); + memcpy(sa.sa_data, dev->dev_addr, ETH_ALEN); + if (sky2_set_mac_address(dev, &sa)) + netdev_warn(dev, "Failed to set MAC address.\n"); + } + + return dev; +} + +static void sky2_show_addr(struct net_device *dev) +{ + const struct sky2_port *sky2 = netdev_priv(dev); + + netif_info(sky2, probe, dev, "addr %pM\n", dev->dev_addr); +} + +/* Handle software interrupt used during MSI test */ +static irqreturn_t sky2_test_intr(int irq, void *dev_id) +{ + struct sky2_hw *hw = dev_id; + u32 status = sky2_read32(hw, B0_Y2_SP_ISRC2); + + if (status == 0) + return IRQ_NONE; + + if (status & Y2_IS_IRQ_SW) { + hw->flags |= SKY2_HW_USE_MSI; + wake_up(&hw->msi_wait); + sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ); + } + sky2_write32(hw, B0_Y2_SP_ICR, 2); + + return IRQ_HANDLED; +} + +/* Test interrupt path by forcing a a software IRQ */ +static int sky2_test_msi(struct sky2_hw *hw) +{ + struct pci_dev *pdev = hw->pdev; + int err; + + init_waitqueue_head(&hw->msi_wait); + + err = request_irq(pdev->irq, sky2_test_intr, 0, DRV_NAME, hw); + if (err) { + dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq); + return err; + } + + sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW); + + sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ); + sky2_read8(hw, B0_CTST); + + wait_event_timeout(hw->msi_wait, (hw->flags & SKY2_HW_USE_MSI), HZ/10); + + if (!(hw->flags & SKY2_HW_USE_MSI)) { + /* MSI test failed, go back to INTx mode */ + dev_info(&pdev->dev, "No interrupt generated using MSI, " + "switching to INTx mode.\n"); + + err = -EOPNOTSUPP; + sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ); + } + + sky2_write32(hw, B0_IMSK, 0); + sky2_read32(hw, B0_IMSK); + + free_irq(pdev->irq, hw); + + return err; +} + +/* This driver supports yukon2 chipset only */ +static const char *sky2_name(u8 chipid, char *buf, int sz) +{ + const char *name[] = { + "XL", /* 0xb3 */ + "EC Ultra", /* 0xb4 */ + "Extreme", /* 0xb5 */ + "EC", /* 0xb6 */ + "FE", /* 0xb7 */ + "FE+", /* 0xb8 */ + "Supreme", /* 0xb9 */ + "UL 2", /* 0xba */ + "Unknown", /* 0xbb */ + "Optima", /* 0xbc */ + "OptimaEEE", /* 0xbd */ + "Optima 2", /* 0xbe */ + }; + + if (chipid >= CHIP_ID_YUKON_XL && chipid <= CHIP_ID_YUKON_OP_2) + strncpy(buf, name[chipid - CHIP_ID_YUKON_XL], sz); + else + snprintf(buf, sz, "(chip %#x)", chipid); + return buf; +} + +static const struct dmi_system_id msi_blacklist[] = { + { + .ident = "Dell Inspiron 1545", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + 
DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1545"), + }, + }, + { + .ident = "Gateway P-79", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Gateway"), + DMI_MATCH(DMI_PRODUCT_NAME, "P-79"), + }, + }, + { + .ident = "ASUS P5W DH Deluxe", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTEK COMPUTER INC"), + DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"), + }, + }, + { + .ident = "ASUS P6T", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "P6T"), + }, + }, + { + .ident = "ASUS P6X", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "P6X"), + }, + }, + {} +}; + +static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *dev, *dev1; + struct sky2_hw *hw; + int err, using_dac = 0, wol_default; + u32 reg; + char buf1[16]; + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "cannot enable PCI device\n"); + goto err_out; + } + + /* Get configuration information + * Note: only regular PCI config access once to test for HW issues + * other PCI access through shared memory for speed and to + * avoid MMCONFIG problems. + */ + err = pci_read_config_dword(pdev, PCI_DEV_REG2, ®); + if (err) { + dev_err(&pdev->dev, "PCI read config failed\n"); + goto err_out_disable; + } + + if (~reg == 0) { + dev_err(&pdev->dev, "PCI configuration read error\n"); + err = -EIO; + goto err_out_disable; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + dev_err(&pdev->dev, "cannot obtain PCI resources\n"); + goto err_out_disable; + } + + pci_set_master(pdev); + + if (sizeof(dma_addr_t) > sizeof(u32) && + !(err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))) { + using_dac = 1; + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); + if (err < 0) { + dev_err(&pdev->dev, "unable to obtain 64 bit DMA " + "for consistent allocations\n"); + goto err_out_free_regions; + } + } else { + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "no usable DMA configuration\n"); + goto err_out_free_regions; + } + } + + +#ifdef __BIG_ENDIAN + /* The sk98lin vendor driver uses hardware byte swapping but + * this driver uses software swapping. + */ + reg &= ~PCI_REV_DESC; + err = pci_write_config_dword(pdev, PCI_DEV_REG2, reg); + if (err) { + dev_err(&pdev->dev, "PCI write config failed\n"); + goto err_out_free_regions; + } +#endif + + wol_default = device_may_wakeup(&pdev->dev) ? 
WAKE_MAGIC : 0; + + err = -ENOMEM; + + hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:") + + strlen(pci_name(pdev)) + 1, GFP_KERNEL); + if (!hw) + goto err_out_free_regions; + + hw->pdev = pdev; + sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev)); + + hw->regs = ioremap(pci_resource_start(pdev, 0), 0x4000); + if (!hw->regs) { + dev_err(&pdev->dev, "cannot map device registers\n"); + goto err_out_free_hw; + } + + err = sky2_init(hw); + if (err) + goto err_out_iounmap; + + /* ring for status responses */ + hw->st_size = hw->ports * roundup_pow_of_two(3*RX_MAX_PENDING + TX_MAX_PENDING); + hw->st_le = dma_alloc_coherent(&pdev->dev, + hw->st_size * sizeof(struct sky2_status_le), + &hw->st_dma, GFP_KERNEL); + if (!hw->st_le) { + err = -ENOMEM; + goto err_out_reset; + } + + dev_info(&pdev->dev, "Yukon-2 %s chip revision %d\n", + sky2_name(hw->chip_id, buf1, sizeof(buf1)), hw->chip_rev); + + sky2_reset(hw); + + dev = sky2_init_netdev(hw, 0, using_dac, wol_default); + if (!dev) { + err = -ENOMEM; + goto err_out_free_pci; + } + + if (disable_msi == -1) + disable_msi = !!dmi_check_system(msi_blacklist); + + if (!disable_msi && pci_enable_msi(pdev) == 0) { + err = sky2_test_msi(hw); + if (err) { + pci_disable_msi(pdev); + if (err != -EOPNOTSUPP) + goto err_out_free_netdev; + } + } + + netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT); + + err = register_netdev(dev); + if (err) { + dev_err(&pdev->dev, "cannot register net device\n"); + goto err_out_free_netdev; + } + + netif_carrier_off(dev); + + sky2_show_addr(dev); + + if (hw->ports > 1) { + dev1 = sky2_init_netdev(hw, 1, using_dac, wol_default); + if (!dev1) { + err = -ENOMEM; + goto err_out_unregister; + } + + err = register_netdev(dev1); + if (err) { + dev_err(&pdev->dev, "cannot register second net device\n"); + goto err_out_free_dev1; + } + + err = sky2_setup_irq(hw, hw->irq_name); + if (err) + goto err_out_unregister_dev1; + + sky2_show_addr(dev1); + } + + timer_setup(&hw->watchdog_timer, sky2_watchdog, 0); + INIT_WORK(&hw->restart_work, sky2_restart); + + pci_set_drvdata(pdev, hw); + pdev->d3hot_delay = 300; + + return 0; + +err_out_unregister_dev1: + unregister_netdev(dev1); +err_out_free_dev1: + free_netdev(dev1); +err_out_unregister: + unregister_netdev(dev); +err_out_free_netdev: + if (hw->flags & SKY2_HW_USE_MSI) + pci_disable_msi(pdev); + free_netdev(dev); +err_out_free_pci: + dma_free_coherent(&pdev->dev, + hw->st_size * sizeof(struct sky2_status_le), + hw->st_le, hw->st_dma); +err_out_reset: + sky2_write8(hw, B0_CTST, CS_RST_SET); +err_out_iounmap: + iounmap(hw->regs); +err_out_free_hw: + kfree(hw); +err_out_free_regions: + pci_release_regions(pdev); +err_out_disable: + pci_disable_device(pdev); +err_out: + return err; +} + +static void sky2_remove(struct pci_dev *pdev) +{ + struct sky2_hw *hw = pci_get_drvdata(pdev); + int i; + + if (!hw) + return; + + del_timer_sync(&hw->watchdog_timer); + cancel_work_sync(&hw->restart_work); + + for (i = hw->ports-1; i >= 0; --i) + unregister_netdev(hw->dev[i]); + + sky2_write32(hw, B0_IMSK, 0); + sky2_read32(hw, B0_IMSK); + + sky2_power_aux(hw); + + sky2_write8(hw, B0_CTST, CS_RST_SET); + sky2_read8(hw, B0_CTST); + + if (hw->ports > 1) { + napi_disable(&hw->napi); + free_irq(pdev->irq, hw); + } + + if (hw->flags & SKY2_HW_USE_MSI) + pci_disable_msi(pdev); + dma_free_coherent(&pdev->dev, + hw->st_size * sizeof(struct sky2_status_le), + hw->st_le, hw->st_dma); + pci_release_regions(pdev); + pci_disable_device(pdev); + + for (i = hw->ports-1; i >= 0; --i) + free_netdev(hw->dev[i]); + + 
iounmap(hw->regs); + kfree(hw); +} + +static int sky2_suspend(struct device *dev) +{ + struct sky2_hw *hw = dev_get_drvdata(dev); + int i; + + if (!hw) + return 0; + + del_timer_sync(&hw->watchdog_timer); + cancel_work_sync(&hw->restart_work); + + rtnl_lock(); + + sky2_all_down(hw); + for (i = 0; i < hw->ports; i++) { + struct net_device *dev = hw->dev[i]; + struct sky2_port *sky2 = netdev_priv(dev); + + if (sky2->wol) + sky2_wol_init(sky2); + } + + sky2_power_aux(hw); + rtnl_unlock(); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int sky2_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct sky2_hw *hw = pci_get_drvdata(pdev); + int err; + + if (!hw) + return 0; + + /* Re-enable all clocks */ + err = pci_write_config_dword(pdev, PCI_DEV_REG3, 0); + if (err) { + dev_err(&pdev->dev, "PCI write config failed\n"); + goto out; + } + + rtnl_lock(); + sky2_reset(hw); + sky2_all_up(hw); + rtnl_unlock(); + + return 0; +out: + + dev_err(&pdev->dev, "resume failed (%d)\n", err); + pci_disable_device(pdev); + return err; +} + +static SIMPLE_DEV_PM_OPS(sky2_pm_ops, sky2_suspend, sky2_resume); +#define SKY2_PM_OPS (&sky2_pm_ops) + +#else + +#define SKY2_PM_OPS NULL +#endif + +static void sky2_shutdown(struct pci_dev *pdev) +{ + struct sky2_hw *hw = pci_get_drvdata(pdev); + int port; + + for (port = 0; port < hw->ports; port++) { + struct net_device *ndev = hw->dev[port]; + + rtnl_lock(); + if (netif_running(ndev)) { + dev_close(ndev); + netif_device_detach(ndev); + } + rtnl_unlock(); + } + sky2_suspend(&pdev->dev); + pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev)); + pci_set_power_state(pdev, PCI_D3hot); +} + +static struct pci_driver sky2_driver = { + .name = DRV_NAME, + .id_table = sky2_id_table, + .probe = sky2_probe, + .remove = sky2_remove, + .shutdown = sky2_shutdown, + .driver.pm = SKY2_PM_OPS, +}; + +static int __init sky2_init_module(void) +{ + pr_info("driver version " DRV_VERSION "\n"); + + sky2_debug_init(); + return pci_register_driver(&sky2_driver); +} + +static void __exit sky2_cleanup_module(void) +{ + pci_unregister_driver(&sky2_driver); + sky2_debug_cleanup(); +} + +module_init(sky2_init_module); +module_exit(sky2_cleanup_module); + +MODULE_DESCRIPTION("Marvell Yukon 2 Gigabit Ethernet driver"); +MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h new file mode 100644 index 000000000..2bd0a7971 --- /dev/null +++ b/drivers/net/ethernet/marvell/sky2.h @@ -0,0 +1,2428 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Definitions for the new Marvell Yukon 2 driver. 
+ */ +#ifndef _SKY2_H +#define _SKY2_H + +#define ETH_JUMBO_MTU 9000 /* Maximum MTU supported */ + +/* PCI config registers */ +enum { + PCI_DEV_REG1 = 0x40, + PCI_DEV_REG2 = 0x44, + PCI_DEV_STATUS = 0x7c, + PCI_DEV_REG3 = 0x80, + PCI_DEV_REG4 = 0x84, + PCI_DEV_REG5 = 0x88, + PCI_CFG_REG_0 = 0x90, + PCI_CFG_REG_1 = 0x94, + + PSM_CONFIG_REG0 = 0x98, + PSM_CONFIG_REG1 = 0x9C, + PSM_CONFIG_REG2 = 0x160, + PSM_CONFIG_REG3 = 0x164, + PSM_CONFIG_REG4 = 0x168, + + PCI_LDO_CTRL = 0xbc, +}; + +/* Yukon-2 */ +enum pci_dev_reg_1 { + PCI_Y2_PIG_ENA = 1<<31, /* Enable Plug-in-Go (YUKON-2) */ + PCI_Y2_DLL_DIS = 1<<30, /* Disable PCI DLL (YUKON-2) */ + PCI_SW_PWR_ON_RST= 1<<30, /* SW Power on Reset (Yukon-EX) */ + PCI_Y2_PHY2_COMA = 1<<29, /* Set PHY 2 to Coma Mode (YUKON-2) */ + PCI_Y2_PHY1_COMA = 1<<28, /* Set PHY 1 to Coma Mode (YUKON-2) */ + PCI_Y2_PHY2_POWD = 1<<27, /* Set PHY 2 to Power Down (YUKON-2) */ + PCI_Y2_PHY1_POWD = 1<<26, /* Set PHY 1 to Power Down (YUKON-2) */ + PCI_Y2_PME_LEGACY= 1<<15, /* PCI Express legacy power management mode */ + + PCI_PHY_LNK_TIM_MSK= 3L<<8,/* Bit 9.. 8: GPHY Link Trigger Timer */ + PCI_ENA_L1_EVENT = 1<<7, /* Enable PEX L1 Event */ + PCI_ENA_GPHY_LNK = 1<<6, /* Enable PEX L1 on GPHY Link down */ + PCI_FORCE_PEX_L1 = 1<<5, /* Force to PEX L1 */ +}; + +enum pci_dev_reg_2 { + PCI_VPD_WR_THR = 0xffL<<24, /* Bit 31..24: VPD Write Threshold */ + PCI_DEV_SEL = 0x7fL<<17, /* Bit 23..17: EEPROM Device Select */ + PCI_VPD_ROM_SZ = 7L<<14, /* Bit 16..14: VPD ROM Size */ + + PCI_PATCH_DIR = 0xfL<<8, /* Bit 11.. 8: Ext Patches dir 3..0 */ + PCI_EXT_PATCHS = 0xfL<<4, /* Bit 7.. 4: Extended Patches 3..0 */ + PCI_EN_DUMMY_RD = 1<<3, /* Enable Dummy Read */ + PCI_REV_DESC = 1<<2, /* Reverse Desc. Bytes */ + + PCI_USEDATA64 = 1<<0, /* Use 64Bit Data bus ext */ +}; + +/* PCI_OUR_REG_3 32 bit Our Register 3 (Yukon-ECU only) */ +enum pci_dev_reg_3 { + P_CLK_ASF_REGS_DIS = 1<<18,/* Disable Clock ASF (Yukon-Ext.) */ + P_CLK_COR_REGS_D0_DIS = 1<<17,/* Disable Clock Core Regs D0 */ + P_CLK_MACSEC_DIS = 1<<17,/* Disable Clock MACSec (Yukon-Ext.) */ + P_CLK_PCI_REGS_D0_DIS = 1<<16,/* Disable Clock PCI Regs D0 */ + P_CLK_COR_YTB_ARB_DIS = 1<<15,/* Disable Clock YTB Arbiter */ + P_CLK_MAC_LNK1_D3_DIS = 1<<14,/* Disable Clock MAC Link1 D3 */ + P_CLK_COR_LNK1_D0_DIS = 1<<13,/* Disable Clock Core Link1 D0 */ + P_CLK_MAC_LNK1_D0_DIS = 1<<12,/* Disable Clock MAC Link1 D0 */ + P_CLK_COR_LNK1_D3_DIS = 1<<11,/* Disable Clock Core Link1 D3 */ + P_CLK_PCI_MST_ARB_DIS = 1<<10,/* Disable Clock PCI Master Arb. */ + P_CLK_COR_REGS_D3_DIS = 1<<9, /* Disable Clock Core Regs D3 */ + P_CLK_PCI_REGS_D3_DIS = 1<<8, /* Disable Clock PCI Regs D3 */ + P_CLK_REF_LNK1_GM_DIS = 1<<7, /* Disable Clock Ref. 
Link1 GMAC */ + P_CLK_COR_LNK1_GM_DIS = 1<<6, /* Disable Clock Core Link1 GMAC */ + P_CLK_PCI_COMMON_DIS = 1<<5, /* Disable Clock PCI Common */ + P_CLK_COR_COMMON_DIS = 1<<4, /* Disable Clock Core Common */ + P_CLK_PCI_LNK1_BMU_DIS = 1<<3, /* Disable Clock PCI Link1 BMU */ + P_CLK_COR_LNK1_BMU_DIS = 1<<2, /* Disable Clock Core Link1 BMU */ + P_CLK_PCI_LNK1_BIU_DIS = 1<<1, /* Disable Clock PCI Link1 BIU */ + P_CLK_COR_LNK1_BIU_DIS = 1<<0, /* Disable Clock Core Link1 BIU */ + PCIE_OUR3_WOL_D3_COLD_SET = P_CLK_ASF_REGS_DIS | + P_CLK_COR_REGS_D0_DIS | + P_CLK_COR_LNK1_D0_DIS | + P_CLK_MAC_LNK1_D0_DIS | + P_CLK_PCI_MST_ARB_DIS | + P_CLK_COR_COMMON_DIS | + P_CLK_COR_LNK1_BMU_DIS, +}; + +/* PCI_OUR_REG_4 32 bit Our Register 4 (Yukon-ECU only) */ +enum pci_dev_reg_4 { + /* (Link Training & Status State Machine) */ + P_PEX_LTSSM_STAT_MSK = 0x7fL<<25, /* Bit 31..25: PEX LTSSM Mask */ +#define P_PEX_LTSSM_STAT(x) ((x << 25) & P_PEX_LTSSM_STAT_MSK) + P_PEX_LTSSM_L1_STAT = 0x34, + P_PEX_LTSSM_DET_STAT = 0x01, + P_TIMER_VALUE_MSK = 0xffL<<16, /* Bit 23..16: Timer Value Mask */ + /* (Active State Power Management) */ + P_FORCE_ASPM_REQUEST = 1<<15, /* Force ASPM Request (A1 only) */ + P_ASPM_GPHY_LINK_DOWN = 1<<14, /* GPHY Link Down (A1 only) */ + P_ASPM_INT_FIFO_EMPTY = 1<<13, /* Internal FIFO Empty (A1 only) */ + P_ASPM_CLKRUN_REQUEST = 1<<12, /* CLKRUN Request (A1 only) */ + + P_ASPM_FORCE_CLKREQ_ENA = 1<<4, /* Force CLKREQ Enable (A1b only) */ + P_ASPM_CLKREQ_PAD_CTL = 1<<3, /* CLKREQ PAD Control (A1 only) */ + P_ASPM_A1_MODE_SELECT = 1<<2, /* A1 Mode Select (A1 only) */ + P_CLK_GATE_PEX_UNIT_ENA = 1<<1, /* Enable Gate PEX Unit Clock */ + P_CLK_GATE_ROOT_COR_ENA = 1<<0, /* Enable Gate Root Core Clock */ + P_ASPM_CONTROL_MSK = P_FORCE_ASPM_REQUEST | P_ASPM_GPHY_LINK_DOWN + | P_ASPM_CLKRUN_REQUEST | P_ASPM_INT_FIFO_EMPTY, +}; + +/* PCI_OUR_REG_5 32 bit Our Register 5 (Yukon-ECU only) */ +enum pci_dev_reg_5 { + /* Bit 31..27: for A3 & later */ + P_CTL_DIV_CORE_CLK_ENA = 1<<31, /* Divide Core Clock Enable */ + P_CTL_SRESET_VMAIN_AV = 1<<30, /* Soft Reset for Vmain_av De-Glitch */ + P_CTL_BYPASS_VMAIN_AV = 1<<29, /* Bypass En. for Vmain_av De-Glitch */ + P_CTL_TIM_VMAIN_AV_MSK = 3<<27, /* Bit 28..27: Timer Vmain_av Mask */ + /* Bit 26..16: Release Clock on Event */ + P_REL_PCIE_RST_DE_ASS = 1<<26, /* PCIe Reset De-Asserted */ + P_REL_GPHY_REC_PACKET = 1<<25, /* GPHY Received Packet */ + P_REL_INT_FIFO_N_EMPTY = 1<<24, /* Internal FIFO Not Empty */ + P_REL_MAIN_PWR_AVAIL = 1<<23, /* Main Power Available */ + P_REL_CLKRUN_REQ_REL = 1<<22, /* CLKRUN Request Release */ + P_REL_PCIE_RESET_ASS = 1<<21, /* PCIe Reset Asserted */ + P_REL_PME_ASSERTED = 1<<20, /* PME Asserted */ + P_REL_PCIE_EXIT_L1_ST = 1<<19, /* PCIe Exit L1 State */ + P_REL_LOADER_NOT_FIN = 1<<18, /* EPROM Loader Not Finished */ + P_REL_PCIE_RX_EX_IDLE = 1<<17, /* PCIe Rx Exit Electrical Idle State */ + P_REL_GPHY_LINK_UP = 1<<16, /* GPHY Link Up */ + + /* Bit 10.. 
0: Mask for Gate Clock */ + P_GAT_PCIE_RST_ASSERTED = 1<<10,/* PCIe Reset Asserted */ + P_GAT_GPHY_N_REC_PACKET = 1<<9, /* GPHY Not Received Packet */ + P_GAT_INT_FIFO_EMPTY = 1<<8, /* Internal FIFO Empty */ + P_GAT_MAIN_PWR_N_AVAIL = 1<<7, /* Main Power Not Available */ + P_GAT_CLKRUN_REQ_REL = 1<<6, /* CLKRUN Not Requested */ + P_GAT_PCIE_RESET_ASS = 1<<5, /* PCIe Reset Asserted */ + P_GAT_PME_DE_ASSERTED = 1<<4, /* PME De-Asserted */ + P_GAT_PCIE_ENTER_L1_ST = 1<<3, /* PCIe Enter L1 State */ + P_GAT_LOADER_FINISHED = 1<<2, /* EPROM Loader Finished */ + P_GAT_PCIE_RX_EL_IDLE = 1<<1, /* PCIe Rx Electrical Idle State */ + P_GAT_GPHY_LINK_DOWN = 1<<0, /* GPHY Link Down */ + + PCIE_OUR5_EVENT_CLK_D3_SET = P_REL_GPHY_REC_PACKET | + P_REL_INT_FIFO_N_EMPTY | + P_REL_PCIE_EXIT_L1_ST | + P_REL_PCIE_RX_EX_IDLE | + P_GAT_GPHY_N_REC_PACKET | + P_GAT_INT_FIFO_EMPTY | + P_GAT_PCIE_ENTER_L1_ST | + P_GAT_PCIE_RX_EL_IDLE, +}; + +/* PCI_CFG_REG_1 32 bit Config Register 1 (Yukon-Ext only) */ +enum pci_cfg_reg1 { + P_CF1_DIS_REL_EVT_RST = 1<<24, /* Dis. Rel. Event during PCIE reset */ + /* Bit 23..21: Release Clock on Event */ + P_CF1_REL_LDR_NOT_FIN = 1<<23, /* EEPROM Loader Not Finished */ + P_CF1_REL_VMAIN_AVLBL = 1<<22, /* Vmain available */ + P_CF1_REL_PCIE_RESET = 1<<21, /* PCI-E reset */ + /* Bit 20..18: Gate Clock on Event */ + P_CF1_GAT_LDR_NOT_FIN = 1<<20, /* EEPROM Loader Finished */ + P_CF1_GAT_PCIE_RX_IDLE = 1<<19, /* PCI-E Rx Electrical idle */ + P_CF1_GAT_PCIE_RESET = 1<<18, /* PCI-E Reset */ + P_CF1_PRST_PHY_CLKREQ = 1<<17, /* Enable PCI-E rst & PM2PHY gen. CLKREQ */ + P_CF1_PCIE_RST_CLKREQ = 1<<16, /* Enable PCI-E rst generate CLKREQ */ + + P_CF1_ENA_CFG_LDR_DONE = 1<<8, /* Enable core level Config loader done */ + + P_CF1_ENA_TXBMU_RD_IDLE = 1<<1, /* Enable TX BMU Read IDLE for ASPM */ + P_CF1_ENA_TXBMU_WR_IDLE = 1<<0, /* Enable TX BMU Write IDLE for ASPM */ + + PCIE_CFG1_EVENT_CLK_D3_SET = P_CF1_DIS_REL_EVT_RST | + P_CF1_REL_LDR_NOT_FIN | + P_CF1_REL_VMAIN_AVLBL | + P_CF1_REL_PCIE_RESET | + P_CF1_GAT_LDR_NOT_FIN | + P_CF1_GAT_PCIE_RESET | + P_CF1_PRST_PHY_CLKREQ | + P_CF1_ENA_CFG_LDR_DONE | + P_CF1_ENA_TXBMU_RD_IDLE | + P_CF1_ENA_TXBMU_WR_IDLE, +}; + +/* Yukon-Optima */ +enum { + PSM_CONFIG_REG1_AC_PRESENT_STATUS = 1<<31, /* AC Present Status */ + + PSM_CONFIG_REG1_PTP_CLK_SEL = 1<<29, /* PTP Clock Select */ + PSM_CONFIG_REG1_PTP_MODE = 1<<28, /* PTP Mode */ + + PSM_CONFIG_REG1_MUX_PHY_LINK = 1<<27, /* PHY Energy Detect Event */ + + PSM_CONFIG_REG1_EN_PIN63_AC_PRESENT = 1<<26, /* Enable LED_DUPLEX for ac_present */ + PSM_CONFIG_REG1_EN_PCIE_TIMER = 1<<25, /* Enable PCIe Timer */ + PSM_CONFIG_REG1_EN_SPU_TIMER = 1<<24, /* Enable SPU Timer */ + PSM_CONFIG_REG1_POLARITY_AC_PRESENT = 1<<23, /* AC Present Polarity */ + + PSM_CONFIG_REG1_EN_AC_PRESENT = 1<<21, /* Enable AC Present */ + + PSM_CONFIG_REG1_EN_GPHY_INT_PSM = 1<<20, /* Enable GPHY INT for PSM */ + PSM_CONFIG_REG1_DIS_PSM_TIMER = 1<<19, /* Disable PSM Timer */ +}; + +/* Yukon-Supreme */ +enum { + PSM_CONFIG_REG1_GPHY_ENERGY_STS = 1<<31, /* GPHY Energy Detect Status */ + + PSM_CONFIG_REG1_UART_MODE_MSK = 3<<29, /* UART_Mode */ + PSM_CONFIG_REG1_CLK_RUN_ASF = 1<<28, /* Enable Clock Free Running for ASF Subsystem */ + PSM_CONFIG_REG1_UART_CLK_DISABLE= 1<<27, /* Disable UART clock */ + PSM_CONFIG_REG1_VAUX_ONE = 1<<26, /* Tie internal Vaux to 1'b1 */ + PSM_CONFIG_REG1_UART_FC_RI_VAL = 1<<25, /* Default value for UART_RI_n */ + PSM_CONFIG_REG1_UART_FC_DCD_VAL = 1<<24, /* Default value for UART_DCD_n */ + PSM_CONFIG_REG1_UART_FC_DSR_VAL = 
1<<23, /* Default value for UART_DSR_n */ + PSM_CONFIG_REG1_UART_FC_CTS_VAL = 1<<22, /* Default value for UART_CTS_n */ + PSM_CONFIG_REG1_LATCH_VAUX = 1<<21, /* Enable Latch current Vaux_avlbl */ + PSM_CONFIG_REG1_FORCE_TESTMODE_INPUT= 1<<20, /* Force Testmode pin as input PAD */ + PSM_CONFIG_REG1_UART_RST = 1<<19, /* UART_RST */ + PSM_CONFIG_REG1_PSM_PCIE_L1_POL = 1<<18, /* PCIE L1 Event Polarity for PSM */ + PSM_CONFIG_REG1_TIMER_STAT = 1<<17, /* PSM Timer Status */ + PSM_CONFIG_REG1_GPHY_INT = 1<<16, /* GPHY INT Status */ + PSM_CONFIG_REG1_FORCE_TESTMODE_ZERO= 1<<15, /* Force internal Testmode as 1'b0 */ + PSM_CONFIG_REG1_EN_INT_ASPM_CLKREQ = 1<<14, /* ENABLE INT for CLKRUN on ASPM and CLKREQ */ + PSM_CONFIG_REG1_EN_SND_TASK_ASPM_CLKREQ = 1<<13, /* ENABLE Snd_task for CLKRUN on ASPM and CLKREQ */ + PSM_CONFIG_REG1_DIS_CLK_GATE_SND_TASK = 1<<12, /* Disable CLK_GATE control snd_task */ + PSM_CONFIG_REG1_DIS_FF_CHIAN_SND_INTA = 1<<11, /* Disable flip-flop chain for sndmsg_inta */ + + PSM_CONFIG_REG1_DIS_LOADER = 1<<9, /* Disable Loader SM after PSM Goes back to IDLE */ + PSM_CONFIG_REG1_DO_PWDN = 1<<8, /* Do Power Down, Start PSM Scheme */ + PSM_CONFIG_REG1_DIS_PIG = 1<<7, /* Disable Plug-in-Go SM after PSM Goes back to IDLE */ + PSM_CONFIG_REG1_DIS_PERST = 1<<6, /* Disable Internal PCIe Reset after PSM Goes back to IDLE */ + PSM_CONFIG_REG1_EN_REG18_PD = 1<<5, /* Enable REG18 Power Down for PSM */ + PSM_CONFIG_REG1_EN_PSM_LOAD = 1<<4, /* Disable EEPROM Loader after PSM Goes back to IDLE */ + PSM_CONFIG_REG1_EN_PSM_HOT_RST = 1<<3, /* Enable PCIe Hot Reset for PSM */ + PSM_CONFIG_REG1_EN_PSM_PERST = 1<<2, /* Enable PCIe Reset Event for PSM */ + PSM_CONFIG_REG1_EN_PSM_PCIE_L1 = 1<<1, /* Enable PCIe L1 Event for PSM */ + PSM_CONFIG_REG1_EN_PSM = 1<<0, /* Enable PSM Scheme */ +}; + +/* PSM_CONFIG_REG4 0x0168 PSM Config Register 4 */ +enum { + /* PHY Link Detect Timer */ + PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_MSK = 0xf<<4, + PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE = 4, + + PSM_CONFIG_REG4_DEBUG_TIMER = 1<<1, /* Debug Timer */ + PSM_CONFIG_REG4_RST_PHY_LINK_DETECT = 1<<0, /* Reset GPHY Link Detect */ +}; + + +enum csr_regs { + B0_RAP = 0x0000, + B0_CTST = 0x0004, + + B0_POWER_CTRL = 0x0007, + B0_ISRC = 0x0008, + B0_IMSK = 0x000c, + B0_HWE_ISRC = 0x0010, + B0_HWE_IMSK = 0x0014, + + /* Special ISR registers (Yukon-2 only) */ + B0_Y2_SP_ISRC2 = 0x001c, + B0_Y2_SP_ISRC3 = 0x0020, + B0_Y2_SP_EISR = 0x0024, + B0_Y2_SP_LISR = 0x0028, + B0_Y2_SP_ICR = 0x002c, + + B2_MAC_1 = 0x0100, + B2_MAC_2 = 0x0108, + B2_MAC_3 = 0x0110, + B2_CONN_TYP = 0x0118, + B2_PMD_TYP = 0x0119, + B2_MAC_CFG = 0x011a, + B2_CHIP_ID = 0x011b, + B2_E_0 = 0x011c, + + B2_Y2_CLK_GATE = 0x011d, + B2_Y2_HW_RES = 0x011e, + B2_E_3 = 0x011f, + B2_Y2_CLK_CTRL = 0x0120, + + B2_TI_INI = 0x0130, + B2_TI_VAL = 0x0134, + B2_TI_CTRL = 0x0138, + B2_TI_TEST = 0x0139, + + B2_TST_CTRL1 = 0x0158, + B2_TST_CTRL2 = 0x0159, + B2_GP_IO = 0x015c, + + B2_I2C_CTRL = 0x0160, + B2_I2C_DATA = 0x0164, + B2_I2C_IRQ = 0x0168, + B2_I2C_SW = 0x016c, + + Y2_PEX_PHY_DATA = 0x0170, + Y2_PEX_PHY_ADDR = 0x0172, + + B3_RAM_ADDR = 0x0180, + B3_RAM_DATA_LO = 0x0184, + B3_RAM_DATA_HI = 0x0188, + +/* RAM Interface Registers */ +/* Yukon-2: use RAM_BUFFER() to access the RAM buffer */ +/* + * The HW-Spec. calls this registers Timeout Value 0..11. But this names are + * not usable in SW. Please notice these are NOT real timeouts, these are + * the number of qWords transferred continuously. 
+ */
+#define RAM_BUFFER(port, reg)	(reg | (port <<6))
+
+	B3_RI_WTO_R1	= 0x0190,
+	B3_RI_WTO_XA1	= 0x0191,
+	B3_RI_WTO_XS1	= 0x0192,
+	B3_RI_RTO_R1	= 0x0193,
+	B3_RI_RTO_XA1	= 0x0194,
+	B3_RI_RTO_XS1	= 0x0195,
+	B3_RI_WTO_R2	= 0x0196,
+	B3_RI_WTO_XA2	= 0x0197,
+	B3_RI_WTO_XS2	= 0x0198,
+	B3_RI_RTO_R2	= 0x0199,
+	B3_RI_RTO_XA2	= 0x019a,
+	B3_RI_RTO_XS2	= 0x019b,
+	B3_RI_TO_VAL	= 0x019c,
+	B3_RI_CTRL	= 0x01a0,
+	B3_RI_TEST	= 0x01a2,
+	B3_MA_TOINI_RX1	= 0x01b0,
+	B3_MA_TOINI_RX2	= 0x01b1,
+	B3_MA_TOINI_TX1	= 0x01b2,
+	B3_MA_TOINI_TX2	= 0x01b3,
+	B3_MA_TOVAL_RX1	= 0x01b4,
+	B3_MA_TOVAL_RX2	= 0x01b5,
+	B3_MA_TOVAL_TX1	= 0x01b6,
+	B3_MA_TOVAL_TX2	= 0x01b7,
+	B3_MA_TO_CTRL	= 0x01b8,
+	B3_MA_TO_TEST	= 0x01ba,
+	B3_MA_RCINI_RX1	= 0x01c0,
+	B3_MA_RCINI_RX2	= 0x01c1,
+	B3_MA_RCINI_TX1	= 0x01c2,
+	B3_MA_RCINI_TX2	= 0x01c3,
+	B3_MA_RCVAL_RX1	= 0x01c4,
+	B3_MA_RCVAL_RX2	= 0x01c5,
+	B3_MA_RCVAL_TX1	= 0x01c6,
+	B3_MA_RCVAL_TX2	= 0x01c7,
+	B3_MA_RC_CTRL	= 0x01c8,
+	B3_MA_RC_TEST	= 0x01ca,
+	B3_PA_TOINI_RX1	= 0x01d0,
+	B3_PA_TOINI_RX2	= 0x01d4,
+	B3_PA_TOINI_TX1	= 0x01d8,
+	B3_PA_TOINI_TX2	= 0x01dc,
+	B3_PA_TOVAL_RX1	= 0x01e0,
+	B3_PA_TOVAL_RX2	= 0x01e4,
+	B3_PA_TOVAL_TX1	= 0x01e8,
+	B3_PA_TOVAL_TX2	= 0x01ec,
+	B3_PA_CTRL	= 0x01f0,
+	B3_PA_TEST	= 0x01f2,
+
+	Y2_CFG_SPC	= 0x1c00,	/* PCI config space region */
+	Y2_CFG_AER	= 0x1d00,	/* PCI Advanced Error Report region */
+};
+
+/* B0_CTST	24 bit	Control/Status register */
+enum {
+	Y2_VMAIN_AVAIL	= 1<<17,/* VMAIN available (YUKON-2 only) */
+	Y2_VAUX_AVAIL	= 1<<16,/* VAUX available (YUKON-2 only) */
+	Y2_HW_WOL_ON	= 1<<15,/* HW WOL On  (Yukon-EC Ultra A1 only) */
+	Y2_HW_WOL_OFF	= 1<<14,/* HW WOL Off (Yukon-EC Ultra A1 only) */
+	Y2_ASF_ENABLE	= 1<<13,/* ASF Unit Enable (YUKON-2 only) */
+	Y2_ASF_DISABLE	= 1<<12,/* ASF Unit Disable (YUKON-2 only) */
+	Y2_CLK_RUN_ENA	= 1<<11,/* CLK_RUN Enable  (YUKON-2 only) */
+	Y2_CLK_RUN_DIS	= 1<<10,/* CLK_RUN Disable (YUKON-2 only) */
+	Y2_LED_STAT_ON	= 1<<9, /* Status LED On  (YUKON-2 only) */
+	Y2_LED_STAT_OFF	= 1<<8, /* Status LED Off (YUKON-2 only) */
+
+	CS_ST_SW_IRQ	= 1<<7,	/* Set IRQ SW Request */
+	CS_CL_SW_IRQ	= 1<<6,	/* Clear IRQ SW Request */
+	CS_STOP_DONE	= 1<<5,	/* Stop Master is finished */
+	CS_STOP_MAST	= 1<<4,	/* Command Bit to stop the master */
+	CS_MRST_CLR	= 1<<3,	/* Clear Master reset */
+	CS_MRST_SET	= 1<<2,	/* Set Master reset */
+	CS_RST_CLR	= 1<<1,	/* Clear Software reset */
+	CS_RST_SET	= 1,	/* Set   Software reset */
+};
+
+/* B0_POWER_CTRL	8 Bit	Power Control reg (YUKON only) */
+enum {
+	PC_VAUX_ENA	= 1<<7,	/* Switch VAUX Enable */
+	PC_VAUX_DIS	= 1<<6,	/* Switch VAUX Disable */
+	PC_VCC_ENA	= 1<<5,	/* Switch VCC Enable */
+	PC_VCC_DIS	= 1<<4,	/* Switch VCC Disable */
+	PC_VAUX_ON	= 1<<3,	/* Switch VAUX On */
+	PC_VAUX_OFF	= 1<<2,	/* Switch VAUX Off */
+	PC_VCC_ON	= 1<<1,	/* Switch VCC On */
+	PC_VCC_OFF	= 1<<0,	/* Switch VCC Off */
+};
+
+/* B2_IRQM_MSK	32 bit	IRQ Moderation Mask */
+
+/* B0_Y2_SP_ISRC2	32 bit	Special Interrupt Source Reg 2 */
+/* B0_Y2_SP_ISRC3	32 bit	Special Interrupt Source Reg 3 */
+/* B0_Y2_SP_EISR	32 bit	Enter ISR Reg */
+/* B0_Y2_SP_LISR	32 bit	Leave ISR Reg */
+enum {
+	Y2_IS_HW_ERR	= 1<<31,	/* Interrupt HW Error */
+	Y2_IS_STAT_BMU	= 1<<30,	/* Status BMU Interrupt */
+	Y2_IS_ASF	= 1<<29,	/* ASF subsystem Interrupt */
+	Y2_IS_CPU_TO	= 1<<28,	/* CPU Timeout */
+	Y2_IS_POLL_CHK	= 1<<27,	/* Check IRQ from polling unit */
+	Y2_IS_TWSI_RDY	= 1<<26,	/* IRQ on end of TWSI Tx */
+	Y2_IS_IRQ_SW	= 1<<25,	/* SW forced IRQ */
+	Y2_IS_TIMINT	= 1<<24,	/* IRQ from Timer */
+
+
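[Editor's aside -- illustrative only, not part of the patch above.] The RAM_BUFFER() macro defined a few lines up folds the port number into bit 6 of a RAM-interface register offset, so a single set of B3_RI_* definitions serves both ports. The stand-alone C sketch below demonstrates only that arithmetic; the DEMO_* names are hypothetical mirrors of the header's values, introduced so the snippet compiles on its own.

#include <stdio.h>

/* Mirrors of the definitions above, renamed DEMO_* for this sketch:
 * B3_RI_WTO_R1 = 0x0190 and RAM_BUFFER(port, reg) = reg | (port << 6). */
#define DEMO_B3_RI_WTO_R1           0x0190
#define DEMO_RAM_BUFFER(port, reg)  ((reg) | ((port) << 6))

int main(void)
{
	/* Port 0 keeps the base offset; port 1 lands 0x40 higher. */
	printf("port 0: 0x%04x\n", DEMO_RAM_BUFFER(0, DEMO_B3_RI_WTO_R1)); /* 0x0190 */
	printf("port 1: 0x%04x\n", DEMO_RAM_BUFFER(1, DEMO_B3_RI_WTO_R1)); /* 0x01d0 */
	return 0;
}

The same fold-the-index-into-the-offset pattern recurs later in this header, e.g. SK_REG(port, reg), Y2_QADDR(q, reg) and RB_ADDR(offs, queue), which select a per-port or per-queue copy of a shared register block.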
Y2_IS_IRQ_PHY2 = 1<<12, /* Interrupt from PHY 2 */ + Y2_IS_IRQ_MAC2 = 1<<11, /* Interrupt from MAC 2 */ + Y2_IS_CHK_RX2 = 1<<10, /* Descriptor error Rx 2 */ + Y2_IS_CHK_TXS2 = 1<<9, /* Descriptor error TXS 2 */ + Y2_IS_CHK_TXA2 = 1<<8, /* Descriptor error TXA 2 */ + + Y2_IS_PSM_ACK = 1<<7, /* PSM Acknowledge (Yukon-Optima only) */ + Y2_IS_PTP_TIST = 1<<6, /* PTP Time Stamp (Yukon-Optima only) */ + Y2_IS_PHY_QLNK = 1<<5, /* PHY Quick Link (Yukon-Optima only) */ + + Y2_IS_IRQ_PHY1 = 1<<4, /* Interrupt from PHY 1 */ + Y2_IS_IRQ_MAC1 = 1<<3, /* Interrupt from MAC 1 */ + Y2_IS_CHK_RX1 = 1<<2, /* Descriptor error Rx 1 */ + Y2_IS_CHK_TXS1 = 1<<1, /* Descriptor error TXS 1 */ + Y2_IS_CHK_TXA1 = 1<<0, /* Descriptor error TXA 1 */ + + Y2_IS_BASE = Y2_IS_HW_ERR | Y2_IS_STAT_BMU, + Y2_IS_PORT_1 = Y2_IS_IRQ_PHY1 | Y2_IS_IRQ_MAC1 + | Y2_IS_CHK_TXA1 | Y2_IS_CHK_RX1, + Y2_IS_PORT_2 = Y2_IS_IRQ_PHY2 | Y2_IS_IRQ_MAC2 + | Y2_IS_CHK_TXA2 | Y2_IS_CHK_RX2, + Y2_IS_ERROR = Y2_IS_HW_ERR | + Y2_IS_IRQ_MAC1 | Y2_IS_CHK_TXA1 | Y2_IS_CHK_RX1 | + Y2_IS_IRQ_MAC2 | Y2_IS_CHK_TXA2 | Y2_IS_CHK_RX2, +}; + +/* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */ +enum { + IS_ERR_MSK = 0x00003fff,/* All Error bits */ + + IS_IRQ_TIST_OV = 1<<13, /* Time Stamp Timer Overflow (YUKON only) */ + IS_IRQ_SENSOR = 1<<12, /* IRQ from Sensor (YUKON only) */ + IS_IRQ_MST_ERR = 1<<11, /* IRQ master error detected */ + IS_IRQ_STAT = 1<<10, /* IRQ status exception */ + IS_NO_STAT_M1 = 1<<9, /* No Rx Status from MAC 1 */ + IS_NO_STAT_M2 = 1<<8, /* No Rx Status from MAC 2 */ + IS_NO_TIST_M1 = 1<<7, /* No Time Stamp from MAC 1 */ + IS_NO_TIST_M2 = 1<<6, /* No Time Stamp from MAC 2 */ + IS_RAM_RD_PAR = 1<<5, /* RAM Read Parity Error */ + IS_RAM_WR_PAR = 1<<4, /* RAM Write Parity Error */ + IS_M1_PAR_ERR = 1<<3, /* MAC 1 Parity Error */ + IS_M2_PAR_ERR = 1<<2, /* MAC 2 Parity Error */ + IS_R1_PAR_ERR = 1<<1, /* Queue R1 Parity Error */ + IS_R2_PAR_ERR = 1<<0, /* Queue R2 Parity Error */ +}; + +/* Hardware error interrupt mask for Yukon 2 */ +enum { + Y2_IS_TIST_OV = 1<<29,/* Time Stamp Timer overflow interrupt */ + Y2_IS_SENSOR = 1<<28, /* Sensor interrupt */ + Y2_IS_MST_ERR = 1<<27, /* Master error interrupt */ + Y2_IS_IRQ_STAT = 1<<26, /* Status exception interrupt */ + Y2_IS_PCI_EXP = 1<<25, /* PCI-Express interrupt */ + Y2_IS_PCI_NEXP = 1<<24, /* PCI-Express error similar to PCI error */ + /* Link 2 */ + Y2_IS_PAR_RD2 = 1<<13, /* Read RAM parity error interrupt */ + Y2_IS_PAR_WR2 = 1<<12, /* Write RAM parity error interrupt */ + Y2_IS_PAR_MAC2 = 1<<11, /* MAC hardware fault interrupt */ + Y2_IS_PAR_RX2 = 1<<10, /* Parity Error Rx Queue 2 */ + Y2_IS_TCP_TXS2 = 1<<9, /* TCP length mismatch sync Tx queue IRQ */ + Y2_IS_TCP_TXA2 = 1<<8, /* TCP length mismatch async Tx queue IRQ */ + /* Link 1 */ + Y2_IS_PAR_RD1 = 1<<5, /* Read RAM parity error interrupt */ + Y2_IS_PAR_WR1 = 1<<4, /* Write RAM parity error interrupt */ + Y2_IS_PAR_MAC1 = 1<<3, /* MAC hardware fault interrupt */ + Y2_IS_PAR_RX1 = 1<<2, /* Parity Error Rx Queue 1 */ + Y2_IS_TCP_TXS1 = 1<<1, /* TCP length mismatch sync Tx queue IRQ */ + Y2_IS_TCP_TXA1 = 1<<0, /* TCP length mismatch async Tx queue IRQ */ + + Y2_HWE_L1_MASK = Y2_IS_PAR_RD1 | Y2_IS_PAR_WR1 | Y2_IS_PAR_MAC1 | + Y2_IS_PAR_RX1 | Y2_IS_TCP_TXS1| Y2_IS_TCP_TXA1, + Y2_HWE_L2_MASK = Y2_IS_PAR_RD2 | Y2_IS_PAR_WR2 | Y2_IS_PAR_MAC2 | + Y2_IS_PAR_RX2 | Y2_IS_TCP_TXS2| Y2_IS_TCP_TXA2, + + Y2_HWE_ALL_MASK = Y2_IS_TIST_OV | Y2_IS_MST_ERR | Y2_IS_IRQ_STAT | + Y2_HWE_L1_MASK | Y2_HWE_L2_MASK, +}; + +/* B28_DPT_CTRL 8 bit 
Descriptor Poll Timer Ctrl Reg */ +enum { + DPT_START = 1<<1, + DPT_STOP = 1<<0, +}; + +/* B2_TST_CTRL1 8 bit Test Control Register 1 */ +enum { + TST_FRC_DPERR_MR = 1<<7, /* force DATAPERR on MST RD */ + TST_FRC_DPERR_MW = 1<<6, /* force DATAPERR on MST WR */ + TST_FRC_DPERR_TR = 1<<5, /* force DATAPERR on TRG RD */ + TST_FRC_DPERR_TW = 1<<4, /* force DATAPERR on TRG WR */ + TST_FRC_APERR_M = 1<<3, /* force ADDRPERR on MST */ + TST_FRC_APERR_T = 1<<2, /* force ADDRPERR on TRG */ + TST_CFG_WRITE_ON = 1<<1, /* Enable Config Reg WR */ + TST_CFG_WRITE_OFF= 1<<0, /* Disable Config Reg WR */ +}; + +/* B2_GPIO */ +enum { + GLB_GPIO_CLK_DEB_ENA = 1<<31, /* Clock Debug Enable */ + GLB_GPIO_CLK_DBG_MSK = 0xf<<26, /* Clock Debug */ + + GLB_GPIO_INT_RST_D3_DIS = 1<<15, /* Disable Internal Reset After D3 to D0 */ + GLB_GPIO_LED_PAD_SPEED_UP = 1<<14, /* LED PAD Speed Up */ + GLB_GPIO_STAT_RACE_DIS = 1<<13, /* Status Race Disable */ + GLB_GPIO_TEST_SEL_MSK = 3<<11, /* Testmode Select */ + GLB_GPIO_TEST_SEL_BASE = 1<<11, + GLB_GPIO_RAND_ENA = 1<<10, /* Random Enable */ + GLB_GPIO_RAND_BIT_1 = 1<<9, /* Random Bit 1 */ +}; + +/* B2_MAC_CFG 8 bit MAC Configuration / Chip Revision */ +enum { + CFG_CHIP_R_MSK = 0xf<<4, /* Bit 7.. 4: Chip Revision */ + /* Bit 3.. 2: reserved */ + CFG_DIS_M2_CLK = 1<<1, /* Disable Clock for 2nd MAC */ + CFG_SNG_MAC = 1<<0, /* MAC Config: 0=2 MACs / 1=1 MAC*/ +}; + +/* B2_CHIP_ID 8 bit Chip Identification Number */ +enum { + CHIP_ID_YUKON_XL = 0xb3, /* YUKON-2 XL */ + CHIP_ID_YUKON_EC_U = 0xb4, /* YUKON-2 EC Ultra */ + CHIP_ID_YUKON_EX = 0xb5, /* YUKON-2 Extreme */ + CHIP_ID_YUKON_EC = 0xb6, /* YUKON-2 EC */ + CHIP_ID_YUKON_FE = 0xb7, /* YUKON-2 FE */ + CHIP_ID_YUKON_FE_P = 0xb8, /* YUKON-2 FE+ */ + CHIP_ID_YUKON_SUPR = 0xb9, /* YUKON-2 Supreme */ + CHIP_ID_YUKON_UL_2 = 0xba, /* YUKON-2 Ultra 2 */ + CHIP_ID_YUKON_OPT = 0xbc, /* YUKON-2 Optima */ + CHIP_ID_YUKON_PRM = 0xbd, /* YUKON-2 Optima Prime */ + CHIP_ID_YUKON_OP_2 = 0xbe, /* YUKON-2 Optima 2 */ +}; + +enum yukon_xl_rev { + CHIP_REV_YU_XL_A0 = 0, + CHIP_REV_YU_XL_A1 = 1, + CHIP_REV_YU_XL_A2 = 2, + CHIP_REV_YU_XL_A3 = 3, +}; + +enum yukon_ec_rev { + CHIP_REV_YU_EC_A1 = 0, /* Chip Rev. for Yukon-EC A1/A0 */ + CHIP_REV_YU_EC_A2 = 1, /* Chip Rev. for Yukon-EC A2 */ + CHIP_REV_YU_EC_A3 = 2, /* Chip Rev. for Yukon-EC A3 */ +}; +enum yukon_ec_u_rev { + CHIP_REV_YU_EC_U_A0 = 1, + CHIP_REV_YU_EC_U_A1 = 2, + CHIP_REV_YU_EC_U_B0 = 3, + CHIP_REV_YU_EC_U_B1 = 5, +}; +enum yukon_fe_rev { + CHIP_REV_YU_FE_A1 = 1, + CHIP_REV_YU_FE_A2 = 2, +}; +enum yukon_fe_p_rev { + CHIP_REV_YU_FE2_A0 = 0, +}; +enum yukon_ex_rev { + CHIP_REV_YU_EX_A0 = 1, + CHIP_REV_YU_EX_B0 = 2, +}; +enum yukon_supr_rev { + CHIP_REV_YU_SU_A0 = 0, + CHIP_REV_YU_SU_B0 = 1, + CHIP_REV_YU_SU_B1 = 3, +}; + +enum yukon_prm_rev { + CHIP_REV_YU_PRM_Z1 = 1, + CHIP_REV_YU_PRM_A0 = 2, +}; + +/* B2_Y2_CLK_GATE 8 bit Clock Gating (Yukon-2 only) */ +enum { + Y2_STATUS_LNK2_INAC = 1<<7, /* Status Link 2 inactive (0 = active) */ + Y2_CLK_GAT_LNK2_DIS = 1<<6, /* Disable clock gating Link 2 */ + Y2_COR_CLK_LNK2_DIS = 1<<5, /* Disable Core clock Link 2 */ + Y2_PCI_CLK_LNK2_DIS = 1<<4, /* Disable PCI clock Link 2 */ + Y2_STATUS_LNK1_INAC = 1<<3, /* Status Link 1 inactive (0 = active) */ + Y2_CLK_GAT_LNK1_DIS = 1<<2, /* Disable clock gating Link 1 */ + Y2_COR_CLK_LNK1_DIS = 1<<1, /* Disable Core clock Link 1 */ + Y2_PCI_CLK_LNK1_DIS = 1<<0, /* Disable PCI clock Link 1 */ +}; + +/* B2_Y2_HW_RES 8 bit HW Resources (Yukon-2 only) */ +enum { + CFG_LED_MODE_MSK = 7<<2, /* Bit 4.. 
2: LED Mode Mask */ + CFG_LINK_2_AVAIL = 1<<1, /* Link 2 available */ + CFG_LINK_1_AVAIL = 1<<0, /* Link 1 available */ +}; +#define CFG_LED_MODE(x) (((x) & CFG_LED_MODE_MSK) >> 2) +#define CFG_DUAL_MAC_MSK (CFG_LINK_2_AVAIL | CFG_LINK_1_AVAIL) + + +/* B2_Y2_CLK_CTRL 32 bit Clock Frequency Control Register (Yukon-2/EC) */ +enum { + Y2_CLK_DIV_VAL_MSK = 0xff<<16,/* Bit 23..16: Clock Divisor Value */ +#define Y2_CLK_DIV_VAL(x) (((x)<<16) & Y2_CLK_DIV_VAL_MSK) + Y2_CLK_DIV_VAL2_MSK = 7<<21, /* Bit 23..21: Clock Divisor Value */ + Y2_CLK_SELECT2_MSK = 0x1f<<16,/* Bit 20..16: Clock Select */ +#define Y2_CLK_DIV_VAL_2(x) (((x)<<21) & Y2_CLK_DIV_VAL2_MSK) +#define Y2_CLK_SEL_VAL_2(x) (((x)<<16) & Y2_CLK_SELECT2_MSK) + Y2_CLK_DIV_ENA = 1<<1, /* Enable Core Clock Division */ + Y2_CLK_DIV_DIS = 1<<0, /* Disable Core Clock Division */ +}; + +/* B2_TI_CTRL 8 bit Timer control */ +/* B2_IRQM_CTRL 8 bit IRQ Moderation Timer Control */ +enum { + TIM_START = 1<<2, /* Start Timer */ + TIM_STOP = 1<<1, /* Stop Timer */ + TIM_CLR_IRQ = 1<<0, /* Clear Timer IRQ (!IRQM) */ +}; + +/* B2_TI_TEST 8 Bit Timer Test */ +/* B2_IRQM_TEST 8 bit IRQ Moderation Timer Test */ +/* B28_DPT_TST 8 bit Descriptor Poll Timer Test Reg */ +enum { + TIM_T_ON = 1<<2, /* Test mode on */ + TIM_T_OFF = 1<<1, /* Test mode off */ + TIM_T_STEP = 1<<0, /* Test step */ +}; + +/* Y2_PEX_PHY_ADDR/DATA PEX PHY address and data reg (Yukon-2 only) */ +enum { + PEX_RD_ACCESS = 1<<31, /* Access Mode Read = 1, Write = 0 */ + PEX_DB_ACCESS = 1<<30, /* Access to debug register */ +}; + +/* B3_RAM_ADDR 32 bit RAM Address, to read or write */ + /* Bit 31..19: reserved */ +#define RAM_ADR_RAN 0x0007ffffL /* Bit 18.. 0: RAM Address Range */ +/* RAM Interface Registers */ + +/* B3_RI_CTRL 16 bit RAM Interface Control Register */ +enum { + RI_CLR_RD_PERR = 1<<9, /* Clear IRQ RAM Read Parity Err */ + RI_CLR_WR_PERR = 1<<8, /* Clear IRQ RAM Write Parity Err*/ + + RI_RST_CLR = 1<<1, /* Clear RAM Interface Reset */ + RI_RST_SET = 1<<0, /* Set RAM Interface Reset */ +}; + +#define SK_RI_TO_53 36 /* RAM interface timeout */ + + +/* Port related registers FIFO, and Arbiter */ +#define SK_REG(port,reg) (((port)<<7)+(reg)) + +/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */ +/* TXA_ITI_INI 32 bit Tx Arb Interval Timer Init Val */ +/* TXA_ITI_VAL 32 bit Tx Arb Interval Timer Value */ +/* TXA_LIM_INI 32 bit Tx Arb Limit Counter Init Val */ +/* TXA_LIM_VAL 32 bit Tx Arb Limit Counter Value */ + +#define TXA_MAX_VAL 0x00ffffffUL /* Bit 23.. 
0: Max TXA Timer/Cnt Val */ + +/* TXA_CTRL 8 bit Tx Arbiter Control Register */ +enum { + TXA_ENA_FSYNC = 1<<7, /* Enable force of sync Tx queue */ + TXA_DIS_FSYNC = 1<<6, /* Disable force of sync Tx queue */ + TXA_ENA_ALLOC = 1<<5, /* Enable alloc of free bandwidth */ + TXA_DIS_ALLOC = 1<<4, /* Disable alloc of free bandwidth */ + TXA_START_RC = 1<<3, /* Start sync Rate Control */ + TXA_STOP_RC = 1<<2, /* Stop sync Rate Control */ + TXA_ENA_ARB = 1<<1, /* Enable Tx Arbiter */ + TXA_DIS_ARB = 1<<0, /* Disable Tx Arbiter */ +}; + +/* + * Bank 4 - 5 + */ +/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */ +enum { + TXA_ITI_INI = 0x0200,/* 32 bit Tx Arb Interval Timer Init Val*/ + TXA_ITI_VAL = 0x0204,/* 32 bit Tx Arb Interval Timer Value */ + TXA_LIM_INI = 0x0208,/* 32 bit Tx Arb Limit Counter Init Val */ + TXA_LIM_VAL = 0x020c,/* 32 bit Tx Arb Limit Counter Value */ + TXA_CTRL = 0x0210,/* 8 bit Tx Arbiter Control Register */ + TXA_TEST = 0x0211,/* 8 bit Tx Arbiter Test Register */ + TXA_STAT = 0x0212,/* 8 bit Tx Arbiter Status Register */ + + RSS_KEY = 0x0220, /* RSS Key setup */ + RSS_CFG = 0x0248, /* RSS Configuration */ +}; + +enum { + HASH_TCP_IPV6_EX_CTRL = 1<<5, + HASH_IPV6_EX_CTRL = 1<<4, + HASH_TCP_IPV6_CTRL = 1<<3, + HASH_IPV6_CTRL = 1<<2, + HASH_TCP_IPV4_CTRL = 1<<1, + HASH_IPV4_CTRL = 1<<0, + + HASH_ALL = 0x3f, +}; + +enum { + B6_EXT_REG = 0x0300,/* External registers (GENESIS only) */ + B7_CFG_SPC = 0x0380,/* copy of the Configuration register */ + B8_RQ1_REGS = 0x0400,/* Receive Queue 1 */ + B8_RQ2_REGS = 0x0480,/* Receive Queue 2 */ + B8_TS1_REGS = 0x0600,/* Transmit sync queue 1 */ + B8_TA1_REGS = 0x0680,/* Transmit async queue 1 */ + B8_TS2_REGS = 0x0700,/* Transmit sync queue 2 */ + B8_TA2_REGS = 0x0780,/* Transmit sync queue 2 */ + B16_RAM_REGS = 0x0800,/* RAM Buffer Registers */ +}; + +/* Queue Register Offsets, use Q_ADDR() to access */ +enum { + B8_Q_REGS = 0x0400, /* base of Queue registers */ + Q_D = 0x00, /* 8*32 bit Current Descriptor */ + Q_VLAN = 0x20, /* 16 bit Current VLAN Tag */ + Q_DONE = 0x24, /* 16 bit Done Index */ + Q_AC_L = 0x28, /* 32 bit Current Address Counter Low dWord */ + Q_AC_H = 0x2c, /* 32 bit Current Address Counter High dWord */ + Q_BC = 0x30, /* 32 bit Current Byte Counter */ + Q_CSR = 0x34, /* 32 bit BMU Control/Status Register */ + Q_TEST = 0x38, /* 32 bit Test/Control Register */ + +/* Yukon-2 */ + Q_WM = 0x40, /* 16 bit FIFO Watermark */ + Q_AL = 0x42, /* 8 bit FIFO Alignment */ + Q_RSP = 0x44, /* 16 bit FIFO Read Shadow Pointer */ + Q_RSL = 0x46, /* 8 bit FIFO Read Shadow Level */ + Q_RP = 0x48, /* 8 bit FIFO Read Pointer */ + Q_RL = 0x4a, /* 8 bit FIFO Read Level */ + Q_WP = 0x4c, /* 8 bit FIFO Write Pointer */ + Q_WSP = 0x4d, /* 8 bit FIFO Write Shadow Pointer */ + Q_WL = 0x4e, /* 8 bit FIFO Write Level */ + Q_WSL = 0x4f, /* 8 bit FIFO Write Shadow Level */ +}; +#define Q_ADDR(reg, offs) (B8_Q_REGS + (reg) + (offs)) + +/* Q_TEST 32 bit Test Register */ +enum { + /* Transmit */ + F_TX_CHK_AUTO_OFF = 1<<31, /* Tx checksum auto calc off (Yukon EX) */ + F_TX_CHK_AUTO_ON = 1<<30, /* Tx checksum auto calc off (Yukon EX) */ + + /* Receive */ + F_M_RX_RAM_DIS = 1<<24, /* MAC Rx RAM Read Port disable */ + + /* Hardware testbits not used */ +}; + +/* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/ +enum { + Y2_B8_PREF_REGS = 0x0450, + + PREF_UNIT_CTRL = 0x00, /* 32 bit Control register */ + PREF_UNIT_LAST_IDX = 0x04, /* 16 bit Last Index */ + PREF_UNIT_ADDR_LO = 0x08, /* 32 bit List start addr, low part 
*/ + PREF_UNIT_ADDR_HI = 0x0c, /* 32 bit List start addr, high part*/ + PREF_UNIT_GET_IDX = 0x10, /* 16 bit Get Index */ + PREF_UNIT_PUT_IDX = 0x14, /* 16 bit Put Index */ + PREF_UNIT_FIFO_WP = 0x20, /* 8 bit FIFO write pointer */ + PREF_UNIT_FIFO_RP = 0x24, /* 8 bit FIFO read pointer */ + PREF_UNIT_FIFO_WM = 0x28, /* 8 bit FIFO watermark */ + PREF_UNIT_FIFO_LEV = 0x2c, /* 8 bit FIFO level */ + + PREF_UNIT_MASK_IDX = 0x0fff, +}; +#define Y2_QADDR(q,reg) (Y2_B8_PREF_REGS + (q) + (reg)) + +/* RAM Buffer Register Offsets */ +enum { + + RB_START = 0x00,/* 32 bit RAM Buffer Start Address */ + RB_END = 0x04,/* 32 bit RAM Buffer End Address */ + RB_WP = 0x08,/* 32 bit RAM Buffer Write Pointer */ + RB_RP = 0x0c,/* 32 bit RAM Buffer Read Pointer */ + RB_RX_UTPP = 0x10,/* 32 bit Rx Upper Threshold, Pause Packet */ + RB_RX_LTPP = 0x14,/* 32 bit Rx Lower Threshold, Pause Packet */ + RB_RX_UTHP = 0x18,/* 32 bit Rx Upper Threshold, High Prio */ + RB_RX_LTHP = 0x1c,/* 32 bit Rx Lower Threshold, High Prio */ + /* 0x10 - 0x1f: reserved at Tx RAM Buffer Registers */ + RB_PC = 0x20,/* 32 bit RAM Buffer Packet Counter */ + RB_LEV = 0x24,/* 32 bit RAM Buffer Level Register */ + RB_CTRL = 0x28,/* 32 bit RAM Buffer Control Register */ + RB_TST1 = 0x29,/* 8 bit RAM Buffer Test Register 1 */ + RB_TST2 = 0x2a,/* 8 bit RAM Buffer Test Register 2 */ +}; + +/* Receive and Transmit Queues */ +enum { + Q_R1 = 0x0000, /* Receive Queue 1 */ + Q_R2 = 0x0080, /* Receive Queue 2 */ + Q_XS1 = 0x0200, /* Synchronous Transmit Queue 1 */ + Q_XA1 = 0x0280, /* Asynchronous Transmit Queue 1 */ + Q_XS2 = 0x0300, /* Synchronous Transmit Queue 2 */ + Q_XA2 = 0x0380, /* Asynchronous Transmit Queue 2 */ +}; + +/* Different PHY Types */ +enum { + PHY_ADDR_MARV = 0, +}; + +#define RB_ADDR(offs, queue) ((u16) B16_RAM_REGS + (queue) + (offs)) + + +enum { + LNK_SYNC_INI = 0x0c30,/* 32 bit Link Sync Cnt Init Value */ + LNK_SYNC_VAL = 0x0c34,/* 32 bit Link Sync Cnt Current Value */ + LNK_SYNC_CTRL = 0x0c38,/* 8 bit Link Sync Cnt Control Register */ + LNK_SYNC_TST = 0x0c39,/* 8 bit Link Sync Cnt Test Register */ + + LNK_LED_REG = 0x0c3c,/* 8 bit Link LED Register */ + +/* Receive GMAC FIFO (YUKON and Yukon-2) */ + + RX_GMF_EA = 0x0c40,/* 32 bit Rx GMAC FIFO End Address */ + RX_GMF_AF_THR = 0x0c44,/* 32 bit Rx GMAC FIFO Almost Full Thresh. 
*/ + RX_GMF_CTRL_T = 0x0c48,/* 32 bit Rx GMAC FIFO Control/Test */ + RX_GMF_FL_MSK = 0x0c4c,/* 32 bit Rx GMAC FIFO Flush Mask */ + RX_GMF_FL_THR = 0x0c50,/* 16 bit Rx GMAC FIFO Flush Threshold */ + RX_GMF_FL_CTRL = 0x0c52,/* 16 bit Rx GMAC FIFO Flush Control */ + RX_GMF_TR_THR = 0x0c54,/* 32 bit Rx Truncation Threshold (Yukon-2) */ + RX_GMF_UP_THR = 0x0c58,/* 16 bit Rx Upper Pause Thr (Yukon-EC_U) */ + RX_GMF_LP_THR = 0x0c5a,/* 16 bit Rx Lower Pause Thr (Yukon-EC_U) */ + RX_GMF_VLAN = 0x0c5c,/* 32 bit Rx VLAN Type Register (Yukon-2) */ + RX_GMF_WP = 0x0c60,/* 32 bit Rx GMAC FIFO Write Pointer */ + + RX_GMF_WLEV = 0x0c68,/* 32 bit Rx GMAC FIFO Write Level */ + + RX_GMF_RP = 0x0c70,/* 32 bit Rx GMAC FIFO Read Pointer */ + + RX_GMF_RLEV = 0x0c78,/* 32 bit Rx GMAC FIFO Read Level */ +}; + + +/* Q_BC 32 bit Current Byte Counter */ + +/* BMU Control Status Registers */ +/* B0_R1_CSR 32 bit BMU Ctrl/Stat Rx Queue 1 */ +/* B0_R2_CSR 32 bit BMU Ctrl/Stat Rx Queue 2 */ +/* B0_XA1_CSR 32 bit BMU Ctrl/Stat Sync Tx Queue 1 */ +/* B0_XS1_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 1 */ +/* B0_XA2_CSR 32 bit BMU Ctrl/Stat Sync Tx Queue 2 */ +/* B0_XS2_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 2 */ +/* Q_CSR 32 bit BMU Control/Status Register */ + +/* Rx BMU Control / Status Registers (Yukon-2) */ +enum { + BMU_IDLE = 1<<31, /* BMU Idle State */ + BMU_RX_TCP_PKT = 1<<30, /* Rx TCP Packet (when RSS Hash enabled) */ + BMU_RX_IP_PKT = 1<<29, /* Rx IP Packet (when RSS Hash enabled) */ + + BMU_ENA_RX_RSS_HASH = 1<<15, /* Enable Rx RSS Hash */ + BMU_DIS_RX_RSS_HASH = 1<<14, /* Disable Rx RSS Hash */ + BMU_ENA_RX_CHKSUM = 1<<13, /* Enable Rx TCP/IP Checksum Check */ + BMU_DIS_RX_CHKSUM = 1<<12, /* Disable Rx TCP/IP Checksum Check */ + BMU_CLR_IRQ_PAR = 1<<11, /* Clear IRQ on Parity errors (Rx) */ + BMU_CLR_IRQ_TCP = 1<<11, /* Clear IRQ on TCP segment. 
error (Tx) */ + BMU_CLR_IRQ_CHK = 1<<10, /* Clear IRQ Check */ + BMU_STOP = 1<<9, /* Stop Rx/Tx Queue */ + BMU_START = 1<<8, /* Start Rx/Tx Queue */ + BMU_FIFO_OP_ON = 1<<7, /* FIFO Operational On */ + BMU_FIFO_OP_OFF = 1<<6, /* FIFO Operational Off */ + BMU_FIFO_ENA = 1<<5, /* Enable FIFO */ + BMU_FIFO_RST = 1<<4, /* Reset FIFO */ + BMU_OP_ON = 1<<3, /* BMU Operational On */ + BMU_OP_OFF = 1<<2, /* BMU Operational Off */ + BMU_RST_CLR = 1<<1, /* Clear BMU Reset (Enable) */ + BMU_RST_SET = 1<<0, /* Set BMU Reset */ + + BMU_CLR_RESET = BMU_FIFO_RST | BMU_OP_OFF | BMU_RST_CLR, + BMU_OPER_INIT = BMU_CLR_IRQ_PAR | BMU_CLR_IRQ_CHK | BMU_START | + BMU_FIFO_ENA | BMU_OP_ON, + + BMU_WM_DEFAULT = 0x600, + BMU_WM_PEX = 0x80, +}; + +/* Tx BMU Control / Status Registers (Yukon-2) */ + /* Bit 31: same as for Rx */ +enum { + BMU_TX_IPIDINCR_ON = 1<<13, /* Enable IP ID Increment */ + BMU_TX_IPIDINCR_OFF = 1<<12, /* Disable IP ID Increment */ + BMU_TX_CLR_IRQ_TCP = 1<<11, /* Clear IRQ on TCP segment length mismatch */ +}; + +/* TBMU_TEST 0x06B8 Transmit BMU Test Register */ +enum { + TBMU_TEST_BMU_TX_CHK_AUTO_OFF = 1<<31, /* BMU Tx Checksum Auto Calculation Disable */ + TBMU_TEST_BMU_TX_CHK_AUTO_ON = 1<<30, /* BMU Tx Checksum Auto Calculation Enable */ + TBMU_TEST_HOME_ADD_PAD_FIX1_EN = 1<<29, /* Home Address Paddiing FIX1 Enable */ + TBMU_TEST_HOME_ADD_PAD_FIX1_DIS = 1<<28, /* Home Address Paddiing FIX1 Disable */ + TBMU_TEST_ROUTING_ADD_FIX_EN = 1<<27, /* Routing Address Fix Enable */ + TBMU_TEST_ROUTING_ADD_FIX_DIS = 1<<26, /* Routing Address Fix Disable */ + TBMU_TEST_HOME_ADD_FIX_EN = 1<<25, /* Home address checksum fix enable */ + TBMU_TEST_HOME_ADD_FIX_DIS = 1<<24, /* Home address checksum fix disable */ + + TBMU_TEST_TEST_RSPTR_ON = 1<<22, /* Testmode Shadow Read Ptr On */ + TBMU_TEST_TEST_RSPTR_OFF = 1<<21, /* Testmode Shadow Read Ptr Off */ + TBMU_TEST_TESTSTEP_RSPTR = 1<<20, /* Teststep Shadow Read Ptr */ + + TBMU_TEST_TEST_RPTR_ON = 1<<18, /* Testmode Read Ptr On */ + TBMU_TEST_TEST_RPTR_OFF = 1<<17, /* Testmode Read Ptr Off */ + TBMU_TEST_TESTSTEP_RPTR = 1<<16, /* Teststep Read Ptr */ + + TBMU_TEST_TEST_WSPTR_ON = 1<<14, /* Testmode Shadow Write Ptr On */ + TBMU_TEST_TEST_WSPTR_OFF = 1<<13, /* Testmode Shadow Write Ptr Off */ + TBMU_TEST_TESTSTEP_WSPTR = 1<<12, /* Teststep Shadow Write Ptr */ + + TBMU_TEST_TEST_WPTR_ON = 1<<10, /* Testmode Write Ptr On */ + TBMU_TEST_TEST_WPTR_OFF = 1<<9, /* Testmode Write Ptr Off */ + TBMU_TEST_TESTSTEP_WPTR = 1<<8, /* Teststep Write Ptr */ + + TBMU_TEST_TEST_REQ_NB_ON = 1<<6, /* Testmode Req Nbytes/Addr On */ + TBMU_TEST_TEST_REQ_NB_OFF = 1<<5, /* Testmode Req Nbytes/Addr Off */ + TBMU_TEST_TESTSTEP_REQ_NB = 1<<4, /* Teststep Req Nbytes/Addr */ + + TBMU_TEST_TEST_DONE_IDX_ON = 1<<2, /* Testmode Done Index On */ + TBMU_TEST_TEST_DONE_IDX_OFF = 1<<1, /* Testmode Done Index Off */ + TBMU_TEST_TESTSTEP_DONE_IDX = 1<<0, /* Teststep Done Index */ +}; + +/* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/ +/* PREF_UNIT_CTRL 32 bit Prefetch Control register */ +enum { + PREF_UNIT_OP_ON = 1<<3, /* prefetch unit operational */ + PREF_UNIT_OP_OFF = 1<<2, /* prefetch unit not operational */ + PREF_UNIT_RST_CLR = 1<<1, /* Clear Prefetch Unit Reset */ + PREF_UNIT_RST_SET = 1<<0, /* Set Prefetch Unit Reset */ +}; + +/* RAM Buffer Register Offsets, use RB_ADDR(Queue, Offs) to access */ +/* RB_START 32 bit RAM Buffer Start Address */ +/* RB_END 32 bit RAM Buffer End Address */ +/* RB_WP 32 bit RAM Buffer Write Pointer */ +/* RB_RP 32 bit RAM Buffer 
Read Pointer */ +/* RB_RX_UTPP 32 bit Rx Upper Threshold, Pause Pack */ +/* RB_RX_LTPP 32 bit Rx Lower Threshold, Pause Pack */ +/* RB_RX_UTHP 32 bit Rx Upper Threshold, High Prio */ +/* RB_RX_LTHP 32 bit Rx Lower Threshold, High Prio */ +/* RB_PC 32 bit RAM Buffer Packet Counter */ +/* RB_LEV 32 bit RAM Buffer Level Register */ + +#define RB_MSK 0x0007ffff /* Bit 18.. 0: RAM Buffer Pointer Bits */ +/* RB_TST2 8 bit RAM Buffer Test Register 2 */ +/* RB_TST1 8 bit RAM Buffer Test Register 1 */ + +/* RB_CTRL 8 bit RAM Buffer Control Register */ +enum { + RB_ENA_STFWD = 1<<5, /* Enable Store & Forward */ + RB_DIS_STFWD = 1<<4, /* Disable Store & Forward */ + RB_ENA_OP_MD = 1<<3, /* Enable Operation Mode */ + RB_DIS_OP_MD = 1<<2, /* Disable Operation Mode */ + RB_RST_CLR = 1<<1, /* Clear RAM Buf STM Reset */ + RB_RST_SET = 1<<0, /* Set RAM Buf STM Reset */ +}; + + +/* Transmit GMAC FIFO (YUKON only) */ +enum { + TX_GMF_EA = 0x0d40,/* 32 bit Tx GMAC FIFO End Address */ + TX_GMF_AE_THR = 0x0d44,/* 32 bit Tx GMAC FIFO Almost Empty Thresh.*/ + TX_GMF_CTRL_T = 0x0d48,/* 32 bit Tx GMAC FIFO Control/Test */ + + TX_GMF_WP = 0x0d60,/* 32 bit Tx GMAC FIFO Write Pointer */ + TX_GMF_WSP = 0x0d64,/* 32 bit Tx GMAC FIFO Write Shadow Ptr. */ + TX_GMF_WLEV = 0x0d68,/* 32 bit Tx GMAC FIFO Write Level */ + + TX_GMF_RP = 0x0d70,/* 32 bit Tx GMAC FIFO Read Pointer */ + TX_GMF_RSTP = 0x0d74,/* 32 bit Tx GMAC FIFO Restart Pointer */ + TX_GMF_RLEV = 0x0d78,/* 32 bit Tx GMAC FIFO Read Level */ + + /* Threshold values for Yukon-EC Ultra and Extreme */ + ECU_AE_THR = 0x0070, /* Almost Empty Threshold */ + ECU_TXFF_LEV = 0x01a0, /* Tx BMU FIFO Level */ + ECU_JUMBO_WM = 0x0080, /* Jumbo Mode Watermark */ +}; + +/* Descriptor Poll Timer Registers */ +enum { + B28_DPT_INI = 0x0e00,/* 24 bit Descriptor Poll Timer Init Val */ + B28_DPT_VAL = 0x0e04,/* 24 bit Descriptor Poll Timer Curr Val */ + B28_DPT_CTRL = 0x0e08,/* 8 bit Descriptor Poll Timer Ctrl Reg */ + + B28_DPT_TST = 0x0e0a,/* 8 bit Descriptor Poll Timer Test Reg */ +}; + +/* Time Stamp Timer Registers (YUKON only) */ +enum { + GMAC_TI_ST_VAL = 0x0e14,/* 32 bit Time Stamp Timer Curr Val */ + GMAC_TI_ST_CTRL = 0x0e18,/* 8 bit Time Stamp Timer Ctrl Reg */ + GMAC_TI_ST_TST = 0x0e1a,/* 8 bit Time Stamp Timer Test Reg */ +}; + +/* Polling Unit Registers (Yukon-2 only) */ +enum { + POLL_CTRL = 0x0e20, /* 32 bit Polling Unit Control Reg */ + POLL_LAST_IDX = 0x0e24,/* 16 bit Polling Unit List Last Index */ + + POLL_LIST_ADDR_LO= 0x0e28,/* 32 bit Poll. List Start Addr (low) */ + POLL_LIST_ADDR_HI= 0x0e2c,/* 32 bit Poll. 
List Start Addr (high) */ +}; + +enum { + SMB_CFG = 0x0e40, /* 32 bit SMBus Config Register */ + SMB_CSR = 0x0e44, /* 32 bit SMBus Control/Status Register */ +}; + +enum { + CPU_WDOG = 0x0e48, /* 32 bit Watchdog Register */ + CPU_CNTR = 0x0e4C, /* 32 bit Counter Register */ + CPU_TIM = 0x0e50,/* 32 bit Timer Compare Register */ + CPU_AHB_ADDR = 0x0e54, /* 32 bit CPU AHB Debug Register */ + CPU_AHB_WDATA = 0x0e58, /* 32 bit CPU AHB Debug Register */ + CPU_AHB_RDATA = 0x0e5C, /* 32 bit CPU AHB Debug Register */ + HCU_MAP_BASE = 0x0e60, /* 32 bit Reset Mapping Base */ + CPU_AHB_CTRL = 0x0e64, /* 32 bit CPU AHB Debug Register */ + HCU_CCSR = 0x0e68, /* 32 bit CPU Control and Status Register */ + HCU_HCSR = 0x0e6C, /* 32 bit Host Control and Status Register */ +}; + +/* ASF Subsystem Registers (Yukon-2 only) */ +enum { + B28_Y2_SMB_CONFIG = 0x0e40,/* 32 bit ASF SMBus Config Register */ + B28_Y2_SMB_CSD_REG = 0x0e44,/* 32 bit ASF SMB Control/Status/Data */ + B28_Y2_ASF_IRQ_V_BASE=0x0e60,/* 32 bit ASF IRQ Vector Base */ + + B28_Y2_ASF_STAT_CMD= 0x0e68,/* 32 bit ASF Status and Command Reg */ + B28_Y2_ASF_HOST_COM= 0x0e6c,/* 32 bit ASF Host Communication Reg */ + B28_Y2_DATA_REG_1 = 0x0e70,/* 32 bit ASF/Host Data Register 1 */ + B28_Y2_DATA_REG_2 = 0x0e74,/* 32 bit ASF/Host Data Register 2 */ + B28_Y2_DATA_REG_3 = 0x0e78,/* 32 bit ASF/Host Data Register 3 */ + B28_Y2_DATA_REG_4 = 0x0e7c,/* 32 bit ASF/Host Data Register 4 */ +}; + +/* Status BMU Registers (Yukon-2 only)*/ +enum { + STAT_CTRL = 0x0e80,/* 32 bit Status BMU Control Reg */ + STAT_LAST_IDX = 0x0e84,/* 16 bit Status BMU Last Index */ + + STAT_LIST_ADDR_LO= 0x0e88,/* 32 bit Status List Start Addr (low) */ + STAT_LIST_ADDR_HI= 0x0e8c,/* 32 bit Status List Start Addr (high) */ + STAT_TXA1_RIDX = 0x0e90,/* 16 bit Status TxA1 Report Index Reg */ + STAT_TXS1_RIDX = 0x0e92,/* 16 bit Status TxS1 Report Index Reg */ + STAT_TXA2_RIDX = 0x0e94,/* 16 bit Status TxA2 Report Index Reg */ + STAT_TXS2_RIDX = 0x0e96,/* 16 bit Status TxS2 Report Index Reg */ + STAT_TX_IDX_TH = 0x0e98,/* 16 bit Status Tx Index Threshold Reg */ + STAT_PUT_IDX = 0x0e9c,/* 16 bit Status Put Index Reg */ + +/* FIFO Control/Status Registers (Yukon-2 only)*/ + STAT_FIFO_WP = 0x0ea0,/* 8 bit Status FIFO Write Pointer Reg */ + STAT_FIFO_RP = 0x0ea4,/* 8 bit Status FIFO Read Pointer Reg */ + STAT_FIFO_RSP = 0x0ea6,/* 8 bit Status FIFO Read Shadow Ptr */ + STAT_FIFO_LEVEL = 0x0ea8,/* 8 bit Status FIFO Level Reg */ + STAT_FIFO_SHLVL = 0x0eaa,/* 8 bit Status FIFO Shadow Level Reg */ + STAT_FIFO_WM = 0x0eac,/* 8 bit Status FIFO Watermark Reg */ + STAT_FIFO_ISR_WM= 0x0ead,/* 8 bit Status FIFO ISR Watermark Reg */ + +/* Level and ISR Timer Registers (Yukon-2 only)*/ + STAT_LEV_TIMER_INI= 0x0eb0,/* 32 bit Level Timer Init. Value Reg */ + STAT_LEV_TIMER_CNT= 0x0eb4,/* 32 bit Level Timer Counter Reg */ + STAT_LEV_TIMER_CTRL= 0x0eb8,/* 8 bit Level Timer Control Reg */ + STAT_LEV_TIMER_TEST= 0x0eb9,/* 8 bit Level Timer Test Reg */ + STAT_TX_TIMER_INI = 0x0ec0,/* 32 bit Tx Timer Init. Value Reg */ + STAT_TX_TIMER_CNT = 0x0ec4,/* 32 bit Tx Timer Counter Reg */ + STAT_TX_TIMER_CTRL = 0x0ec8,/* 8 bit Tx Timer Control Reg */ + STAT_TX_TIMER_TEST = 0x0ec9,/* 8 bit Tx Timer Test Reg */ + STAT_ISR_TIMER_INI = 0x0ed0,/* 32 bit ISR Timer Init. 
Value Reg */ + STAT_ISR_TIMER_CNT = 0x0ed4,/* 32 bit ISR Timer Counter Reg */ + STAT_ISR_TIMER_CTRL= 0x0ed8,/* 8 bit ISR Timer Control Reg */ + STAT_ISR_TIMER_TEST= 0x0ed9,/* 8 bit ISR Timer Test Reg */ +}; + +enum { + LINKLED_OFF = 0x01, + LINKLED_ON = 0x02, + LINKLED_LINKSYNC_OFF = 0x04, + LINKLED_LINKSYNC_ON = 0x08, + LINKLED_BLINK_OFF = 0x10, + LINKLED_BLINK_ON = 0x20, +}; + +/* GMAC and GPHY Control Registers (YUKON only) */ +enum { + GMAC_CTRL = 0x0f00,/* 32 bit GMAC Control Reg */ + GPHY_CTRL = 0x0f04,/* 32 bit GPHY Control Reg */ + GMAC_IRQ_SRC = 0x0f08,/* 8 bit GMAC Interrupt Source Reg */ + GMAC_IRQ_MSK = 0x0f0c,/* 8 bit GMAC Interrupt Mask Reg */ + GMAC_LINK_CTRL = 0x0f10,/* 16 bit Link Control Reg */ + +/* Wake-up Frame Pattern Match Control Registers (YUKON only) */ + WOL_CTRL_STAT = 0x0f20,/* 16 bit WOL Control/Status Reg */ + WOL_MATCH_CTL = 0x0f22,/* 8 bit WOL Match Control Reg */ + WOL_MATCH_RES = 0x0f23,/* 8 bit WOL Match Result Reg */ + WOL_MAC_ADDR = 0x0f24,/* 32 bit WOL MAC Address */ + WOL_PATT_RPTR = 0x0f2c,/* 8 bit WOL Pattern Read Pointer */ + +/* WOL Pattern Length Registers (YUKON only) */ + WOL_PATT_LEN_LO = 0x0f30,/* 32 bit WOL Pattern Length 3..0 */ + WOL_PATT_LEN_HI = 0x0f34,/* 24 bit WOL Pattern Length 6..4 */ + +/* WOL Pattern Counter Registers (YUKON only) */ + WOL_PATT_CNT_0 = 0x0f38,/* 32 bit WOL Pattern Counter 3..0 */ + WOL_PATT_CNT_4 = 0x0f3c,/* 24 bit WOL Pattern Counter 6..4 */ +}; +#define WOL_REGS(port, x) (x + (port)*0x80) + +enum { + WOL_PATT_RAM_1 = 0x1000,/* WOL Pattern RAM Link 1 */ + WOL_PATT_RAM_2 = 0x1400,/* WOL Pattern RAM Link 2 */ +}; +#define WOL_PATT_RAM_BASE(port) (WOL_PATT_RAM_1 + (port)*0x400) + +enum { + BASE_GMAC_1 = 0x2800,/* GMAC 1 registers */ + BASE_GMAC_2 = 0x3800,/* GMAC 2 registers */ +}; + +/* + * Marvel-PHY Registers, indirect addressed over GMAC + */ +enum { + PHY_MARV_CTRL = 0x00,/* 16 bit r/w PHY Control Register */ + PHY_MARV_STAT = 0x01,/* 16 bit r/o PHY Status Register */ + PHY_MARV_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */ + PHY_MARV_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */ + PHY_MARV_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */ + PHY_MARV_AUNE_LP = 0x05,/* 16 bit r/o Link Part Ability Reg */ + PHY_MARV_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */ + PHY_MARV_NEPG = 0x07,/* 16 bit r/w Next Page Register */ + PHY_MARV_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */ + /* Marvel-specific registers */ + PHY_MARV_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */ + PHY_MARV_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */ + PHY_MARV_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Reg */ + PHY_MARV_PHY_CTRL = 0x10,/* 16 bit r/w PHY Specific Ctrl Reg */ + PHY_MARV_PHY_STAT = 0x11,/* 16 bit r/o PHY Specific Stat Reg */ + PHY_MARV_INT_MASK = 0x12,/* 16 bit r/w Interrupt Mask Reg */ + PHY_MARV_INT_STAT = 0x13,/* 16 bit r/o Interrupt Status Reg */ + PHY_MARV_EXT_CTRL = 0x14,/* 16 bit r/w Ext. PHY Specific Ctrl */ + PHY_MARV_RXE_CNT = 0x15,/* 16 bit r/w Receive Error Counter */ + PHY_MARV_EXT_ADR = 0x16,/* 16 bit r/w Ext. Ad. for Cable Diag. */ + PHY_MARV_PORT_IRQ = 0x17,/* 16 bit r/o Port 0 IRQ (88E1111 only) */ + PHY_MARV_LED_CTRL = 0x18,/* 16 bit r/w LED Control Reg */ + PHY_MARV_LED_OVER = 0x19,/* 16 bit r/w Manual LED Override Reg */ + PHY_MARV_EXT_CTRL_2 = 0x1a,/* 16 bit r/w Ext. PHY Specific Ctrl 2 */ + PHY_MARV_EXT_P_STAT = 0x1b,/* 16 bit r/w Ext. PHY Spec. 
Stat Reg */ + PHY_MARV_CABLE_DIAG = 0x1c,/* 16 bit r/o Cable Diagnostic Reg */ + PHY_MARV_PAGE_ADDR = 0x1d,/* 16 bit r/w Extended Page Address Reg */ + PHY_MARV_PAGE_DATA = 0x1e,/* 16 bit r/w Extended Page Data Reg */ + +/* for 10/100 Fast Ethernet PHY (88E3082 only) */ + PHY_MARV_FE_LED_PAR = 0x16,/* 16 bit r/w LED Parallel Select Reg. */ + PHY_MARV_FE_LED_SER = 0x17,/* 16 bit r/w LED Stream Select S. LED */ + PHY_MARV_FE_VCT_TX = 0x1a,/* 16 bit r/w VCT Reg. for TXP/N Pins */ + PHY_MARV_FE_VCT_RX = 0x1b,/* 16 bit r/o VCT Reg. for RXP/N Pins */ + PHY_MARV_FE_SPEC_2 = 0x1c,/* 16 bit r/w Specific Control Reg. 2 */ +}; + +enum { + PHY_CT_RESET = 1<<15, /* Bit 15: (sc) clear all PHY related regs */ + PHY_CT_LOOP = 1<<14, /* Bit 14: enable Loopback over PHY */ + PHY_CT_SPS_LSB = 1<<13, /* Bit 13: Speed select, lower bit */ + PHY_CT_ANE = 1<<12, /* Bit 12: Auto-Negotiation Enabled */ + PHY_CT_PDOWN = 1<<11, /* Bit 11: Power Down Mode */ + PHY_CT_ISOL = 1<<10, /* Bit 10: Isolate Mode */ + PHY_CT_RE_CFG = 1<<9, /* Bit 9: (sc) Restart Auto-Negotiation */ + PHY_CT_DUP_MD = 1<<8, /* Bit 8: Duplex Mode */ + PHY_CT_COL_TST = 1<<7, /* Bit 7: Collision Test enabled */ + PHY_CT_SPS_MSB = 1<<6, /* Bit 6: Speed select, upper bit */ +}; + +enum { + PHY_CT_SP1000 = PHY_CT_SPS_MSB, /* enable speed of 1000 Mbps */ + PHY_CT_SP100 = PHY_CT_SPS_LSB, /* enable speed of 100 Mbps */ + PHY_CT_SP10 = 0, /* enable speed of 10 Mbps */ +}; + +enum { + PHY_ST_EXT_ST = 1<<8, /* Bit 8: Extended Status Present */ + + PHY_ST_PRE_SUP = 1<<6, /* Bit 6: Preamble Suppression */ + PHY_ST_AN_OVER = 1<<5, /* Bit 5: Auto-Negotiation Over */ + PHY_ST_REM_FLT = 1<<4, /* Bit 4: Remote Fault Condition Occurred */ + PHY_ST_AN_CAP = 1<<3, /* Bit 3: Auto-Negotiation Capability */ + PHY_ST_LSYNC = 1<<2, /* Bit 2: Link Synchronized */ + PHY_ST_JAB_DET = 1<<1, /* Bit 1: Jabber Detected */ + PHY_ST_EXT_REG = 1<<0, /* Bit 0: Extended Register available */ +}; + +enum { + PHY_I1_OUI_MSK = 0x3f<<10, /* Bit 15..10: Organization Unique ID */ + PHY_I1_MOD_NUM = 0x3f<<4, /* Bit 9.. 4: Model Number */ + PHY_I1_REV_MSK = 0xf, /* Bit 3.. 0: Revision Number */ +}; + +/* different Marvell PHY Ids */ +enum { + PHY_MARV_ID0_VAL= 0x0141, /* Marvell Unique Identifier */ + + PHY_BCOM_ID1_A1 = 0x6041, + PHY_BCOM_ID1_B2 = 0x6043, + PHY_BCOM_ID1_C0 = 0x6044, + PHY_BCOM_ID1_C5 = 0x6047, + + PHY_MARV_ID1_B0 = 0x0C23, /* Yukon (PHY 88E1011) */ + PHY_MARV_ID1_B2 = 0x0C25, /* Yukon-Plus (PHY 88E1011) */ + PHY_MARV_ID1_C2 = 0x0CC2, /* Yukon-EC (PHY 88E1111) */ + PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */ + PHY_MARV_ID1_FE = 0x0C83, /* Yukon-FE (PHY 88E3082 Rev.A1) */ + PHY_MARV_ID1_ECU= 0x0CB0, /* Yukon-ECU (PHY 88E1149 Rev.B2?) 
*/ +}; + +/* Advertisement register bits */ +enum { + PHY_AN_NXT_PG = 1<<15, /* Bit 15: Request Next Page */ + PHY_AN_ACK = 1<<14, /* Bit 14: (ro) Acknowledge Received */ + PHY_AN_RF = 1<<13, /* Bit 13: Remote Fault Bits */ + + PHY_AN_PAUSE_ASYM = 1<<11,/* Bit 11: Try for asymmetric */ + PHY_AN_PAUSE_CAP = 1<<10, /* Bit 10: Try for pause */ + PHY_AN_100BASE4 = 1<<9, /* Bit 9: Try for 100mbps 4k packets */ + PHY_AN_100FULL = 1<<8, /* Bit 8: Try for 100mbps full-duplex */ + PHY_AN_100HALF = 1<<7, /* Bit 7: Try for 100mbps half-duplex */ + PHY_AN_10FULL = 1<<6, /* Bit 6: Try for 10mbps full-duplex */ + PHY_AN_10HALF = 1<<5, /* Bit 5: Try for 10mbps half-duplex */ + PHY_AN_CSMA = 1<<0, /* Bit 0: Only selector supported */ + PHY_AN_SEL = 0x1f, /* Bit 4..0: Selector Field, 00001=Ethernet*/ + PHY_AN_FULL = PHY_AN_100FULL | PHY_AN_10FULL | PHY_AN_CSMA, + PHY_AN_ALL = PHY_AN_10HALF | PHY_AN_10FULL | + PHY_AN_100HALF | PHY_AN_100FULL, +}; + +/***** PHY_BCOM_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/ +/***** PHY_MARV_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/ +enum { + PHY_B_1000S_MSF = 1<<15, /* Bit 15: Master/Slave Fault */ + PHY_B_1000S_MSR = 1<<14, /* Bit 14: Master/Slave Result */ + PHY_B_1000S_LRS = 1<<13, /* Bit 13: Local Receiver Status */ + PHY_B_1000S_RRS = 1<<12, /* Bit 12: Remote Receiver Status */ + PHY_B_1000S_LP_FD = 1<<11, /* Bit 11: Link Partner can FD */ + PHY_B_1000S_LP_HD = 1<<10, /* Bit 10: Link Partner can HD */ + /* Bit 9..8: reserved */ + PHY_B_1000S_IEC = 0xff, /* Bit 7..0: Idle Error Count */ +}; + +/** Marvell-Specific */ +enum { + PHY_M_AN_NXT_PG = 1<<15, /* Request Next Page */ + PHY_M_AN_ACK = 1<<14, /* (ro) Acknowledge Received */ + PHY_M_AN_RF = 1<<13, /* Remote Fault */ + + PHY_M_AN_ASP = 1<<11, /* Asymmetric Pause */ + PHY_M_AN_PC = 1<<10, /* MAC Pause implemented */ + PHY_M_AN_100_T4 = 1<<9, /* Not cap. 100Base-T4 (always 0) */ + PHY_M_AN_100_FD = 1<<8, /* Advertise 100Base-TX Full Duplex */ + PHY_M_AN_100_HD = 1<<7, /* Advertise 100Base-TX Half Duplex */ + PHY_M_AN_10_FD = 1<<6, /* Advertise 10Base-TX Full Duplex */ + PHY_M_AN_10_HD = 1<<5, /* Advertise 10Base-TX Half Duplex */ + PHY_M_AN_SEL_MSK =0x1f<<4, /* Bit 4.. 0: Selector Field Mask */ +}; + +/* special defines for FIBER (88E1011S only) */ +enum { + PHY_M_AN_ASP_X = 1<<8, /* Asymmetric Pause */ + PHY_M_AN_PC_X = 1<<7, /* MAC Pause implemented */ + PHY_M_AN_1000X_AHD = 1<<6, /* Advertise 10000Base-X Half Duplex */ + PHY_M_AN_1000X_AFD = 1<<5, /* Advertise 10000Base-X Full Duplex */ +}; + +/* Pause Bits (PHY_M_AN_ASP_X and PHY_M_AN_PC_X) encoding */ +enum { + PHY_M_P_NO_PAUSE_X = 0<<7,/* Bit 8.. 7: no Pause Mode */ + PHY_M_P_SYM_MD_X = 1<<7, /* Bit 8.. 7: symmetric Pause Mode */ + PHY_M_P_ASYM_MD_X = 2<<7,/* Bit 8.. 7: asymmetric Pause Mode */ + PHY_M_P_BOTH_MD_X = 3<<7,/* Bit 8.. 
7: both Pause Mode */ +}; + +/***** PHY_MARV_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/ +enum { + PHY_M_1000C_TEST = 7<<13,/* Bit 15..13: Test Modes */ + PHY_M_1000C_MSE = 1<<12, /* Manual Master/Slave Enable */ + PHY_M_1000C_MSC = 1<<11, /* M/S Configuration (1=Master) */ + PHY_M_1000C_MPD = 1<<10, /* Multi-Port Device */ + PHY_M_1000C_AFD = 1<<9, /* Advertise Full Duplex */ + PHY_M_1000C_AHD = 1<<8, /* Advertise Half Duplex */ +}; + +/***** PHY_MARV_PHY_CTRL 16 bit r/w PHY Specific Ctrl Reg *****/ +enum { + PHY_M_PC_TX_FFD_MSK = 3<<14,/* Bit 15..14: Tx FIFO Depth Mask */ + PHY_M_PC_RX_FFD_MSK = 3<<12,/* Bit 13..12: Rx FIFO Depth Mask */ + PHY_M_PC_ASS_CRS_TX = 1<<11, /* Assert CRS on Transmit */ + PHY_M_PC_FL_GOOD = 1<<10, /* Force Link Good */ + PHY_M_PC_EN_DET_MSK = 3<<8,/* Bit 9.. 8: Energy Detect Mask */ + PHY_M_PC_ENA_EXT_D = 1<<7, /* Enable Ext. Distance (10BT) */ + PHY_M_PC_MDIX_MSK = 3<<5,/* Bit 6.. 5: MDI/MDIX Config. Mask */ + PHY_M_PC_DIS_125CLK = 1<<4, /* Disable 125 CLK */ + PHY_M_PC_MAC_POW_UP = 1<<3, /* MAC Power up */ + PHY_M_PC_SQE_T_ENA = 1<<2, /* SQE Test Enabled */ + PHY_M_PC_POL_R_DIS = 1<<1, /* Polarity Reversal Disabled */ + PHY_M_PC_DIS_JABBER = 1<<0, /* Disable Jabber */ +}; + +enum { + PHY_M_PC_EN_DET = 2<<8, /* Energy Detect (Mode 1) */ + PHY_M_PC_EN_DET_PLUS = 3<<8, /* Energy Detect Plus (Mode 2) */ +}; + +#define PHY_M_PC_MDI_XMODE(x) (((u16)(x)<<5) & PHY_M_PC_MDIX_MSK) + +enum { + PHY_M_PC_MAN_MDI = 0, /* 00 = Manual MDI configuration */ + PHY_M_PC_MAN_MDIX = 1, /* 01 = Manual MDIX configuration */ + PHY_M_PC_ENA_AUTO = 3, /* 11 = Enable Automatic Crossover */ +}; + +/* for Yukon-EC Ultra Gigabit Ethernet PHY (88E1149 only) */ +enum { + PHY_M_PC_COP_TX_DIS = 1<<3, /* Copper Transmitter Disable */ + PHY_M_PC_POW_D_ENA = 1<<2, /* Power Down Enable */ +}; + +/* for 10/100 Fast Ethernet PHY (88E3082 only) */ +enum { + PHY_M_PC_ENA_DTE_DT = 1<<15, /* Enable Data Terminal Equ. (DTE) Detect */ + PHY_M_PC_ENA_ENE_DT = 1<<14, /* Enable Energy Detect (sense & pulse) */ + PHY_M_PC_DIS_NLP_CK = 1<<13, /* Disable Normal Link Puls (NLP) Check */ + PHY_M_PC_ENA_LIP_NP = 1<<12, /* Enable Link Partner Next Page Reg. */ + PHY_M_PC_DIS_NLP_GN = 1<<11, /* Disable Normal Link Puls Generation */ + + PHY_M_PC_DIS_SCRAMB = 1<<9, /* Disable Scrambler */ + PHY_M_PC_DIS_FEFI = 1<<8, /* Disable Far End Fault Indic. (FEFI) */ + + PHY_M_PC_SH_TP_SEL = 1<<6, /* Shielded Twisted Pair Select */ + PHY_M_PC_RX_FD_MSK = 3<<2,/* Bit 3.. 2: Rx FIFO Depth Mask */ +}; + +/***** PHY_MARV_PHY_STAT 16 bit r/o PHY Specific Status Reg *****/ +enum { + PHY_M_PS_SPEED_MSK = 3<<14, /* Bit 15..14: Speed Mask */ + PHY_M_PS_SPEED_1000 = 1<<15, /* 10 = 1000 Mbps */ + PHY_M_PS_SPEED_100 = 1<<14, /* 01 = 100 Mbps */ + PHY_M_PS_SPEED_10 = 0, /* 00 = 10 Mbps */ + PHY_M_PS_FULL_DUP = 1<<13, /* Full Duplex */ + PHY_M_PS_PAGE_REC = 1<<12, /* Page Received */ + PHY_M_PS_SPDUP_RES = 1<<11, /* Speed & Duplex Resolved */ + PHY_M_PS_LINK_UP = 1<<10, /* Link Up */ + PHY_M_PS_CABLE_MSK = 7<<7, /* Bit 9.. 7: Cable Length Mask */ + PHY_M_PS_MDI_X_STAT = 1<<6, /* MDI Crossover Stat (1=MDIX) */ + PHY_M_PS_DOWNS_STAT = 1<<5, /* Downshift Status (1=downsh.) 
*/ + PHY_M_PS_ENDET_STAT = 1<<4, /* Energy Detect Status (1=act) */ + PHY_M_PS_TX_P_EN = 1<<3, /* Tx Pause Enabled */ + PHY_M_PS_RX_P_EN = 1<<2, /* Rx Pause Enabled */ + PHY_M_PS_POL_REV = 1<<1, /* Polarity Reversed */ + PHY_M_PS_JABBER = 1<<0, /* Jabber */ +}; + +#define PHY_M_PS_PAUSE_MSK (PHY_M_PS_TX_P_EN | PHY_M_PS_RX_P_EN) + +/* for 10/100 Fast Ethernet PHY (88E3082 only) */ +enum { + PHY_M_PS_DTE_DETECT = 1<<15, /* Data Terminal Equipment (DTE) Detected */ + PHY_M_PS_RES_SPEED = 1<<14, /* Resolved Speed (1=100 Mbps, 0=10 Mbps */ +}; + +enum { + PHY_M_IS_AN_ERROR = 1<<15, /* Auto-Negotiation Error */ + PHY_M_IS_LSP_CHANGE = 1<<14, /* Link Speed Changed */ + PHY_M_IS_DUP_CHANGE = 1<<13, /* Duplex Mode Changed */ + PHY_M_IS_AN_PR = 1<<12, /* Page Received */ + PHY_M_IS_AN_COMPL = 1<<11, /* Auto-Negotiation Completed */ + PHY_M_IS_LST_CHANGE = 1<<10, /* Link Status Changed */ + PHY_M_IS_SYMB_ERROR = 1<<9, /* Symbol Error */ + PHY_M_IS_FALSE_CARR = 1<<8, /* False Carrier */ + PHY_M_IS_FIFO_ERROR = 1<<7, /* FIFO Overflow/Underrun Error */ + PHY_M_IS_MDI_CHANGE = 1<<6, /* MDI Crossover Changed */ + PHY_M_IS_DOWNSH_DET = 1<<5, /* Downshift Detected */ + PHY_M_IS_END_CHANGE = 1<<4, /* Energy Detect Changed */ + + PHY_M_IS_DTE_CHANGE = 1<<2, /* DTE Power Det. Status Changed */ + PHY_M_IS_POL_CHANGE = 1<<1, /* Polarity Changed */ + PHY_M_IS_JABBER = 1<<0, /* Jabber */ + + PHY_M_DEF_MSK = PHY_M_IS_LSP_CHANGE | PHY_M_IS_LST_CHANGE + | PHY_M_IS_DUP_CHANGE, + PHY_M_AN_MSK = PHY_M_IS_AN_ERROR | PHY_M_IS_AN_COMPL, +}; + + +/***** PHY_MARV_EXT_CTRL 16 bit r/w Ext. PHY Specific Ctrl *****/ +enum { + PHY_M_EC_ENA_BC_EXT = 1<<15, /* Enable Block Carr. Ext. (88E1111 only) */ + PHY_M_EC_ENA_LIN_LB = 1<<14, /* Enable Line Loopback (88E1111 only) */ + + PHY_M_EC_DIS_LINK_P = 1<<12, /* Disable Link Pulses (88E1111 only) */ + PHY_M_EC_M_DSC_MSK = 3<<10, /* Bit 11..10: Master Downshift Counter */ + /* (88E1011 only) */ + PHY_M_EC_S_DSC_MSK = 3<<8,/* Bit 9.. 8: Slave Downshift Counter */ + /* (88E1011 only) */ + PHY_M_EC_M_DSC_MSK2 = 7<<9,/* Bit 11.. 9: Master Downshift Counter */ + /* (88E1111 only) */ + PHY_M_EC_DOWN_S_ENA = 1<<8, /* Downshift Enable (88E1111 only) */ + /* !!! Errata in spec. (1 = disable) */ + PHY_M_EC_RX_TIM_CT = 1<<7, /* RGMII Rx Timing Control*/ + PHY_M_EC_MAC_S_MSK = 7<<4,/* Bit 6.. 4: Def. MAC interface speed */ + PHY_M_EC_FIB_AN_ENA = 1<<3, /* Fiber Auto-Neg. Enable (88E1011S only) */ + PHY_M_EC_DTE_D_ENA = 1<<2, /* DTE Detect Enable (88E1111 only) */ + PHY_M_EC_TX_TIM_CT = 1<<1, /* RGMII Tx Timing Control */ + PHY_M_EC_TRANS_DIS = 1<<0, /* Transmitter Disable (88E1111 only) */ + + PHY_M_10B_TE_ENABLE = 1<<7, /* 10Base-Te Enable (88E8079 and above) */ +}; +#define PHY_M_EC_M_DSC(x) ((u16)(x)<<10 & PHY_M_EC_M_DSC_MSK) + /* 00=1x; 01=2x; 10=3x; 11=4x */ +#define PHY_M_EC_S_DSC(x) ((u16)(x)<<8 & PHY_M_EC_S_DSC_MSK) + /* 00=dis; 01=1x; 10=2x; 11=3x */ +#define PHY_M_EC_DSC_2(x) ((u16)(x)<<9 & PHY_M_EC_M_DSC_MSK2) + /* 000=1x; 001=2x; 010=3x; 011=4x */ +#define PHY_M_EC_MAC_S(x) ((u16)(x)<<4 & PHY_M_EC_MAC_S_MSK) + /* 01X=0; 110=2.5; 111=25 (MHz) */ + +/* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */ +enum { + PHY_M_PC_DIS_LINK_Pa = 1<<15,/* Disable Link Pulses */ + PHY_M_PC_DSC_MSK = 7<<12,/* Bit 14..12: Downshift Counter */ + PHY_M_PC_DOWN_S_ENA = 1<<11,/* Downshift Enable */ +}; +/* !!! Errata in spec. 
(1 = disable) */ + +#define PHY_M_PC_DSC(x) (((u16)(x)<<12) & PHY_M_PC_DSC_MSK) + /* 100=5x; 101=6x; 110=7x; 111=8x */ +enum { + MAC_TX_CLK_0_MHZ = 2, + MAC_TX_CLK_2_5_MHZ = 6, + MAC_TX_CLK_25_MHZ = 7, +}; + +/***** PHY_MARV_LED_CTRL 16 bit r/w LED Control Reg *****/ +enum { + PHY_M_LEDC_DIS_LED = 1<<15, /* Disable LED */ + PHY_M_LEDC_PULS_MSK = 7<<12,/* Bit 14..12: Pulse Stretch Mask */ + PHY_M_LEDC_F_INT = 1<<11, /* Force Interrupt */ + PHY_M_LEDC_BL_R_MSK = 7<<8,/* Bit 10.. 8: Blink Rate Mask */ + PHY_M_LEDC_DP_C_LSB = 1<<7, /* Duplex Control (LSB, 88E1111 only) */ + PHY_M_LEDC_TX_C_LSB = 1<<6, /* Tx Control (LSB, 88E1111 only) */ + PHY_M_LEDC_LK_C_MSK = 7<<3,/* Bit 5.. 3: Link Control Mask */ + /* (88E1111 only) */ +}; + +enum { + PHY_M_LEDC_LINK_MSK = 3<<3,/* Bit 4.. 3: Link Control Mask */ + /* (88E1011 only) */ + PHY_M_LEDC_DP_CTRL = 1<<2, /* Duplex Control */ + PHY_M_LEDC_DP_C_MSB = 1<<2, /* Duplex Control (MSB, 88E1111 only) */ + PHY_M_LEDC_RX_CTRL = 1<<1, /* Rx Activity / Link */ + PHY_M_LEDC_TX_CTRL = 1<<0, /* Tx Activity / Link */ + PHY_M_LEDC_TX_C_MSB = 1<<0, /* Tx Control (MSB, 88E1111 only) */ +}; + +#define PHY_M_LED_PULS_DUR(x) (((u16)(x)<<12) & PHY_M_LEDC_PULS_MSK) + +/***** PHY_MARV_PHY_STAT (page 3)16 bit r/w Polarity Control Reg. *****/ +enum { + PHY_M_POLC_LS1M_MSK = 0xf<<12, /* Bit 15..12: LOS,STAT1 Mix % Mask */ + PHY_M_POLC_IS0M_MSK = 0xf<<8, /* Bit 11.. 8: INIT,STAT0 Mix % Mask */ + PHY_M_POLC_LOS_MSK = 0x3<<6, /* Bit 7.. 6: LOS Pol. Ctrl. Mask */ + PHY_M_POLC_INIT_MSK = 0x3<<4, /* Bit 5.. 4: INIT Pol. Ctrl. Mask */ + PHY_M_POLC_STA1_MSK = 0x3<<2, /* Bit 3.. 2: STAT1 Pol. Ctrl. Mask */ + PHY_M_POLC_STA0_MSK = 0x3, /* Bit 1.. 0: STAT0 Pol. Ctrl. Mask */ +}; + +#define PHY_M_POLC_LS1_P_MIX(x) (((x)<<12) & PHY_M_POLC_LS1M_MSK) +#define PHY_M_POLC_IS0_P_MIX(x) (((x)<<8) & PHY_M_POLC_IS0M_MSK) +#define PHY_M_POLC_LOS_CTRL(x) (((x)<<6) & PHY_M_POLC_LOS_MSK) +#define PHY_M_POLC_INIT_CTRL(x) (((x)<<4) & PHY_M_POLC_INIT_MSK) +#define PHY_M_POLC_STA1_CTRL(x) (((x)<<2) & PHY_M_POLC_STA1_MSK) +#define PHY_M_POLC_STA0_CTRL(x) (((x)<<0) & PHY_M_POLC_STA0_MSK) + +enum { + PULS_NO_STR = 0,/* no pulse stretching */ + PULS_21MS = 1,/* 21 ms to 42 ms */ + PULS_42MS = 2,/* 42 ms to 84 ms */ + PULS_84MS = 3,/* 84 ms to 170 ms */ + PULS_170MS = 4,/* 170 ms to 340 ms */ + PULS_340MS = 5,/* 340 ms to 670 ms */ + PULS_670MS = 6,/* 670 ms to 1.3 s */ + PULS_1300MS = 7,/* 1.3 s to 2.7 s */ +}; + +#define PHY_M_LED_BLINK_RT(x) (((u16)(x)<<8) & PHY_M_LEDC_BL_R_MSK) + +enum { + BLINK_42MS = 0,/* 42 ms */ + BLINK_84MS = 1,/* 84 ms */ + BLINK_170MS = 2,/* 170 ms */ + BLINK_340MS = 3,/* 340 ms */ + BLINK_670MS = 4,/* 670 ms */ +}; + +/***** PHY_MARV_LED_OVER 16 bit r/w Manual LED Override Reg *****/ +#define PHY_M_LED_MO_SGMII(x) ((x)<<14) /* Bit 15..14: SGMII AN Timer */ + +#define PHY_M_LED_MO_DUP(x) ((x)<<10) /* Bit 11..10: Duplex */ +#define PHY_M_LED_MO_10(x) ((x)<<8) /* Bit 9.. 8: Link 10 */ +#define PHY_M_LED_MO_100(x) ((x)<<6) /* Bit 7.. 6: Link 100 */ +#define PHY_M_LED_MO_1000(x) ((x)<<4) /* Bit 5.. 4: Link 1000 */ +#define PHY_M_LED_MO_RX(x) ((x)<<2) /* Bit 3.. 2: Rx */ +#define PHY_M_LED_MO_TX(x) ((x)<<0) /* Bit 1.. 0: Tx */ + +enum led_mode { + MO_LED_NORM = 0, + MO_LED_BLINK = 1, + MO_LED_OFF = 2, + MO_LED_ON = 3, +}; + +/***** PHY_MARV_EXT_CTRL_2 16 bit r/w Ext. 
PHY Specific Ctrl 2 *****/ +enum { + PHY_M_EC2_FI_IMPED = 1<<6, /* Fiber Input Impedance */ + PHY_M_EC2_FO_IMPED = 1<<5, /* Fiber Output Impedance */ + PHY_M_EC2_FO_M_CLK = 1<<4, /* Fiber Mode Clock Enable */ + PHY_M_EC2_FO_BOOST = 1<<3, /* Fiber Output Boost */ + PHY_M_EC2_FO_AM_MSK = 7,/* Bit 2.. 0: Fiber Output Amplitude */ +}; + +/***** PHY_MARV_EXT_P_STAT 16 bit r/w Ext. PHY Specific Status *****/ +enum { + PHY_M_FC_AUTO_SEL = 1<<15, /* Fiber/Copper Auto Sel. Dis. */ + PHY_M_FC_AN_REG_ACC = 1<<14, /* Fiber/Copper AN Reg. Access */ + PHY_M_FC_RESOLUTION = 1<<13, /* Fiber/Copper Resolution */ + PHY_M_SER_IF_AN_BP = 1<<12, /* Ser. IF AN Bypass Enable */ + PHY_M_SER_IF_BP_ST = 1<<11, /* Ser. IF AN Bypass Status */ + PHY_M_IRQ_POLARITY = 1<<10, /* IRQ polarity */ + PHY_M_DIS_AUT_MED = 1<<9, /* Disable Aut. Medium Reg. Selection */ + /* (88E1111 only) */ + + PHY_M_UNDOC1 = 1<<7, /* undocumented bit !! */ + PHY_M_DTE_POW_STAT = 1<<4, /* DTE Power Status (88E1111 only) */ + PHY_M_MODE_MASK = 0xf, /* Bit 3.. 0: copy of HWCFG MODE[3:0] */ +}; + +/* for 10/100 Fast Ethernet PHY (88E3082 only) */ +/***** PHY_MARV_FE_LED_PAR 16 bit r/w LED Parallel Select Reg. *****/ + /* Bit 15..12: reserved (used internally) */ +enum { + PHY_M_FELP_LED2_MSK = 0xf<<8, /* Bit 11.. 8: LED2 Mask (LINK) */ + PHY_M_FELP_LED1_MSK = 0xf<<4, /* Bit 7.. 4: LED1 Mask (ACT) */ + PHY_M_FELP_LED0_MSK = 0xf, /* Bit 3.. 0: LED0 Mask (SPEED) */ +}; + +#define PHY_M_FELP_LED2_CTRL(x) (((u16)(x)<<8) & PHY_M_FELP_LED2_MSK) +#define PHY_M_FELP_LED1_CTRL(x) (((u16)(x)<<4) & PHY_M_FELP_LED1_MSK) +#define PHY_M_FELP_LED0_CTRL(x) (((u16)(x)<<0) & PHY_M_FELP_LED0_MSK) + +enum { + LED_PAR_CTRL_COLX = 0x00, + LED_PAR_CTRL_ERROR = 0x01, + LED_PAR_CTRL_DUPLEX = 0x02, + LED_PAR_CTRL_DP_COL = 0x03, + LED_PAR_CTRL_SPEED = 0x04, + LED_PAR_CTRL_LINK = 0x05, + LED_PAR_CTRL_TX = 0x06, + LED_PAR_CTRL_RX = 0x07, + LED_PAR_CTRL_ACT = 0x08, + LED_PAR_CTRL_LNK_RX = 0x09, + LED_PAR_CTRL_LNK_AC = 0x0a, + LED_PAR_CTRL_ACT_BL = 0x0b, + LED_PAR_CTRL_TX_BL = 0x0c, + LED_PAR_CTRL_RX_BL = 0x0d, + LED_PAR_CTRL_COL_BL = 0x0e, + LED_PAR_CTRL_INACT = 0x0f +}; + +/*****,PHY_MARV_FE_SPEC_2 16 bit r/w Specific Control Reg. 2 *****/ +enum { + PHY_M_FESC_DIS_WAIT = 1<<2, /* Disable TDR Waiting Period */ + PHY_M_FESC_ENA_MCLK = 1<<1, /* Enable MAC Rx Clock in sleep mode */ + PHY_M_FESC_SEL_CL_A = 1<<0, /* Select Class A driver (100B-TX) */ +}; + +/* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */ +/***** PHY_MARV_PHY_CTRL (page 1) 16 bit r/w Fiber Specific Ctrl *****/ +enum { + PHY_M_FIB_FORCE_LNK = 1<<10,/* Force Link Good */ + PHY_M_FIB_SIGD_POL = 1<<9, /* SIGDET Polarity */ + PHY_M_FIB_TX_DIS = 1<<3, /* Transmitter Disable */ +}; + +/* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */ +/***** PHY_MARV_PHY_CTRL (page 2) 16 bit r/w MAC Specific Ctrl *****/ +enum { + PHY_M_MAC_MD_MSK = 7<<7, /* Bit 9.. 7: Mode Select Mask */ + PHY_M_MAC_GMIF_PUP = 1<<3, /* GMII Power Up (88E1149 only) */ + PHY_M_MAC_MD_AUTO = 3,/* Auto Copper/1000Base-X */ + PHY_M_MAC_MD_COPPER = 5,/* Copper only */ + PHY_M_MAC_MD_1000BX = 7,/* 1000Base-X only */ +}; +#define PHY_M_MAC_MODE_SEL(x) (((x)<<7) & PHY_M_MAC_MD_MSK) + +/***** PHY_MARV_PHY_CTRL (page 3) 16 bit r/w LED Control Reg. *****/ +enum { + PHY_M_LEDC_LOS_MSK = 0xf<<12,/* Bit 15..12: LOS LED Ctrl. Mask */ + PHY_M_LEDC_INIT_MSK = 0xf<<8, /* Bit 11.. 8: INIT LED Ctrl. Mask */ + PHY_M_LEDC_STA1_MSK = 0xf<<4,/* Bit 7.. 4: STAT1 LED Ctrl. Mask */ + PHY_M_LEDC_STA0_MSK = 0xf, /* Bit 3.. 0: STAT0 LED Ctrl. 
Mask */ +}; + +#define PHY_M_LEDC_LOS_CTRL(x) (((x)<<12) & PHY_M_LEDC_LOS_MSK) +#define PHY_M_LEDC_INIT_CTRL(x) (((x)<<8) & PHY_M_LEDC_INIT_MSK) +#define PHY_M_LEDC_STA1_CTRL(x) (((x)<<4) & PHY_M_LEDC_STA1_MSK) +#define PHY_M_LEDC_STA0_CTRL(x) (((x)<<0) & PHY_M_LEDC_STA0_MSK) + +/* GMAC registers */ +/* Port Registers */ +enum { + GM_GP_STAT = 0x0000, /* 16 bit r/o General Purpose Status */ + GM_GP_CTRL = 0x0004, /* 16 bit r/w General Purpose Control */ + GM_TX_CTRL = 0x0008, /* 16 bit r/w Transmit Control Reg. */ + GM_RX_CTRL = 0x000c, /* 16 bit r/w Receive Control Reg. */ + GM_TX_FLOW_CTRL = 0x0010, /* 16 bit r/w Transmit Flow-Control */ + GM_TX_PARAM = 0x0014, /* 16 bit r/w Transmit Parameter Reg. */ + GM_SERIAL_MODE = 0x0018, /* 16 bit r/w Serial Mode Register */ +/* Source Address Registers */ + GM_SRC_ADDR_1L = 0x001c, /* 16 bit r/w Source Address 1 (low) */ + GM_SRC_ADDR_1M = 0x0020, /* 16 bit r/w Source Address 1 (middle) */ + GM_SRC_ADDR_1H = 0x0024, /* 16 bit r/w Source Address 1 (high) */ + GM_SRC_ADDR_2L = 0x0028, /* 16 bit r/w Source Address 2 (low) */ + GM_SRC_ADDR_2M = 0x002c, /* 16 bit r/w Source Address 2 (middle) */ + GM_SRC_ADDR_2H = 0x0030, /* 16 bit r/w Source Address 2 (high) */ + +/* Multicast Address Hash Registers */ + GM_MC_ADDR_H1 = 0x0034, /* 16 bit r/w Multicast Address Hash 1 */ + GM_MC_ADDR_H2 = 0x0038, /* 16 bit r/w Multicast Address Hash 2 */ + GM_MC_ADDR_H3 = 0x003c, /* 16 bit r/w Multicast Address Hash 3 */ + GM_MC_ADDR_H4 = 0x0040, /* 16 bit r/w Multicast Address Hash 4 */ + +/* Interrupt Source Registers */ + GM_TX_IRQ_SRC = 0x0044, /* 16 bit r/o Tx Overflow IRQ Source */ + GM_RX_IRQ_SRC = 0x0048, /* 16 bit r/o Rx Overflow IRQ Source */ + GM_TR_IRQ_SRC = 0x004c, /* 16 bit r/o Tx/Rx Over. IRQ Source */ + +/* Interrupt Mask Registers */ + GM_TX_IRQ_MSK = 0x0050, /* 16 bit r/w Tx Overflow IRQ Mask */ + GM_RX_IRQ_MSK = 0x0054, /* 16 bit r/w Rx Overflow IRQ Mask */ + GM_TR_IRQ_MSK = 0x0058, /* 16 bit r/w Tx/Rx Over. IRQ Mask */ + +/* Serial Management Interface (SMI) Registers */ + GM_SMI_CTRL = 0x0080, /* 16 bit r/w SMI Control Register */ + GM_SMI_DATA = 0x0084, /* 16 bit r/w SMI Data Register */ + GM_PHY_ADDR = 0x0088, /* 16 bit r/w GPHY Address Register */ +/* MIB Counters */ + GM_MIB_CNT_BASE = 0x0100, /* Base Address of MIB Counters */ + GM_MIB_CNT_END = 0x025C, /* Last MIB counter */ +}; + + +/* + * MIB Counters base address definitions (low word) - + * use offset 4 for access to high word (32 bit r/o) + */ +enum { + GM_RXF_UC_OK = GM_MIB_CNT_BASE + 0, /* Unicast Frames Received OK */ + GM_RXF_BC_OK = GM_MIB_CNT_BASE + 8, /* Broadcast Frames Received OK */ + GM_RXF_MPAUSE = GM_MIB_CNT_BASE + 16, /* Pause MAC Ctrl Frames Received */ + GM_RXF_MC_OK = GM_MIB_CNT_BASE + 24, /* Multicast Frames Received OK */ + GM_RXF_FCS_ERR = GM_MIB_CNT_BASE + 32, /* Rx Frame Check Seq. 
Error */ + + GM_RXO_OK_LO = GM_MIB_CNT_BASE + 48, /* Octets Received OK Low */ + GM_RXO_OK_HI = GM_MIB_CNT_BASE + 56, /* Octets Received OK High */ + GM_RXO_ERR_LO = GM_MIB_CNT_BASE + 64, /* Octets Received Invalid Low */ + GM_RXO_ERR_HI = GM_MIB_CNT_BASE + 72, /* Octets Received Invalid High */ + GM_RXF_SHT = GM_MIB_CNT_BASE + 80, /* Frames <64 Byte Received OK */ + GM_RXE_FRAG = GM_MIB_CNT_BASE + 88, /* Frames <64 Byte Received with FCS Err */ + GM_RXF_64B = GM_MIB_CNT_BASE + 96, /* 64 Byte Rx Frame */ + GM_RXF_127B = GM_MIB_CNT_BASE + 104,/* 65-127 Byte Rx Frame */ + GM_RXF_255B = GM_MIB_CNT_BASE + 112,/* 128-255 Byte Rx Frame */ + GM_RXF_511B = GM_MIB_CNT_BASE + 120,/* 256-511 Byte Rx Frame */ + GM_RXF_1023B = GM_MIB_CNT_BASE + 128,/* 512-1023 Byte Rx Frame */ + GM_RXF_1518B = GM_MIB_CNT_BASE + 136,/* 1024-1518 Byte Rx Frame */ + GM_RXF_MAX_SZ = GM_MIB_CNT_BASE + 144,/* 1519-MaxSize Byte Rx Frame */ + GM_RXF_LNG_ERR = GM_MIB_CNT_BASE + 152,/* Rx Frame too Long Error */ + GM_RXF_JAB_PKT = GM_MIB_CNT_BASE + 160,/* Rx Jabber Packet Frame */ + + GM_RXE_FIFO_OV = GM_MIB_CNT_BASE + 176,/* Rx FIFO overflow Event */ + GM_TXF_UC_OK = GM_MIB_CNT_BASE + 192,/* Unicast Frames Xmitted OK */ + GM_TXF_BC_OK = GM_MIB_CNT_BASE + 200,/* Broadcast Frames Xmitted OK */ + GM_TXF_MPAUSE = GM_MIB_CNT_BASE + 208,/* Pause MAC Ctrl Frames Xmitted */ + GM_TXF_MC_OK = GM_MIB_CNT_BASE + 216,/* Multicast Frames Xmitted OK */ + GM_TXO_OK_LO = GM_MIB_CNT_BASE + 224,/* Octets Transmitted OK Low */ + GM_TXO_OK_HI = GM_MIB_CNT_BASE + 232,/* Octets Transmitted OK High */ + GM_TXF_64B = GM_MIB_CNT_BASE + 240,/* 64 Byte Tx Frame */ + GM_TXF_127B = GM_MIB_CNT_BASE + 248,/* 65-127 Byte Tx Frame */ + GM_TXF_255B = GM_MIB_CNT_BASE + 256,/* 128-255 Byte Tx Frame */ + GM_TXF_511B = GM_MIB_CNT_BASE + 264,/* 256-511 Byte Tx Frame */ + GM_TXF_1023B = GM_MIB_CNT_BASE + 272,/* 512-1023 Byte Tx Frame */ + GM_TXF_1518B = GM_MIB_CNT_BASE + 280,/* 1024-1518 Byte Tx Frame */ + GM_TXF_MAX_SZ = GM_MIB_CNT_BASE + 288,/* 1519-MaxSize Byte Tx Frame */ + + GM_TXF_COL = GM_MIB_CNT_BASE + 304,/* Tx Collision */ + GM_TXF_LAT_COL = GM_MIB_CNT_BASE + 312,/* Tx Late Collision */ + GM_TXF_ABO_COL = GM_MIB_CNT_BASE + 320,/* Tx aborted due to Exces. Col. 
*/ + GM_TXF_MUL_COL = GM_MIB_CNT_BASE + 328,/* Tx Multiple Collision */ + GM_TXF_SNG_COL = GM_MIB_CNT_BASE + 336,/* Tx Single Collision */ + GM_TXE_FIFO_UR = GM_MIB_CNT_BASE + 344,/* Tx FIFO Underrun Event */ +}; + +/* GMAC Bit Definitions */ +/* GM_GP_STAT 16 bit r/o General Purpose Status Register */ +enum { + GM_GPSR_SPEED = 1<<15, /* Bit 15: Port Speed (1 = 100 Mbps) */ + GM_GPSR_DUPLEX = 1<<14, /* Bit 14: Duplex Mode (1 = Full) */ + GM_GPSR_FC_TX_DIS = 1<<13, /* Bit 13: Tx Flow-Control Mode Disabled */ + GM_GPSR_LINK_UP = 1<<12, /* Bit 12: Link Up Status */ + GM_GPSR_PAUSE = 1<<11, /* Bit 11: Pause State */ + GM_GPSR_TX_ACTIVE = 1<<10, /* Bit 10: Tx in Progress */ + GM_GPSR_EXC_COL = 1<<9, /* Bit 9: Excessive Collisions Occurred */ + GM_GPSR_LAT_COL = 1<<8, /* Bit 8: Late Collisions Occurred */ + + GM_GPSR_PHY_ST_CH = 1<<5, /* Bit 5: PHY Status Change */ + GM_GPSR_GIG_SPEED = 1<<4, /* Bit 4: Gigabit Speed (1 = 1000 Mbps) */ + GM_GPSR_PART_MODE = 1<<3, /* Bit 3: Partition mode */ + GM_GPSR_FC_RX_DIS = 1<<2, /* Bit 2: Rx Flow-Control Mode Disabled */ + GM_GPSR_PROM_EN = 1<<1, /* Bit 1: Promiscuous Mode Enabled */ +}; + +/* GM_GP_CTRL 16 bit r/w General Purpose Control Register */ +enum { + GM_GPCR_PROM_ENA = 1<<14, /* Bit 14: Enable Promiscuous Mode */ + GM_GPCR_FC_TX_DIS = 1<<13, /* Bit 13: Disable Tx Flow-Control Mode */ + GM_GPCR_TX_ENA = 1<<12, /* Bit 12: Enable Transmit */ + GM_GPCR_RX_ENA = 1<<11, /* Bit 11: Enable Receive */ + GM_GPCR_BURST_ENA = 1<<10, /* Bit 10: Enable Burst Mode */ + GM_GPCR_LOOP_ENA = 1<<9, /* Bit 9: Enable MAC Loopback Mode */ + GM_GPCR_PART_ENA = 1<<8, /* Bit 8: Enable Partition Mode */ + GM_GPCR_GIGS_ENA = 1<<7, /* Bit 7: Gigabit Speed (1000 Mbps) */ + GM_GPCR_FL_PASS = 1<<6, /* Bit 6: Force Link Pass */ + GM_GPCR_DUP_FULL = 1<<5, /* Bit 5: Full Duplex Mode */ + GM_GPCR_FC_RX_DIS = 1<<4, /* Bit 4: Disable Rx Flow-Control Mode */ + GM_GPCR_SPEED_100 = 1<<3, /* Bit 3: Port Speed 100 Mbps */ + GM_GPCR_AU_DUP_DIS = 1<<2, /* Bit 2: Disable Auto-Update Duplex */ + GM_GPCR_AU_FCT_DIS = 1<<1, /* Bit 1: Disable Auto-Update Flow-C. */ + GM_GPCR_AU_SPD_DIS = 1<<0, /* Bit 0: Disable Auto-Update Speed */ +}; + +#define GM_GPCR_SPEED_1000 (GM_GPCR_GIGS_ENA | GM_GPCR_SPEED_100) + +/* GM_TX_CTRL 16 bit r/w Transmit Control Register */ +enum { + GM_TXCR_FORCE_JAM = 1<<15, /* Bit 15: Force Jam / Flow-Control */ + GM_TXCR_CRC_DIS = 1<<14, /* Bit 14: Disable insertion of CRC */ + GM_TXCR_PAD_DIS = 1<<13, /* Bit 13: Disable padding of packets */ + GM_TXCR_COL_THR_MSK = 7<<10, /* Bit 12..10: Collision Threshold */ +}; + +#define TX_COL_THR(x) (((x)<<10) & GM_TXCR_COL_THR_MSK) +#define TX_COL_DEF 0x04 + +/* GM_RX_CTRL 16 bit r/w Receive Control Register */ +enum { + GM_RXCR_UCF_ENA = 1<<15, /* Bit 15: Enable Unicast filtering */ + GM_RXCR_MCF_ENA = 1<<14, /* Bit 14: Enable Multicast filtering */ + GM_RXCR_CRC_DIS = 1<<13, /* Bit 13: Remove 4-byte CRC */ + GM_RXCR_PASS_FC = 1<<12, /* Bit 12: Pass FC packets to FIFO */ +}; + +/* GM_TX_PARAM 16 bit r/w Transmit Parameter Register */ +enum { + GM_TXPA_JAMLEN_MSK = 0x03<<14, /* Bit 15..14: Jam Length */ + GM_TXPA_JAMIPG_MSK = 0x1f<<9, /* Bit 13..9: Jam IPG */ + GM_TXPA_JAMDAT_MSK = 0x1f<<4, /* Bit 8..4: IPG Jam to Data */ + GM_TXPA_BO_LIM_MSK = 0x0f, /* Bit 3.. 
0: Backoff Limit Mask */ + + TX_JAM_LEN_DEF = 0x03, + TX_JAM_IPG_DEF = 0x0b, + TX_IPG_JAM_DEF = 0x1c, + TX_BOF_LIM_DEF = 0x04, +}; + +#define TX_JAM_LEN_VAL(x) (((x)<<14) & GM_TXPA_JAMLEN_MSK) +#define TX_JAM_IPG_VAL(x) (((x)<<9) & GM_TXPA_JAMIPG_MSK) +#define TX_IPG_JAM_DATA(x) (((x)<<4) & GM_TXPA_JAMDAT_MSK) +#define TX_BACK_OFF_LIM(x) ((x) & GM_TXPA_BO_LIM_MSK) + + +/* GM_SERIAL_MODE 16 bit r/w Serial Mode Register */ +enum { + GM_SMOD_DATABL_MSK = 0x1f<<11, /* Bit 15..11: Data Blinder (r/o) */ + GM_SMOD_LIMIT_4 = 1<<10, /* 4 consecutive Tx trials */ + GM_SMOD_VLAN_ENA = 1<<9, /* Enable VLAN (Max. Frame Len) */ + GM_SMOD_JUMBO_ENA = 1<<8, /* Enable Jumbo (Max. Frame Len) */ + + GM_NEW_FLOW_CTRL = 1<<6, /* Enable New Flow-Control */ + + GM_SMOD_IPG_MSK = 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */ +}; + +#define DATA_BLIND_VAL(x) (((x)<<11) & GM_SMOD_DATABL_MSK) +#define IPG_DATA_VAL(x) (x & GM_SMOD_IPG_MSK) + +#define DATA_BLIND_DEF 0x04 +#define IPG_DATA_DEF_1000 0x1e +#define IPG_DATA_DEF_10_100 0x18 + +/* GM_SMI_CTRL 16 bit r/w SMI Control Register */ +enum { + GM_SMI_CT_PHY_A_MSK = 0x1f<<11,/* Bit 15..11: PHY Device Address */ + GM_SMI_CT_REG_A_MSK = 0x1f<<6,/* Bit 10.. 6: PHY Register Address */ + GM_SMI_CT_OP_RD = 1<<5, /* Bit 5: OpCode Read (0=Write)*/ + GM_SMI_CT_RD_VAL = 1<<4, /* Bit 4: Read Valid (Read completed) */ + GM_SMI_CT_BUSY = 1<<3, /* Bit 3: Busy (Operation in progress) */ +}; + +#define GM_SMI_CT_PHY_AD(x) (((u16)(x)<<11) & GM_SMI_CT_PHY_A_MSK) +#define GM_SMI_CT_REG_AD(x) (((u16)(x)<<6) & GM_SMI_CT_REG_A_MSK) + +/* GM_PHY_ADDR 16 bit r/w GPHY Address Register */ +enum { + GM_PAR_MIB_CLR = 1<<5, /* Bit 5: Set MIB Clear Counter Mode */ + GM_PAR_MIB_TST = 1<<4, /* Bit 4: MIB Load Counter (Test Mode) */ +}; + +/* Receive Frame Status Encoding */ +enum { + GMR_FS_LEN = 0x7fff<<16, /* Bit 30..16: Rx Frame Length */ + GMR_FS_VLAN = 1<<13, /* VLAN Packet */ + GMR_FS_JABBER = 1<<12, /* Jabber Packet */ + GMR_FS_UN_SIZE = 1<<11, /* Undersize Packet */ + GMR_FS_MC = 1<<10, /* Multicast Packet */ + GMR_FS_BC = 1<<9, /* Broadcast Packet */ + GMR_FS_RX_OK = 1<<8, /* Receive OK (Good Packet) */ + GMR_FS_GOOD_FC = 1<<7, /* Good Flow-Control Packet */ + GMR_FS_BAD_FC = 1<<6, /* Bad Flow-Control Packet */ + GMR_FS_MII_ERR = 1<<5, /* MII Error */ + GMR_FS_LONG_ERR = 1<<4, /* Too Long Packet */ + GMR_FS_FRAGMENT = 1<<3, /* Fragment */ + + GMR_FS_CRC_ERR = 1<<1, /* CRC Error */ + GMR_FS_RX_FF_OV = 1<<0, /* Rx FIFO Overflow */ + + GMR_FS_ANY_ERR = GMR_FS_RX_FF_OV | GMR_FS_CRC_ERR | + GMR_FS_FRAGMENT | GMR_FS_LONG_ERR | + GMR_FS_MII_ERR | GMR_FS_BAD_FC | + GMR_FS_UN_SIZE | GMR_FS_JABBER, +}; + +/* RX_GMF_CTRL_T 32 bit Rx GMAC FIFO Control/Test */ +enum { + RX_GCLKMAC_ENA = 1<<31, /* RX MAC Clock Gating Enable */ + RX_GCLKMAC_OFF = 1<<30, + + RX_STFW_DIS = 1<<29, /* RX Store and Forward Enable */ + RX_STFW_ENA = 1<<28, + + RX_TRUNC_ON = 1<<27, /* enable packet truncation */ + RX_TRUNC_OFF = 1<<26, /* disable packet truncation */ + RX_VLAN_STRIP_ON = 1<<25, /* enable VLAN stripping */ + RX_VLAN_STRIP_OFF = 1<<24, /* disable VLAN stripping */ + + RX_MACSEC_FLUSH_ON = 1<<23, + RX_MACSEC_FLUSH_OFF = 1<<22, + RX_MACSEC_ASF_FLUSH_ON = 1<<21, + RX_MACSEC_ASF_FLUSH_OFF = 1<<20, + + GMF_RX_OVER_ON = 1<<19, /* enable flushing on receive overrun */ + GMF_RX_OVER_OFF = 1<<18, /* disable flushing on receive overrun */ + GMF_ASF_RX_OVER_ON = 1<<17, /* enable flushing of ASF when overrun */ + GMF_ASF_RX_OVER_OFF = 1<<16, /* disable flushing of ASF when overrun */ + + GMF_WP_TST_ON = 1<<14, /* Write 
Pointer Test On */ + GMF_WP_TST_OFF = 1<<13, /* Write Pointer Test Off */ + GMF_WP_STEP = 1<<12, /* Write Pointer Step/Increment */ + + GMF_RP_TST_ON = 1<<10, /* Read Pointer Test On */ + GMF_RP_TST_OFF = 1<<9, /* Read Pointer Test Off */ + GMF_RP_STEP = 1<<8, /* Read Pointer Step/Increment */ + GMF_RX_F_FL_ON = 1<<7, /* Rx FIFO Flush Mode On */ + GMF_RX_F_FL_OFF = 1<<6, /* Rx FIFO Flush Mode Off */ + GMF_CLI_RX_FO = 1<<5, /* Clear IRQ Rx FIFO Overrun */ + GMF_CLI_RX_C = 1<<4, /* Clear IRQ Rx Frame Complete */ + + GMF_OPER_ON = 1<<3, /* Operational Mode On */ + GMF_OPER_OFF = 1<<2, /* Operational Mode Off */ + GMF_RST_CLR = 1<<1, /* Clear GMAC FIFO Reset */ + GMF_RST_SET = 1<<0, /* Set GMAC FIFO Reset */ + + RX_GMF_FL_THR_DEF = 0xa, /* flush threshold (default) */ + + GMF_RX_CTRL_DEF = GMF_OPER_ON | GMF_RX_F_FL_ON, +}; + +/* RX_GMF_FL_CTRL 16 bit Rx GMAC FIFO Flush Control (Yukon-Supreme) */ +enum { + RX_IPV6_SA_MOB_ENA = 1<<9, /* IPv6 SA Mobility Support Enable */ + RX_IPV6_SA_MOB_DIS = 1<<8, /* IPv6 SA Mobility Support Disable */ + RX_IPV6_DA_MOB_ENA = 1<<7, /* IPv6 DA Mobility Support Enable */ + RX_IPV6_DA_MOB_DIS = 1<<6, /* IPv6 DA Mobility Support Disable */ + RX_PTR_SYNCDLY_ENA = 1<<5, /* Pointers Delay Synch Enable */ + RX_PTR_SYNCDLY_DIS = 1<<4, /* Pointers Delay Synch Disable */ + RX_ASF_NEWFLAG_ENA = 1<<3, /* RX ASF Flag New Logic Enable */ + RX_ASF_NEWFLAG_DIS = 1<<2, /* RX ASF Flag New Logic Disable */ + RX_FLSH_MISSPKT_ENA = 1<<1, /* RX Flush Miss-Packet Enable */ + RX_FLSH_MISSPKT_DIS = 1<<0, /* RX Flush Miss-Packet Disable */ +}; + +/* TX_GMF_EA 32 bit Tx GMAC FIFO End Address */ +enum { + TX_DYN_WM_ENA = 3, /* Yukon-FE+ specific */ +}; + +/* TX_GMF_CTRL_T 32 bit Tx GMAC FIFO Control/Test */ +enum { + TX_STFW_DIS = 1<<31,/* Disable Store & Forward */ + TX_STFW_ENA = 1<<30,/* Enable Store & Forward */ + + TX_VLAN_TAG_ON = 1<<25,/* enable VLAN tagging */ + TX_VLAN_TAG_OFF = 1<<24,/* disable VLAN tagging */ + + TX_PCI_JUM_ENA = 1<<23,/* PCI Jumbo Mode enable */ + TX_PCI_JUM_DIS = 1<<22,/* PCI Jumbo Mode enable */ + + GMF_WSP_TST_ON = 1<<18,/* Write Shadow Pointer Test On */ + GMF_WSP_TST_OFF = 1<<17,/* Write Shadow Pointer Test Off */ + GMF_WSP_STEP = 1<<16,/* Write Shadow Pointer Step/Increment */ + + GMF_CLI_TX_FU = 1<<6, /* Clear IRQ Tx FIFO Underrun */ + GMF_CLI_TX_FC = 1<<5, /* Clear IRQ Tx Frame Complete */ + GMF_CLI_TX_PE = 1<<4, /* Clear IRQ Tx Parity Error */ +}; + +/* GMAC_TI_ST_CTRL 8 bit Time Stamp Timer Ctrl Reg (YUKON only) */ +enum { + GMT_ST_START = 1<<2, /* Start Time Stamp Timer */ + GMT_ST_STOP = 1<<1, /* Stop Time Stamp Timer */ + GMT_ST_CLR_IRQ = 1<<0, /* Clear Time Stamp Timer IRQ */ +}; + +/* B28_Y2_ASF_STAT_CMD 32 bit ASF Status and Command Reg */ +enum { + Y2_ASF_OS_PRES = 1<<4, /* ASF operation system present */ + Y2_ASF_RESET = 1<<3, /* ASF system in reset state */ + Y2_ASF_RUNNING = 1<<2, /* ASF system operational */ + Y2_ASF_CLR_HSTI = 1<<1, /* Clear ASF IRQ */ + Y2_ASF_IRQ = 1<<0, /* Issue an IRQ to ASF system */ + + Y2_ASF_UC_STATE = 3<<2, /* ASF uC State */ + Y2_ASF_CLK_HALT = 0, /* ASF system clock stopped */ +}; + +/* B28_Y2_ASF_HOST_COM 32 bit ASF Host Communication Reg */ +enum { + Y2_ASF_CLR_ASFI = 1<<1, /* Clear host IRQ */ + Y2_ASF_HOST_IRQ = 1<<0, /* Issue an IRQ to HOST system */ +}; +/* HCU_CCSR CPU Control and Status Register */ +enum { + HCU_CCSR_SMBALERT_MONITOR= 1<<27, /* SMBALERT pin monitor */ + HCU_CCSR_CPU_SLEEP = 1<<26, /* CPU sleep status */ + /* Clock Stretching Timeout */ + HCU_CCSR_CS_TO = 1<<25, + HCU_CCSR_WDOG = 1<<24, 
/* Watchdog Reset */ + + HCU_CCSR_CLR_IRQ_HOST = 1<<17, /* Clear IRQ_HOST */ + HCU_CCSR_SET_IRQ_HCU = 1<<16, /* Set IRQ_HCU */ + + HCU_CCSR_AHB_RST = 1<<9, /* Reset AHB bridge */ + HCU_CCSR_CPU_RST_MODE = 1<<8, /* CPU Reset Mode */ + + HCU_CCSR_SET_SYNC_CPU = 1<<5, + HCU_CCSR_CPU_CLK_DIVIDE_MSK = 3<<3,/* CPU Clock Divide */ + HCU_CCSR_CPU_CLK_DIVIDE_BASE= 1<<3, + HCU_CCSR_OS_PRSNT = 1<<2, /* ASF OS Present */ +/* Microcontroller State */ + HCU_CCSR_UC_STATE_MSK = 3, + HCU_CCSR_UC_STATE_BASE = 1<<0, + HCU_CCSR_ASF_RESET = 0, + HCU_CCSR_ASF_HALTED = 1<<1, + HCU_CCSR_ASF_RUNNING = 1<<0, +}; + +/* HCU_HCSR Host Control and Status Register */ +enum { + HCU_HCSR_SET_IRQ_CPU = 1<<16, /* Set IRQ_CPU */ + + HCU_HCSR_CLR_IRQ_HCU = 1<<1, /* Clear IRQ_HCU */ + HCU_HCSR_SET_IRQ_HOST = 1<<0, /* Set IRQ_HOST */ +}; + +/* STAT_CTRL 32 bit Status BMU control register (Yukon-2 only) */ +enum { + SC_STAT_CLR_IRQ = 1<<4, /* Status Burst IRQ clear */ + SC_STAT_OP_ON = 1<<3, /* Operational Mode On */ + SC_STAT_OP_OFF = 1<<2, /* Operational Mode Off */ + SC_STAT_RST_CLR = 1<<1, /* Clear Status Unit Reset (Enable) */ + SC_STAT_RST_SET = 1<<0, /* Set Status Unit Reset */ +}; + +/* GMAC_CTRL 32 bit GMAC Control Reg (YUKON only) */ +enum { + GMC_SET_RST = 1<<15,/* MAC SEC RST */ + GMC_SEC_RST_OFF = 1<<14,/* MAC SEC RSt OFF */ + GMC_BYP_MACSECRX_ON = 1<<13,/* Bypass macsec RX */ + GMC_BYP_MACSECRX_OFF= 1<<12,/* Bypass macsec RX off */ + GMC_BYP_MACSECTX_ON = 1<<11,/* Bypass macsec TX */ + GMC_BYP_MACSECTX_OFF= 1<<10,/* Bypass macsec TX off*/ + GMC_BYP_RETR_ON = 1<<9, /* Bypass retransmit FIFO On */ + GMC_BYP_RETR_OFF= 1<<8, /* Bypass retransmit FIFO Off */ + + GMC_H_BURST_ON = 1<<7, /* Half Duplex Burst Mode On */ + GMC_H_BURST_OFF = 1<<6, /* Half Duplex Burst Mode Off */ + GMC_F_LOOPB_ON = 1<<5, /* FIFO Loopback On */ + GMC_F_LOOPB_OFF = 1<<4, /* FIFO Loopback Off */ + GMC_PAUSE_ON = 1<<3, /* Pause On */ + GMC_PAUSE_OFF = 1<<2, /* Pause Off */ + GMC_RST_CLR = 1<<1, /* Clear GMAC Reset */ + GMC_RST_SET = 1<<0, /* Set GMAC Reset */ +}; + +/* GPHY_CTRL 32 bit GPHY Control Reg (YUKON only) */ +enum { + GPC_TX_PAUSE = 1<<30, /* Tx pause enabled (ro) */ + GPC_RX_PAUSE = 1<<29, /* Rx pause enabled (ro) */ + GPC_SPEED = 3<<27, /* PHY speed (ro) */ + GPC_LINK = 1<<26, /* Link up (ro) */ + GPC_DUPLEX = 1<<25, /* Duplex (ro) */ + GPC_CLOCK = 1<<24, /* 125Mhz clock stable (ro) */ + + GPC_PDOWN = 1<<23, /* Internal regulator 2.5 power down */ + GPC_TSTMODE = 1<<22, /* Test mode */ + GPC_REG18 = 1<<21, /* Reg18 Power down */ + GPC_REG12SEL = 3<<19, /* Reg12 power setting */ + GPC_REG18SEL = 3<<17, /* Reg18 power setting */ + GPC_SPILOCK = 1<<16, /* SPI lock (ASF) */ + + GPC_LEDMUX = 3<<14, /* LED Mux */ + GPC_INTPOL = 1<<13, /* Interrupt polarity */ + GPC_DETECT = 1<<12, /* Energy detect */ + GPC_1000HD = 1<<11, /* Enable 1000Mbit HD */ + GPC_SLAVE = 1<<10, /* Slave mode */ + GPC_PAUSE = 1<<9, /* Pause enable */ + GPC_LEDCTL = 3<<6, /* GPHY Leds */ + + GPC_RST_CLR = 1<<1, /* Clear GPHY Reset */ + GPC_RST_SET = 1<<0, /* Set GPHY Reset */ +}; + +/* GMAC_IRQ_SRC 8 bit GMAC Interrupt Source Reg (YUKON only) */ +/* GMAC_IRQ_MSK 8 bit GMAC Interrupt Mask Reg (YUKON only) */ +enum { + GM_IS_TX_CO_OV = 1<<5, /* Transmit Counter Overflow IRQ */ + GM_IS_RX_CO_OV = 1<<4, /* Receive Counter Overflow IRQ */ + GM_IS_TX_FF_UR = 1<<3, /* Transmit FIFO Underrun */ + GM_IS_TX_COMPL = 1<<2, /* Frame Transmission Complete */ + GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */ + GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */ + +#define 
GMAC_DEF_MSK (GM_IS_TX_FF_UR | GM_IS_RX_FF_OR) +}; + +/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */ +enum { /* Bits 15.. 2: reserved */ + GMLC_RST_CLR = 1<<1, /* Clear GMAC Link Reset */ + GMLC_RST_SET = 1<<0, /* Set GMAC Link Reset */ +}; + + +/* WOL_CTRL_STAT 16 bit WOL Control/Status Reg */ +enum { + WOL_CTL_LINK_CHG_OCC = 1<<15, + WOL_CTL_MAGIC_PKT_OCC = 1<<14, + WOL_CTL_PATTERN_OCC = 1<<13, + WOL_CTL_CLEAR_RESULT = 1<<12, + WOL_CTL_ENA_PME_ON_LINK_CHG = 1<<11, + WOL_CTL_DIS_PME_ON_LINK_CHG = 1<<10, + WOL_CTL_ENA_PME_ON_MAGIC_PKT = 1<<9, + WOL_CTL_DIS_PME_ON_MAGIC_PKT = 1<<8, + WOL_CTL_ENA_PME_ON_PATTERN = 1<<7, + WOL_CTL_DIS_PME_ON_PATTERN = 1<<6, + WOL_CTL_ENA_LINK_CHG_UNIT = 1<<5, + WOL_CTL_DIS_LINK_CHG_UNIT = 1<<4, + WOL_CTL_ENA_MAGIC_PKT_UNIT = 1<<3, + WOL_CTL_DIS_MAGIC_PKT_UNIT = 1<<2, + WOL_CTL_ENA_PATTERN_UNIT = 1<<1, + WOL_CTL_DIS_PATTERN_UNIT = 1<<0, +}; + + +/* Control flags */ +enum { + UDPTCP = 1<<0, + CALSUM = 1<<1, + WR_SUM = 1<<2, + INIT_SUM= 1<<3, + LOCK_SUM= 1<<4, + INS_VLAN= 1<<5, + EOP = 1<<7, +}; + +enum { + HW_OWNER = 1<<7, + OP_TCPWRITE = 0x11, + OP_TCPSTART = 0x12, + OP_TCPINIT = 0x14, + OP_TCPLCK = 0x18, + OP_TCPCHKSUM = OP_TCPSTART, + OP_TCPIS = OP_TCPINIT | OP_TCPSTART, + OP_TCPLW = OP_TCPLCK | OP_TCPWRITE, + OP_TCPLSW = OP_TCPLCK | OP_TCPSTART | OP_TCPWRITE, + OP_TCPLISW = OP_TCPLCK | OP_TCPINIT | OP_TCPSTART | OP_TCPWRITE, + + OP_ADDR64 = 0x21, + OP_VLAN = 0x22, + OP_ADDR64VLAN = OP_ADDR64 | OP_VLAN, + OP_LRGLEN = 0x24, + OP_LRGLENVLAN = OP_LRGLEN | OP_VLAN, + OP_MSS = 0x28, + OP_MSSVLAN = OP_MSS | OP_VLAN, + + OP_BUFFER = 0x40, + OP_PACKET = 0x41, + OP_LARGESEND = 0x43, + OP_LSOV2 = 0x45, + +/* YUKON-2 STATUS opcodes defines */ + OP_RXSTAT = 0x60, + OP_RXTIMESTAMP = 0x61, + OP_RXVLAN = 0x62, + OP_RXCHKS = 0x64, + OP_RXCHKSVLAN = OP_RXCHKS | OP_RXVLAN, + OP_RXTIMEVLAN = OP_RXTIMESTAMP | OP_RXVLAN, + OP_RSS_HASH = 0x65, + OP_TXINDEXLE = 0x68, + OP_MACSEC = 0x6c, + OP_PUTIDX = 0x70, +}; + +enum status_css { + CSS_TCPUDPCSOK = 1<<7, /* TCP / UDP checksum is ok */ + CSS_ISUDP = 1<<6, /* packet is a UDP packet */ + CSS_ISTCP = 1<<5, /* packet is a TCP packet */ + CSS_ISIPFRAG = 1<<4, /* packet is a TCP/UDP frag, CS calc not done */ + CSS_ISIPV6 = 1<<3, /* packet is a IPv6 packet */ + CSS_IPV4CSUMOK = 1<<2, /* IP v4: TCP header checksum is ok */ + CSS_ISIPV4 = 1<<1, /* packet is a IPv4 packet */ + CSS_LINK_BIT = 1<<0, /* port number (legacy) */ +}; + +/* Yukon 2 hardware interface */ +struct sky2_tx_le { + __le32 addr; + __le16 length; /* also vlan tag or checksum start */ + u8 ctrl; + u8 opcode; +} __packed; + +struct sky2_rx_le { + __le32 addr; + __le16 length; + u8 ctrl; + u8 opcode; +} __packed; + +struct sky2_status_le { + __le32 status; /* also checksum */ + __le16 length; /* also vlan tag */ + u8 css; + u8 opcode; +} __packed; + +struct tx_ring_info { + struct sk_buff *skb; + unsigned long flags; +#define TX_MAP_SINGLE 0x0001 +#define TX_MAP_PAGE 0x0002 + DEFINE_DMA_UNMAP_ADDR(mapaddr); + DEFINE_DMA_UNMAP_LEN(maplen); +}; + +struct rx_ring_info { + struct sk_buff *skb; + dma_addr_t data_addr; + DEFINE_DMA_UNMAP_LEN(data_size); + dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT ?: 1]; +}; + +enum flow_control { + FC_NONE = 0, + FC_TX = 1, + FC_RX = 2, + FC_BOTH = 3, +}; + +struct sky2_stats { + struct u64_stats_sync syncp; + u64 packets; + u64 bytes; +}; + +struct sky2_port { + struct sky2_hw *hw; + struct net_device *netdev; + unsigned port; + u32 msg_enable; + spinlock_t phy_lock; + + struct tx_ring_info *tx_ring; + struct sky2_tx_le 
*tx_le; + struct sky2_stats tx_stats; + + u16 tx_ring_size; + u16 tx_cons; /* next le to check */ + u16 tx_prod; /* next le to use */ + u16 tx_next; /* debug only */ + + u16 tx_pending; + u16 tx_last_mss; + u32 tx_last_upper; + u32 tx_tcpsum; + + struct rx_ring_info *rx_ring ____cacheline_aligned_in_smp; + struct sky2_rx_le *rx_le; + struct sky2_stats rx_stats; + + u16 rx_next; /* next re to check */ + u16 rx_put; /* next le index to use */ + u16 rx_pending; + u16 rx_data_size; + u16 rx_nfrags; + + unsigned long last_rx; + struct { + unsigned long last; + u32 mac_rp; + u8 mac_lev; + u8 fifo_rp; + u8 fifo_lev; + } check; + + dma_addr_t rx_le_map; + dma_addr_t tx_le_map; + + u16 advertising; /* ADVERTISED_ bits */ + u16 speed; /* SPEED_1000, SPEED_100, ... */ + u8 wol; /* WAKE_ bits */ + u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */ + u16 flags; +#define SKY2_FLAG_AUTO_SPEED 0x0002 +#define SKY2_FLAG_AUTO_PAUSE 0x0004 + + enum flow_control flow_mode; + enum flow_control flow_status; + +#ifdef CONFIG_SKY2_DEBUG + struct dentry *debugfs; +#endif +}; + +struct sky2_hw { + void __iomem *regs; + struct pci_dev *pdev; + struct napi_struct napi; + struct net_device *dev[2]; + unsigned long flags; +#define SKY2_HW_USE_MSI 0x00000001 +#define SKY2_HW_FIBRE_PHY 0x00000002 +#define SKY2_HW_GIGABIT 0x00000004 +#define SKY2_HW_NEWER_PHY 0x00000008 +#define SKY2_HW_RAM_BUFFER 0x00000010 +#define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */ +#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */ +#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */ +#define SKY2_HW_RSS_BROKEN 0x00000100 +#define SKY2_HW_VLAN_BROKEN 0x00000200 +#define SKY2_HW_RSS_CHKSUM 0x00000400 /* RSS requires chksum */ +#define SKY2_HW_IRQ_SETUP 0x00000800 + + u8 chip_id; + u8 chip_rev; + u8 pmd_type; + u8 ports; + + struct sky2_status_le *st_le; + u32 st_size; + u32 st_idx; + dma_addr_t st_dma; + + struct timer_list watchdog_timer; + struct work_struct restart_work; + wait_queue_head_t msi_wait; + + char irq_name[]; +}; + +static inline int sky2_is_copper(const struct sky2_hw *hw) +{ + return !(hw->flags & SKY2_HW_FIBRE_PHY); +} + +/* Register accessor for memory mapped device */ +static inline u32 sky2_read32(const struct sky2_hw *hw, unsigned reg) +{ + return readl(hw->regs + reg); +} + +static inline u16 sky2_read16(const struct sky2_hw *hw, unsigned reg) +{ + return readw(hw->regs + reg); +} + +static inline u8 sky2_read8(const struct sky2_hw *hw, unsigned reg) +{ + return readb(hw->regs + reg); +} + +static inline void sky2_write32(const struct sky2_hw *hw, unsigned reg, u32 val) +{ + writel(val, hw->regs + reg); +} + +static inline void sky2_write16(const struct sky2_hw *hw, unsigned reg, u16 val) +{ + writew(val, hw->regs + reg); +} + +static inline void sky2_write8(const struct sky2_hw *hw, unsigned reg, u8 val) +{ + writeb(val, hw->regs + reg); +} + +/* Yukon PHY related registers */ +#define SK_GMAC_REG(port,reg) \ + (BASE_GMAC_1 + (port) * (BASE_GMAC_2-BASE_GMAC_1) + (reg)) +#define GM_PHY_RETRIES 100 + +static inline u16 gma_read16(const struct sky2_hw *hw, unsigned port, unsigned reg) +{ + return sky2_read16(hw, SK_GMAC_REG(port,reg)); +} + +static inline u32 gma_read32(struct sky2_hw *hw, unsigned port, unsigned reg) +{ + unsigned base = SK_GMAC_REG(port, reg); + return (u32) sky2_read16(hw, base) + | (u32) sky2_read16(hw, base+4) << 16; +} + +static inline u64 gma_read64(struct sky2_hw *hw, unsigned port, unsigned reg) +{ + unsigned base = SK_GMAC_REG(port, reg); + + return (u64) 
sky2_read16(hw, base) + | (u64) sky2_read16(hw, base+4) << 16 + | (u64) sky2_read16(hw, base+8) << 32 + | (u64) sky2_read16(hw, base+12) << 48; +} + +/* There is no way to atomically read32 bit values from PHY, so retry */ +static inline u32 get_stats32(struct sky2_hw *hw, unsigned port, unsigned reg) +{ + u32 val; + + do { + val = gma_read32(hw, port, reg); + } while (gma_read32(hw, port, reg) != val); + + return val; +} + +static inline u64 get_stats64(struct sky2_hw *hw, unsigned port, unsigned reg) +{ + u64 val; + + do { + val = gma_read64(hw, port, reg); + } while (gma_read64(hw, port, reg) != val); + + return val; +} + +static inline void gma_write16(const struct sky2_hw *hw, unsigned port, int r, u16 v) +{ + sky2_write16(hw, SK_GMAC_REG(port,r), v); +} + +static inline void gma_set_addr(struct sky2_hw *hw, unsigned port, unsigned reg, + const u8 *addr) +{ + gma_write16(hw, port, reg, (u16) addr[0] | ((u16) addr[1] << 8)); + gma_write16(hw, port, reg+4,(u16) addr[2] | ((u16) addr[3] << 8)); + gma_write16(hw, port, reg+8,(u16) addr[4] | ((u16) addr[5] << 8)); +} + +/* PCI config space access */ +static inline u32 sky2_pci_read32(const struct sky2_hw *hw, unsigned reg) +{ + return sky2_read32(hw, Y2_CFG_SPC + reg); +} + +static inline u16 sky2_pci_read16(const struct sky2_hw *hw, unsigned reg) +{ + return sky2_read16(hw, Y2_CFG_SPC + reg); +} + +static inline void sky2_pci_write32(struct sky2_hw *hw, unsigned reg, u32 val) +{ + sky2_write32(hw, Y2_CFG_SPC + reg, val); +} + +static inline void sky2_pci_write16(struct sky2_hw *hw, unsigned reg, u16 val) +{ + sky2_write16(hw, Y2_CFG_SPC + reg, val); +} +#endif |
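
The GM_GP_STAT bits defined above give a direct snapshot of the negotiated link. As a rough sketch of how they compose (the function name and the output parameters are illustrative, not part of the driver):

/* Illustrative only: decode link/duplex/speed from GM_GP_STAT,
 * using the gma_read16() accessor defined above.
 */
static void example_decode_link(struct sky2_hw *hw, unsigned port,
				int *speed, int *full_duplex, int *link_up)
{
	u16 gpsr = gma_read16(hw, port, GM_GP_STAT);

	*link_up     = !!(gpsr & GM_GPSR_LINK_UP);
	*full_duplex = !!(gpsr & GM_GPSR_DUPLEX);

	if (gpsr & GM_GPSR_GIG_SPEED)		/* 1 = 1000 Mbps */
		*speed = 1000;
	else if (gpsr & GM_GPSR_SPEED)		/* 1 = 100 Mbps */
		*speed = 100;
	else
		*speed = 10;
}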
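
The SMI control/data registers together with GM_PHY_RETRIES are enough for a polled MDIO read of the GPHY. A minimal sketch, assuming <linux/delay.h> for udelay() and <linux/errno.h> for the error code; the helper name, the PHY-address parameter and the 10 µs poll interval are assumptions of this example:

static int example_gm_phy_read(struct sky2_hw *hw, unsigned port,
			       unsigned phy_addr, u16 phy_reg, u16 *val)
{
	int i;

	/* Issue a read: PHY address, register address, read opcode. */
	gma_write16(hw, port, GM_SMI_CTRL,
		    GM_SMI_CT_PHY_AD(phy_addr) |
		    GM_SMI_CT_REG_AD(phy_reg) |
		    GM_SMI_CT_OP_RD);

	/* Poll until the controller flags the read data as valid. */
	for (i = 0; i < GM_PHY_RETRIES; i++) {
		u16 ctrl = gma_read16(hw, port, GM_SMI_CTRL);

		if (ctrl & GM_SMI_CT_RD_VAL) {
			*val = gma_read16(hw, port, GM_SMI_DATA);
			return 0;
		}
		udelay(10);
	}

	return -ETIMEDOUT;
}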
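
Because the MIB counters cannot be latched, get_stats32()/get_stats64() above simply re-read until two consecutive values agree. A sketch of gathering a few Rx statistics with them (the function name and the choice of counters are illustrative):

static void example_rx_mib_snapshot(struct sky2_hw *hw, unsigned port,
				    u64 *octets, u32 *ucast, u32 *fcs_err)
{
	/* The 64-bit octet counter starts at GM_RXO_OK_LO; gma_read64()
	 * picks up the high word automatically (offsets +8/+12).
	 */
	*octets  = get_stats64(hw, port, GM_RXO_OK_LO);

	/* 32-bit frame counters: good unicast frames and FCS errors. */
	*ucast   = get_stats32(hw, port, GM_RXF_UC_OK);
	*fcs_err = get_stats32(hw, port, GM_RXF_FCS_ERR);
}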
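
The GM_GP_CTRL, GM_SERIAL_MODE and source-address registers are programmed by OR-ing the bit and field macros above into a single 16-bit write each. The following is only a sketch of forcing 1000 Mbit/s full duplex with the default IPG/data-blinder values; a real bring-up sequence does considerably more (resets, flow control, FIFO setup), and the function name is hypothetical:

static void example_gmac_minimal_setup(struct sky2_hw *hw, unsigned port,
				       const u8 *mac_addr)
{
	/* Force gigabit full duplex, disable auto-update, enable Rx/Tx. */
	gma_write16(hw, port, GM_GP_CTRL,
		    GM_GPCR_SPEED_1000 | GM_GPCR_DUP_FULL |
		    GM_GPCR_AU_SPD_DIS | GM_GPCR_AU_DUP_DIS |
		    GM_GPCR_AU_FCT_DIS |
		    GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);

	/* Default data blinder and gigabit inter-packet gap. */
	gma_write16(hw, port, GM_SERIAL_MODE,
		    DATA_BLIND_VAL(DATA_BLIND_DEF) |
		    IPG_DATA_VAL(IPG_DATA_DEF_1000));

	/* Station address goes into both source-address banks. */
	gma_set_addr(hw, port, GM_SRC_ADDR_1L, mac_addr);
	gma_set_addr(hw, port, GM_SRC_ADDR_2L, mac_addr);
}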
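
struct sky2_tx_le is the little-endian transmit list element consumed by the Yukon-2 DMA engine; its opcode byte carries one of the OP_* codes above plus HW_OWNER once the element is handed to hardware. A sketch of filling a single-buffer packet element follows (ring management, any 64-bit address prefix via OP_ADDR64, and checksum/VLAN elements are deliberately omitted; "le" is assumed to point at the next free slot):

static void example_fill_packet_le(struct sky2_tx_le *le,
				   dma_addr_t map, u16 len)
{
	le->addr   = cpu_to_le32(lower_32_bits(map)); /* low 32 address bits */
	le->length = cpu_to_le16(len);                /* buffer length */
	le->ctrl   = EOP;                             /* last buffer of frame */
	le->opcode = OP_PACKET | HW_OWNER;            /* hand it to the MAC */
}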