Diffstat (limited to 'drivers/net/ethernet/aquantia/atlantic')
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/Makefile | 36
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_cfg.h | 86
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_common.h | 77
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c | 158
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.h | 17
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c | 1026
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h | 17
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_filters.c | 889
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_filters.h | 36
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw.h | 404
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c | 125
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h | 42
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_macsec.c | 1770
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_macsec.h | 133
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_main.c | 541
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_main.h | 22
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 1686
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.h | 221
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | 487
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h | 32
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_phy.c | 172
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_phy.h | 36
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ptp.c | 1423
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ptp.h | 163
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 989
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.h | 217
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_rss.h | 23
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_utils.h | 39
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_vec.c | 382
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_vec.h | 48
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | 981
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h | 23
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h | 90
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 1715
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h | 75
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h | 160
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c | 1853
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h | 872
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h | 2878
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c | 1099
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h | 643
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c | 732
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c | 863
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h | 16
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_internal.h | 127
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.c | 234
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.h | 102
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh_internal.h | 391
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.c | 130
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h | 636
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c | 616
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/macsec/MSS_Egress_registers.h | 73
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/macsec/MSS_Ingress_registers.h | 77
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c | 2477
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.h | 323
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h | 914
56 files changed, 29397 insertions(+), 0 deletions(-)
diff --git a/drivers/net/ethernet/aquantia/atlantic/Makefile b/drivers/net/ethernet/aquantia/atlantic/Makefile
new file mode 100644
index 000000000..8ebcc68e8
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/Makefile
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: GPL-2.0-only
+################################################################################
+#
+# aQuantia Ethernet Controller AQtion Linux Driver
+# Copyright(c) 2014-2017 aQuantia Corporation.
+#
+################################################################################
+
+obj-$(CONFIG_AQTION) += atlantic.o
+
+ccflags-y += -I$(srctree)/$(src)
+
+atlantic-objs := aq_main.o \
+ aq_nic.o \
+ aq_pci_func.o \
+ aq_vec.o \
+ aq_ring.o \
+ aq_hw_utils.o \
+ aq_ethtool.o \
+ aq_drvinfo.o \
+ aq_filters.o \
+ aq_phy.o \
+ hw_atl/hw_atl_a0.o \
+ hw_atl/hw_atl_b0.o \
+ hw_atl/hw_atl_utils.o \
+ hw_atl/hw_atl_utils_fw2x.o \
+ hw_atl/hw_atl_llh.o \
+ hw_atl2/hw_atl2.o \
+ hw_atl2/hw_atl2_utils.o \
+ hw_atl2/hw_atl2_utils_fw.o \
+ hw_atl2/hw_atl2_llh.o \
+ macsec/macsec_api.o
+
+atlantic-$(CONFIG_MACSEC) += aq_macsec.o
+
+atlantic-$(CONFIG_PTP_1588_CLOCK) += aq_ptp.o
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
new file mode 100644
index 000000000..7e9c74b14
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
+ */
+
+/* File aq_cfg.h: Definition of configuration parameters and constants. */
+
+#ifndef AQ_CFG_H
+#define AQ_CFG_H
+
+#define AQ_CFG_VECS_DEF 8U
+#define AQ_CFG_TCS_DEF 1U
+
+#define AQ_CFG_TXDS_DEF 4096U
+#define AQ_CFG_RXDS_DEF 2048U
+
+#define AQ_CFG_IS_POLLING_DEF 0U
+
+#define AQ_CFG_FORCE_LEGACY_INT 0U
+
+#define AQ_CFG_INTERRUPT_MODERATION_OFF 0
+#define AQ_CFG_INTERRUPT_MODERATION_ON 1
+#define AQ_CFG_INTERRUPT_MODERATION_AUTO 0xFFFFU
+
+#define AQ_CFG_INTERRUPT_MODERATION_USEC_MAX (0x1FF * 2)
+
+#define AQ_CFG_IRQ_MASK 0x3FFU
+
+#define AQ_CFG_VECS_MAX 8U
+#define AQ_CFG_TCS_MAX 8U
+
+#define AQ_CFG_TX_FRAME_MAX (16U * 1024U)
+#define AQ_CFG_RX_FRAME_MAX (2U * 1024U)
+
+#define AQ_CFG_TX_CLEAN_BUDGET 256U
+
+#define AQ_CFG_RX_REFILL_THRES 32U
+
+#define AQ_CFG_RX_HDR_SIZE 256U
+
+#define AQ_CFG_RX_PAGEORDER 0U
+#define AQ_CFG_XDP_PAGEORDER 2U
+
+/* LRO */
+#define AQ_CFG_IS_LRO_DEF 1U
+
+/* RSS */
+#define AQ_CFG_RSS_INDIRECTION_TABLE_MAX 64U
+#define AQ_CFG_RSS_HASHKEY_SIZE 40U
+
+#define AQ_CFG_IS_RSS_DEF 1U
+#define AQ_CFG_NUM_RSS_QUEUES_DEF AQ_CFG_VECS_DEF
+#define AQ_CFG_RSS_BASE_CPU_NUM_DEF 0U
+
+#define AQ_CFG_PCI_FUNC_MSIX_IRQS 9U
+#define AQ_CFG_PCI_FUNC_PORTS 2U
+
+#define AQ_CFG_SERVICE_TIMER_INTERVAL (1 * HZ)
+#define AQ_CFG_POLLING_TIMER_INTERVAL ((unsigned int)(2 * HZ))
+
+#define AQ_CFG_SKB_FRAGS_MAX 32U
+
+/* Number of free descriptors required in a ring before its queue is resumed.
+ */
+#define AQ_CFG_RESTART_DESC_THRES (AQ_CFG_SKB_FRAGS_MAX * 2)
+
+/*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/
+
+#define AQ_CFG_FC_MODE AQ_NIC_FC_FULL
+
+/* Default WOL modes used on initialization */
+#define AQ_CFG_WOL_MODES WAKE_MAGIC
+
+#define AQ_CFG_SPEED_MSK 0xFFFFU /* 0xFFFFU==auto_neg */
+
+#define AQ_CFG_IS_AUTONEG_DEF 1U
+#define AQ_CFG_MTU_DEF 1514U
+
+#define AQ_CFG_LOCK_TRYS 100U
+
+#define AQ_CFG_DRV_AUTHOR "Marvell"
+#define AQ_CFG_DRV_DESC "Marvell (Aquantia) Corporation(R) Network Driver"
+#define AQ_CFG_DRV_NAME "atlantic"
+
+#endif /* AQ_CFG_H */
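Note on AQ_CFG_RESTART_DESC_THRES above: at 2 * AQ_CFG_SKB_FRAGS_MAX = 64
descriptors, it adds hysteresis when waking a stopped TX queue, so the queue
is only restarted once a worst-case fragmented packet fits. A minimal sketch
of that pattern, with a hypothetical ring layout standing in for the driver's
real accounting in aq_ring.c:

    #include <linux/netdevice.h>

    /* Hypothetical TX ring: head = consumer, tail = producer,
     * size is a power of two. Not the driver's actual aq_ring_s.
     */
    struct tx_ring_sketch {
    	unsigned int head;
    	unsigned int tail;
    	unsigned int size;
    };

    static unsigned int tx_ring_avail(const struct tx_ring_sketch *r)
    {
    	/* free slots; one entry stays empty to tell full from empty */
    	return (r->head - r->tail - 1) & (r->size - 1);
    }

    /* Called after reaping TX completions. */
    static void tx_maybe_wake(struct net_device *ndev,
    			  const struct tx_ring_sketch *r, u16 qidx)
    {
    	if (tx_ring_avail(r) >= AQ_CFG_RESTART_DESC_THRES)
    		netif_wake_subqueue(ndev, qidx);
    }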
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
new file mode 100644
index 000000000..ace691d7c
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
+ */
+
+/* File aq_common.h: Basic includes for all files in the project. */
+
+#ifndef AQ_COMMON_H
+#define AQ_COMMON_H
+
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+#include <linux/if_vlan.h>
+#include "aq_cfg.h"
+#include "aq_utils.h"
+
+#define PCI_VENDOR_ID_AQUANTIA 0x1D6A
+
+#define AQ_DEVICE_ID_0001 0x0001
+#define AQ_DEVICE_ID_D100 0xD100
+#define AQ_DEVICE_ID_D107 0xD107
+#define AQ_DEVICE_ID_D108 0xD108
+#define AQ_DEVICE_ID_D109 0xD109
+
+#define AQ_DEVICE_ID_AQC100 0x00B1
+#define AQ_DEVICE_ID_AQC107 0x07B1
+#define AQ_DEVICE_ID_AQC108 0x08B1
+#define AQ_DEVICE_ID_AQC109 0x09B1
+#define AQ_DEVICE_ID_AQC111 0x11B1
+#define AQ_DEVICE_ID_AQC112 0x12B1
+
+#define AQ_DEVICE_ID_AQC100S 0x80B1
+#define AQ_DEVICE_ID_AQC107S 0x87B1
+#define AQ_DEVICE_ID_AQC108S 0x88B1
+#define AQ_DEVICE_ID_AQC109S 0x89B1
+#define AQ_DEVICE_ID_AQC111S 0x91B1
+#define AQ_DEVICE_ID_AQC112S 0x92B1
+
+#define AQ_DEVICE_ID_AQC113DEV 0x00C0
+#define AQ_DEVICE_ID_AQC113CS 0x94C0
+#define AQ_DEVICE_ID_AQC113CA 0x34C0
+#define AQ_DEVICE_ID_AQC114CS 0x93C0
+#define AQ_DEVICE_ID_AQC113 0x04C0
+#define AQ_DEVICE_ID_AQC113C 0x14C0
+#define AQ_DEVICE_ID_AQC115C 0x12C0
+#define AQ_DEVICE_ID_AQC116C 0x11C0
+
+#define HW_ATL_NIC_NAME "Marvell (aQuantia) AQtion 10Gbit Network Adapter"
+
+#define AQ_HWREV_ANY 0
+#define AQ_HWREV_1 1
+#define AQ_HWREV_2 2
+
+#define AQ_NIC_RATE_10G BIT(0)
+#define AQ_NIC_RATE_5G BIT(1)
+#define AQ_NIC_RATE_2G5 BIT(2)
+#define AQ_NIC_RATE_1G BIT(3)
+#define AQ_NIC_RATE_100M BIT(4)
+#define AQ_NIC_RATE_10M BIT(5)
+#define AQ_NIC_RATE_1G_HALF BIT(6)
+#define AQ_NIC_RATE_100M_HALF BIT(7)
+#define AQ_NIC_RATE_10M_HALF BIT(8)
+
+#define AQ_NIC_RATE_EEE_10G BIT(9)
+#define AQ_NIC_RATE_EEE_5G BIT(10)
+#define AQ_NIC_RATE_EEE_2G5 BIT(11)
+#define AQ_NIC_RATE_EEE_1G BIT(12)
+#define AQ_NIC_RATE_EEE_100M BIT(13)
+#define AQ_NIC_RATE_EEE_MSK (AQ_NIC_RATE_EEE_10G |\
+ AQ_NIC_RATE_EEE_5G |\
+ AQ_NIC_RATE_EEE_2G5 |\
+ AQ_NIC_RATE_EEE_1G |\
+ AQ_NIC_RATE_EEE_100M)
+
+#endif /* AQ_COMMON_H */
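The AQ_NIC_RATE_* bits above form the link-rate bitmap exchanged with the
firmware, with AQ_NIC_RATE_EEE_MSK collecting the EEE-capable rates. A small
decoding sketch (the helper is hypothetical, not part of the driver; kernel
context assumed for pr_info from <linux/printk.h>):

    /* Print the speeds advertised in an AQ_NIC_RATE_* mask. */
    static void aq_print_rates(unsigned int mask)
    {
    	if (mask & AQ_NIC_RATE_10G)
    		pr_info("10G\n");
    	if (mask & AQ_NIC_RATE_5G)
    		pr_info("5G\n");
    	if (mask & AQ_NIC_RATE_2G5)
    		pr_info("2.5G\n");
    	if (mask & AQ_NIC_RATE_1G)
    		pr_info("1G\n");
    	if (mask & AQ_NIC_RATE_100M)
    		pr_info("100M\n");
    	if (mask & AQ_NIC_RATE_10M)
    		pr_info("10M\n");
    	if (mask & AQ_NIC_RATE_EEE_MSK)
    		pr_info("EEE capable on some rates\n");
    }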
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c
new file mode 100644
index 000000000..d3526cd38
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
+ */
+
+/* File aq_drvinfo.c: Definition of common code for firmware info in sysfs. */
+
+#include <linux/init.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/hwmon.h>
+#include <linux/uaccess.h>
+
+#include "aq_drvinfo.h"
+#include "aq_nic.h"
+
+#if IS_REACHABLE(CONFIG_HWMON)
+static const char * const atl_temp_label[] = {
+ "PHY Temperature",
+ "MAC Temperature",
+};
+
+static int aq_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *value)
+{
+ struct aq_nic_s *aq_nic = dev_get_drvdata(dev);
+ int err = 0;
+ int temp;
+
+ if (!aq_nic)
+ return -EIO;
+
+ if (type != hwmon_temp || attr != hwmon_temp_input)
+ return -EOPNOTSUPP;
+
+ switch (channel) {
+ case 0:
+ if (!aq_nic->aq_fw_ops->get_phy_temp)
+ return -EOPNOTSUPP;
+
+ err = aq_nic->aq_fw_ops->get_phy_temp(aq_nic->aq_hw, &temp);
+ *value = temp;
+ break;
+ case 1:
+ if (!aq_nic->aq_fw_ops->get_mac_temp &&
+ !aq_nic->aq_hw_ops->hw_get_mac_temp)
+ return -EOPNOTSUPP;
+
+ if (aq_nic->aq_fw_ops->get_mac_temp)
+ err = aq_nic->aq_fw_ops->get_mac_temp(aq_nic->aq_hw, &temp);
+ else
+ err = aq_nic->aq_hw_ops->hw_get_mac_temp(aq_nic->aq_hw, &temp);
+ *value = temp;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static int aq_hwmon_read_string(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ struct aq_nic_s *aq_nic = dev_get_drvdata(dev);
+
+ if (!aq_nic)
+ return -EIO;
+
+ if (type != hwmon_temp || attr != hwmon_temp_label)
+ return -EOPNOTSUPP;
+
+ if (channel < ARRAY_SIZE(atl_temp_label))
+ *str = atl_temp_label[channel];
+ else
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static umode_t aq_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct aq_nic_s *nic = data;
+
+ if (type != hwmon_temp)
+ return 0;
+
+ if (channel == 0 && !nic->aq_fw_ops->get_phy_temp)
+ return 0;
+ else if (channel == 1 && !nic->aq_fw_ops->get_mac_temp &&
+ !nic->aq_hw_ops->hw_get_mac_temp)
+ return 0;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_label:
+ return 0444;
+ default:
+ return 0;
+ }
+}
+
+static const struct hwmon_ops aq_hwmon_ops = {
+ .is_visible = aq_hwmon_is_visible,
+ .read = aq_hwmon_read,
+ .read_string = aq_hwmon_read_string,
+};
+
+static u32 aq_hwmon_temp_config[] = {
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ 0,
+};
+
+static const struct hwmon_channel_info aq_hwmon_temp = {
+ .type = hwmon_temp,
+ .config = aq_hwmon_temp_config,
+};
+
+static const struct hwmon_channel_info *aq_hwmon_info[] = {
+ &aq_hwmon_temp,
+ NULL,
+};
+
+static const struct hwmon_chip_info aq_hwmon_chip_info = {
+ .ops = &aq_hwmon_ops,
+ .info = aq_hwmon_info,
+};
+
+int aq_drvinfo_init(struct net_device *ndev)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct device *dev = &aq_nic->pdev->dev;
+ struct device *hwmon_dev;
+ int err = 0;
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev,
+ ndev->name,
+ aq_nic,
+ &aq_hwmon_chip_info,
+ NULL);
+
+ if (IS_ERR(hwmon_dev))
+ err = PTR_ERR(hwmon_dev);
+
+ return err;
+}
+
+#else
+int aq_drvinfo_init(struct net_device *ndev) { return 0; }
+#endif
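When CONFIG_HWMON is reachable, the channels registered above surface through
the standard hwmon sysfs ABI as temp1 ("PHY Temperature") and temp2 ("MAC
Temperature"), in millidegrees Celsius. A minimal userspace reader follows;
"hwmon0" is a placeholder, so in practice match the right instance by its
name attribute first:

    #include <stdio.h>

    int main(void)
    {
    	char label[64];
    	long mdeg;
    	FILE *f;

    	/* "hwmon0" is a placeholder instance name */
    	f = fopen("/sys/class/hwmon/hwmon0/temp1_label", "r");
    	if (f) {
    		if (fgets(label, sizeof(label), f))
    			printf("%s", label); /* e.g. "PHY Temperature" */
    		fclose(f);
    	}

    	f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");
    	if (f) {
    		/* hwmon reports millidegrees C; assumes a positive reading */
    		if (fscanf(f, "%ld", &mdeg) == 1)
    			printf("%ld.%03ld degC\n", mdeg / 1000, mdeg % 1000);
    		fclose(f);
    	}
    	return 0;
    }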
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.h b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.h
new file mode 100644
index 000000000..59113a206
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
+ */
+
+/* File aq_drvinfo.h: Declaration of common code for firmware info in sysfs. */
+
+#ifndef AQ_DRVINFO_H
+#define AQ_DRVINFO_H
+
+struct net_device;
+
+int aq_drvinfo_init(struct net_device *ndev);
+
+#endif /* AQ_DRVINFO_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
new file mode 100644
index 000000000..ac4ea93bd
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -0,0 +1,1026 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
+ */
+
+/* File aq_ethtool.c: Definition of ethtool-related functions. */
+
+#include "aq_ethtool.h"
+#include "aq_nic.h"
+#include "aq_vec.h"
+#include "aq_ptp.h"
+#include "aq_filters.h"
+#include "aq_macsec.h"
+#include "aq_main.h"
+
+#include <linux/ptp_clock_kernel.h>
+
+static void aq_ethtool_get_regs(struct net_device *ndev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ u32 regs_count;
+
+ regs_count = aq_nic_get_regs_count(aq_nic);
+
+ memset(p, 0, regs_count * sizeof(u32));
+ aq_nic_get_regs(aq_nic, regs, p);
+}
+
+static int aq_ethtool_get_regs_len(struct net_device *ndev)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ u32 regs_count;
+
+ regs_count = aq_nic_get_regs_count(aq_nic);
+
+ return regs_count * sizeof(u32);
+}
+
+static u32 aq_ethtool_get_link(struct net_device *ndev)
+{
+ return ethtool_op_get_link(ndev);
+}
+
+static int aq_ethtool_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ aq_nic_get_link_ksettings(aq_nic, cmd);
+ cmd->base.speed = netif_carrier_ok(ndev) ?
+ aq_nic_get_link_speed(aq_nic) : 0U;
+
+ return 0;
+}
+
+static int
+aq_ethtool_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ return aq_nic_set_link_ksettings(aq_nic, cmd);
+}
+
+static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = {
+ "InPackets",
+ "InUCast",
+ "InMCast",
+ "InBCast",
+ "InErrors",
+ "OutPackets",
+ "OutUCast",
+ "OutMCast",
+ "OutBCast",
+ "InUCastOctets",
+ "OutUCastOctets",
+ "InMCastOctets",
+ "OutMCastOctets",
+ "InBCastOctets",
+ "OutBCastOctets",
+ "InOctets",
+ "OutOctets",
+ "InPacketsDma",
+ "OutPacketsDma",
+ "InOctetsDma",
+ "OutOctetsDma",
+ "InDroppedDma",
+};
+
+static const char * const aq_ethtool_queue_rx_stat_names[] = {
+ "%sQueue[%d] InPackets",
+ "%sQueue[%d] InJumboPackets",
+ "%sQueue[%d] InLroPackets",
+ "%sQueue[%d] InErrors",
+ "%sQueue[%d] AllocFails",
+ "%sQueue[%d] SkbAllocFails",
+ "%sQueue[%d] Polls",
+ "%sQueue[%d] PageFlips",
+ "%sQueue[%d] PageReuses",
+ "%sQueue[%d] PageFrees",
+ "%sQueue[%d] XdpAbort",
+ "%sQueue[%d] XdpDrop",
+ "%sQueue[%d] XdpPass",
+ "%sQueue[%d] XdpTx",
+ "%sQueue[%d] XdpInvalid",
+ "%sQueue[%d] XdpRedirect",
+};
+
+static const char * const aq_ethtool_queue_tx_stat_names[] = {
+ "%sQueue[%d] OutPackets",
+ "%sQueue[%d] Restarts",
+};
+
+#if IS_ENABLED(CONFIG_MACSEC)
+static const char aq_macsec_stat_names[][ETH_GSTRING_LEN] = {
+ "MACSec InCtlPackets",
+ "MACSec InTaggedMissPackets",
+ "MACSec InUntaggedMissPackets",
+ "MACSec InNotagPackets",
+ "MACSec InUntaggedPackets",
+ "MACSec InBadTagPackets",
+ "MACSec InNoSciPackets",
+ "MACSec InUnknownSciPackets",
+ "MACSec InCtrlPortPassPackets",
+ "MACSec InUnctrlPortPassPackets",
+ "MACSec InCtrlPortFailPackets",
+ "MACSec InUnctrlPortFailPackets",
+ "MACSec InTooLongPackets",
+ "MACSec InIgpocCtlPackets",
+ "MACSec InEccErrorPackets",
+ "MACSec InUnctrlHitDropRedir",
+ "MACSec OutCtlPackets",
+ "MACSec OutUnknownSaPackets",
+ "MACSec OutUntaggedPackets",
+ "MACSec OutTooLong",
+ "MACSec OutEccErrorPackets",
+ "MACSec OutUnctrlHitDropRedir",
+};
+
+static const char * const aq_macsec_txsc_stat_names[] = {
+ "MACSecTXSC%d ProtectedPkts",
+ "MACSecTXSC%d EncryptedPkts",
+ "MACSecTXSC%d ProtectedOctets",
+ "MACSecTXSC%d EncryptedOctets",
+};
+
+static const char * const aq_macsec_txsa_stat_names[] = {
+ "MACSecTXSC%dSA%d HitDropRedirect",
+ "MACSecTXSC%dSA%d Protected2Pkts",
+ "MACSecTXSC%dSA%d ProtectedPkts",
+ "MACSecTXSC%dSA%d EncryptedPkts",
+};
+
+static const char * const aq_macsec_rxsa_stat_names[] = {
+ "MACSecRXSC%dSA%d UntaggedHitPkts",
+ "MACSecRXSC%dSA%d CtrlHitDrpRedir",
+ "MACSecRXSC%dSA%d NotUsingSa",
+ "MACSecRXSC%dSA%d UnusedSa",
+ "MACSecRXSC%dSA%d NotValidPkts",
+ "MACSecRXSC%dSA%d InvalidPkts",
+ "MACSecRXSC%dSA%d OkPkts",
+ "MACSecRXSC%dSA%d LatePkts",
+ "MACSecRXSC%dSA%d DelayedPkts",
+ "MACSecRXSC%dSA%d UncheckedPkts",
+ "MACSecRXSC%dSA%d ValidatedOctets",
+ "MACSecRXSC%dSA%d DecryptedOctets",
+};
+#endif
+
+static const char aq_ethtool_priv_flag_names[][ETH_GSTRING_LEN] = {
+ "DMASystemLoopback",
+ "PKTSystemLoopback",
+ "DMANetworkLoopback",
+ "PHYInternalLoopback",
+ "PHYExternalLoopback",
+};
+
+static u32 aq_ethtool_n_stats(struct net_device *ndev)
+{
+ const int rx_stat_cnt = ARRAY_SIZE(aq_ethtool_queue_rx_stat_names);
+ const int tx_stat_cnt = ARRAY_SIZE(aq_ethtool_queue_tx_stat_names);
+ struct aq_nic_s *nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(nic);
+ u32 n_stats = ARRAY_SIZE(aq_ethtool_stat_names) +
+ (rx_stat_cnt + tx_stat_cnt) * cfg->vecs * cfg->tcs;
+
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
+ n_stats += rx_stat_cnt * aq_ptp_get_ring_cnt(nic, ATL_RING_RX) +
+ tx_stat_cnt * aq_ptp_get_ring_cnt(nic, ATL_RING_TX);
+#endif
+
+#if IS_ENABLED(CONFIG_MACSEC)
+ if (nic->macsec_cfg) {
+ n_stats += ARRAY_SIZE(aq_macsec_stat_names) +
+ ARRAY_SIZE(aq_macsec_txsc_stat_names) *
+ aq_macsec_tx_sc_cnt(nic) +
+ ARRAY_SIZE(aq_macsec_txsa_stat_names) *
+ aq_macsec_tx_sa_cnt(nic) +
+ ARRAY_SIZE(aq_macsec_rxsa_stat_names) *
+ aq_macsec_rx_sa_cnt(nic);
+ }
+#endif
+
+ return n_stats;
+}
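+/* Example: with the aq_cfg.h defaults (cfg->vecs = 8, cfg->tcs = 1) and
+ * neither PTP nor MACsec contributing, this is 22 global counters plus
+ * (16 RX + 2 TX) per-queue stats * 8 queues = 166 entries.
+ */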
+
+static void aq_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ memset(data, 0, aq_ethtool_n_stats(ndev) * sizeof(u64));
+ data = aq_nic_get_stats(aq_nic, data);
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
+ data = aq_ptp_get_stats(aq_nic, data);
+#endif
+#if IS_ENABLED(CONFIG_MACSEC)
+ data = aq_macsec_get_stats(aq_nic, data);
+#endif
+}
+
+static void aq_ethtool_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct pci_dev *pdev = to_pci_dev(ndev->dev.parent);
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ u32 firmware_version;
+ u32 regs_count;
+
+ firmware_version = aq_nic_get_fw_version(aq_nic);
+ regs_count = aq_nic_get_regs_count(aq_nic);
+
+ strlcat(drvinfo->driver, AQ_CFG_DRV_NAME, sizeof(drvinfo->driver));
+
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%u.%u.%u", firmware_version >> 24,
+ (firmware_version >> 16) & 0xFFU, firmware_version & 0xFFFFU);
+
+ strscpy(drvinfo->bus_info, pdev ? pci_name(pdev) : "",
+ sizeof(drvinfo->bus_info));
+ drvinfo->n_stats = aq_ethtool_n_stats(ndev);
+ drvinfo->testinfo_len = 0;
+ drvinfo->regdump_len = regs_count;
+ drvinfo->eedump_len = 0;
+}
+
+static void aq_ethtool_get_strings(struct net_device *ndev,
+ u32 stringset, u8 *data)
+{
+ struct aq_nic_s *nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *cfg;
+ u8 *p = data;
+ int i, si;
+#if IS_ENABLED(CONFIG_MACSEC)
+ int sa;
+#endif
+
+ cfg = aq_nic_get_cfg(nic);
+
+ switch (stringset) {
+ case ETH_SS_STATS: {
+ const int rx_stat_cnt = ARRAY_SIZE(aq_ethtool_queue_rx_stat_names);
+ const int tx_stat_cnt = ARRAY_SIZE(aq_ethtool_queue_tx_stat_names);
+ char tc_string[8];
+ int tc;
+
+ memset(tc_string, 0, sizeof(tc_string));
+ memcpy(p, aq_ethtool_stat_names,
+ sizeof(aq_ethtool_stat_names));
+ p = p + sizeof(aq_ethtool_stat_names);
+
+ for (tc = 0; tc < cfg->tcs; tc++) {
+ if (cfg->is_qos)
+ snprintf(tc_string, 8, "TC%d ", tc);
+
+ for (i = 0; i < cfg->vecs; i++) {
+ for (si = 0; si < rx_stat_cnt; si++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ aq_ethtool_queue_rx_stat_names[si],
+ tc_string,
+ AQ_NIC_CFG_TCVEC2RING(cfg, tc, i));
+ p += ETH_GSTRING_LEN;
+ }
+ for (si = 0; si < tx_stat_cnt; si++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ aq_ethtool_queue_tx_stat_names[si],
+ tc_string,
+ AQ_NIC_CFG_TCVEC2RING(cfg, tc, i));
+ p += ETH_GSTRING_LEN;
+ }
+ }
+ }
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
+ if (nic->aq_ptp) {
+ const int rx_ring_cnt = aq_ptp_get_ring_cnt(nic, ATL_RING_RX);
+ const int tx_ring_cnt = aq_ptp_get_ring_cnt(nic, ATL_RING_TX);
+ unsigned int ptp_ring_idx =
+ aq_ptp_ring_idx(nic->aq_nic_cfg.tc_mode);
+
+ snprintf(tc_string, 8, "PTP ");
+
+ for (i = 0; i < max(rx_ring_cnt, tx_ring_cnt); i++) {
+ for (si = 0; si < rx_stat_cnt; si++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ aq_ethtool_queue_rx_stat_names[si],
+ tc_string,
+ i ? PTP_HWST_RING_IDX : ptp_ring_idx);
+ p += ETH_GSTRING_LEN;
+ }
+ if (i >= tx_ring_cnt)
+ continue;
+ for (si = 0; si < tx_stat_cnt; si++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ aq_ethtool_queue_tx_stat_names[si],
+ tc_string,
+ i ? PTP_HWST_RING_IDX : ptp_ring_idx);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+ }
+#endif
+#if IS_ENABLED(CONFIG_MACSEC)
+ if (!nic->macsec_cfg)
+ break;
+
+ memcpy(p, aq_macsec_stat_names, sizeof(aq_macsec_stat_names));
+ p = p + sizeof(aq_macsec_stat_names);
+ for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
+ struct aq_macsec_txsc *aq_txsc;
+
+ if (!(test_bit(i, &nic->macsec_cfg->txsc_idx_busy)))
+ continue;
+
+ for (si = 0;
+ si < ARRAY_SIZE(aq_macsec_txsc_stat_names);
+ si++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ aq_macsec_txsc_stat_names[si], i);
+ p += ETH_GSTRING_LEN;
+ }
+ aq_txsc = &nic->macsec_cfg->aq_txsc[i];
+ for (sa = 0; sa < MACSEC_NUM_AN; sa++) {
+ if (!(test_bit(sa, &aq_txsc->tx_sa_idx_busy)))
+ continue;
+ for (si = 0;
+ si < ARRAY_SIZE(aq_macsec_txsa_stat_names);
+ si++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ aq_macsec_txsa_stat_names[si],
+ i, sa);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+ }
+ for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
+ struct aq_macsec_rxsc *aq_rxsc;
+
+ if (!(test_bit(i, &nic->macsec_cfg->rxsc_idx_busy)))
+ continue;
+
+ aq_rxsc = &nic->macsec_cfg->aq_rxsc[i];
+ for (sa = 0; sa < MACSEC_NUM_AN; sa++) {
+ if (!(test_bit(sa, &aq_rxsc->rx_sa_idx_busy)))
+ continue;
+ for (si = 0;
+ si < ARRAY_SIZE(aq_macsec_rxsa_stat_names);
+ si++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ aq_macsec_rxsa_stat_names[si],
+ i, sa);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+ }
+#endif
+ break;
+ }
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(p, aq_ethtool_priv_flag_names,
+ sizeof(aq_ethtool_priv_flag_names));
+ break;
+ }
+}
+
+static int aq_ethtool_set_phys_id(struct net_device *ndev,
+ enum ethtool_phys_id_state state)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_hw_s *hw = aq_nic->aq_hw;
+ int ret = 0;
+
+ if (!aq_nic->aq_fw_ops->led_control)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&aq_nic->fwreq_mutex);
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ ret = aq_nic->aq_fw_ops->led_control(hw, AQ_HW_LED_BLINK |
+ AQ_HW_LED_BLINK << 2 | AQ_HW_LED_BLINK << 4);
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ ret = aq_nic->aq_fw_ops->led_control(hw, AQ_HW_LED_DEFAULT);
+ break;
+ default:
+ break;
+ }
+
+ mutex_unlock(&aq_nic->fwreq_mutex);
+
+ return ret;
+}
+
+static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset)
+{
+ int ret = 0;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ ret = aq_ethtool_n_stats(ndev);
+ break;
+ case ETH_SS_PRIV_FLAGS:
+ ret = ARRAY_SIZE(aq_ethtool_priv_flag_names);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static u32 aq_ethtool_get_rss_indir_size(struct net_device *ndev)
+{
+ return AQ_CFG_RSS_INDIRECTION_TABLE_MAX;
+}
+
+static u32 aq_ethtool_get_rss_key_size(struct net_device *ndev)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *cfg;
+
+ cfg = aq_nic_get_cfg(aq_nic);
+
+ return sizeof(cfg->aq_rss.hash_secret_key);
+}
+
+static int aq_ethtool_get_rss(struct net_device *ndev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *cfg;
+ unsigned int i = 0U;
+
+ cfg = aq_nic_get_cfg(aq_nic);
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
+ if (indir) {
+ for (i = 0; i < AQ_CFG_RSS_INDIRECTION_TABLE_MAX; i++)
+ indir[i] = cfg->aq_rss.indirection_table[i];
+ }
+ if (key)
+ memcpy(key, cfg->aq_rss.hash_secret_key,
+ sizeof(cfg->aq_rss.hash_secret_key));
+
+ return 0;
+}
+
+static int aq_ethtool_set_rss(struct net_device *netdev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(netdev);
+ struct aq_nic_cfg_s *cfg;
+ unsigned int i = 0U;
+ u32 rss_entries;
+ int err = 0;
+
+ cfg = aq_nic_get_cfg(aq_nic);
+ rss_entries = cfg->aq_rss.indirection_table_size;
+
+	/* We do not allow changing unsupported parameters */
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+	/* Fill out the indirection table */
+ if (indir)
+ for (i = 0; i < rss_entries; i++)
+ cfg->aq_rss.indirection_table[i] = indir[i];
+
+ /* Fill out the rss hash key */
+ if (key) {
+ memcpy(cfg->aq_rss.hash_secret_key, key,
+ sizeof(cfg->aq_rss.hash_secret_key));
+ err = aq_nic->aq_hw_ops->hw_rss_hash_set(aq_nic->aq_hw,
+ &cfg->aq_rss);
+ if (err)
+ return err;
+ }
+
+ err = aq_nic->aq_hw_ops->hw_rss_set(aq_nic->aq_hw, &cfg->aq_rss);
+
+ return err;
+}
+
+static int aq_ethtool_get_rxnfc(struct net_device *ndev,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *cfg;
+ int err = 0;
+
+ cfg = aq_nic_get_cfg(aq_nic);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = cfg->vecs;
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = aq_get_rxnfc_count_all_rules(aq_nic);
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ err = aq_get_rxnfc_rule(aq_nic, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ err = aq_get_rxnfc_all_rules(aq_nic, cmd, rule_locs);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int aq_ethtool_set_rxnfc(struct net_device *ndev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ int err = 0;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ err = aq_add_rxnfc_rule(aq_nic, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ err = aq_del_rxnfc_rule(aq_nic, cmd);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int aq_ethtool_get_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *cfg;
+
+ cfg = aq_nic_get_cfg(aq_nic);
+
+ if (cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON ||
+ cfg->itr == AQ_CFG_INTERRUPT_MODERATION_AUTO) {
+ coal->rx_coalesce_usecs = cfg->rx_itr;
+ coal->tx_coalesce_usecs = cfg->tx_itr;
+ coal->rx_max_coalesced_frames = 0;
+ coal->tx_max_coalesced_frames = 0;
+ } else {
+ coal->rx_coalesce_usecs = 0;
+ coal->tx_coalesce_usecs = 0;
+ coal->rx_max_coalesced_frames = 1;
+ coal->tx_max_coalesced_frames = 1;
+ }
+
+ return 0;
+}
+
+static int aq_ethtool_set_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *cfg;
+
+ cfg = aq_nic_get_cfg(aq_nic);
+
+	/* Atlantic only supports timing-based coalescing
+ */
+ if (coal->rx_max_coalesced_frames > 1 ||
+ coal->tx_max_coalesced_frames > 1)
+ return -EOPNOTSUPP;
+
+	/* We do not support frame counting: max_coalesced_frames must be 0
+	 * when coalesce_usecs is non-zero, and 1 when it is zero. */
+ if (!(coal->rx_max_coalesced_frames == !coal->rx_coalesce_usecs))
+ return -EOPNOTSUPP;
+ if (!(coal->tx_max_coalesced_frames == !coal->tx_coalesce_usecs))
+ return -EOPNOTSUPP;
+
+ if (coal->rx_coalesce_usecs > AQ_CFG_INTERRUPT_MODERATION_USEC_MAX ||
+ coal->tx_coalesce_usecs > AQ_CFG_INTERRUPT_MODERATION_USEC_MAX)
+ return -EINVAL;
+
+ cfg->itr = AQ_CFG_INTERRUPT_MODERATION_ON;
+
+ cfg->rx_itr = coal->rx_coalesce_usecs;
+ cfg->tx_itr = coal->tx_coalesce_usecs;
+
+ return aq_nic_update_interrupt_moderation_settings(aq_nic);
+}
+
+static void aq_ethtool_get_wol(struct net_device *ndev,
+ struct ethtool_wolinfo *wol)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *cfg;
+
+ cfg = aq_nic_get_cfg(aq_nic);
+
+ wol->supported = AQ_NIC_WOL_MODES;
+ wol->wolopts = cfg->wol;
+}
+
+static int aq_ethtool_set_wol(struct net_device *ndev,
+ struct ethtool_wolinfo *wol)
+{
+ struct pci_dev *pdev = to_pci_dev(ndev->dev.parent);
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *cfg;
+ int err = 0;
+
+ cfg = aq_nic_get_cfg(aq_nic);
+
+ if (wol->wolopts & ~AQ_NIC_WOL_MODES)
+ return -EOPNOTSUPP;
+
+ cfg->wol = wol->wolopts;
+
+ err = device_set_wakeup_enable(&pdev->dev, !!cfg->wol);
+
+ return err;
+}
+
+static int aq_ethtool_get_ts_info(struct net_device *ndev,
+ struct ethtool_ts_info *info)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ ethtool_op_get_ts_info(ndev, info);
+
+ if (!aq_nic->aq_ptp)
+ return 0;
+
+ info->so_timestamping |=
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
+
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);
+
+ info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
+ info->phc_index = ptp_clock_index(aq_ptp_get_ptp_clock(aq_nic->aq_ptp));
+#endif
+
+ return 0;
+}
+
+static u32 eee_mask_to_ethtool_mask(u32 speed)
+{
+ u32 rate = 0;
+
+ if (speed & AQ_NIC_RATE_EEE_10G)
+ rate |= SUPPORTED_10000baseT_Full;
+
+ if (speed & AQ_NIC_RATE_EEE_1G)
+ rate |= SUPPORTED_1000baseT_Full;
+
+ if (speed & AQ_NIC_RATE_EEE_100M)
+ rate |= SUPPORTED_100baseT_Full;
+
+ return rate;
+}
+
+static int aq_ethtool_get_eee(struct net_device *ndev, struct ethtool_eee *eee)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ u32 rate, supported_rates;
+ int err = 0;
+
+ if (!aq_nic->aq_fw_ops->get_eee_rate)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&aq_nic->fwreq_mutex);
+ err = aq_nic->aq_fw_ops->get_eee_rate(aq_nic->aq_hw, &rate,
+ &supported_rates);
+ mutex_unlock(&aq_nic->fwreq_mutex);
+ if (err < 0)
+ return err;
+
+ eee->supported = eee_mask_to_ethtool_mask(supported_rates);
+
+ if (aq_nic->aq_nic_cfg.eee_speeds)
+ eee->advertised = eee->supported;
+
+ eee->lp_advertised = eee_mask_to_ethtool_mask(rate);
+
+ eee->eee_enabled = !!eee->advertised;
+
+ eee->tx_lpi_enabled = eee->eee_enabled;
+ if ((supported_rates & rate) & AQ_NIC_RATE_EEE_MSK)
+ eee->eee_active = true;
+
+ return 0;
+}
+
+static int aq_ethtool_set_eee(struct net_device *ndev, struct ethtool_eee *eee)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ u32 rate, supported_rates;
+ struct aq_nic_cfg_s *cfg;
+ int err = 0;
+
+ cfg = aq_nic_get_cfg(aq_nic);
+
+ if (unlikely(!aq_nic->aq_fw_ops->get_eee_rate ||
+ !aq_nic->aq_fw_ops->set_eee_rate))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&aq_nic->fwreq_mutex);
+ err = aq_nic->aq_fw_ops->get_eee_rate(aq_nic->aq_hw, &rate,
+ &supported_rates);
+ mutex_unlock(&aq_nic->fwreq_mutex);
+ if (err < 0)
+ return err;
+
+ if (eee->eee_enabled) {
+ rate = supported_rates;
+ cfg->eee_speeds = rate;
+ } else {
+ rate = 0;
+ cfg->eee_speeds = 0;
+ }
+
+ mutex_lock(&aq_nic->fwreq_mutex);
+ err = aq_nic->aq_fw_ops->set_eee_rate(aq_nic->aq_hw, rate);
+ mutex_unlock(&aq_nic->fwreq_mutex);
+
+ return err;
+}
+
+static int aq_ethtool_nway_reset(struct net_device *ndev)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ int err = 0;
+
+ if (unlikely(!aq_nic->aq_fw_ops->renegotiate))
+ return -EOPNOTSUPP;
+
+ if (netif_running(ndev)) {
+ mutex_lock(&aq_nic->fwreq_mutex);
+ err = aq_nic->aq_fw_ops->renegotiate(aq_nic->aq_hw);
+ mutex_unlock(&aq_nic->fwreq_mutex);
+ }
+
+ return err;
+}
+
+static void aq_ethtool_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ int fc = aq_nic->aq_nic_cfg.fc.req;
+
+ pause->autoneg = 0;
+
+ pause->rx_pause = !!(fc & AQ_NIC_FC_RX);
+ pause->tx_pause = !!(fc & AQ_NIC_FC_TX);
+}
+
+static int aq_ethtool_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ int err = 0;
+
+ if (!aq_nic->aq_fw_ops->set_flow_control)
+ return -EOPNOTSUPP;
+
+ if (pause->autoneg == AUTONEG_ENABLE)
+ return -EOPNOTSUPP;
+
+ if (pause->rx_pause)
+ aq_nic->aq_hw->aq_nic_cfg->fc.req |= AQ_NIC_FC_RX;
+ else
+ aq_nic->aq_hw->aq_nic_cfg->fc.req &= ~AQ_NIC_FC_RX;
+
+ if (pause->tx_pause)
+ aq_nic->aq_hw->aq_nic_cfg->fc.req |= AQ_NIC_FC_TX;
+ else
+ aq_nic->aq_hw->aq_nic_cfg->fc.req &= ~AQ_NIC_FC_TX;
+
+ mutex_lock(&aq_nic->fwreq_mutex);
+ err = aq_nic->aq_fw_ops->set_flow_control(aq_nic->aq_hw);
+ mutex_unlock(&aq_nic->fwreq_mutex);
+
+ return err;
+}
+
+static void aq_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *cfg;
+
+ cfg = aq_nic_get_cfg(aq_nic);
+
+ ring->rx_pending = cfg->rxds;
+ ring->tx_pending = cfg->txds;
+
+ ring->rx_max_pending = cfg->aq_hw_caps->rxds_max;
+ ring->tx_max_pending = cfg->aq_hw_caps->txds_max;
+}
+
+static int aq_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ const struct aq_hw_caps_s *hw_caps;
+ bool ndev_running = false;
+ struct aq_nic_cfg_s *cfg;
+ int err = 0;
+
+ cfg = aq_nic_get_cfg(aq_nic);
+ hw_caps = cfg->aq_hw_caps;
+
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending) {
+ err = -EOPNOTSUPP;
+ goto err_exit;
+ }
+
+ if (netif_running(ndev)) {
+ ndev_running = true;
+ aq_ndev_close(ndev);
+ }
+
+ cfg->rxds = max(ring->rx_pending, hw_caps->rxds_min);
+ cfg->rxds = min(cfg->rxds, hw_caps->rxds_max);
+ cfg->rxds = ALIGN(cfg->rxds, AQ_HW_RXD_MULTIPLE);
+
+ cfg->txds = max(ring->tx_pending, hw_caps->txds_min);
+ cfg->txds = min(cfg->txds, hw_caps->txds_max);
+ cfg->txds = ALIGN(cfg->txds, AQ_HW_TXD_MULTIPLE);
+
+ err = aq_nic_realloc_vectors(aq_nic);
+ if (err)
+ goto err_exit;
+
+ if (ndev_running)
+ err = aq_ndev_open(ndev);
+
+err_exit:
+ return err;
+}
+
+static u32 aq_get_msg_level(struct net_device *ndev)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ return aq_nic->msg_enable;
+}
+
+static void aq_set_msg_level(struct net_device *ndev, u32 data)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ aq_nic->msg_enable = data;
+}
+
+static u32 aq_ethtool_get_priv_flags(struct net_device *ndev)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ return aq_nic->aq_nic_cfg.priv_flags;
+}
+
+static int aq_ethtool_set_priv_flags(struct net_device *ndev, u32 flags)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *cfg;
+ u32 priv_flags;
+ int ret = 0;
+
+ cfg = aq_nic_get_cfg(aq_nic);
+ priv_flags = cfg->priv_flags;
+
+ if (flags & ~AQ_PRIV_FLAGS_MASK)
+ return -EOPNOTSUPP;
+
+ if (hweight32((flags | priv_flags) & AQ_HW_LOOPBACK_MASK) > 1) {
+ netdev_info(ndev, "Can't enable more than one loopback simultaneously\n");
+ return -EINVAL;
+ }
+
+ cfg->priv_flags = flags;
+
+ if ((priv_flags ^ flags) & BIT(AQ_HW_LOOPBACK_DMA_NET)) {
+ if (netif_running(ndev)) {
+ dev_close(ndev);
+
+ dev_open(ndev, NULL);
+ }
+ } else if ((priv_flags ^ flags) & AQ_HW_LOOPBACK_MASK) {
+ ret = aq_nic_set_loopback(aq_nic);
+ }
+
+ return ret;
+}
+
+static int aq_ethtool_get_phy_tunable(struct net_device *ndev,
+ const struct ethtool_tunable *tuna, void *data)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ switch (tuna->id) {
+ case ETHTOOL_PHY_EDPD: {
+ u16 *val = data;
+
+ *val = aq_nic->aq_nic_cfg.is_media_detect ? AQ_HW_MEDIA_DETECT_CNT : 0;
+ break;
+ }
+ case ETHTOOL_PHY_DOWNSHIFT: {
+ u8 *val = data;
+
+ *val = (u8)aq_nic->aq_nic_cfg.downshift_counter;
+ break;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int aq_ethtool_set_phy_tunable(struct net_device *ndev,
+ const struct ethtool_tunable *tuna, const void *data)
+{
+ int err = -EOPNOTSUPP;
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ switch (tuna->id) {
+ case ETHTOOL_PHY_EDPD: {
+ const u16 *val = data;
+
+ err = aq_nic_set_media_detect(aq_nic, *val);
+ break;
+ }
+ case ETHTOOL_PHY_DOWNSHIFT: {
+ const u8 *val = data;
+
+ err = aq_nic_set_downshift(aq_nic, *val);
+ break;
+ }
+ default:
+ break;
+ }
+
+ return err;
+}
+
+const struct ethtool_ops aq_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
+ .get_link = aq_ethtool_get_link,
+ .get_regs_len = aq_ethtool_get_regs_len,
+ .get_regs = aq_ethtool_get_regs,
+ .get_drvinfo = aq_ethtool_get_drvinfo,
+ .get_strings = aq_ethtool_get_strings,
+ .set_phys_id = aq_ethtool_set_phys_id,
+ .get_rxfh_indir_size = aq_ethtool_get_rss_indir_size,
+ .get_wol = aq_ethtool_get_wol,
+ .set_wol = aq_ethtool_set_wol,
+ .nway_reset = aq_ethtool_nway_reset,
+ .get_ringparam = aq_get_ringparam,
+ .set_ringparam = aq_set_ringparam,
+ .get_eee = aq_ethtool_get_eee,
+ .set_eee = aq_ethtool_set_eee,
+ .get_pauseparam = aq_ethtool_get_pauseparam,
+ .set_pauseparam = aq_ethtool_set_pauseparam,
+ .get_rxfh_key_size = aq_ethtool_get_rss_key_size,
+ .get_rxfh = aq_ethtool_get_rss,
+ .set_rxfh = aq_ethtool_set_rss,
+ .get_rxnfc = aq_ethtool_get_rxnfc,
+ .set_rxnfc = aq_ethtool_set_rxnfc,
+ .get_msglevel = aq_get_msg_level,
+ .set_msglevel = aq_set_msg_level,
+ .get_sset_count = aq_ethtool_get_sset_count,
+ .get_ethtool_stats = aq_ethtool_stats,
+ .get_priv_flags = aq_ethtool_get_priv_flags,
+ .set_priv_flags = aq_ethtool_set_priv_flags,
+ .get_link_ksettings = aq_ethtool_get_link_ksettings,
+ .set_link_ksettings = aq_ethtool_set_link_ksettings,
+ .get_coalesce = aq_ethtool_get_coalesce,
+ .set_coalesce = aq_ethtool_set_coalesce,
+ .get_ts_info = aq_ethtool_get_ts_info,
+ .get_phy_tunable = aq_ethtool_get_phy_tunable,
+ .set_phy_tunable = aq_ethtool_set_phy_tunable,
+};
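These ops are exercised through the standard ethtool interfaces. For
reference, a minimal standalone userspace query of the coalescing settings
via the classic SIOCETHTOOL ioctl ("eth0" is a placeholder interface name):

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    int main(void)
    {
    	struct ethtool_coalesce ec = { .cmd = ETHTOOL_GCOALESCE };
    	struct ifreq ifr;
    	int fd = socket(AF_INET, SOCK_DGRAM, 0);

    	memset(&ifr, 0, sizeof(ifr));
    	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); /* placeholder */
    	ifr.ifr_data = (void *)&ec;

    	if (fd >= 0 && ioctl(fd, SIOCETHTOOL, &ifr) == 0)
    		printf("rx-usecs %u tx-usecs %u\n",
    		       ec.rx_coalesce_usecs, ec.tx_coalesce_usecs);
    	if (fd >= 0)
    		close(fd);
    	return 0;
    }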
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h
new file mode 100644
index 000000000..6d5be5ebe
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ */
+
+/* File aq_ethtool.h: Declaration of ethtool-related functions. */
+
+#ifndef AQ_ETHTOOL_H
+#define AQ_ETHTOOL_H
+
+#include "aq_common.h"
+
+extern const struct ethtool_ops aq_ethtool_ops;
+#define AQ_PRIV_FLAGS_MASK (AQ_HW_LOOPBACK_MASK)
+
+#endif /* AQ_ETHTOOL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
new file mode 100644
index 000000000..30a573db0
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
@@ -0,0 +1,889 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2014-2019 aQuantia Corporation. */
+
+/* File aq_filters.c: RX filter-related functions. */
+
+#include "aq_filters.h"
+
+static bool __must_check
+aq_rule_is_approve(struct ethtool_rx_flow_spec *fsp)
+{
+ if (fsp->flow_type & FLOW_MAC_EXT)
+ return false;
+
+ switch (fsp->flow_type & ~FLOW_EXT) {
+ case ETHER_FLOW:
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ return true;
+ case IP_USER_FLOW:
+ switch (fsp->h_u.usr_ip4_spec.proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ case IPPROTO_SCTP:
+ case IPPROTO_IP:
+ return true;
+ default:
+ return false;
+ }
+ case IPV6_USER_FLOW:
+ switch (fsp->h_u.usr_ip6_spec.l4_proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ case IPPROTO_SCTP:
+ case IPPROTO_IP:
+ return true;
+ default:
+ return false;
+ }
+ default:
+ return false;
+ }
+
+ return false;
+}
+
+static bool __must_check
+aq_match_filter(struct ethtool_rx_flow_spec *fsp1,
+ struct ethtool_rx_flow_spec *fsp2)
+{
+ if (fsp1->flow_type != fsp2->flow_type ||
+ memcmp(&fsp1->h_u, &fsp2->h_u, sizeof(fsp2->h_u)) ||
+ memcmp(&fsp1->h_ext, &fsp2->h_ext, sizeof(fsp2->h_ext)) ||
+ memcmp(&fsp1->m_u, &fsp2->m_u, sizeof(fsp2->m_u)) ||
+ memcmp(&fsp1->m_ext, &fsp2->m_ext, sizeof(fsp2->m_ext)))
+ return false;
+
+ return true;
+}
+
+static bool __must_check
+aq_rule_already_exists(struct aq_nic_s *aq_nic,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ struct aq_rx_filter *rule;
+ struct hlist_node *aq_node2;
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ if (rule->aq_fsp.location == fsp->location)
+ continue;
+ if (aq_match_filter(&rule->aq_fsp, fsp)) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: This filter is already set\n");
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int aq_check_approve_fl3l4(struct aq_nic_s *aq_nic,
+ struct aq_hw_rx_fltrs_s *rx_fltrs,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ u32 last_location = AQ_RX_LAST_LOC_FL3L4 -
+ aq_nic->aq_hw_rx_fltrs.fl3l4.reserved_count;
+
+ if (fsp->location < AQ_RX_FIRST_LOC_FL3L4 ||
+ fsp->location > last_location) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: location must be in range [%d, %d]",
+ AQ_RX_FIRST_LOC_FL3L4, last_location);
+ return -EINVAL;
+ }
+ if (rx_fltrs->fl3l4.is_ipv6 && rx_fltrs->fl3l4.active_ipv4) {
+ rx_fltrs->fl3l4.is_ipv6 = false;
+ netdev_err(aq_nic->ndev,
+ "ethtool: mixing ipv4 and ipv6 is not allowed");
+ return -EINVAL;
+ } else if (!rx_fltrs->fl3l4.is_ipv6 && rx_fltrs->fl3l4.active_ipv6) {
+ rx_fltrs->fl3l4.is_ipv6 = true;
+ netdev_err(aq_nic->ndev,
+ "ethtool: mixing ipv4 and ipv6 is not allowed");
+ return -EINVAL;
+ } else if (rx_fltrs->fl3l4.is_ipv6 &&
+ fsp->location != AQ_RX_FIRST_LOC_FL3L4 + 4 &&
+ fsp->location != AQ_RX_FIRST_LOC_FL3L4) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: The specified location for ipv6 must be %d or %d",
+ AQ_RX_FIRST_LOC_FL3L4, AQ_RX_FIRST_LOC_FL3L4 + 4);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __must_check
+aq_check_approve_fl2(struct aq_nic_s *aq_nic,
+ struct aq_hw_rx_fltrs_s *rx_fltrs,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ u32 last_location = AQ_RX_LAST_LOC_FETHERT -
+ aq_nic->aq_hw_rx_fltrs.fet_reserved_count;
+
+ if (fsp->location < AQ_RX_FIRST_LOC_FETHERT ||
+ fsp->location > last_location) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: location must be in range [%d, %d]",
+ AQ_RX_FIRST_LOC_FETHERT,
+ last_location);
+ return -EINVAL;
+ }
+
+ if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_PRIO_MASK &&
+ fsp->m_u.ether_spec.h_proto == 0U) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: proto (ether_type) parameter must be specified");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __must_check
+aq_check_approve_fvlan(struct aq_nic_s *aq_nic,
+ struct aq_hw_rx_fltrs_s *rx_fltrs,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ struct aq_nic_cfg_s *cfg = &aq_nic->aq_nic_cfg;
+
+ if (fsp->location < AQ_RX_FIRST_LOC_FVLANID ||
+ fsp->location > AQ_RX_LAST_LOC_FVLANID) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: location must be in range [%d, %d]",
+ AQ_RX_FIRST_LOC_FVLANID,
+ AQ_RX_LAST_LOC_FVLANID);
+ return -EINVAL;
+ }
+
+ if ((aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ (!test_bit(be16_to_cpu(fsp->h_ext.vlan_tci) & VLAN_VID_MASK,
+ aq_nic->active_vlans))) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: unknown vlan-id specified");
+ return -EINVAL;
+ }
+
+ if (fsp->ring_cookie > cfg->num_rss_queues * cfg->tcs) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: queue number must be in range [0, %d]",
+ cfg->num_rss_queues * cfg->tcs - 1);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int __must_check
+aq_check_filter(struct aq_nic_s *aq_nic,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ int err = 0;
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+
+ if (fsp->flow_type & FLOW_EXT) {
+ if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_VID_MASK) {
+ err = aq_check_approve_fvlan(aq_nic, rx_fltrs, fsp);
+ } else if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_PRIO_MASK) {
+ err = aq_check_approve_fl2(aq_nic, rx_fltrs, fsp);
+ } else {
+ netdev_err(aq_nic->ndev,
+ "ethtool: invalid vlan mask 0x%x specified",
+ be16_to_cpu(fsp->m_ext.vlan_tci));
+ err = -EINVAL;
+ }
+ } else {
+ switch (fsp->flow_type & ~FLOW_EXT) {
+ case ETHER_FLOW:
+ err = aq_check_approve_fl2(aq_nic, rx_fltrs, fsp);
+ break;
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case IPV4_FLOW:
+ case IP_USER_FLOW:
+ rx_fltrs->fl3l4.is_ipv6 = false;
+ err = aq_check_approve_fl3l4(aq_nic, rx_fltrs, fsp);
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case IPV6_FLOW:
+ case IPV6_USER_FLOW:
+ rx_fltrs->fl3l4.is_ipv6 = true;
+ err = aq_check_approve_fl3l4(aq_nic, rx_fltrs, fsp);
+ break;
+ default:
+ netdev_err(aq_nic->ndev,
+ "ethtool: unknown flow-type specified");
+ err = -EINVAL;
+ }
+ }
+
+ return err;
+}
+
+static bool __must_check
+aq_rule_is_not_support(struct aq_nic_s *aq_nic,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ bool rule_is_not_support = false;
+
+ if (!(aq_nic->ndev->features & NETIF_F_NTUPLE)) {
+ netdev_err(aq_nic->ndev,
+			   "ethtool: To insert RX flow rules, first enable ntuple filtering:\n"
+ "ethtool -K %s ntuple on\n", aq_nic->ndev->name);
+ rule_is_not_support = true;
+ } else if (!aq_rule_is_approve(fsp)) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: The specified flow type is not supported\n");
+ rule_is_not_support = true;
+ } else if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW &&
+ (fsp->h_u.tcp_ip4_spec.tos ||
+ fsp->h_u.tcp_ip6_spec.tclass)) {
+ netdev_err(aq_nic->ndev,
+			   "ethtool: The specified tos/tclass fields are not supported\n");
+ rule_is_not_support = true;
+ } else if (fsp->flow_type & FLOW_MAC_EXT) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: MAC_EXT is not supported");
+ rule_is_not_support = true;
+ }
+
+ return rule_is_not_support;
+}
+
+static bool __must_check
+aq_rule_is_not_correct(struct aq_nic_s *aq_nic,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ struct aq_nic_cfg_s *cfg = &aq_nic->aq_nic_cfg;
+ bool rule_is_not_correct = false;
+
+ if (!aq_nic) {
+ rule_is_not_correct = true;
+ } else if (fsp->location > AQ_RX_MAX_RXNFC_LOC) {
+ netdev_err(aq_nic->ndev,
+			   "ethtool: The specified rule location %u is invalid\n",
+ fsp->location);
+ rule_is_not_correct = true;
+ } else if (aq_check_filter(aq_nic, fsp)) {
+ rule_is_not_correct = true;
+ } else if (fsp->ring_cookie != RX_CLS_FLOW_DISC) {
+ if (fsp->ring_cookie >= cfg->num_rss_queues * cfg->tcs) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: The specified action is invalid.\n"
+				   "Maximum allowable action value is %u.\n",
+ cfg->num_rss_queues * cfg->tcs - 1);
+ rule_is_not_correct = true;
+ }
+ }
+
+ return rule_is_not_correct;
+}
+
+static int __must_check
+aq_check_rule(struct aq_nic_s *aq_nic,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ int err = 0;
+
+ if (aq_rule_is_not_correct(aq_nic, fsp))
+ err = -EINVAL;
+ else if (aq_rule_is_not_support(aq_nic, fsp))
+ err = -EOPNOTSUPP;
+ else if (aq_rule_already_exists(aq_nic, fsp))
+ err = -EEXIST;
+
+ return err;
+}
+
+static void aq_set_data_fl2(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr,
+ struct aq_rx_filter_l2 *data, bool add)
+{
+ const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;
+
+ memset(data, 0, sizeof(*data));
+
+ data->location = fsp->location - AQ_RX_FIRST_LOC_FETHERT;
+
+ if (fsp->ring_cookie != RX_CLS_FLOW_DISC)
+ data->queue = fsp->ring_cookie;
+ else
+ data->queue = -1;
+
+ data->ethertype = be16_to_cpu(fsp->h_u.ether_spec.h_proto);
+ data->user_priority_en = be16_to_cpu(fsp->m_ext.vlan_tci)
+ == VLAN_PRIO_MASK;
+ data->user_priority = (be16_to_cpu(fsp->h_ext.vlan_tci)
+ & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+}
+
+static int aq_add_del_fether(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr, bool add)
+{
+ struct aq_rx_filter_l2 data;
+ struct aq_hw_s *aq_hw = aq_nic->aq_hw;
+ const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
+
+ aq_set_data_fl2(aq_nic, aq_rx_fltr, &data, add);
+
+ if (unlikely(!aq_hw_ops->hw_filter_l2_set))
+ return -EOPNOTSUPP;
+ if (unlikely(!aq_hw_ops->hw_filter_l2_clear))
+ return -EOPNOTSUPP;
+
+ if (add)
+ return aq_hw_ops->hw_filter_l2_set(aq_hw, &data);
+ else
+ return aq_hw_ops->hw_filter_l2_clear(aq_hw, &data);
+}
+
+static bool aq_fvlan_is_busy(struct aq_rx_filter_vlan *aq_vlans, int vlan)
+{
+ int i;
+
+ for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
+ if (aq_vlans[i].enable &&
+ aq_vlans[i].queue != AQ_RX_QUEUE_NOT_ASSIGNED &&
+ aq_vlans[i].vlan_id == vlan) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/* Rebuild the array of VLAN filters so that filters with an assigned queue
+ * take precedence over plain VLANs active on the interface.
+ */
+static void aq_fvlan_rebuild(struct aq_nic_s *aq_nic,
+ unsigned long *active_vlans,
+ struct aq_rx_filter_vlan *aq_vlans)
+{
+ bool vlan_busy = false;
+ int vlan = -1;
+ int i;
+
+ for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
+ if (aq_vlans[i].enable &&
+ aq_vlans[i].queue != AQ_RX_QUEUE_NOT_ASSIGNED)
+ continue;
+ do {
+ vlan = find_next_bit(active_vlans,
+ VLAN_N_VID,
+ vlan + 1);
+ if (vlan == VLAN_N_VID) {
+ aq_vlans[i].enable = 0U;
+ aq_vlans[i].queue = AQ_RX_QUEUE_NOT_ASSIGNED;
+ aq_vlans[i].vlan_id = 0;
+ continue;
+ }
+
+ vlan_busy = aq_fvlan_is_busy(aq_vlans, vlan);
+ if (!vlan_busy) {
+ aq_vlans[i].enable = 1U;
+ aq_vlans[i].queue = AQ_RX_QUEUE_NOT_ASSIGNED;
+ aq_vlans[i].vlan_id = vlan;
+ }
+ } while (vlan_busy && vlan != VLAN_N_VID);
+ }
+}
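+/* Example: with VLANs 10 and 20 active on the interface and an ethtool rule
+ * steering VLAN 20 to queue 3, the rebuild keeps the VLAN 20 entry (enabled,
+ * queue 3) in place and fills the remaining slots from active_vlans, here
+ * VLAN 10, enabled but with no queue assigned.
+ */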
+
+static int aq_set_data_fvlan(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr,
+ struct aq_rx_filter_vlan *aq_vlans, bool add)
+{
+ const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;
+ int location = fsp->location - AQ_RX_FIRST_LOC_FVLANID;
+ int i;
+
+ memset(&aq_vlans[location], 0, sizeof(aq_vlans[location]));
+
+ if (!add)
+ return 0;
+
+ /* remove vlan if it was in table without queue assignment */
+ for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
+ if (aq_vlans[i].vlan_id ==
+ (be16_to_cpu(fsp->h_ext.vlan_tci) & VLAN_VID_MASK)) {
+ aq_vlans[i].enable = false;
+ }
+ }
+
+ aq_vlans[location].location = location;
+ aq_vlans[location].vlan_id = be16_to_cpu(fsp->h_ext.vlan_tci)
+ & VLAN_VID_MASK;
+ aq_vlans[location].queue = fsp->ring_cookie & 0x1FU;
+ aq_vlans[location].enable = 1U;
+
+ return 0;
+}
+
+int aq_del_fvlan_by_vlan(struct aq_nic_s *aq_nic, u16 vlan_id)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct aq_rx_filter *rule = NULL;
+ struct hlist_node *aq_node2;
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ if (be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id)
+ break;
+ }
+ if (rule && rule->type == aq_rx_filter_vlan &&
+ be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) {
+ struct ethtool_rxnfc cmd;
+
+ cmd.fs.location = rule->aq_fsp.location;
+ return aq_del_rxnfc_rule(aq_nic, &cmd);
+ }
+
+ return -ENOENT;
+}
+
+static int aq_add_del_fvlan(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr, bool add)
+{
+ const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
+
+ if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
+ return -EOPNOTSUPP;
+
+ aq_set_data_fvlan(aq_nic,
+ aq_rx_fltr,
+ aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans,
+ add);
+
+ return aq_filters_vlans_update(aq_nic);
+}
+
+static int aq_set_data_fl3l4(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr,
+ struct aq_rx_filter_l3l4 *data, bool add)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;
+
+ memset(data, 0, sizeof(*data));
+
+ data->is_ipv6 = rx_fltrs->fl3l4.is_ipv6;
+ data->location = HW_ATL_GET_REG_LOCATION_FL3L4(fsp->location);
+
+ if (!add) {
+ if (!data->is_ipv6)
+ rx_fltrs->fl3l4.active_ipv4 &= ~BIT(data->location);
+ else
+ rx_fltrs->fl3l4.active_ipv6 &=
+ ~BIT((data->location) / 4);
+
+ return 0;
+ }
+
+ data->cmd |= HW_ATL_RX_ENABLE_FLTR_L3L4;
+
+ switch (fsp->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ data->cmd |= HW_ATL_RX_UDP;
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ data->cmd |= HW_ATL_RX_SCTP;
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
+ break;
+ default:
+ break;
+ }
+
+ if (!data->is_ipv6) {
+ data->ip_src[0] =
+ ntohl(fsp->h_u.tcp_ip4_spec.ip4src);
+ data->ip_dst[0] =
+ ntohl(fsp->h_u.tcp_ip4_spec.ip4dst);
+ rx_fltrs->fl3l4.active_ipv4 |= BIT(data->location);
+ } else {
+ int i;
+
+ rx_fltrs->fl3l4.active_ipv6 |= BIT((data->location) / 4);
+ for (i = 0; i < HW_ATL_RX_CNT_REG_ADDR_IPV6; ++i) {
+ data->ip_dst[i] =
+ ntohl(fsp->h_u.tcp_ip6_spec.ip6dst[i]);
+ data->ip_src[i] =
+ ntohl(fsp->h_u.tcp_ip6_spec.ip6src[i]);
+ }
+ data->cmd |= HW_ATL_RX_ENABLE_L3_IPV6;
+ }
+ if (fsp->flow_type != IP_USER_FLOW &&
+ fsp->flow_type != IPV6_USER_FLOW) {
+ if (!data->is_ipv6) {
+ data->p_dst =
+ ntohs(fsp->h_u.tcp_ip4_spec.pdst);
+ data->p_src =
+ ntohs(fsp->h_u.tcp_ip4_spec.psrc);
+ } else {
+ data->p_dst =
+ ntohs(fsp->h_u.tcp_ip6_spec.pdst);
+ data->p_src =
+ ntohs(fsp->h_u.tcp_ip6_spec.psrc);
+ }
+ }
+ if (data->ip_src[0] && !data->is_ipv6)
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_SRC_ADDR_L3;
+ if (data->ip_dst[0] && !data->is_ipv6)
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_DEST_ADDR_L3;
+ if (data->p_dst)
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4;
+ if (data->p_src)
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_SRC_PORT_L4;
+ if (fsp->ring_cookie != RX_CLS_FLOW_DISC) {
+ data->cmd |= HW_ATL_RX_HOST << HW_ATL_RX_ACTION_FL3F4_SHIFT;
+ data->cmd |= fsp->ring_cookie << HW_ATL_RX_QUEUE_FL3L4_SHIFT;
+ data->cmd |= HW_ATL_RX_ENABLE_QUEUE_L3L4;
+ } else {
+ data->cmd |= HW_ATL_RX_DISCARD << HW_ATL_RX_ACTION_FL3F4_SHIFT;
+ }
+
+ return 0;
+}
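+/* Note: an IPv6 address spans HW_ATL_RX_CNT_REG_ADDR_IPV6 32-bit address
+ * registers, so one IPv6 filter occupies four consecutive L3L4 locations.
+ * That is why active_ipv6 is tracked as BIT(location / 4) and why
+ * aq_check_approve_fl3l4() accepts IPv6 rules only at the first and fifth
+ * L3L4 locations.
+ */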
+
+static int aq_set_fl3l4(struct aq_hw_s *aq_hw,
+ const struct aq_hw_ops *aq_hw_ops,
+ struct aq_rx_filter_l3l4 *data)
+{
+ if (unlikely(!aq_hw_ops->hw_filter_l3l4_set))
+ return -EOPNOTSUPP;
+
+ return aq_hw_ops->hw_filter_l3l4_set(aq_hw, data);
+}
+
+static int aq_add_del_fl3l4(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr, bool add)
+{
+ const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
+ struct aq_hw_s *aq_hw = aq_nic->aq_hw;
+ struct aq_rx_filter_l3l4 data;
+
+ if (unlikely(aq_rx_fltr->aq_fsp.location < AQ_RX_FIRST_LOC_FL3L4 ||
+ aq_rx_fltr->aq_fsp.location > AQ_RX_LAST_LOC_FL3L4 ||
+ aq_set_data_fl3l4(aq_nic, aq_rx_fltr, &data, add)))
+ return -EINVAL;
+
+ return aq_set_fl3l4(aq_hw, aq_hw_ops, &data);
+}
+
+static int aq_add_del_rule(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr, bool add)
+{
+ int err = -EINVAL;
+
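+ /* FLOW_EXT rules are dispatched on the VLAN TCI mask: a match
+ * on the full VID maps to a VLAN filter, a match on the PCP
+ * bits maps to an ethertype filter.
+ */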
+ if (aq_rx_fltr->aq_fsp.flow_type & FLOW_EXT) {
+ if (be16_to_cpu(aq_rx_fltr->aq_fsp.m_ext.vlan_tci)
+ == VLAN_VID_MASK) {
+ aq_rx_fltr->type = aq_rx_filter_vlan;
+ err = aq_add_del_fvlan(aq_nic, aq_rx_fltr, add);
+ } else if (be16_to_cpu(aq_rx_fltr->aq_fsp.m_ext.vlan_tci)
+ == VLAN_PRIO_MASK) {
+ aq_rx_fltr->type = aq_rx_filter_ethertype;
+ err = aq_add_del_fether(aq_nic, aq_rx_fltr, add);
+ }
+ } else {
+ switch (aq_rx_fltr->aq_fsp.flow_type & ~FLOW_EXT) {
+ case ETHER_FLOW:
+ aq_rx_fltr->type = aq_rx_filter_ethertype;
+ err = aq_add_del_fether(aq_nic, aq_rx_fltr, add);
+ break;
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case IP_USER_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case IPV6_USER_FLOW:
+ aq_rx_fltr->type = aq_rx_filter_l3l4;
+ err = aq_add_del_fl3l4(aq_nic, aq_rx_fltr, add);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ return err;
+}
+
+static int aq_update_table_filters(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr, u16 index,
+ struct ethtool_rxnfc *cmd)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct aq_rx_filter *rule = NULL, *parent = NULL;
+ struct hlist_node *aq_node2;
+ int err = -EINVAL;
+
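+ /* The rule list is kept sorted by location: find the insertion
+ * point and drop any existing rule already stored at @index.
+ */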
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ if (rule->aq_fsp.location >= index)
+ break;
+ parent = rule;
+ }
+
+ if (rule && rule->aq_fsp.location == index) {
+ err = aq_add_del_rule(aq_nic, rule, false);
+ hlist_del(&rule->aq_node);
+ kfree(rule);
+ --rx_fltrs->active_filters;
+ }
+
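+ /* A NULL aq_rx_fltr means the caller only wanted the old rule
+ * at @index removed.
+ */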
+ if (unlikely(!aq_rx_fltr))
+ return err;
+
+ INIT_HLIST_NODE(&aq_rx_fltr->aq_node);
+
+ if (parent)
+ hlist_add_behind(&aq_rx_fltr->aq_node, &parent->aq_node);
+ else
+ hlist_add_head(&aq_rx_fltr->aq_node, &rx_fltrs->filter_list);
+
+ ++rx_fltrs->active_filters;
+
+ return 0;
+}
+
+u16 aq_get_rxnfc_count_all_rules(struct aq_nic_s *aq_nic)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+
+ return rx_fltrs->active_filters;
+}
+
+struct aq_hw_rx_fltrs_s *aq_get_hw_rx_fltrs(struct aq_nic_s *aq_nic)
+{
+ return &aq_nic->aq_hw_rx_fltrs;
+}
+
+int aq_add_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct aq_rx_filter *aq_rx_fltr;
+ int err = 0;
+
+ err = aq_check_rule(aq_nic, fsp);
+ if (err)
+ goto err_exit;
+
+ aq_rx_fltr = kzalloc(sizeof(*aq_rx_fltr), GFP_KERNEL);
+ if (unlikely(!aq_rx_fltr)) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+ memcpy(&aq_rx_fltr->aq_fsp, fsp, sizeof(*fsp));
+
+ err = aq_update_table_filters(aq_nic, aq_rx_fltr, fsp->location, NULL);
+ if (unlikely(err))
+ goto err_free;
+
+ err = aq_add_del_rule(aq_nic, aq_rx_fltr, true);
+ if (unlikely(err)) {
+ hlist_del(&aq_rx_fltr->aq_node);
+ --rx_fltrs->active_filters;
+ goto err_free;
+ }
+
+ return 0;
+
+err_free:
+ kfree(aq_rx_fltr);
+err_exit:
+ return err;
+}
+
+int aq_del_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct aq_rx_filter *rule = NULL;
+ struct hlist_node *aq_node2;
+ int err = -EINVAL;
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ if (rule->aq_fsp.location == cmd->fs.location)
+ break;
+ }
+
+ if (rule && rule->aq_fsp.location == cmd->fs.location) {
+ err = aq_add_del_rule(aq_nic, rule, false);
+ hlist_del(&rule->aq_node);
+ kfree(rule);
+ --rx_fltrs->active_filters;
+ }
+ return err;
+}
+
+int aq_get_rxnfc_rule(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct aq_rx_filter *rule = NULL;
+ struct hlist_node *aq_node2;
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node)
+ if (fsp->location <= rule->aq_fsp.location)
+ break;
+
+ if (unlikely(!rule || fsp->location != rule->aq_fsp.location))
+ return -EINVAL;
+
+ memcpy(fsp, &rule->aq_fsp, sizeof(*fsp));
+
+ return 0;
+}
+
+int aq_get_rxnfc_all_rules(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct hlist_node *aq_node2;
+ struct aq_rx_filter *rule;
+ int count = 0;
+
+ cmd->data = aq_get_rxnfc_count_all_rules(aq_nic);
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ if (unlikely(count == cmd->rule_cnt))
+ return -EMSGSIZE;
+
+ rule_locs[count++] = rule->aq_fsp.location;
+ }
+
+ cmd->rule_cnt = count;
+
+ return 0;
+}
+
+int aq_clear_rxnfc_all_rules(struct aq_nic_s *aq_nic)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct hlist_node *aq_node2;
+ struct aq_rx_filter *rule;
+ int err = 0;
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ err = aq_add_del_rule(aq_nic, rule, false);
+ if (err)
+ goto err_exit;
+ hlist_del(&rule->aq_node);
+ kfree(rule);
+ --rx_fltrs->active_filters;
+ }
+
+err_exit:
+ return err;
+}
+
+int aq_reapply_rxnfc_all_rules(struct aq_nic_s *aq_nic)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct hlist_node *aq_node2;
+ struct aq_rx_filter *rule;
+ int err = 0;
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ err = aq_add_del_rule(aq_nic, rule, true);
+ if (err)
+ goto err_exit;
+ }
+
+err_exit:
+ return err;
+}
+
+int aq_filters_vlans_update(struct aq_nic_s *aq_nic)
+{
+ const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
+ struct aq_hw_s *aq_hw = aq_nic->aq_hw;
+ int hweight = 0;
+ int err = 0;
+
+ if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
+ return -EOPNOTSUPP;
+ if (unlikely(!aq_hw_ops->hw_filter_vlan_ctrl))
+ return -EOPNOTSUPP;
+
+ aq_fvlan_rebuild(aq_nic, aq_nic->active_vlans,
+ aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
+
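+ /* Filtering is switched off while the table is rewritten so
+ * that frames are not matched against a half-updated table.
+ */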
+ if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ hweight = bitmap_weight(aq_nic->active_vlans, VLAN_N_VID);
+
+ err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false);
+ if (err)
+ return err;
+ }
+
+ err = aq_hw_ops->hw_filter_vlan_set(aq_hw,
+ aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
+ if (err)
+ return err;
+
+ if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ if (hweight <= AQ_VLAN_MAX_FILTERS && hweight > 0) {
+ err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw,
+ !(aq_nic->packet_filter & IFF_PROMISC));
+ aq_nic->aq_nic_cfg.is_vlan_force_promisc = false;
+ } else {
+ /* otherwise leave the device in promiscuous mode */
+ aq_nic->aq_nic_cfg.is_vlan_force_promisc = true;
+ }
+ }
+
+ return err;
+}
+
+int aq_filters_vlan_offload_off(struct aq_nic_s *aq_nic)
+{
+ const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
+ struct aq_hw_s *aq_hw = aq_nic->aq_hw;
+ int err = 0;
+
+ bitmap_zero(aq_nic->active_vlans, VLAN_N_VID);
+ aq_fvlan_rebuild(aq_nic, aq_nic->active_vlans,
+ aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
+
+ if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
+ return -EOPNOTSUPP;
+ if (unlikely(!aq_hw_ops->hw_filter_vlan_ctrl))
+ return -EOPNOTSUPP;
+
+ aq_nic->aq_nic_cfg.is_vlan_force_promisc = true;
+ err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false);
+ if (err)
+ return err;
+ err = aq_hw_ops->hw_filter_vlan_set(aq_hw,
+ aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
+ return err;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.h b/drivers/net/ethernet/aquantia/atlantic/aq_filters.h
new file mode 100644
index 000000000..122e06c88
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2014-2017 aQuantia Corporation. */
+
+/* File aq_filters.h: RX filters related functions. */
+
+#ifndef AQ_FILTERS_H
+#define AQ_FILTERS_H
+
+#include "aq_nic.h"
+
+enum aq_rx_filter_type {
+ aq_rx_filter_ethertype,
+ aq_rx_filter_vlan,
+ aq_rx_filter_l3l4
+};
+
+struct aq_rx_filter {
+ struct hlist_node aq_node;
+ enum aq_rx_filter_type type;
+ struct ethtool_rx_flow_spec aq_fsp;
+};
+
+u16 aq_get_rxnfc_count_all_rules(struct aq_nic_s *aq_nic);
+struct aq_hw_rx_fltrs_s *aq_get_hw_rx_fltrs(struct aq_nic_s *aq_nic);
+int aq_add_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd);
+int aq_del_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd);
+int aq_get_rxnfc_rule(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd);
+int aq_get_rxnfc_all_rules(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs);
+int aq_del_fvlan_by_vlan(struct aq_nic_s *aq_nic, u16 vlan_id);
+int aq_clear_rxnfc_all_rules(struct aq_nic_s *aq_nic);
+int aq_reapply_rxnfc_all_rules(struct aq_nic_s *aq_nic);
+int aq_filters_vlans_update(struct aq_nic_s *aq_nic);
+int aq_filters_vlan_offload_off(struct aq_nic_s *aq_nic);
+
+#endif /* AQ_FILTERS_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
new file mode 100644
index 000000000..dbd284660
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -0,0 +1,404 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
+ */
+
+/* File aq_hw.h: Declaration of abstract interface for NIC hardware specific
+ * functions.
+ */
+
+#ifndef AQ_HW_H
+#define AQ_HW_H
+
+#include "aq_common.h"
+#include "aq_rss.h"
+#include "hw_atl/hw_atl_utils.h"
+
+#define AQ_HW_MAC_COUNTER_HZ 312500000ll
+#define AQ_HW_PHY_COUNTER_HZ 160000000ll
+
+enum aq_tc_mode {
+ AQ_TC_MODE_INVALID = -1,
+ AQ_TC_MODE_8TCS,
+ AQ_TC_MODE_4TCS,
+};
+
+#define AQ_RX_FIRST_LOC_FVLANID 0U
+#define AQ_RX_LAST_LOC_FVLANID 15U
+#define AQ_RX_FIRST_LOC_FETHERT 16U
+#define AQ_RX_LAST_LOC_FETHERT 31U
+#define AQ_RX_FIRST_LOC_FL3L4 32U
+#define AQ_RX_LAST_LOC_FL3L4 39U
+#define AQ_RX_MAX_RXNFC_LOC AQ_RX_LAST_LOC_FL3L4
+#define AQ_VLAN_MAX_FILTERS \
+ (AQ_RX_LAST_LOC_FVLANID - AQ_RX_FIRST_LOC_FVLANID + 1U)
+#define AQ_RX_QUEUE_NOT_ASSIGNED 0xFFU
+
+#define AQ_FRAC_PER_NS 0x100000000LL
+
+/* Used for rate to Mbps conversion */
+#define AQ_MBPS_DIVISOR 125000 /* 1000000 / 8 */
+
+/* NIC H/W capabilities */
+struct aq_hw_caps_s {
+ u64 hw_features;
+ u64 link_speed_msk;
+ unsigned int hw_priv_flags;
+ u32 media_type;
+ u32 rxds_max;
+ u32 txds_max;
+ u32 rxds_min;
+ u32 txds_min;
+ u32 txhwb_alignment;
+ u32 irq_mask;
+ u32 vecs;
+ u32 mtu;
+ u32 mac_regs_count;
+ u32 hw_alive_check_addr;
+ u8 msix_irqs;
+ u8 tcs_max;
+ u8 rxd_alignment;
+ u8 rxd_size;
+ u8 txd_alignment;
+ u8 txd_size;
+ u8 tx_rings;
+ u8 rx_rings;
+ bool flow_control;
+ bool is_64_dma;
+ bool op64bit;
+ u32 quirks;
+ u32 priv_data_len;
+};
+
+struct aq_hw_link_status_s {
+ unsigned int mbps;
+ bool full_duplex;
+ u32 lp_link_speed_msk;
+ u32 lp_flow_control;
+};
+
+struct aq_stats_s {
+ u64 brc;
+ u64 btc;
+ u64 uprc;
+ u64 mprc;
+ u64 bprc;
+ u64 erpt;
+ u64 uptc;
+ u64 mptc;
+ u64 bptc;
+ u64 erpr;
+ u64 mbtc;
+ u64 bbtc;
+ u64 mbrc;
+ u64 bbrc;
+ u64 ubrc;
+ u64 ubtc;
+ u64 dpc;
+ u64 dma_pkt_rc;
+ u64 dma_pkt_tc;
+ u64 dma_oct_rc;
+ u64 dma_oct_tc;
+};
+
+#define AQ_HW_IRQ_INVALID 0U
+#define AQ_HW_IRQ_LEGACY 1U
+#define AQ_HW_IRQ_MSI 2U
+#define AQ_HW_IRQ_MSIX 3U
+
+#define AQ_HW_SERVICE_IRQS 1U
+
+#define AQ_HW_POWER_STATE_D0 0U
+#define AQ_HW_POWER_STATE_D3 3U
+
+#define AQ_HW_FLAG_STARTED 0x00000004U
+#define AQ_HW_FLAG_STOPPING 0x00000008U
+#define AQ_HW_FLAG_RESETTING 0x00000010U
+#define AQ_HW_FLAG_CLOSING 0x00000020U
+#define AQ_HW_PTP_AVAILABLE 0x01000000U
+#define AQ_HW_LINK_DOWN 0x04000000U
+#define AQ_HW_FLAG_ERR_UNPLUG 0x40000000U
+#define AQ_HW_FLAG_ERR_HW 0x80000000U
+
+#define AQ_HW_FLAG_ERRORS (AQ_HW_FLAG_ERR_HW | AQ_HW_FLAG_ERR_UNPLUG)
+
+#define AQ_NIC_FLAGS_IS_NOT_READY (AQ_NIC_FLAG_STOPPING | \
+ AQ_NIC_FLAG_RESETTING | AQ_NIC_FLAG_CLOSING | \
+ AQ_NIC_FLAG_ERR_UNPLUG | AQ_NIC_FLAG_ERR_HW)
+
+#define AQ_NIC_FLAGS_IS_NOT_TX_READY (AQ_NIC_FLAGS_IS_NOT_READY | \
+ AQ_NIC_LINK_DOWN)
+
+#define AQ_HW_MEDIA_TYPE_TP 1U
+#define AQ_HW_MEDIA_TYPE_FIBRE 2U
+
+#define AQ_HW_TXD_MULTIPLE 8U
+#define AQ_HW_RXD_MULTIPLE 8U
+
+#define AQ_HW_QUEUES_MAX 32U
+#define AQ_HW_MULTICAST_ADDRESS_MAX 32U
+
+#define AQ_HW_PTP_TC 2U
+
+#define AQ_HW_LED_BLINK 0x2U
+#define AQ_HW_LED_DEFAULT 0x0U
+
+#define AQ_HW_MEDIA_DETECT_CNT 6000
+
+enum aq_priv_flags {
+ AQ_HW_LOOPBACK_DMA_SYS,
+ AQ_HW_LOOPBACK_PKT_SYS,
+ AQ_HW_LOOPBACK_DMA_NET,
+ AQ_HW_LOOPBACK_PHYINT_SYS,
+ AQ_HW_LOOPBACK_PHYEXT_SYS,
+};
+
+#define AQ_HW_LOOPBACK_MASK (BIT(AQ_HW_LOOPBACK_DMA_SYS) |\
+ BIT(AQ_HW_LOOPBACK_PKT_SYS) |\
+ BIT(AQ_HW_LOOPBACK_DMA_NET) |\
+ BIT(AQ_HW_LOOPBACK_PHYINT_SYS) |\
+ BIT(AQ_HW_LOOPBACK_PHYEXT_SYS))
+
+#define ATL_HW_CHIP_MIPS 0x00000001U
+#define ATL_HW_CHIP_TPO2 0x00000002U
+#define ATL_HW_CHIP_RPF2 0x00000004U
+#define ATL_HW_CHIP_MPI_AQ 0x00000010U
+#define ATL_HW_CHIP_ATLANTIC 0x00800000U
+#define ATL_HW_CHIP_REVISION_A0 0x01000000U
+#define ATL_HW_CHIP_REVISION_B0 0x02000000U
+#define ATL_HW_CHIP_REVISION_B1 0x04000000U
+#define ATL_HW_CHIP_ANTIGUA 0x08000000U
+
+#define ATL_HW_IS_CHIP_FEATURE(_HW_, _F_) (!!(ATL_HW_CHIP_##_F_ & \
+ (_HW_)->chip_features))
+
+struct aq_hw_s {
+ atomic_t flags;
+ u8 rbl_enabled:1;
+ struct aq_nic_cfg_s *aq_nic_cfg;
+ const struct aq_fw_ops *aq_fw_ops;
+ void __iomem *mmio;
+ struct aq_hw_link_status_s aq_link_status;
+ struct hw_atl_utils_mbox mbox;
+ struct hw_atl_stats_s last_stats;
+ struct aq_stats_s curr_stats;
+ u64 speed;
+ u32 itr_tx;
+ u32 itr_rx;
+ unsigned int chip_features;
+ u32 fw_ver_actual;
+ atomic_t dpc;
+ u32 mbox_addr;
+ u32 rpc_addr;
+ u32 settings_addr;
+ u32 rpc_tid;
+ struct hw_atl_utils_fw_rpc rpc;
+ s64 ptp_clk_offset;
+ u16 phy_id;
+ void *priv;
+};
+
+struct aq_ring_s;
+struct aq_ring_param_s;
+struct sk_buff;
+struct aq_rx_filter_l3l4;
+
+struct aq_hw_ops {
+
+ int (*hw_ring_tx_xmit)(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
+ unsigned int frags);
+
+ int (*hw_ring_rx_receive)(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring);
+
+ int (*hw_ring_rx_fill)(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
+ unsigned int sw_tail_old);
+
+ int (*hw_ring_tx_head_update)(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring);
+
+ int (*hw_set_mac_address)(struct aq_hw_s *self, const u8 *mac_addr);
+
+ int (*hw_soft_reset)(struct aq_hw_s *self);
+
+ int (*hw_prepare)(struct aq_hw_s *self,
+ const struct aq_fw_ops **fw_ops);
+
+ int (*hw_reset)(struct aq_hw_s *self);
+
+ int (*hw_init)(struct aq_hw_s *self, const u8 *mac_addr);
+
+ int (*hw_start)(struct aq_hw_s *self);
+
+ int (*hw_stop)(struct aq_hw_s *self);
+
+ int (*hw_ring_tx_init)(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
+ struct aq_ring_param_s *aq_ring_param);
+
+ int (*hw_ring_tx_start)(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring);
+
+ int (*hw_ring_tx_stop)(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring);
+
+ int (*hw_ring_rx_init)(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring,
+ struct aq_ring_param_s *aq_ring_param);
+
+ int (*hw_ring_rx_start)(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring);
+
+ int (*hw_ring_rx_stop)(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring);
+
+ int (*hw_irq_enable)(struct aq_hw_s *self, u64 mask);
+
+ int (*hw_irq_disable)(struct aq_hw_s *self, u64 mask);
+
+ int (*hw_irq_read)(struct aq_hw_s *self, u64 *mask);
+
+ int (*hw_packet_filter_set)(struct aq_hw_s *self,
+ unsigned int packet_filter);
+
+ int (*hw_filter_l3l4_set)(struct aq_hw_s *self,
+ struct aq_rx_filter_l3l4 *data);
+
+ int (*hw_filter_l3l4_clear)(struct aq_hw_s *self,
+ struct aq_rx_filter_l3l4 *data);
+
+ int (*hw_filter_l2_set)(struct aq_hw_s *self,
+ struct aq_rx_filter_l2 *data);
+
+ int (*hw_filter_l2_clear)(struct aq_hw_s *self,
+ struct aq_rx_filter_l2 *data);
+
+ int (*hw_filter_vlan_set)(struct aq_hw_s *self,
+ struct aq_rx_filter_vlan *aq_vlans);
+
+ int (*hw_filter_vlan_ctrl)(struct aq_hw_s *self, bool enable);
+
+ int (*hw_multicast_list_set)(struct aq_hw_s *self,
+ u8 ar_mac[AQ_HW_MULTICAST_ADDRESS_MAX]
+ [ETH_ALEN],
+ u32 count);
+
+ int (*hw_interrupt_moderation_set)(struct aq_hw_s *self);
+
+ int (*hw_rss_set)(struct aq_hw_s *self,
+ struct aq_rss_parameters *rss_params);
+
+ int (*hw_rss_hash_set)(struct aq_hw_s *self,
+ struct aq_rss_parameters *rss_params);
+
+ int (*hw_tc_rate_limit_set)(struct aq_hw_s *self);
+
+ int (*hw_get_regs)(struct aq_hw_s *self,
+ const struct aq_hw_caps_s *aq_hw_caps,
+ u32 *regs_buff);
+
+ struct aq_stats_s *(*hw_get_hw_stats)(struct aq_hw_s *self);
+
+ u32 (*hw_get_fw_version)(struct aq_hw_s *self);
+
+ int (*hw_set_offload)(struct aq_hw_s *self,
+ struct aq_nic_cfg_s *aq_nic_cfg);
+
+ int (*hw_ring_hwts_rx_fill)(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring);
+
+ int (*hw_ring_hwts_rx_receive)(struct aq_hw_s *self,
+ struct aq_ring_s *ring);
+
+ void (*hw_get_ptp_ts)(struct aq_hw_s *self, u64 *stamp);
+
+ int (*hw_adj_clock_freq)(struct aq_hw_s *self, s32 delta);
+
+ int (*hw_adj_sys_clock)(struct aq_hw_s *self, s64 delta);
+
+ int (*hw_set_sys_clock)(struct aq_hw_s *self, u64 time, u64 ts);
+
+ int (*hw_ts_to_sys_clock)(struct aq_hw_s *self, u64 ts, u64 *time);
+
+ int (*hw_gpio_pulse)(struct aq_hw_s *self, u32 index, u64 start,
+ u32 period);
+
+ int (*hw_extts_gpio_enable)(struct aq_hw_s *self, u32 index,
+ u32 enable);
+
+ int (*hw_get_sync_ts)(struct aq_hw_s *self, u64 *ts);
+
+ u16 (*rx_extract_ts)(struct aq_hw_s *self, u8 *p, unsigned int len,
+ u64 *timestamp);
+
+ int (*extract_hwts)(struct aq_hw_s *self, u8 *p, unsigned int len,
+ u64 *timestamp);
+
+ int (*hw_set_fc)(struct aq_hw_s *self, u32 fc, u32 tc);
+
+ int (*hw_set_loopback)(struct aq_hw_s *self, u32 mode, bool enable);
+
+ int (*hw_get_mac_temp)(struct aq_hw_s *self, u32 *temp);
+};
+
+struct aq_fw_ops {
+ int (*init)(struct aq_hw_s *self);
+
+ int (*deinit)(struct aq_hw_s *self);
+
+ int (*reset)(struct aq_hw_s *self);
+
+ int (*renegotiate)(struct aq_hw_s *self);
+
+ int (*get_mac_permanent)(struct aq_hw_s *self, u8 *mac);
+
+ int (*set_link_speed)(struct aq_hw_s *self, u32 speed);
+
+ int (*set_state)(struct aq_hw_s *self,
+ enum hal_atl_utils_fw_state_e state);
+
+ int (*update_link_status)(struct aq_hw_s *self);
+
+ int (*update_stats)(struct aq_hw_s *self);
+
+ int (*get_mac_temp)(struct aq_hw_s *self, int *temp);
+
+ int (*get_phy_temp)(struct aq_hw_s *self, int *temp);
+
+ u32 (*get_flow_control)(struct aq_hw_s *self, u32 *fcmode);
+
+ int (*set_flow_control)(struct aq_hw_s *self);
+
+ int (*led_control)(struct aq_hw_s *self, u32 mode);
+
+ int (*set_phyloopback)(struct aq_hw_s *self, u32 mode, bool enable);
+
+ int (*set_power)(struct aq_hw_s *self, unsigned int power_state,
+ const u8 *mac);
+
+ int (*send_fw_request)(struct aq_hw_s *self,
+ const struct hw_fw_request_iface *fw_req,
+ size_t size);
+
+ void (*enable_ptp)(struct aq_hw_s *self, int enable);
+
+ void (*adjust_ptp)(struct aq_hw_s *self, uint64_t adj);
+
+ int (*set_eee_rate)(struct aq_hw_s *self, u32 speed);
+
+ int (*get_eee_rate)(struct aq_hw_s *self, u32 *rate,
+ u32 *supported_rates);
+
+ int (*set_downshift)(struct aq_hw_s *self, u32 counter);
+
+ int (*set_media_detect)(struct aq_hw_s *self, bool enable);
+
+ u32 (*get_link_capabilities)(struct aq_hw_s *self);
+
+ int (*send_macsec_req)(struct aq_hw_s *self,
+ struct macsec_msg_fw_request *msg,
+ struct macsec_msg_fw_response *resp);
+};
+
+#endif /* AQ_HW_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
new file mode 100644
index 000000000..1921741f7
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
+ */
+
+/* File aq_hw_utils.c: Definitions of helper functions used across
+ * hardware layer.
+ */
+
+#include "aq_hw_utils.h"
+
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+#include "aq_hw.h"
+#include "aq_nic.h"
+
+void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk,
+ u32 shift, u32 val)
+{
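+ /* Only a partial mask needs the read-modify-write cycle; a
+ * full mask (~0) overwrites the register directly.
+ */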
+ if (msk ^ ~0) {
+ u32 reg_old, reg_new;
+
+ reg_old = aq_hw_read_reg(aq_hw, addr);
+ reg_new = (reg_old & (~msk)) | (val << shift);
+
+ if (reg_old != reg_new)
+ aq_hw_write_reg(aq_hw, addr, reg_new);
+ } else {
+ aq_hw_write_reg(aq_hw, addr, val);
+ }
+}
+
+u32 aq_hw_read_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, u32 shift)
+{
+ return ((aq_hw_read_reg(aq_hw, addr) & msk) >> shift);
+}
+
+u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg)
+{
+ u32 value = readl(hw->mmio + reg);
+
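+ /* All-ones from MMIO usually means the device dropped off the
+ * bus; confirm via the alive-check register before flagging an
+ * unplug.
+ */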
+ if (value == U32_MAX &&
+ readl(hw->mmio + hw->aq_nic_cfg->aq_hw_caps->hw_alive_check_addr) == U32_MAX)
+ aq_utils_obj_set(&hw->flags, AQ_HW_FLAG_ERR_UNPLUG);
+
+ return value;
+}
+
+void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value)
+{
+ writel(value, hw->mmio + reg);
+}
+
+/* Most 64-bit registers are presented as an LSW, MSW pair.
+ * Counters are normally implemented by HW as latched pairs:
+ * reading the LSW first latches the MSW, so an LSW overflow
+ * between the two reads cannot corrupt the value.
+ */
+u64 aq_hw_read_reg64(struct aq_hw_s *hw, u32 reg)
+{
+ u64 value = U64_MAX;
+
+ if (hw->aq_nic_cfg->aq_hw_caps->op64bit)
+ value = readq(hw->mmio + reg);
+ else
+ value = lo_hi_readq(hw->mmio + reg);
+
+ if (value == U64_MAX &&
+ readl(hw->mmio + hw->aq_nic_cfg->aq_hw_caps->hw_alive_check_addr) == U32_MAX)
+ aq_utils_obj_set(&hw->flags, AQ_HW_FLAG_ERR_UNPLUG);
+
+ return value;
+}
+
+void aq_hw_write_reg64(struct aq_hw_s *hw, u32 reg, u64 value)
+{
+ if (hw->aq_nic_cfg->aq_hw_caps->op64bit)
+ writeq(value, hw->mmio + reg);
+ else
+ lo_hi_writeq(value, hw->mmio + reg);
+}
+
+int aq_hw_err_from_flags(struct aq_hw_s *hw)
+{
+ int err = 0;
+
+ if (aq_utils_obj_test(&hw->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
+ err = -ENXIO;
+ goto err_exit;
+ }
+ if (aq_utils_obj_test(&hw->flags, AQ_HW_FLAG_ERR_HW)) {
+ err = -EIO;
+ goto err_exit;
+ }
+
+err_exit:
+ return err;
+}
+
+int aq_hw_num_tcs(struct aq_hw_s *hw)
+{
+ switch (hw->aq_nic_cfg->tc_mode) {
+ case AQ_TC_MODE_8TCS:
+ return 8;
+ case AQ_TC_MODE_4TCS:
+ return 4;
+ default:
+ break;
+ }
+
+ return 1;
+}
+
+int aq_hw_q_per_tc(struct aq_hw_s *hw)
+{
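+ /* The 32 HW queues are split evenly across traffic classes. */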
+ switch (hw->aq_nic_cfg->tc_mode) {
+ case AQ_TC_MODE_8TCS:
+ return 4;
+ case AQ_TC_MODE_4TCS:
+ return 8;
+ default:
+ return 4;
+ }
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
new file mode 100644
index 000000000..ffa6e4067
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
+ */
+
+/* File aq_hw_utils.h: Declaration of helper functions used across hardware
+ * layer.
+ */
+
+#ifndef AQ_HW_UTILS_H
+#define AQ_HW_UTILS_H
+
+#include <linux/iopoll.h>
+
+#include "aq_common.h"
+
+#ifndef HIDWORD
+#define LODWORD(_qw) ((u32)(_qw))
+#define HIDWORD(_qw) ((u32)(((_qw) >> 32) & 0xffffffff))
+#endif
+
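+/* Note: mdelay() delays in milliseconds despite the _US_ parameter name. */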
+#define AQ_HW_SLEEP(_US_) mdelay(_US_)
+
+#define aq_pr_err(...) pr_err(AQ_CFG_DRV_NAME ": " __VA_ARGS__)
+#define aq_pr_trace(...) pr_info(AQ_CFG_DRV_NAME ": " __VA_ARGS__)
+
+struct aq_hw_s;
+
+void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk,
+ u32 shift, u32 val);
+u32 aq_hw_read_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, u32 shift);
+u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg);
+void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value);
+u64 aq_hw_read_reg64(struct aq_hw_s *hw, u32 reg);
+void aq_hw_write_reg64(struct aq_hw_s *hw, u32 reg, u64 value);
+int aq_hw_err_from_flags(struct aq_hw_s *hw);
+int aq_hw_num_tcs(struct aq_hw_s *hw);
+int aq_hw_q_per_tc(struct aq_hw_s *hw);
+
+#endif /* AQ_HW_UTILS_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
new file mode 100644
index 000000000..7eb5851eb
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
@@ -0,0 +1,1770 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#include "aq_macsec.h"
+#include "aq_nic.h"
+#include <linux/rtnetlink.h>
+
+#include "macsec/macsec_api.h"
+#define AQ_MACSEC_KEY_LEN_128_BIT 16
+#define AQ_MACSEC_KEY_LEN_192_BIT 24
+#define AQ_MACSEC_KEY_LEN_256_BIT 32
+
+enum aq_clear_type {
+ /* update HW configuration */
+ AQ_CLEAR_HW = BIT(0),
+ /* update SW configuration (busy bits, pointers) */
+ AQ_CLEAR_SW = BIT(1),
+ /* update both HW and SW configuration */
+ AQ_CLEAR_ALL = AQ_CLEAR_HW | AQ_CLEAR_SW,
+};
+
+static int aq_clear_txsc(struct aq_nic_s *nic, const int txsc_idx,
+ enum aq_clear_type clear_type);
+static int aq_clear_txsa(struct aq_nic_s *nic, struct aq_macsec_txsc *aq_txsc,
+ const int sa_num, enum aq_clear_type clear_type);
+static int aq_clear_rxsc(struct aq_nic_s *nic, const int rxsc_idx,
+ enum aq_clear_type clear_type);
+static int aq_clear_rxsa(struct aq_nic_s *nic, struct aq_macsec_rxsc *aq_rxsc,
+ const int sa_num, enum aq_clear_type clear_type);
+static int aq_clear_secy(struct aq_nic_s *nic, const struct macsec_secy *secy,
+ enum aq_clear_type clear_type);
+static int aq_apply_macsec_cfg(struct aq_nic_s *nic);
+static int aq_apply_secy_cfg(struct aq_nic_s *nic,
+ const struct macsec_secy *secy);
+
+static void aq_ether_addr_to_mac(u32 mac[2], const unsigned char *emac)
+{
+ u32 tmp[2] = { 0 };
+
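+ /* Pack the 6-byte MAC into the upper bytes of an 8-byte field,
+ * then swap each word into the order the MACsec tables expect.
+ */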
+ memcpy(((u8 *)tmp) + 2, emac, ETH_ALEN);
+
+ mac[0] = swab32(tmp[1]);
+ mac[1] = swab32(tmp[0]);
+}
+
+/* There's a 1:1 mapping between SecY and TX SC */
+static int aq_get_txsc_idx_from_secy(struct aq_macsec_cfg *macsec_cfg,
+ const struct macsec_secy *secy)
+{
+ int i;
+
+ if (unlikely(!secy))
+ return -1;
+
+ for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
+ if (macsec_cfg->aq_txsc[i].sw_secy == secy)
+ return i;
+ }
+ return -1;
+}
+
+static int aq_get_rxsc_idx_from_rxsc(struct aq_macsec_cfg *macsec_cfg,
+ const struct macsec_rx_sc *rxsc)
+{
+ int i;
+
+ if (unlikely(!rxsc))
+ return -1;
+
+ for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
+ if (macsec_cfg->aq_rxsc[i].sw_rxsc == rxsc)
+ return i;
+ }
+
+ return -1;
+}
+
+static int aq_get_txsc_idx_from_sc_idx(const enum aq_macsec_sc_sa sc_sa,
+ const int sc_idx)
+{
+ switch (sc_sa) {
+ case aq_macsec_sa_sc_4sa_8sc:
+ return sc_idx >> 2;
+ case aq_macsec_sa_sc_2sa_16sc:
+ return sc_idx >> 1;
+ case aq_macsec_sa_sc_1sa_32sc:
+ return sc_idx;
+ default:
+ WARN_ONCE(true, "Invalid sc_sa");
+ }
+ return -1;
+}
+
+/* Byte-reverse the key (u32[8]) into the order expected by the hardware */
+static void aq_rotate_keys(u32 (*key)[8], const int key_len)
+{
+ u32 tmp[8] = { 0 };
+
+ memcpy(&tmp, key, sizeof(tmp));
+ memset(*key, 0, sizeof(*key));
+
+ if (key_len == AQ_MACSEC_KEY_LEN_128_BIT) {
+ (*key)[0] = swab32(tmp[3]);
+ (*key)[1] = swab32(tmp[2]);
+ (*key)[2] = swab32(tmp[1]);
+ (*key)[3] = swab32(tmp[0]);
+ } else if (key_len == AQ_MACSEC_KEY_LEN_192_BIT) {
+ (*key)[0] = swab32(tmp[5]);
+ (*key)[1] = swab32(tmp[4]);
+ (*key)[2] = swab32(tmp[3]);
+ (*key)[3] = swab32(tmp[2]);
+ (*key)[4] = swab32(tmp[1]);
+ (*key)[5] = swab32(tmp[0]);
+ } else if (key_len == AQ_MACSEC_KEY_LEN_256_BIT) {
+ (*key)[0] = swab32(tmp[7]);
+ (*key)[1] = swab32(tmp[6]);
+ (*key)[2] = swab32(tmp[5]);
+ (*key)[3] = swab32(tmp[4]);
+ (*key)[4] = swab32(tmp[3]);
+ (*key)[5] = swab32(tmp[2]);
+ (*key)[6] = swab32(tmp[1]);
+ (*key)[7] = swab32(tmp[0]);
+ } else {
+ pr_warn("Rotate_keys: invalid key_len\n");
+ }
+}
+
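+/* HW exports each 64-bit counter as a {lo, hi} pair of u32s. */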
+#define STATS_2x32_TO_64(stat_field) \
+ (((u64)stat_field[1] << 32) | stat_field[0])
+
+static int aq_get_macsec_common_stats(struct aq_hw_s *hw,
+ struct aq_macsec_common_stats *stats)
+{
+ struct aq_mss_ingress_common_counters ingress_counters;
+ struct aq_mss_egress_common_counters egress_counters;
+ int ret;
+
+ /* MACSEC counters */
+ ret = aq_mss_get_ingress_common_counters(hw, &ingress_counters);
+ if (unlikely(ret))
+ return ret;
+
+ stats->in.ctl_pkts = STATS_2x32_TO_64(ingress_counters.ctl_pkts);
+ stats->in.tagged_miss_pkts =
+ STATS_2x32_TO_64(ingress_counters.tagged_miss_pkts);
+ stats->in.untagged_miss_pkts =
+ STATS_2x32_TO_64(ingress_counters.untagged_miss_pkts);
+ stats->in.notag_pkts = STATS_2x32_TO_64(ingress_counters.notag_pkts);
+ stats->in.untagged_pkts =
+ STATS_2x32_TO_64(ingress_counters.untagged_pkts);
+ stats->in.bad_tag_pkts =
+ STATS_2x32_TO_64(ingress_counters.bad_tag_pkts);
+ stats->in.no_sci_pkts = STATS_2x32_TO_64(ingress_counters.no_sci_pkts);
+ stats->in.unknown_sci_pkts =
+ STATS_2x32_TO_64(ingress_counters.unknown_sci_pkts);
+ stats->in.ctrl_prt_pass_pkts =
+ STATS_2x32_TO_64(ingress_counters.ctrl_prt_pass_pkts);
+ stats->in.unctrl_prt_pass_pkts =
+ STATS_2x32_TO_64(ingress_counters.unctrl_prt_pass_pkts);
+ stats->in.ctrl_prt_fail_pkts =
+ STATS_2x32_TO_64(ingress_counters.ctrl_prt_fail_pkts);
+ stats->in.unctrl_prt_fail_pkts =
+ STATS_2x32_TO_64(ingress_counters.unctrl_prt_fail_pkts);
+ stats->in.too_long_pkts =
+ STATS_2x32_TO_64(ingress_counters.too_long_pkts);
+ stats->in.igpoc_ctl_pkts =
+ STATS_2x32_TO_64(ingress_counters.igpoc_ctl_pkts);
+ stats->in.ecc_error_pkts =
+ STATS_2x32_TO_64(ingress_counters.ecc_error_pkts);
+ stats->in.unctrl_hit_drop_redir =
+ STATS_2x32_TO_64(ingress_counters.unctrl_hit_drop_redir);
+
+ ret = aq_mss_get_egress_common_counters(hw, &egress_counters);
+ if (unlikely(ret))
+ return ret;
+ stats->out.ctl_pkts = STATS_2x32_TO_64(egress_counters.ctl_pkt);
+ stats->out.unknown_sa_pkts =
+ STATS_2x32_TO_64(egress_counters.unknown_sa_pkts);
+ stats->out.untagged_pkts =
+ STATS_2x32_TO_64(egress_counters.untagged_pkts);
+ stats->out.too_long = STATS_2x32_TO_64(egress_counters.too_long);
+ stats->out.ecc_error_pkts =
+ STATS_2x32_TO_64(egress_counters.ecc_error_pkts);
+ stats->out.unctrl_hit_drop_redir =
+ STATS_2x32_TO_64(egress_counters.unctrl_hit_drop_redir);
+
+ return 0;
+}
+
+static int aq_get_rxsa_stats(struct aq_hw_s *hw, const int sa_idx,
+ struct aq_macsec_rx_sa_stats *stats)
+{
+ struct aq_mss_ingress_sa_counters i_sa_counters;
+ int ret;
+
+ ret = aq_mss_get_ingress_sa_counters(hw, &i_sa_counters, sa_idx);
+ if (unlikely(ret))
+ return ret;
+
+ stats->untagged_hit_pkts =
+ STATS_2x32_TO_64(i_sa_counters.untagged_hit_pkts);
+ stats->ctrl_hit_drop_redir_pkts =
+ STATS_2x32_TO_64(i_sa_counters.ctrl_hit_drop_redir_pkts);
+ stats->not_using_sa = STATS_2x32_TO_64(i_sa_counters.not_using_sa);
+ stats->unused_sa = STATS_2x32_TO_64(i_sa_counters.unused_sa);
+ stats->not_valid_pkts = STATS_2x32_TO_64(i_sa_counters.not_valid_pkts);
+ stats->invalid_pkts = STATS_2x32_TO_64(i_sa_counters.invalid_pkts);
+ stats->ok_pkts = STATS_2x32_TO_64(i_sa_counters.ok_pkts);
+ stats->late_pkts = STATS_2x32_TO_64(i_sa_counters.late_pkts);
+ stats->delayed_pkts = STATS_2x32_TO_64(i_sa_counters.delayed_pkts);
+ stats->unchecked_pkts = STATS_2x32_TO_64(i_sa_counters.unchecked_pkts);
+ stats->validated_octets =
+ STATS_2x32_TO_64(i_sa_counters.validated_octets);
+ stats->decrypted_octets =
+ STATS_2x32_TO_64(i_sa_counters.decrypted_octets);
+
+ return 0;
+}
+
+static int aq_get_txsa_stats(struct aq_hw_s *hw, const int sa_idx,
+ struct aq_macsec_tx_sa_stats *stats)
+{
+ struct aq_mss_egress_sa_counters e_sa_counters;
+ int ret;
+
+ ret = aq_mss_get_egress_sa_counters(hw, &e_sa_counters, sa_idx);
+ if (unlikely(ret))
+ return ret;
+
+ stats->sa_hit_drop_redirect =
+ STATS_2x32_TO_64(e_sa_counters.sa_hit_drop_redirect);
+ stats->sa_protected2_pkts =
+ STATS_2x32_TO_64(e_sa_counters.sa_protected2_pkts);
+ stats->sa_protected_pkts =
+ STATS_2x32_TO_64(e_sa_counters.sa_protected_pkts);
+ stats->sa_encrypted_pkts =
+ STATS_2x32_TO_64(e_sa_counters.sa_encrypted_pkts);
+
+ return 0;
+}
+
+static int aq_get_txsa_next_pn(struct aq_hw_s *hw, const int sa_idx, u32 *pn)
+{
+ struct aq_mss_egress_sa_record sa_rec;
+ int ret;
+
+ ret = aq_mss_get_egress_sa_record(hw, &sa_rec, sa_idx);
+ if (likely(!ret))
+ *pn = sa_rec.next_pn;
+
+ return ret;
+}
+
+static int aq_get_rxsa_next_pn(struct aq_hw_s *hw, const int sa_idx, u32 *pn)
+{
+ struct aq_mss_ingress_sa_record sa_rec;
+ int ret;
+
+ ret = aq_mss_get_ingress_sa_record(hw, &sa_rec, sa_idx);
+ if (likely(!ret))
+ *pn = (!sa_rec.sat_nextpn) ? sa_rec.next_pn : 0;
+
+ return ret;
+}
+
+static int aq_get_txsc_stats(struct aq_hw_s *hw, const int sc_idx,
+ struct aq_macsec_tx_sc_stats *stats)
+{
+ struct aq_mss_egress_sc_counters e_sc_counters;
+ int ret;
+
+ ret = aq_mss_get_egress_sc_counters(hw, &e_sc_counters, sc_idx);
+ if (unlikely(ret))
+ return ret;
+
+ stats->sc_protected_pkts =
+ STATS_2x32_TO_64(e_sc_counters.sc_protected_pkts);
+ stats->sc_encrypted_pkts =
+ STATS_2x32_TO_64(e_sc_counters.sc_encrypted_pkts);
+ stats->sc_protected_octets =
+ STATS_2x32_TO_64(e_sc_counters.sc_protected_octets);
+ stats->sc_encrypted_octets =
+ STATS_2x32_TO_64(e_sc_counters.sc_encrypted_octets);
+
+ return 0;
+}
+
+static int aq_mdo_dev_open(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ int ret = 0;
+
+ if (netif_carrier_ok(nic->ndev))
+ ret = aq_apply_secy_cfg(nic, ctx->secy);
+
+ return ret;
+}
+
+static int aq_mdo_dev_stop(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ int i;
+
+ for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
+ if (nic->macsec_cfg->txsc_idx_busy & BIT(i))
+ aq_clear_secy(nic, nic->macsec_cfg->aq_txsc[i].sw_secy,
+ AQ_CLEAR_HW);
+ }
+
+ return 0;
+}
+
+static int aq_set_txsc(struct aq_nic_s *nic, const int txsc_idx)
+{
+ struct aq_macsec_txsc *aq_txsc = &nic->macsec_cfg->aq_txsc[txsc_idx];
+ struct aq_mss_egress_class_record tx_class_rec = { 0 };
+ const struct macsec_secy *secy = aq_txsc->sw_secy;
+ struct aq_mss_egress_sc_record sc_rec = { 0 };
+ unsigned int sc_idx = aq_txsc->hw_sc_idx;
+ struct aq_hw_s *hw = nic->aq_hw;
+ int ret = 0;
+
+ aq_ether_addr_to_mac(tx_class_rec.mac_sa, secy->netdev->dev_addr);
+
+ put_unaligned_be64((__force u64)secy->sci, tx_class_rec.sci);
+ tx_class_rec.sci_mask = 0;
+
+ tx_class_rec.sa_mask = 0x3f;
+
+ tx_class_rec.action = 0; /* forward to SA/SC table */
+ tx_class_rec.valid = 1;
+
+ tx_class_rec.sc_idx = sc_idx;
+
+ tx_class_rec.sc_sa = nic->macsec_cfg->sc_sa;
+
+ ret = aq_mss_set_egress_class_record(hw, &tx_class_rec, txsc_idx);
+ if (ret)
+ return ret;
+
+ sc_rec.protect = secy->protect_frames;
+ if (secy->tx_sc.encrypt)
+ sc_rec.tci |= BIT(1);
+ if (secy->tx_sc.scb)
+ sc_rec.tci |= BIT(2);
+ if (secy->tx_sc.send_sci)
+ sc_rec.tci |= BIT(3);
+ if (secy->tx_sc.end_station)
+ sc_rec.tci |= BIT(4);
+ /* The C bit is clear if and only if the Secure Data is
+ * exactly the same as the User Data and the ICV is 16 octets long.
+ */
+ if (!(secy->icv_len == 16 && !secy->tx_sc.encrypt))
+ sc_rec.tci |= BIT(0);
+
+ sc_rec.an_roll = 0;
+
+ switch (secy->key_len) {
+ case AQ_MACSEC_KEY_LEN_128_BIT:
+ sc_rec.sak_len = 0;
+ break;
+ case AQ_MACSEC_KEY_LEN_192_BIT:
+ sc_rec.sak_len = 1;
+ break;
+ case AQ_MACSEC_KEY_LEN_256_BIT:
+ sc_rec.sak_len = 2;
+ break;
+ default:
+ WARN_ONCE(true, "Invalid key_len");
+ return -EINVAL;
+ }
+
+ sc_rec.curr_an = secy->tx_sc.encoding_sa;
+ sc_rec.valid = 1;
+ sc_rec.fresh = 1;
+
+ return aq_mss_set_egress_sc_record(hw, &sc_rec, sc_idx);
+}
+
+static u32 aq_sc_idx_max(const enum aq_macsec_sc_sa sc_sa)
+{
+ u32 result = 0;
+
+ switch (sc_sa) {
+ case aq_macsec_sa_sc_4sa_8sc:
+ result = 8;
+ break;
+ case aq_macsec_sa_sc_2sa_16sc:
+ result = 16;
+ break;
+ case aq_macsec_sa_sc_1sa_32sc:
+ result = 32;
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
+
+static u32 aq_to_hw_sc_idx(const u32 sc_idx, const enum aq_macsec_sc_sa sc_sa)
+{
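+ /* The HW SC index is strided by the number of SAs per SC, so
+ * an SA index can later be formed by ORing in the association
+ * number.
+ */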
+ switch (sc_sa) {
+ case aq_macsec_sa_sc_4sa_8sc:
+ return sc_idx << 2;
+ case aq_macsec_sa_sc_2sa_16sc:
+ return sc_idx << 1;
+ case aq_macsec_sa_sc_1sa_32sc:
+ return sc_idx;
+ default:
+ WARN_ONCE(true, "Invalid sc_sa");
+ }
+
+ return sc_idx;
+}
+
+static enum aq_macsec_sc_sa sc_sa_from_num_an(const int num_an)
+{
+ enum aq_macsec_sc_sa sc_sa = aq_macsec_sa_sc_not_used;
+
+ switch (num_an) {
+ case 4:
+ sc_sa = aq_macsec_sa_sc_4sa_8sc;
+ break;
+ case 2:
+ sc_sa = aq_macsec_sa_sc_2sa_16sc;
+ break;
+ case 1:
+ sc_sa = aq_macsec_sa_sc_1sa_32sc;
+ break;
+ default:
+ break;
+ }
+
+ return sc_sa;
+}
+
+static int aq_mdo_add_secy(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ const struct macsec_secy *secy = ctx->secy;
+ enum aq_macsec_sc_sa sc_sa;
+ u32 txsc_idx;
+ int ret = 0;
+
+ if (secy->xpn)
+ return -EOPNOTSUPP;
+
+ sc_sa = sc_sa_from_num_an(MACSEC_NUM_AN);
+ if (sc_sa == aq_macsec_sa_sc_not_used)
+ return -EINVAL;
+
+ if (hweight32(cfg->txsc_idx_busy) >= aq_sc_idx_max(sc_sa))
+ return -ENOSPC;
+
+ txsc_idx = ffz(cfg->txsc_idx_busy);
+ if (txsc_idx == AQ_MACSEC_MAX_SC)
+ return -ENOSPC;
+
+ cfg->sc_sa = sc_sa;
+ cfg->aq_txsc[txsc_idx].hw_sc_idx = aq_to_hw_sc_idx(txsc_idx, sc_sa);
+ cfg->aq_txsc[txsc_idx].sw_secy = secy;
+
+ if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
+ ret = aq_set_txsc(nic, txsc_idx);
+
+ set_bit(txsc_idx, &cfg->txsc_idx_busy);
+
+ return ret;
+}
+
+static int aq_mdo_upd_secy(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ const struct macsec_secy *secy = ctx->secy;
+ int txsc_idx;
+ int ret = 0;
+
+ txsc_idx = aq_get_txsc_idx_from_secy(nic->macsec_cfg, secy);
+ if (txsc_idx < 0)
+ return -ENOENT;
+
+ if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
+ ret = aq_set_txsc(nic, txsc_idx);
+
+ return ret;
+}
+
+static int aq_clear_txsc(struct aq_nic_s *nic, const int txsc_idx,
+ enum aq_clear_type clear_type)
+{
+ struct aq_macsec_txsc *tx_sc = &nic->macsec_cfg->aq_txsc[txsc_idx];
+ struct aq_mss_egress_class_record tx_class_rec = { 0 };
+ struct aq_mss_egress_sc_record sc_rec = { 0 };
+ struct aq_hw_s *hw = nic->aq_hw;
+ int ret = 0;
+ int sa_num;
+
+ for_each_set_bit(sa_num, &tx_sc->tx_sa_idx_busy, AQ_MACSEC_MAX_SA) {
+ ret = aq_clear_txsa(nic, tx_sc, sa_num, clear_type);
+ if (ret)
+ return ret;
+ }
+
+ if (clear_type & AQ_CLEAR_HW) {
+ ret = aq_mss_set_egress_class_record(hw, &tx_class_rec,
+ txsc_idx);
+ if (ret)
+ return ret;
+
+ sc_rec.fresh = 1;
+ ret = aq_mss_set_egress_sc_record(hw, &sc_rec,
+ tx_sc->hw_sc_idx);
+ if (ret)
+ return ret;
+ }
+
+ if (clear_type & AQ_CLEAR_SW) {
+ clear_bit(txsc_idx, &nic->macsec_cfg->txsc_idx_busy);
+ nic->macsec_cfg->aq_txsc[txsc_idx].sw_secy = NULL;
+ }
+
+ return ret;
+}
+
+static int aq_mdo_del_secy(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ int ret = 0;
+
+ if (!nic->macsec_cfg)
+ return 0;
+
+ ret = aq_clear_secy(nic, ctx->secy, AQ_CLEAR_ALL);
+
+ return ret;
+}
+
+static int aq_update_txsa(struct aq_nic_s *nic, const unsigned int sc_idx,
+ const struct macsec_secy *secy,
+ const struct macsec_tx_sa *tx_sa,
+ const unsigned char *key, const unsigned char an)
+{
+ const u32 next_pn = tx_sa->next_pn_halves.lower;
+ struct aq_mss_egress_sakey_record key_rec;
+ const unsigned int sa_idx = sc_idx | an;
+ struct aq_mss_egress_sa_record sa_rec;
+ struct aq_hw_s *hw = nic->aq_hw;
+ int ret = 0;
+
+ memset(&sa_rec, 0, sizeof(sa_rec));
+ sa_rec.valid = tx_sa->active;
+ sa_rec.fresh = 1;
+ sa_rec.next_pn = next_pn;
+
+ ret = aq_mss_set_egress_sa_record(hw, &sa_rec, sa_idx);
+ if (ret)
+ return ret;
+
+ if (!key)
+ return ret;
+
+ memset(&key_rec, 0, sizeof(key_rec));
+ memcpy(&key_rec.key, key, secy->key_len);
+
+ aq_rotate_keys(&key_rec.key, secy->key_len);
+
+ ret = aq_mss_set_egress_sakey_record(hw, &key_rec, sa_idx);
+
+ memzero_explicit(&key_rec, sizeof(key_rec));
+ return ret;
+}
+
+static int aq_mdo_add_txsa(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ const struct macsec_secy *secy = ctx->secy;
+ struct aq_macsec_txsc *aq_txsc;
+ int txsc_idx;
+ int ret = 0;
+
+ txsc_idx = aq_get_txsc_idx_from_secy(cfg, secy);
+ if (txsc_idx < 0)
+ return -EINVAL;
+
+ aq_txsc = &cfg->aq_txsc[txsc_idx];
+ set_bit(ctx->sa.assoc_num, &aq_txsc->tx_sa_idx_busy);
+
+ memcpy(aq_txsc->tx_sa_key[ctx->sa.assoc_num], ctx->sa.key,
+ secy->key_len);
+
+ if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
+ ret = aq_update_txsa(nic, aq_txsc->hw_sc_idx, secy,
+ ctx->sa.tx_sa, ctx->sa.key,
+ ctx->sa.assoc_num);
+
+ return ret;
+}
+
+static int aq_mdo_upd_txsa(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ const struct macsec_secy *secy = ctx->secy;
+ struct aq_macsec_txsc *aq_txsc;
+ int txsc_idx;
+ int ret = 0;
+
+ txsc_idx = aq_get_txsc_idx_from_secy(cfg, secy);
+ if (txsc_idx < 0)
+ return -EINVAL;
+
+ aq_txsc = &cfg->aq_txsc[txsc_idx];
+ if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
+ ret = aq_update_txsa(nic, aq_txsc->hw_sc_idx, secy,
+ ctx->sa.tx_sa, NULL, ctx->sa.assoc_num);
+
+ return ret;
+}
+
+static int aq_clear_txsa(struct aq_nic_s *nic, struct aq_macsec_txsc *aq_txsc,
+ const int sa_num, enum aq_clear_type clear_type)
+{
+ const int sa_idx = aq_txsc->hw_sc_idx | sa_num;
+ struct aq_hw_s *hw = nic->aq_hw;
+ int ret = 0;
+
+ if (clear_type & AQ_CLEAR_SW)
+ clear_bit(sa_num, &aq_txsc->tx_sa_idx_busy);
+
+ if ((clear_type & AQ_CLEAR_HW) && netif_carrier_ok(nic->ndev)) {
+ struct aq_mss_egress_sakey_record key_rec;
+ struct aq_mss_egress_sa_record sa_rec;
+
+ memset(&sa_rec, 0, sizeof(sa_rec));
+ sa_rec.fresh = 1;
+
+ ret = aq_mss_set_egress_sa_record(hw, &sa_rec, sa_idx);
+ if (ret)
+ return ret;
+
+ memset(&key_rec, 0, sizeof(key_rec));
+ return aq_mss_set_egress_sakey_record(hw, &key_rec, sa_idx);
+ }
+
+ return 0;
+}
+
+static int aq_mdo_del_txsa(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ int txsc_idx;
+ int ret = 0;
+
+ txsc_idx = aq_get_txsc_idx_from_secy(cfg, ctx->secy);
+ if (txsc_idx < 0)
+ return -EINVAL;
+
+ ret = aq_clear_txsa(nic, &cfg->aq_txsc[txsc_idx], ctx->sa.assoc_num,
+ AQ_CLEAR_ALL);
+
+ return ret;
+}
+
+static int aq_rxsc_validate_frames(const enum macsec_validation_type validate)
+{
+ switch (validate) {
+ case MACSEC_VALIDATE_DISABLED:
+ return 2;
+ case MACSEC_VALIDATE_CHECK:
+ return 1;
+ case MACSEC_VALIDATE_STRICT:
+ return 0;
+ default:
+ WARN_ONCE(true, "Invalid validation type");
+ }
+
+ return 0;
+}
+
+static int aq_set_rxsc(struct aq_nic_s *nic, const u32 rxsc_idx)
+{
+ const struct aq_macsec_rxsc *aq_rxsc =
+ &nic->macsec_cfg->aq_rxsc[rxsc_idx];
+ struct aq_mss_ingress_preclass_record pre_class_record;
+ const struct macsec_rx_sc *rx_sc = aq_rxsc->sw_rxsc;
+ const struct macsec_secy *secy = aq_rxsc->sw_secy;
+ const u32 hw_sc_idx = aq_rxsc->hw_sc_idx;
+ struct aq_mss_ingress_sc_record sc_record;
+ struct aq_hw_s *hw = nic->aq_hw;
+ int ret = 0;
+
+ memset(&pre_class_record, 0, sizeof(pre_class_record));
+ put_unaligned_be64((__force u64)rx_sc->sci, pre_class_record.sci);
+ pre_class_record.sci_mask = 0xff;
+ /* match all MACSEC ethertype packets */
+ pre_class_record.eth_type = ETH_P_MACSEC;
+ pre_class_record.eth_type_mask = 0x3;
+
+ aq_ether_addr_to_mac(pre_class_record.mac_sa, (char *)&rx_sc->sci);
+ pre_class_record.sa_mask = 0x3f;
+
+ pre_class_record.an_mask = nic->macsec_cfg->sc_sa;
+ pre_class_record.sc_idx = hw_sc_idx;
+ /* strip SecTAG & forward for decryption */
+ pre_class_record.action = 0x0;
+ pre_class_record.valid = 1;
+
+ ret = aq_mss_set_ingress_preclass_record(hw, &pre_class_record,
+ 2 * rxsc_idx + 1);
+ if (ret)
+ return ret;
+
+ /* If SCI is absent, then match by SA alone */
+ pre_class_record.sci_mask = 0;
+ pre_class_record.sci_from_table = 1;
+
+ ret = aq_mss_set_ingress_preclass_record(hw, &pre_class_record,
+ 2 * rxsc_idx);
+ if (ret)
+ return ret;
+
+ memset(&sc_record, 0, sizeof(sc_record));
+ sc_record.validate_frames =
+ aq_rxsc_validate_frames(secy->validate_frames);
+ if (secy->replay_protect) {
+ sc_record.replay_protect = 1;
+ sc_record.anti_replay_window = secy->replay_window;
+ }
+ sc_record.valid = 1;
+ sc_record.fresh = 1;
+
+ ret = aq_mss_set_ingress_sc_record(hw, &sc_record, hw_sc_idx);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int aq_mdo_add_rxsc(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ const u32 rxsc_idx_max = aq_sc_idx_max(cfg->sc_sa);
+ u32 rxsc_idx;
+ int ret = 0;
+
+ if (hweight32(cfg->rxsc_idx_busy) >= rxsc_idx_max)
+ return -ENOSPC;
+
+ rxsc_idx = ffz(cfg->rxsc_idx_busy);
+ if (rxsc_idx >= rxsc_idx_max)
+ return -ENOSPC;
+
+ cfg->aq_rxsc[rxsc_idx].hw_sc_idx = aq_to_hw_sc_idx(rxsc_idx,
+ cfg->sc_sa);
+ cfg->aq_rxsc[rxsc_idx].sw_secy = ctx->secy;
+ cfg->aq_rxsc[rxsc_idx].sw_rxsc = ctx->rx_sc;
+
+ if (netif_carrier_ok(nic->ndev) && netif_running(ctx->secy->netdev))
+ ret = aq_set_rxsc(nic, rxsc_idx);
+
+ if (ret < 0)
+ return ret;
+
+ set_bit(rxsc_idx, &cfg->rxsc_idx_busy);
+
+ return 0;
+}
+
+static int aq_mdo_upd_rxsc(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ int rxsc_idx;
+ int ret = 0;
+
+ rxsc_idx = aq_get_rxsc_idx_from_rxsc(nic->macsec_cfg, ctx->rx_sc);
+ if (rxsc_idx < 0)
+ return -ENOENT;
+
+ if (netif_carrier_ok(nic->ndev) && netif_running(ctx->secy->netdev))
+ ret = aq_set_rxsc(nic, rxsc_idx);
+
+ return ret;
+}
+
+static int aq_clear_rxsc(struct aq_nic_s *nic, const int rxsc_idx,
+ enum aq_clear_type clear_type)
+{
+ struct aq_macsec_rxsc *rx_sc = &nic->macsec_cfg->aq_rxsc[rxsc_idx];
+ struct aq_hw_s *hw = nic->aq_hw;
+ int ret = 0;
+ int sa_num;
+
+ for_each_set_bit(sa_num, &rx_sc->rx_sa_idx_busy, AQ_MACSEC_MAX_SA) {
+ ret = aq_clear_rxsa(nic, rx_sc, sa_num, clear_type);
+ if (ret)
+ return ret;
+ }
+
+ if (clear_type & AQ_CLEAR_HW) {
+ struct aq_mss_ingress_preclass_record pre_class_record;
+ struct aq_mss_ingress_sc_record sc_record;
+
+ memset(&pre_class_record, 0, sizeof(pre_class_record));
+ memset(&sc_record, 0, sizeof(sc_record));
+
+ ret = aq_mss_set_ingress_preclass_record(hw, &pre_class_record,
+ 2 * rxsc_idx);
+ if (ret)
+ return ret;
+
+ ret = aq_mss_set_ingress_preclass_record(hw, &pre_class_record,
+ 2 * rxsc_idx + 1);
+ if (ret)
+ return ret;
+
+ sc_record.fresh = 1;
+ ret = aq_mss_set_ingress_sc_record(hw, &sc_record,
+ rx_sc->hw_sc_idx);
+ if (ret)
+ return ret;
+ }
+
+ if (clear_type & AQ_CLEAR_SW) {
+ clear_bit(rxsc_idx, &nic->macsec_cfg->rxsc_idx_busy);
+ rx_sc->sw_secy = NULL;
+ rx_sc->sw_rxsc = NULL;
+ }
+
+ return ret;
+}
+
+static int aq_mdo_del_rxsc(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ enum aq_clear_type clear_type = AQ_CLEAR_SW;
+ int rxsc_idx;
+ int ret = 0;
+
+ rxsc_idx = aq_get_rxsc_idx_from_rxsc(nic->macsec_cfg, ctx->rx_sc);
+ if (rxsc_idx < 0)
+ return -ENOENT;
+
+ if (netif_carrier_ok(nic->ndev))
+ clear_type = AQ_CLEAR_ALL;
+
+ ret = aq_clear_rxsc(nic, rxsc_idx, clear_type);
+
+ return ret;
+}
+
+static int aq_update_rxsa(struct aq_nic_s *nic, const unsigned int sc_idx,
+ const struct macsec_secy *secy,
+ const struct macsec_rx_sa *rx_sa,
+ const unsigned char *key, const unsigned char an)
+{
+ struct aq_mss_ingress_sakey_record sa_key_record;
+ const u32 next_pn = rx_sa->next_pn_halves.lower;
+ struct aq_mss_ingress_sa_record sa_record;
+ struct aq_hw_s *hw = nic->aq_hw;
+ const int sa_idx = sc_idx | an;
+ int ret = 0;
+
+ memset(&sa_record, 0, sizeof(sa_record));
+ sa_record.valid = rx_sa->active;
+ sa_record.fresh = 1;
+ sa_record.next_pn = next_pn;
+
+ ret = aq_mss_set_ingress_sa_record(hw, &sa_record, sa_idx);
+ if (ret)
+ return ret;
+
+ if (!key)
+ return ret;
+
+ memset(&sa_key_record, 0, sizeof(sa_key_record));
+ memcpy(&sa_key_record.key, key, secy->key_len);
+
+ switch (secy->key_len) {
+ case AQ_MACSEC_KEY_LEN_128_BIT:
+ sa_key_record.key_len = 0;
+ break;
+ case AQ_MACSEC_KEY_LEN_192_BIT:
+ sa_key_record.key_len = 1;
+ break;
+ case AQ_MACSEC_KEY_LEN_256_BIT:
+ sa_key_record.key_len = 2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ aq_rotate_keys(&sa_key_record.key, secy->key_len);
+
+ ret = aq_mss_set_ingress_sakey_record(hw, &sa_key_record, sa_idx);
+
+ memzero_explicit(&sa_key_record, sizeof(sa_key_record));
+ return ret;
+}
+
+static int aq_mdo_add_rxsa(struct macsec_context *ctx)
+{
+ const struct macsec_rx_sc *rx_sc = ctx->sa.rx_sa->sc;
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ const struct macsec_secy *secy = ctx->secy;
+ struct aq_macsec_rxsc *aq_rxsc;
+ int rxsc_idx;
+ int ret = 0;
+
+ rxsc_idx = aq_get_rxsc_idx_from_rxsc(nic->macsec_cfg, rx_sc);
+ if (rxsc_idx < 0)
+ return -EINVAL;
+
+ aq_rxsc = &nic->macsec_cfg->aq_rxsc[rxsc_idx];
+ set_bit(ctx->sa.assoc_num, &aq_rxsc->rx_sa_idx_busy);
+
+ memcpy(aq_rxsc->rx_sa_key[ctx->sa.assoc_num], ctx->sa.key,
+ secy->key_len);
+
+ if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
+ ret = aq_update_rxsa(nic, aq_rxsc->hw_sc_idx, secy,
+ ctx->sa.rx_sa, ctx->sa.key,
+ ctx->sa.assoc_num);
+
+ return ret;
+}
+
+static int aq_mdo_upd_rxsa(struct macsec_context *ctx)
+{
+ const struct macsec_rx_sc *rx_sc = ctx->sa.rx_sa->sc;
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ const struct macsec_secy *secy = ctx->secy;
+ int rxsc_idx;
+ int ret = 0;
+
+ rxsc_idx = aq_get_rxsc_idx_from_rxsc(cfg, rx_sc);
+ if (rxsc_idx < 0)
+ return -EINVAL;
+
+ if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
+ ret = aq_update_rxsa(nic, cfg->aq_rxsc[rxsc_idx].hw_sc_idx,
+ secy, ctx->sa.rx_sa, NULL,
+ ctx->sa.assoc_num);
+
+ return ret;
+}
+
+static int aq_clear_rxsa(struct aq_nic_s *nic, struct aq_macsec_rxsc *aq_rxsc,
+ const int sa_num, enum aq_clear_type clear_type)
+{
+ int sa_idx = aq_rxsc->hw_sc_idx | sa_num;
+ struct aq_hw_s *hw = nic->aq_hw;
+ int ret = 0;
+
+ if (clear_type & AQ_CLEAR_SW)
+ clear_bit(sa_num, &aq_rxsc->rx_sa_idx_busy);
+
+ if ((clear_type & AQ_CLEAR_HW) && netif_carrier_ok(nic->ndev)) {
+ struct aq_mss_ingress_sakey_record sa_key_record;
+ struct aq_mss_ingress_sa_record sa_record;
+
+ memset(&sa_key_record, 0, sizeof(sa_key_record));
+ memset(&sa_record, 0, sizeof(sa_record));
+ sa_record.fresh = 1;
+ ret = aq_mss_set_ingress_sa_record(hw, &sa_record, sa_idx);
+ if (ret)
+ return ret;
+
+ return aq_mss_set_ingress_sakey_record(hw, &sa_key_record,
+ sa_idx);
+ }
+
+ return ret;
+}
+
+static int aq_mdo_del_rxsa(struct macsec_context *ctx)
+{
+ const struct macsec_rx_sc *rx_sc = ctx->sa.rx_sa->sc;
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ int rxsc_idx;
+ int ret = 0;
+
+ rxsc_idx = aq_get_rxsc_idx_from_rxsc(cfg, rx_sc);
+ if (rxsc_idx < 0)
+ return -EINVAL;
+
+ ret = aq_clear_rxsa(nic, &cfg->aq_rxsc[rxsc_idx], ctx->sa.assoc_num,
+ AQ_CLEAR_ALL);
+
+ return ret;
+}
+
+static int aq_mdo_get_dev_stats(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_macsec_common_stats *stats = &nic->macsec_cfg->stats;
+ struct aq_hw_s *hw = nic->aq_hw;
+
+ aq_get_macsec_common_stats(hw, stats);
+
+ ctx->stats.dev_stats->OutPktsUntagged = stats->out.untagged_pkts;
+ ctx->stats.dev_stats->InPktsUntagged = stats->in.untagged_pkts;
+ ctx->stats.dev_stats->OutPktsTooLong = stats->out.too_long;
+ ctx->stats.dev_stats->InPktsNoTag = stats->in.notag_pkts;
+ ctx->stats.dev_stats->InPktsBadTag = stats->in.bad_tag_pkts;
+ ctx->stats.dev_stats->InPktsUnknownSCI = stats->in.unknown_sci_pkts;
+ ctx->stats.dev_stats->InPktsNoSCI = stats->in.no_sci_pkts;
+ ctx->stats.dev_stats->InPktsOverrun = 0;
+
+ return 0;
+}
+
+static int aq_mdo_get_tx_sc_stats(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_macsec_tx_sc_stats *stats;
+ struct aq_hw_s *hw = nic->aq_hw;
+ struct aq_macsec_txsc *aq_txsc;
+ int txsc_idx;
+
+ txsc_idx = aq_get_txsc_idx_from_secy(nic->macsec_cfg, ctx->secy);
+ if (txsc_idx < 0)
+ return -ENOENT;
+
+ aq_txsc = &nic->macsec_cfg->aq_txsc[txsc_idx];
+ stats = &aq_txsc->stats;
+ aq_get_txsc_stats(hw, aq_txsc->hw_sc_idx, stats);
+
+ ctx->stats.tx_sc_stats->OutPktsProtected = stats->sc_protected_pkts;
+ ctx->stats.tx_sc_stats->OutPktsEncrypted = stats->sc_encrypted_pkts;
+ ctx->stats.tx_sc_stats->OutOctetsProtected = stats->sc_protected_octets;
+ ctx->stats.tx_sc_stats->OutOctetsEncrypted = stats->sc_encrypted_octets;
+
+ return 0;
+}
+
+static int aq_mdo_get_tx_sa_stats(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ struct aq_macsec_tx_sa_stats *stats;
+ struct aq_hw_s *hw = nic->aq_hw;
+ const struct macsec_secy *secy;
+ struct aq_macsec_txsc *aq_txsc;
+ struct macsec_tx_sa *tx_sa;
+ unsigned int sa_idx;
+ int txsc_idx;
+ u32 next_pn;
+ int ret;
+
+ txsc_idx = aq_get_txsc_idx_from_secy(cfg, ctx->secy);
+ if (txsc_idx < 0)
+ return -EINVAL;
+
+ aq_txsc = &cfg->aq_txsc[txsc_idx];
+ sa_idx = aq_txsc->hw_sc_idx | ctx->sa.assoc_num;
+ stats = &aq_txsc->tx_sa_stats[ctx->sa.assoc_num];
+ ret = aq_get_txsa_stats(hw, sa_idx, stats);
+ if (ret)
+ return ret;
+
+ ctx->stats.tx_sa_stats->OutPktsProtected = stats->sa_protected_pkts;
+ ctx->stats.tx_sa_stats->OutPktsEncrypted = stats->sa_encrypted_pkts;
+
+ secy = aq_txsc->sw_secy;
+ tx_sa = rcu_dereference_bh(secy->tx_sc.sa[ctx->sa.assoc_num]);
+ ret = aq_get_txsa_next_pn(hw, sa_idx, &next_pn);
+ if (ret == 0) {
+ spin_lock_bh(&tx_sa->lock);
+ tx_sa->next_pn = next_pn;
+ spin_unlock_bh(&tx_sa->lock);
+ }
+
+ return ret;
+}
+
+static int aq_mdo_get_rx_sc_stats(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ struct aq_macsec_rx_sa_stats *stats;
+ struct aq_hw_s *hw = nic->aq_hw;
+ struct aq_macsec_rxsc *aq_rxsc;
+ unsigned int sa_idx;
+ int rxsc_idx;
+ int ret = 0;
+ int i;
+
+ rxsc_idx = aq_get_rxsc_idx_from_rxsc(cfg, ctx->rx_sc);
+ if (rxsc_idx < 0)
+ return -ENOENT;
+
+ aq_rxsc = &cfg->aq_rxsc[rxsc_idx];
+ for (i = 0; i < MACSEC_NUM_AN; i++) {
+ if (!test_bit(i, &aq_rxsc->rx_sa_idx_busy))
+ continue;
+
+ stats = &aq_rxsc->rx_sa_stats[i];
+ sa_idx = aq_rxsc->hw_sc_idx | i;
+ ret = aq_get_rxsa_stats(hw, sa_idx, stats);
+ if (ret)
+ break;
+
+ ctx->stats.rx_sc_stats->InOctetsValidated +=
+ stats->validated_octets;
+ ctx->stats.rx_sc_stats->InOctetsDecrypted +=
+ stats->decrypted_octets;
+ ctx->stats.rx_sc_stats->InPktsUnchecked +=
+ stats->unchecked_pkts;
+ ctx->stats.rx_sc_stats->InPktsDelayed += stats->delayed_pkts;
+ ctx->stats.rx_sc_stats->InPktsOK += stats->ok_pkts;
+ ctx->stats.rx_sc_stats->InPktsInvalid += stats->invalid_pkts;
+ ctx->stats.rx_sc_stats->InPktsLate += stats->late_pkts;
+ ctx->stats.rx_sc_stats->InPktsNotValid += stats->not_valid_pkts;
+ ctx->stats.rx_sc_stats->InPktsNotUsingSA += stats->not_using_sa;
+ ctx->stats.rx_sc_stats->InPktsUnusedSA += stats->unused_sa;
+ }
+
+ return ret;
+}
+
+static int aq_mdo_get_rx_sa_stats(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ struct aq_macsec_rx_sa_stats *stats;
+ struct aq_hw_s *hw = nic->aq_hw;
+ struct aq_macsec_rxsc *aq_rxsc;
+ struct macsec_rx_sa *rx_sa;
+ unsigned int sa_idx;
+ int rxsc_idx;
+ u32 next_pn;
+ int ret;
+
+ rxsc_idx = aq_get_rxsc_idx_from_rxsc(cfg, ctx->rx_sc);
+ if (rxsc_idx < 0)
+ return -EINVAL;
+
+ aq_rxsc = &cfg->aq_rxsc[rxsc_idx];
+ stats = &aq_rxsc->rx_sa_stats[ctx->sa.assoc_num];
+ sa_idx = aq_rxsc->hw_sc_idx | ctx->sa.assoc_num;
+ ret = aq_get_rxsa_stats(hw, sa_idx, stats);
+ if (ret)
+ return ret;
+
+ ctx->stats.rx_sa_stats->InPktsOK = stats->ok_pkts;
+ ctx->stats.rx_sa_stats->InPktsInvalid = stats->invalid_pkts;
+ ctx->stats.rx_sa_stats->InPktsNotValid = stats->not_valid_pkts;
+ ctx->stats.rx_sa_stats->InPktsNotUsingSA = stats->not_using_sa;
+ ctx->stats.rx_sa_stats->InPktsUnusedSA = stats->unused_sa;
+
+ rx_sa = rcu_dereference_bh(aq_rxsc->sw_rxsc->sa[ctx->sa.assoc_num]);
+ ret = aq_get_rxsa_next_pn(hw, sa_idx, &next_pn);
+ if (ret == 0) {
+ spin_lock_bh(&rx_sa->lock);
+ rx_sa->next_pn = next_pn;
+ spin_unlock_bh(&rx_sa->lock);
+ }
+
+ return ret;
+}
+
+static int apply_txsc_cfg(struct aq_nic_s *nic, const int txsc_idx)
+{
+ struct aq_macsec_txsc *aq_txsc = &nic->macsec_cfg->aq_txsc[txsc_idx];
+ const struct macsec_secy *secy = aq_txsc->sw_secy;
+ struct macsec_tx_sa *tx_sa;
+ int ret = 0;
+ int i;
+
+ if (!netif_running(secy->netdev))
+ return ret;
+
+ ret = aq_set_txsc(nic, txsc_idx);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < MACSEC_NUM_AN; i++) {
+ tx_sa = rcu_dereference_bh(secy->tx_sc.sa[i]);
+ if (tx_sa) {
+ ret = aq_update_txsa(nic, aq_txsc->hw_sc_idx, secy,
+ tx_sa, aq_txsc->tx_sa_key[i], i);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int apply_rxsc_cfg(struct aq_nic_s *nic, const int rxsc_idx)
+{
+ struct aq_macsec_rxsc *aq_rxsc = &nic->macsec_cfg->aq_rxsc[rxsc_idx];
+ const struct macsec_secy *secy = aq_rxsc->sw_secy;
+ struct macsec_rx_sa *rx_sa;
+ int ret = 0;
+ int i;
+
+ if (!netif_running(secy->netdev))
+ return ret;
+
+ ret = aq_set_rxsc(nic, rxsc_idx);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < MACSEC_NUM_AN; i++) {
+ rx_sa = rcu_dereference_bh(aq_rxsc->sw_rxsc->sa[i]);
+ if (rx_sa) {
+ ret = aq_update_rxsa(nic, aq_rxsc->hw_sc_idx, secy,
+ rx_sa, aq_rxsc->rx_sa_key[i], i);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int aq_clear_secy(struct aq_nic_s *nic, const struct macsec_secy *secy,
+ enum aq_clear_type clear_type)
+{
+ struct macsec_rx_sc *rx_sc;
+ int txsc_idx;
+ int rxsc_idx;
+ int ret = 0;
+
+ txsc_idx = aq_get_txsc_idx_from_secy(nic->macsec_cfg, secy);
+ if (txsc_idx >= 0) {
+ ret = aq_clear_txsc(nic, txsc_idx, clear_type);
+ if (ret)
+ return ret;
+ }
+
+ for (rx_sc = rcu_dereference_bh(secy->rx_sc); rx_sc;
+ rx_sc = rcu_dereference_bh(rx_sc->next)) {
+ rxsc_idx = aq_get_rxsc_idx_from_rxsc(nic->macsec_cfg, rx_sc);
+ if (rxsc_idx < 0)
+ continue;
+
+ ret = aq_clear_rxsc(nic, rxsc_idx, clear_type);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int aq_apply_secy_cfg(struct aq_nic_s *nic,
+ const struct macsec_secy *secy)
+{
+ struct macsec_rx_sc *rx_sc;
+ int txsc_idx;
+ int rxsc_idx;
+ int ret = 0;
+
+ txsc_idx = aq_get_txsc_idx_from_secy(nic->macsec_cfg, secy);
+ if (txsc_idx >= 0)
+ apply_txsc_cfg(nic, txsc_idx);
+
+ for (rx_sc = rcu_dereference_bh(secy->rx_sc); rx_sc && rx_sc->active;
+ rx_sc = rcu_dereference_bh(rx_sc->next)) {
+ rxsc_idx = aq_get_rxsc_idx_from_rxsc(nic->macsec_cfg, rx_sc);
+ if (unlikely(rxsc_idx < 0))
+ continue;
+
+ ret = apply_rxsc_cfg(nic, rxsc_idx);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int aq_apply_macsec_cfg(struct aq_nic_s *nic)
+{
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
+ if (nic->macsec_cfg->txsc_idx_busy & BIT(i)) {
+ ret = apply_txsc_cfg(nic, i);
+ if (ret)
+ return ret;
+ }
+ }
+
+ for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
+ if (nic->macsec_cfg->rxsc_idx_busy & BIT(i)) {
+ ret = apply_rxsc_cfg(nic, i);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
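+/* Hardware SA indices pack the SC index into the high bits and the
+ * association number into the low bits; how many low bits are in use
+ * depends on the SC/SA mapping mode. The two helpers below recover the
+ * association number and the SC index from a raw SA index.
+ */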
+static int aq_sa_from_sa_idx(const enum aq_macsec_sc_sa sc_sa, const int sa_idx)
+{
+ switch (sc_sa) {
+ case aq_macsec_sa_sc_4sa_8sc:
+ return sa_idx & 3;
+ case aq_macsec_sa_sc_2sa_16sc:
+ return sa_idx & 1;
+ case aq_macsec_sa_sc_1sa_32sc:
+ return 0;
+ default:
+ WARN_ONCE(true, "Invalid sc_sa");
+ }
+ return -EINVAL;
+}
+
+static int aq_sc_idx_from_sa_idx(const enum aq_macsec_sc_sa sc_sa,
+ const int sa_idx)
+{
+ switch (sc_sa) {
+ case aq_macsec_sa_sc_4sa_8sc:
+ return sa_idx & ~3;
+ case aq_macsec_sa_sc_2sa_16sc:
+ return sa_idx & ~1;
+ case aq_macsec_sa_sc_1sa_32sc:
+ return sa_idx;
+ default:
+ WARN_ONCE(true, "Invalid sc_sa");
+ }
+ return -EINVAL;
+}
+
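+/* Handle the egress SA "expired" and "threshold expired" interrupt status:
+ * map each pending bit back to its TX SC and association number, skip SCs
+ * or SAs that are no longer in use or whose netdev is down, and signal the
+ * imminent packet number wrap to the MACsec core via macsec_pn_wrapped().
+ * The serviced bits are then written back to acknowledge them.
+ */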
+static void aq_check_txsa_expiration(struct aq_nic_s *nic)
+{
+ u32 egress_sa_expired, egress_sa_threshold_expired;
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ struct aq_hw_s *hw = nic->aq_hw;
+ struct aq_macsec_txsc *aq_txsc;
+ const struct macsec_secy *secy;
+ int sc_idx = 0, txsc_idx = 0;
+ enum aq_macsec_sc_sa sc_sa;
+ struct macsec_tx_sa *tx_sa;
+ unsigned char an = 0;
+ int ret;
+ int i;
+
+ sc_sa = cfg->sc_sa;
+
+ ret = aq_mss_get_egress_sa_expired(hw, &egress_sa_expired);
+ if (unlikely(ret))
+ return;
+
+ ret = aq_mss_get_egress_sa_threshold_expired(hw,
+ &egress_sa_threshold_expired);
+
+ for (i = 0; i < AQ_MACSEC_MAX_SA; i++) {
+ if (egress_sa_expired & BIT(i)) {
+ an = aq_sa_from_sa_idx(sc_sa, i);
+ sc_idx = aq_sc_idx_from_sa_idx(sc_sa, i);
+ txsc_idx = aq_get_txsc_idx_from_sc_idx(sc_sa, sc_idx);
+ if (txsc_idx < 0)
+ continue;
+
+ aq_txsc = &cfg->aq_txsc[txsc_idx];
+ if (!(cfg->txsc_idx_busy & BIT(txsc_idx))) {
+ netdev_warn(nic->ndev,
+ "PN threshold expired on invalid TX SC");
+ continue;
+ }
+
+ secy = aq_txsc->sw_secy;
+ if (!netif_running(secy->netdev)) {
+ netdev_warn(nic->ndev,
+ "PN threshold expired on down TX SC");
+ continue;
+ }
+
+ if (unlikely(!(aq_txsc->tx_sa_idx_busy & BIT(an)))) {
+ netdev_warn(nic->ndev,
+ "PN threshold expired on invalid TX SA");
+ continue;
+ }
+
+ tx_sa = rcu_dereference_bh(secy->tx_sc.sa[an]);
+ macsec_pn_wrapped((struct macsec_secy *)secy, tx_sa);
+ }
+ }
+
+ aq_mss_set_egress_sa_expired(hw, egress_sa_expired);
+ if (likely(!ret))
+ aq_mss_set_egress_sa_threshold_expired(hw,
+ egress_sa_threshold_expired);
+}
+
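+/* Every offload callback is serialized against the others (and against the
+ * service task) by nic->macsec_mutex; this macro generates a locked wrapper
+ * around the corresponding aq_mdo_*() handler.
+ */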
+#define AQ_LOCKED_MDO_DEF(mdo) \
+static int aq_locked_mdo_##mdo(struct macsec_context *ctx) \
+{ \
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev); \
+ int ret; \
+ mutex_lock(&nic->macsec_mutex); \
+ ret = aq_mdo_##mdo(ctx); \
+ mutex_unlock(&nic->macsec_mutex); \
+ return ret; \
+}
+
+AQ_LOCKED_MDO_DEF(dev_open)
+AQ_LOCKED_MDO_DEF(dev_stop)
+AQ_LOCKED_MDO_DEF(add_secy)
+AQ_LOCKED_MDO_DEF(upd_secy)
+AQ_LOCKED_MDO_DEF(del_secy)
+AQ_LOCKED_MDO_DEF(add_rxsc)
+AQ_LOCKED_MDO_DEF(upd_rxsc)
+AQ_LOCKED_MDO_DEF(del_rxsc)
+AQ_LOCKED_MDO_DEF(add_rxsa)
+AQ_LOCKED_MDO_DEF(upd_rxsa)
+AQ_LOCKED_MDO_DEF(del_rxsa)
+AQ_LOCKED_MDO_DEF(add_txsa)
+AQ_LOCKED_MDO_DEF(upd_txsa)
+AQ_LOCKED_MDO_DEF(del_txsa)
+AQ_LOCKED_MDO_DEF(get_dev_stats)
+AQ_LOCKED_MDO_DEF(get_tx_sc_stats)
+AQ_LOCKED_MDO_DEF(get_tx_sa_stats)
+AQ_LOCKED_MDO_DEF(get_rx_sc_stats)
+AQ_LOCKED_MDO_DEF(get_rx_sa_stats)
+
+const struct macsec_ops aq_macsec_ops = {
+ .mdo_dev_open = aq_locked_mdo_dev_open,
+ .mdo_dev_stop = aq_locked_mdo_dev_stop,
+ .mdo_add_secy = aq_locked_mdo_add_secy,
+ .mdo_upd_secy = aq_locked_mdo_upd_secy,
+ .mdo_del_secy = aq_locked_mdo_del_secy,
+ .mdo_add_rxsc = aq_locked_mdo_add_rxsc,
+ .mdo_upd_rxsc = aq_locked_mdo_upd_rxsc,
+ .mdo_del_rxsc = aq_locked_mdo_del_rxsc,
+ .mdo_add_rxsa = aq_locked_mdo_add_rxsa,
+ .mdo_upd_rxsa = aq_locked_mdo_upd_rxsa,
+ .mdo_del_rxsa = aq_locked_mdo_del_rxsa,
+ .mdo_add_txsa = aq_locked_mdo_add_txsa,
+ .mdo_upd_txsa = aq_locked_mdo_upd_txsa,
+ .mdo_del_txsa = aq_locked_mdo_del_txsa,
+ .mdo_get_dev_stats = aq_locked_mdo_get_dev_stats,
+ .mdo_get_tx_sc_stats = aq_locked_mdo_get_tx_sc_stats,
+ .mdo_get_tx_sa_stats = aq_locked_mdo_get_tx_sa_stats,
+ .mdo_get_rx_sc_stats = aq_locked_mdo_get_rx_sc_stats,
+ .mdo_get_rx_sa_stats = aq_locked_mdo_get_rx_sa_stats,
+};
+
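+/* Advertise MACsec offload only if firmware reports the capability;
+ * allocate the shadow configuration and hook up the offload ops.
+ */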
+int aq_macsec_init(struct aq_nic_s *nic)
+{
+ struct aq_macsec_cfg *cfg;
+ u32 caps_lo;
+
+ if (!nic->aq_fw_ops->get_link_capabilities)
+ return 0;
+
+ caps_lo = nic->aq_fw_ops->get_link_capabilities(nic->aq_hw);
+
+ if (!(caps_lo & BIT(CAPS_LO_MACSEC)))
+ return 0;
+
+ nic->macsec_cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!nic->macsec_cfg)
+ return -ENOMEM;
+
+ nic->ndev->features |= NETIF_F_HW_MACSEC;
+ nic->ndev->macsec_ops = &aq_macsec_ops;
+ mutex_init(&nic->macsec_mutex);
+
+ return 0;
+}
+
+void aq_macsec_free(struct aq_nic_s *nic)
+{
+ kfree(nic->macsec_cfg);
+ nic->macsec_cfg = NULL;
+}
+
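+/* Enable the MACsec engine in firmware, install EtherType bypass filters so
+ * that control frames (EAPOL) skip MACsec processing in both directions,
+ * and re-apply the cached SC/SA configuration to hardware.
+ */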
+int aq_macsec_enable(struct aq_nic_s *nic)
+{
+ u32 ctl_ether_types[1] = { ETH_P_PAE };
+ struct macsec_msg_fw_response resp = { 0 };
+ struct macsec_msg_fw_request msg = { 0 };
+ struct aq_hw_s *hw = nic->aq_hw;
+ int num_ctl_ether_types = 0;
+ int index = 0, tbl_idx;
+ int ret;
+
+ if (!nic->macsec_cfg)
+ return 0;
+
+ mutex_lock(&nic->macsec_mutex);
+
+ if (nic->aq_fw_ops->send_macsec_req) {
+ struct macsec_cfg_request cfg = { 0 };
+
+ cfg.enabled = 1;
+ cfg.egress_threshold = 0xffffffff;
+ cfg.ingress_threshold = 0xffffffff;
+ cfg.interrupts_enabled = 1;
+
+ msg.msg_type = macsec_cfg_msg;
+ msg.cfg = cfg;
+
+ ret = nic->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
+ if (ret)
+ goto unlock;
+ }
+
+ /* Init Ethertype bypass filters */
+ for (index = 0; index < ARRAY_SIZE(ctl_ether_types); index++) {
+ struct aq_mss_ingress_prectlf_record rx_prectlf_rec;
+ struct aq_mss_egress_ctlf_record tx_ctlf_rec;
+
+ if (ctl_ether_types[index] == 0)
+ continue;
+
+ memset(&tx_ctlf_rec, 0, sizeof(tx_ctlf_rec));
+ tx_ctlf_rec.eth_type = ctl_ether_types[index];
+ tx_ctlf_rec.match_type = 4; /* Match eth_type only */
+ tx_ctlf_rec.match_mask = 0xf; /* match for eth_type */
+ tx_ctlf_rec.action = 0; /* Bypass MACSEC modules */
+ tbl_idx = NUMROWS_EGRESSCTLFRECORD - num_ctl_ether_types - 1;
+ aq_mss_set_egress_ctlf_record(hw, &tx_ctlf_rec, tbl_idx);
+
+ memset(&rx_prectlf_rec, 0, sizeof(rx_prectlf_rec));
+ rx_prectlf_rec.eth_type = ctl_ether_types[index];
+ rx_prectlf_rec.match_type = 4; /* Match eth_type only */
+ rx_prectlf_rec.match_mask = 0xf; /* match for eth_type */
+ rx_prectlf_rec.action = 0; /* Bypass MACSEC modules */
+ tbl_idx =
+ NUMROWS_INGRESSPRECTLFRECORD - num_ctl_ether_types - 1;
+ aq_mss_set_ingress_prectlf_record(hw, &rx_prectlf_rec, tbl_idx);
+
+ num_ctl_ether_types++;
+ }
+
+ ret = aq_apply_macsec_cfg(nic);
+
+unlock:
+ mutex_unlock(&nic->macsec_mutex);
+ return ret;
+}
+
+void aq_macsec_work(struct aq_nic_s *nic)
+{
+ if (!nic->macsec_cfg)
+ return;
+
+ if (!netif_carrier_ok(nic->ndev))
+ return;
+
+ mutex_lock(&nic->macsec_mutex);
+ aq_check_txsa_expiration(nic);
+ mutex_unlock(&nic->macsec_mutex);
+}
+
+int aq_macsec_rx_sa_cnt(struct aq_nic_s *nic)
+{
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ int i, cnt = 0;
+
+ if (!cfg)
+ return 0;
+
+ mutex_lock(&nic->macsec_mutex);
+
+ for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
+ if (!test_bit(i, &cfg->rxsc_idx_busy))
+ continue;
+ cnt += hweight_long(cfg->aq_rxsc[i].rx_sa_idx_busy);
+ }
+
+ mutex_unlock(&nic->macsec_mutex);
+ return cnt;
+}
+
+int aq_macsec_tx_sc_cnt(struct aq_nic_s *nic)
+{
+ int cnt;
+
+ if (!nic->macsec_cfg)
+ return 0;
+
+ mutex_lock(&nic->macsec_mutex);
+ cnt = hweight_long(nic->macsec_cfg->txsc_idx_busy);
+ mutex_unlock(&nic->macsec_mutex);
+
+ return cnt;
+}
+
+int aq_macsec_tx_sa_cnt(struct aq_nic_s *nic)
+{
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ int i, cnt = 0;
+
+ if (!cfg)
+ return 0;
+
+ mutex_lock(&nic->macsec_mutex);
+
+ for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
+ if (!test_bit(i, &cfg->txsc_idx_busy))
+ continue;
+ cnt += hweight_long(cfg->aq_txsc[i].tx_sa_idx_busy);
+ }
+
+ mutex_unlock(&nic->macsec_mutex);
+ return cnt;
+}
+
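+/* Refresh the shadow copies of the common counters plus the per-SC and
+ * per-SA counters of every SC/SA currently in use.
+ */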
+static int aq_macsec_update_stats(struct aq_nic_s *nic)
+{
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ struct aq_hw_s *hw = nic->aq_hw;
+ struct aq_macsec_txsc *aq_txsc;
+ struct aq_macsec_rxsc *aq_rxsc;
+ int i, sa_idx, assoc_num;
+ int ret = 0;
+
+ aq_get_macsec_common_stats(hw, &cfg->stats);
+
+ for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
+ if (!(cfg->txsc_idx_busy & BIT(i)))
+ continue;
+ aq_txsc = &cfg->aq_txsc[i];
+
+ ret = aq_get_txsc_stats(hw, aq_txsc->hw_sc_idx,
+ &aq_txsc->stats);
+ if (ret)
+ return ret;
+
+ for (assoc_num = 0; assoc_num < MACSEC_NUM_AN; assoc_num++) {
+ if (!test_bit(assoc_num, &aq_txsc->tx_sa_idx_busy))
+ continue;
+ sa_idx = aq_txsc->hw_sc_idx | assoc_num;
+ ret = aq_get_txsa_stats(hw, sa_idx,
+ &aq_txsc->tx_sa_stats[assoc_num]);
+ if (ret)
+ return ret;
+ }
+ }
+
+ for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
+ if (!(test_bit(i, &cfg->rxsc_idx_busy)))
+ continue;
+ aq_rxsc = &cfg->aq_rxsc[i];
+
+ for (assoc_num = 0; assoc_num < MACSEC_NUM_AN; assoc_num++) {
+ if (!test_bit(assoc_num, &aq_rxsc->rx_sa_idx_busy))
+ continue;
+ sa_idx = aq_rxsc->hw_sc_idx | assoc_num;
+
+ ret = aq_get_rxsa_stats(hw, sa_idx,
+ &aq_rxsc->rx_sa_stats[assoc_num]);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
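+/* Flatten the cached MACsec counters into the ethtool statistics array.
+ * Returns the data pointer advanced past the last element written, so the
+ * caller can append further statistics.
+ */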
+u64 *aq_macsec_get_stats(struct aq_nic_s *nic, u64 *data)
+{
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ struct aq_macsec_common_stats *common_stats;
+ struct aq_macsec_tx_sc_stats *txsc_stats;
+ struct aq_macsec_tx_sa_stats *txsa_stats;
+ struct aq_macsec_rx_sa_stats *rxsa_stats;
+ struct aq_macsec_txsc *aq_txsc;
+ struct aq_macsec_rxsc *aq_rxsc;
+ unsigned int assoc_num;
+ unsigned int sc_num;
+ unsigned int i = 0U;
+
+ if (!cfg)
+ return data;
+
+ mutex_lock(&nic->macsec_mutex);
+
+ aq_macsec_update_stats(nic);
+
+ common_stats = &cfg->stats;
+ data[i] = common_stats->in.ctl_pkts;
+ data[++i] = common_stats->in.tagged_miss_pkts;
+ data[++i] = common_stats->in.untagged_miss_pkts;
+ data[++i] = common_stats->in.notag_pkts;
+ data[++i] = common_stats->in.untagged_pkts;
+ data[++i] = common_stats->in.bad_tag_pkts;
+ data[++i] = common_stats->in.no_sci_pkts;
+ data[++i] = common_stats->in.unknown_sci_pkts;
+ data[++i] = common_stats->in.ctrl_prt_pass_pkts;
+ data[++i] = common_stats->in.unctrl_prt_pass_pkts;
+ data[++i] = common_stats->in.ctrl_prt_fail_pkts;
+ data[++i] = common_stats->in.unctrl_prt_fail_pkts;
+ data[++i] = common_stats->in.too_long_pkts;
+ data[++i] = common_stats->in.igpoc_ctl_pkts;
+ data[++i] = common_stats->in.ecc_error_pkts;
+ data[++i] = common_stats->in.unctrl_hit_drop_redir;
+ data[++i] = common_stats->out.ctl_pkts;
+ data[++i] = common_stats->out.unknown_sa_pkts;
+ data[++i] = common_stats->out.untagged_pkts;
+ data[++i] = common_stats->out.too_long;
+ data[++i] = common_stats->out.ecc_error_pkts;
+ data[++i] = common_stats->out.unctrl_hit_drop_redir;
+
+ for (sc_num = 0; sc_num < AQ_MACSEC_MAX_SC; sc_num++) {
+ if (!(test_bit(sc_num, &cfg->txsc_idx_busy)))
+ continue;
+
+ aq_txsc = &cfg->aq_txsc[sc_num];
+ txsc_stats = &aq_txsc->stats;
+
+ data[++i] = txsc_stats->sc_protected_pkts;
+ data[++i] = txsc_stats->sc_encrypted_pkts;
+ data[++i] = txsc_stats->sc_protected_octets;
+ data[++i] = txsc_stats->sc_encrypted_octets;
+
+ for (assoc_num = 0; assoc_num < MACSEC_NUM_AN; assoc_num++) {
+ if (!test_bit(assoc_num, &aq_txsc->tx_sa_idx_busy))
+ continue;
+
+ txsa_stats = &aq_txsc->tx_sa_stats[assoc_num];
+
+ data[++i] = txsa_stats->sa_hit_drop_redirect;
+ data[++i] = txsa_stats->sa_protected2_pkts;
+ data[++i] = txsa_stats->sa_protected_pkts;
+ data[++i] = txsa_stats->sa_encrypted_pkts;
+ }
+ }
+
+ for (sc_num = 0; sc_num < AQ_MACSEC_MAX_SC; sc_num++) {
+ if (!(test_bit(sc_num, &cfg->rxsc_idx_busy)))
+ continue;
+
+ aq_rxsc = &cfg->aq_rxsc[sc_num];
+
+ for (assoc_num = 0; assoc_num < MACSEC_NUM_AN; assoc_num++) {
+ if (!test_bit(assoc_num, &aq_rxsc->rx_sa_idx_busy))
+ continue;
+
+ rxsa_stats = &aq_rxsc->rx_sa_stats[assoc_num];
+
+ data[++i] = rxsa_stats->untagged_hit_pkts;
+ data[++i] = rxsa_stats->ctrl_hit_drop_redir_pkts;
+ data[++i] = rxsa_stats->not_using_sa;
+ data[++i] = rxsa_stats->unused_sa;
+ data[++i] = rxsa_stats->not_valid_pkts;
+ data[++i] = rxsa_stats->invalid_pkts;
+ data[++i] = rxsa_stats->ok_pkts;
+ data[++i] = rxsa_stats->late_pkts;
+ data[++i] = rxsa_stats->delayed_pkts;
+ data[++i] = rxsa_stats->unchecked_pkts;
+ data[++i] = rxsa_stats->validated_octets;
+ data[++i] = rxsa_stats->decrypted_octets;
+ }
+ }
+
+ i++;
+
+ data += i;
+
+ mutex_unlock(&nic->macsec_mutex);
+
+ return data;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.h b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.h
new file mode 100644
index 000000000..a47e27104
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef AQ_MACSEC_H
+#define AQ_MACSEC_H
+
+#include <linux/netdevice.h>
+#if IS_ENABLED(CONFIG_MACSEC)
+
+#include "net/macsec.h"
+
+struct aq_nic_s;
+
+#define AQ_MACSEC_MAX_SC 32
+#define AQ_MACSEC_MAX_SA 32
+
+enum aq_macsec_sc_sa {
+ aq_macsec_sa_sc_4sa_8sc,
+ aq_macsec_sa_sc_not_used,
+ aq_macsec_sa_sc_2sa_16sc,
+ aq_macsec_sa_sc_1sa_32sc,
+};
+
+struct aq_macsec_common_stats {
+ /* Ingress Common Counters */
+ struct {
+ u64 ctl_pkts;
+ u64 tagged_miss_pkts;
+ u64 untagged_miss_pkts;
+ u64 notag_pkts;
+ u64 untagged_pkts;
+ u64 bad_tag_pkts;
+ u64 no_sci_pkts;
+ u64 unknown_sci_pkts;
+ u64 ctrl_prt_pass_pkts;
+ u64 unctrl_prt_pass_pkts;
+ u64 ctrl_prt_fail_pkts;
+ u64 unctrl_prt_fail_pkts;
+ u64 too_long_pkts;
+ u64 igpoc_ctl_pkts;
+ u64 ecc_error_pkts;
+ u64 unctrl_hit_drop_redir;
+ } in;
+
+ /* Egress Common Counters */
+ struct {
+ u64 ctl_pkts;
+ u64 unknown_sa_pkts;
+ u64 untagged_pkts;
+ u64 too_long;
+ u64 ecc_error_pkts;
+ u64 unctrl_hit_drop_redir;
+ } out;
+};
+
+/* Ingress SA Counters */
+struct aq_macsec_rx_sa_stats {
+ u64 untagged_hit_pkts;
+ u64 ctrl_hit_drop_redir_pkts;
+ u64 not_using_sa;
+ u64 unused_sa;
+ u64 not_valid_pkts;
+ u64 invalid_pkts;
+ u64 ok_pkts;
+ u64 late_pkts;
+ u64 delayed_pkts;
+ u64 unchecked_pkts;
+ u64 validated_octets;
+ u64 decrypted_octets;
+};
+
+/* Egress SA Counters */
+struct aq_macsec_tx_sa_stats {
+ u64 sa_hit_drop_redirect;
+ u64 sa_protected2_pkts;
+ u64 sa_protected_pkts;
+ u64 sa_encrypted_pkts;
+};
+
+/* Egress SC Counters */
+struct aq_macsec_tx_sc_stats {
+ u64 sc_protected_pkts;
+ u64 sc_encrypted_pkts;
+ u64 sc_protected_octets;
+ u64 sc_encrypted_octets;
+};
+
+struct aq_macsec_txsc {
+ u32 hw_sc_idx;
+ unsigned long tx_sa_idx_busy;
+ const struct macsec_secy *sw_secy;
+ u8 tx_sa_key[MACSEC_NUM_AN][MACSEC_MAX_KEY_LEN];
+ struct aq_macsec_tx_sc_stats stats;
+ struct aq_macsec_tx_sa_stats tx_sa_stats[MACSEC_NUM_AN];
+};
+
+struct aq_macsec_rxsc {
+ u32 hw_sc_idx;
+ unsigned long rx_sa_idx_busy;
+ const struct macsec_secy *sw_secy;
+ const struct macsec_rx_sc *sw_rxsc;
+ u8 rx_sa_key[MACSEC_NUM_AN][MACSEC_MAX_KEY_LEN];
+ struct aq_macsec_rx_sa_stats rx_sa_stats[MACSEC_NUM_AN];
+};
+
+struct aq_macsec_cfg {
+ enum aq_macsec_sc_sa sc_sa;
+ /* Egress channel configuration */
+ unsigned long txsc_idx_busy;
+ struct aq_macsec_txsc aq_txsc[AQ_MACSEC_MAX_SC];
+ /* Ingress channel configuration */
+ unsigned long rxsc_idx_busy;
+ struct aq_macsec_rxsc aq_rxsc[AQ_MACSEC_MAX_SC];
+ /* Statistics / counters */
+ struct aq_macsec_common_stats stats;
+};
+
+extern const struct macsec_ops aq_macsec_ops;
+
+int aq_macsec_init(struct aq_nic_s *nic);
+void aq_macsec_free(struct aq_nic_s *nic);
+int aq_macsec_enable(struct aq_nic_s *nic);
+void aq_macsec_work(struct aq_nic_s *nic);
+u64 *aq_macsec_get_stats(struct aq_nic_s *nic, u64 *data);
+int aq_macsec_rx_sa_cnt(struct aq_nic_s *nic);
+int aq_macsec_tx_sc_cnt(struct aq_nic_s *nic);
+int aq_macsec_tx_sa_cnt(struct aq_nic_s *nic);
+
+#endif /* IS_ENABLED(CONFIG_MACSEC) */
+
+#endif /* AQ_MACSEC_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
new file mode 100644
index 000000000..77609dc0a
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -0,0 +1,541 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
+ */
+
+/* File aq_main.c: Main file for aQuantia Linux driver. */
+
+#include "aq_main.h"
+#include "aq_nic.h"
+#include "aq_pci_func.h"
+#include "aq_ethtool.h"
+#include "aq_ptp.h"
+#include "aq_filters.h"
+#include "aq_hw_utils.h"
+#include "aq_vec.h"
+
+#include <linux/netdevice.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <net/pkt_cls.h>
+#include <linux/filter.h>
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR(AQ_CFG_DRV_AUTHOR);
+MODULE_DESCRIPTION(AQ_CFG_DRV_DESC);
+
+DEFINE_STATIC_KEY_FALSE(aq_xdp_locking_key);
+EXPORT_SYMBOL(aq_xdp_locking_key);
+
+static const char aq_ndev_driver_name[] = AQ_CFG_DRV_NAME;
+
+static const struct net_device_ops aq_ndev_ops;
+
+static struct workqueue_struct *aq_ndev_wq;
+
+void aq_ndev_schedule_work(struct work_struct *work)
+{
+ queue_work(aq_ndev_wq, work);
+}
+
+struct net_device *aq_ndev_alloc(void)
+{
+ struct net_device *ndev = NULL;
+ struct aq_nic_s *aq_nic = NULL;
+
+ ndev = alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_HW_QUEUES_MAX);
+ if (!ndev)
+ return NULL;
+
+ aq_nic = netdev_priv(ndev);
+ aq_nic->ndev = ndev;
+ ndev->netdev_ops = &aq_ndev_ops;
+ ndev->ethtool_ops = &aq_ethtool_ops;
+
+ return ndev;
+}
+
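+/* ndo_open: initialize the hardware, re-apply the stored RX flow and VLAN
+ * filter rules, then start the datapath. Any failure unwinds through
+ * aq_nic_deinit().
+ */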
+int aq_ndev_open(struct net_device *ndev)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ int err = 0;
+
+ err = aq_nic_init(aq_nic);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_reapply_rxnfc_all_rules(aq_nic);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_filters_vlans_update(aq_nic);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_nic_start(aq_nic);
+ if (err < 0) {
+ aq_nic_stop(aq_nic);
+ goto err_exit;
+ }
+
+err_exit:
+ if (err < 0)
+ aq_nic_deinit(aq_nic, true);
+
+ return err;
+}
+
+int aq_ndev_close(struct net_device *ndev)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ int err = 0;
+
+ err = aq_nic_stop(aq_nic);
+ aq_nic_deinit(aq_nic, true);
+
+ return err;
+}
+
+static netdev_tx_t aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
+ if (unlikely(aq_utils_obj_test(&aq_nic->flags, AQ_NIC_PTP_DPATH_UP))) {
+		/* Hardware inserts the timestamp for PTPv2 802.1AS
+		 * and PTPv2 IPv4 UDP packets.
+		 * We also have to push general messages on port 320 to the
+		 * PTP queue explicitly; this is a limitation of the current
+		 * firmware and hardware PTP design of the chip. Otherwise
+		 * the PTP stream fails to synchronize.
+		 */
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ||
+ unlikely((ip_hdr(skb)->version == 4) &&
+ (ip_hdr(skb)->protocol == IPPROTO_UDP) &&
+ ((udp_hdr(skb)->dest == htons(319)) ||
+ (udp_hdr(skb)->dest == htons(320)))) ||
+ unlikely(eth_hdr(skb)->h_proto == htons(ETH_P_1588)))
+ return aq_ptp_xmit(aq_nic, skb);
+ }
+#endif
+
+ skb_tx_timestamp(skb);
+ return aq_nic_xmit(aq_nic, skb);
+}
+
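+/* ndo_change_mtu: reject the change if a single-buffer XDP program is
+ * attached and the resulting frame would exceed AQ_CFG_RX_FRAME_MAX;
+ * otherwise push the new MTU down to the hardware.
+ */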
+static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct bpf_prog *prog;
+ int err;
+
+ prog = READ_ONCE(aq_nic->xdp_prog);
+ if (prog && !prog->aux->xdp_has_frags &&
+ new_frame_size > AQ_CFG_RX_FRAME_MAX) {
+ netdev_err(ndev, "Illegal MTU %d for XDP prog without frags\n",
+			   new_mtu);
+ return -EOPNOTSUPP;
+ }
+
+ err = aq_nic_set_mtu(aq_nic, new_mtu + ETH_HLEN);
+
+ if (err < 0)
+ goto err_exit;
+ ndev->mtu = new_mtu;
+
+err_exit:
+ return err;
+}
+
+static int aq_ndev_set_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ bool is_vlan_tx_insert = !!(features & NETIF_F_HW_VLAN_CTAG_TX);
+ bool is_vlan_rx_strip = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ bool need_ndev_restart = false;
+ struct aq_nic_cfg_s *aq_cfg;
+ bool is_lro = false;
+ int err = 0;
+
+ aq_cfg = aq_nic_get_cfg(aq_nic);
+
+ if (!(features & NETIF_F_NTUPLE)) {
+ if (aq_nic->ndev->features & NETIF_F_NTUPLE) {
+ err = aq_clear_rxnfc_all_rules(aq_nic);
+ if (unlikely(err))
+ goto err_exit;
+ }
+ }
+ if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
+ if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ err = aq_filters_vlan_offload_off(aq_nic);
+ if (unlikely(err))
+ goto err_exit;
+ }
+ }
+
+ aq_cfg->features = features;
+
+ if (aq_cfg->aq_hw_caps->hw_features & NETIF_F_LRO) {
+ is_lro = features & NETIF_F_LRO;
+
+ if (aq_cfg->is_lro != is_lro) {
+ aq_cfg->is_lro = is_lro;
+ need_ndev_restart = true;
+ }
+ }
+
+ if ((aq_nic->ndev->features ^ features) & NETIF_F_RXCSUM) {
+ err = aq_nic->aq_hw_ops->hw_set_offload(aq_nic->aq_hw,
+ aq_cfg);
+
+ if (unlikely(err))
+ goto err_exit;
+ }
+
+ if (aq_cfg->is_vlan_rx_strip != is_vlan_rx_strip) {
+ aq_cfg->is_vlan_rx_strip = is_vlan_rx_strip;
+ need_ndev_restart = true;
+ }
+ if (aq_cfg->is_vlan_tx_insert != is_vlan_tx_insert) {
+ aq_cfg->is_vlan_tx_insert = is_vlan_tx_insert;
+ need_ndev_restart = true;
+ }
+
+ if (need_ndev_restart && netif_running(ndev)) {
+ aq_ndev_close(ndev);
+ aq_ndev_open(ndev);
+ }
+
+err_exit:
+ return err;
+}
+
+static netdev_features_t aq_ndev_fix_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct bpf_prog *prog;
+
+ if (!(features & NETIF_F_RXCSUM))
+ features &= ~NETIF_F_LRO;
+
+ prog = READ_ONCE(aq_nic->xdp_prog);
+ if (prog && !prog->aux->xdp_has_frags &&
+ aq_nic->xdp_prog && features & NETIF_F_LRO) {
+ netdev_err(ndev, "LRO is not supported with single buffer XDP, disabling\n");
+ features &= ~NETIF_F_LRO;
+ }
+
+ return features;
+}
+
+static int aq_ndev_set_mac_address(struct net_device *ndev, void *addr)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ int err = 0;
+
+ err = eth_mac_addr(ndev, addr);
+ if (err < 0)
+ goto err_exit;
+ err = aq_nic_set_mac(aq_nic, ndev);
+ if (err < 0)
+ goto err_exit;
+
+err_exit:
+ return err;
+}
+
+static void aq_ndev_set_multicast_settings(struct net_device *ndev)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ (void)aq_nic_set_multicast_list(aq_nic, ndev);
+}
+
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
+static int aq_ndev_config_hwtstamp(struct aq_nic_s *aq_nic,
+ struct hwtstamp_config *config)
+{
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ return aq_ptp_hwtstamp_config_set(aq_nic->aq_ptp, config);
+}
+#endif
+
+static int aq_ndev_hwtstamp_set(struct aq_nic_s *aq_nic, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
+ int ret_val;
+#endif
+
+ if (!aq_nic->aq_ptp)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
+ ret_val = aq_ndev_config_hwtstamp(aq_nic, &config);
+ if (ret_val)
+ return ret_val;
+#endif
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
+static int aq_ndev_hwtstamp_get(struct aq_nic_s *aq_nic, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+
+ if (!aq_nic->aq_ptp)
+ return -EOPNOTSUPP;
+
+ aq_ptp_hwtstamp_config_get(aq_nic->aq_ptp, &config);
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+#endif
+
+static int aq_ndev_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(netdev);
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return aq_ndev_hwtstamp_set(aq_nic, ifr);
+
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
+ case SIOCGHWTSTAMP:
+ return aq_ndev_hwtstamp_get(aq_nic, ifr);
+#endif
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int aq_ndo_vlan_rx_add_vid(struct net_device *ndev, __be16 proto,
+ u16 vid)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ if (!aq_nic->aq_hw_ops->hw_filter_vlan_set)
+ return -EOPNOTSUPP;
+
+ set_bit(vid, aq_nic->active_vlans);
+
+ return aq_filters_vlans_update(aq_nic);
+}
+
+static int aq_ndo_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto,
+ u16 vid)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ if (!aq_nic->aq_hw_ops->hw_filter_vlan_set)
+ return -EOPNOTSUPP;
+
+ clear_bit(vid, aq_nic->active_vlans);
+
+ if (-ENOENT == aq_del_fvlan_by_vlan(aq_nic, vid))
+ return aq_filters_vlans_update(aq_nic);
+
+ return 0;
+}
+
+static int aq_validate_mqprio_opt(struct aq_nic_s *self,
+ struct tc_mqprio_qopt_offload *mqprio,
+ const unsigned int num_tc)
+{
+ const bool has_min_rate = !!(mqprio->flags & TC_MQPRIO_F_MIN_RATE);
+ struct aq_nic_cfg_s *aq_nic_cfg = aq_nic_get_cfg(self);
+ const unsigned int tcs_max = min_t(u8, aq_nic_cfg->aq_hw_caps->tcs_max,
+ AQ_CFG_TCS_MAX);
+
+ if (num_tc > tcs_max) {
+ netdev_err(self->ndev, "Too many TCs requested\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (num_tc != 0 && !is_power_of_2(num_tc)) {
+ netdev_err(self->ndev, "TC count should be power of 2\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (has_min_rate && !ATL_HW_IS_CHIP_FEATURE(self->aq_hw, ANTIGUA)) {
+ netdev_err(self->ndev, "Min tx rate is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int aq_ndo_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct tc_mqprio_qopt_offload *mqprio = type_data;
+ struct aq_nic_s *aq_nic = netdev_priv(dev);
+ bool has_min_rate;
+ bool has_max_rate;
+ int err;
+ int i;
+
+ if (type != TC_SETUP_QDISC_MQPRIO)
+ return -EOPNOTSUPP;
+
+ has_min_rate = !!(mqprio->flags & TC_MQPRIO_F_MIN_RATE);
+ has_max_rate = !!(mqprio->flags & TC_MQPRIO_F_MAX_RATE);
+
+ err = aq_validate_mqprio_opt(aq_nic, mqprio, mqprio->qopt.num_tc);
+ if (err)
+ return err;
+
+ for (i = 0; i < mqprio->qopt.num_tc; i++) {
+ if (has_max_rate) {
+ u64 max_rate = mqprio->max_rate[i];
+
+ do_div(max_rate, AQ_MBPS_DIVISOR);
+ aq_nic_setup_tc_max_rate(aq_nic, i, (u32)max_rate);
+ }
+
+ if (has_min_rate) {
+ u64 min_rate = mqprio->min_rate[i];
+
+ do_div(min_rate, AQ_MBPS_DIVISOR);
+ aq_nic_setup_tc_min_rate(aq_nic, i, (u32)min_rate);
+ }
+ }
+
+ return aq_nic_setup_tc_mqprio(aq_nic, mqprio->qopt.num_tc,
+ mqprio->qopt.prio_tc_map);
+}
+
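+/* Attach or detach an XDP program. The datapath is restarted around the
+ * swap whenever the attach state actually changes, and the
+ * aq_xdp_locking_key static branch stays enabled while a program is
+ * loaded so that the XDP TX paths take the queue lock.
+ */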
+static int aq_xdp_setup(struct net_device *ndev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ bool need_update, running = netif_running(ndev);
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct bpf_prog *old_prog;
+
+ if (prog && !prog->aux->xdp_has_frags) {
+ if (ndev->mtu > AQ_CFG_RX_FRAME_MAX) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "prog does not support XDP frags");
+ return -EOPNOTSUPP;
+ }
+
+ if (prog && ndev->features & NETIF_F_LRO) {
+ netdev_err(ndev,
+ "LRO is not supported with single buffer XDP, disabling\n");
+ ndev->features &= ~NETIF_F_LRO;
+ }
+ }
+
+ need_update = !!aq_nic->xdp_prog != !!prog;
+ if (running && need_update)
+ aq_ndev_close(ndev);
+
+ old_prog = xchg(&aq_nic->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (!old_prog && prog)
+ static_branch_inc(&aq_xdp_locking_key);
+ else if (old_prog && !prog)
+ static_branch_dec(&aq_xdp_locking_key);
+
+ if (running && need_update)
+ return aq_ndev_open(ndev);
+
+ return 0;
+}
+
+static int aq_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return aq_xdp_setup(dev, xdp->prog, xdp->extack);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct net_device_ops aq_ndev_ops = {
+ .ndo_open = aq_ndev_open,
+ .ndo_stop = aq_ndev_close,
+ .ndo_start_xmit = aq_ndev_start_xmit,
+ .ndo_set_rx_mode = aq_ndev_set_multicast_settings,
+ .ndo_change_mtu = aq_ndev_change_mtu,
+ .ndo_set_mac_address = aq_ndev_set_mac_address,
+ .ndo_set_features = aq_ndev_set_features,
+ .ndo_fix_features = aq_ndev_fix_features,
+ .ndo_eth_ioctl = aq_ndev_ioctl,
+ .ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid,
+ .ndo_setup_tc = aq_ndo_setup_tc,
+ .ndo_bpf = aq_xdp,
+ .ndo_xdp_xmit = aq_xdp_xmit,
+};
+
+static int __init aq_ndev_init_module(void)
+{
+ int ret;
+
+ aq_ndev_wq = create_singlethread_workqueue(aq_ndev_driver_name);
+ if (!aq_ndev_wq) {
+ pr_err("Failed to create workqueue\n");
+ return -ENOMEM;
+ }
+
+ ret = aq_pci_func_register_driver();
+ if (ret) {
+ destroy_workqueue(aq_ndev_wq);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit aq_ndev_exit_module(void)
+{
+ aq_pci_func_unregister_driver();
+
+ if (aq_ndev_wq) {
+ destroy_workqueue(aq_ndev_wq);
+ aq_ndev_wq = NULL;
+ }
+}
+
+module_init(aq_ndev_init_module);
+module_exit(aq_ndev_exit_module);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.h b/drivers/net/ethernet/aquantia/atlantic/aq_main.h
new file mode 100644
index 000000000..a78c1a168
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ */
+
+/* File aq_main.h: Main file for aQuantia Linux driver. */
+
+#ifndef AQ_MAIN_H
+#define AQ_MAIN_H
+
+#include "aq_common.h"
+#include "aq_nic.h"
+
+DECLARE_STATIC_KEY_FALSE(aq_xdp_locking_key);
+
+void aq_ndev_schedule_work(struct work_struct *work);
+struct net_device *aq_ndev_alloc(void);
+int aq_ndev_open(struct net_device *ndev);
+int aq_ndev_close(struct net_device *ndev);
+
+#endif /* AQ_MAIN_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
new file mode 100644
index 000000000..06508eebb
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -0,0 +1,1686 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
+ */
+
+/* File aq_nic.c: Definition of common code for NIC. */
+
+#include "aq_nic.h"
+#include "aq_ring.h"
+#include "aq_vec.h"
+#include "aq_hw.h"
+#include "aq_pci_func.h"
+#include "aq_macsec.h"
+#include "aq_main.h"
+#include "aq_phy.h"
+#include "aq_ptp.h"
+#include "aq_filters.h"
+
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/timer.h>
+#include <linux/cpu.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <net/ip.h>
+#include <net/pkt_cls.h>
+
+static unsigned int aq_itr = AQ_CFG_INTERRUPT_MODERATION_AUTO;
+module_param_named(aq_itr, aq_itr, uint, 0644);
+MODULE_PARM_DESC(aq_itr, "Interrupt throttling mode");
+
+static unsigned int aq_itr_tx;
+module_param_named(aq_itr_tx, aq_itr_tx, uint, 0644);
+MODULE_PARM_DESC(aq_itr_tx, "TX interrupt throttle rate");
+
+static unsigned int aq_itr_rx;
+module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644);
+MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate");
+
+static void aq_nic_update_ndev_stats(struct aq_nic_s *self);
+
+static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
+{
+ static u8 rss_key[AQ_CFG_RSS_HASHKEY_SIZE] = {
+ 0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
+ 0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
+ 0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
+ 0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
+ 0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
+ };
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+ struct aq_rss_parameters *rss_params;
+ int i = 0;
+
+ rss_params = &cfg->aq_rss;
+
+ rss_params->hash_secret_key_size = sizeof(rss_key);
+ memcpy(rss_params->hash_secret_key, rss_key, sizeof(rss_key));
+ rss_params->indirection_table_size = AQ_CFG_RSS_INDIRECTION_TABLE_MAX;
+
+ for (i = rss_params->indirection_table_size; i--;)
+ rss_params->indirection_table[i] = i & (num_rss_queues - 1);
+}
+
+/* Recalculate the number of vectors */
+static void aq_nic_cfg_update_num_vecs(struct aq_nic_s *self)
+{
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+
+ cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
+ cfg->vecs = min(cfg->vecs, num_online_cpus());
+ if (self->irqvecs > AQ_HW_SERVICE_IRQS)
+ cfg->vecs = min(cfg->vecs, self->irqvecs - AQ_HW_SERVICE_IRQS);
+ /* cfg->vecs should be power of 2 for RSS */
+ cfg->vecs = rounddown_pow_of_two(cfg->vecs);
+
+ if (ATL_HW_IS_CHIP_FEATURE(self->aq_hw, ANTIGUA)) {
+ if (cfg->tcs > 2)
+ cfg->vecs = min(cfg->vecs, 4U);
+ }
+
+ if (cfg->vecs <= 4)
+ cfg->tc_mode = AQ_TC_MODE_8TCS;
+ else
+ cfg->tc_mode = AQ_TC_MODE_4TCS;
+
+	/* RSS rings */
+ cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF);
+ aq_nic_rss_init(self, cfg->num_rss_queues);
+}
+
+/* Checks hw_caps and 'corrects' aq_nic_cfg in runtime */
+void aq_nic_cfg_start(struct aq_nic_s *self)
+{
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+ int i;
+
+ cfg->tcs = AQ_CFG_TCS_DEF;
+
+ cfg->is_polling = AQ_CFG_IS_POLLING_DEF;
+
+ cfg->itr = aq_itr;
+ cfg->tx_itr = aq_itr_tx;
+ cfg->rx_itr = aq_itr_rx;
+
+ cfg->rxpageorder = AQ_CFG_RX_PAGEORDER;
+ cfg->is_rss = AQ_CFG_IS_RSS_DEF;
+ cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF;
+ cfg->fc.req = AQ_CFG_FC_MODE;
+ cfg->wol = AQ_CFG_WOL_MODES;
+
+ cfg->mtu = AQ_CFG_MTU_DEF;
+ cfg->link_speed_msk = AQ_CFG_SPEED_MSK;
+ cfg->is_autoneg = AQ_CFG_IS_AUTONEG_DEF;
+
+ cfg->is_lro = AQ_CFG_IS_LRO_DEF;
+ cfg->is_ptp = true;
+
+	/* Descriptors */
+ cfg->rxds = min(cfg->aq_hw_caps->rxds_max, AQ_CFG_RXDS_DEF);
+ cfg->txds = min(cfg->aq_hw_caps->txds_max, AQ_CFG_TXDS_DEF);
+
+ aq_nic_cfg_update_num_vecs(self);
+
+ cfg->irq_type = aq_pci_func_get_irq_type(self);
+
+ if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
+ (cfg->aq_hw_caps->vecs == 1U) ||
+ (cfg->vecs == 1U)) {
+ cfg->is_rss = 0U;
+ cfg->vecs = 1U;
+ }
+
+	/* Check if we have enough vectors allocated for the link status
+	 * IRQ. If not, the link state will be picked up by the slower
+	 * service task instead.
+	 */
+ if (AQ_HW_SERVICE_IRQS > 0 && cfg->vecs + 1 <= self->irqvecs)
+ cfg->link_irq_vec = cfg->vecs;
+ else
+ cfg->link_irq_vec = 0;
+
+ cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk;
+ cfg->features = cfg->aq_hw_caps->hw_features;
+ cfg->is_vlan_rx_strip = !!(cfg->features & NETIF_F_HW_VLAN_CTAG_RX);
+ cfg->is_vlan_tx_insert = !!(cfg->features & NETIF_F_HW_VLAN_CTAG_TX);
+ cfg->is_vlan_force_promisc = true;
+
+ for (i = 0; i < sizeof(cfg->prio_tc_map); i++)
+ cfg->prio_tc_map[i] = cfg->tcs * i / 8;
+}
+
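+/* Query firmware for the current link state, refresh the negotiated flow
+ * control settings, and toggle the carrier and TX queues to match.
+ */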
+static int aq_nic_update_link_status(struct aq_nic_s *self)
+{
+ int err = self->aq_fw_ops->update_link_status(self->aq_hw);
+ u32 fc = 0;
+
+ if (err)
+ return err;
+
+ if (self->aq_fw_ops->get_flow_control)
+ self->aq_fw_ops->get_flow_control(self->aq_hw, &fc);
+ self->aq_nic_cfg.fc.cur = fc;
+
+ if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) {
+ netdev_info(self->ndev, "%s: link change old %d new %d\n",
+ AQ_CFG_DRV_NAME, self->link_status.mbps,
+ self->aq_hw->aq_link_status.mbps);
+ aq_nic_update_interrupt_moderation_settings(self);
+
+ if (self->aq_ptp) {
+ aq_ptp_clock_init(self);
+ aq_ptp_tm_offset_set(self,
+ self->aq_hw->aq_link_status.mbps);
+ aq_ptp_link_change(self);
+ }
+
+ /* Driver has to update flow control settings on RX block
+ * on any link event.
+ * We should query FW whether it negotiated FC.
+ */
+ if (self->aq_hw_ops->hw_set_fc)
+ self->aq_hw_ops->hw_set_fc(self->aq_hw, fc, 0);
+ }
+
+ self->link_status = self->aq_hw->aq_link_status;
+ if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) {
+ aq_utils_obj_set(&self->flags,
+ AQ_NIC_FLAG_STARTED);
+ aq_utils_obj_clear(&self->flags,
+ AQ_NIC_LINK_DOWN);
+ netif_carrier_on(self->ndev);
+#if IS_ENABLED(CONFIG_MACSEC)
+ aq_macsec_enable(self);
+#endif
+ if (self->aq_hw_ops->hw_tc_rate_limit_set)
+ self->aq_hw_ops->hw_tc_rate_limit_set(self->aq_hw);
+
+ netif_tx_wake_all_queues(self->ndev);
+ }
+ if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) {
+ netif_carrier_off(self->ndev);
+ netif_tx_disable(self->ndev);
+ aq_utils_obj_set(&self->flags, AQ_NIC_LINK_DOWN);
+ }
+
+ return 0;
+}
+
+static irqreturn_t aq_linkstate_threaded_isr(int irq, void *private)
+{
+ struct aq_nic_s *self = private;
+
+ if (!self)
+ return IRQ_NONE;
+
+ aq_nic_update_link_status(self);
+
+ self->aq_hw_ops->hw_irq_enable(self->aq_hw,
+ BIT(self->aq_nic_cfg.link_irq_vec));
+
+ return IRQ_HANDLED;
+}
+
+static void aq_nic_service_task(struct work_struct *work)
+{
+ struct aq_nic_s *self = container_of(work, struct aq_nic_s,
+ service_task);
+ int err;
+
+ aq_ptp_service_task(self);
+
+ if (aq_utils_obj_test(&self->flags, AQ_NIC_FLAGS_IS_NOT_READY))
+ return;
+
+ err = aq_nic_update_link_status(self);
+ if (err)
+ return;
+
+#if IS_ENABLED(CONFIG_MACSEC)
+ aq_macsec_work(self);
+#endif
+
+ mutex_lock(&self->fwreq_mutex);
+ if (self->aq_fw_ops->update_stats)
+ self->aq_fw_ops->update_stats(self->aq_hw);
+ mutex_unlock(&self->fwreq_mutex);
+
+ aq_nic_update_ndev_stats(self);
+}
+
+static void aq_nic_service_timer_cb(struct timer_list *t)
+{
+ struct aq_nic_s *self = from_timer(self, t, service_timer);
+
+ mod_timer(&self->service_timer,
+ jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);
+
+ aq_ndev_schedule_work(&self->service_task);
+}
+
+static void aq_nic_polling_timer_cb(struct timer_list *t)
+{
+ struct aq_nic_s *self = from_timer(self, t, polling_timer);
+ unsigned int i = 0U;
+
+ for (i = 0U; self->aq_vecs > i; ++i)
+ aq_vec_isr(i, (void *)self->aq_vec[i]);
+
+ mod_timer(&self->polling_timer, jiffies +
+ AQ_CFG_POLLING_TIMER_INTERVAL);
+}
+
+static int aq_nic_hw_prepare(struct aq_nic_s *self)
+{
+ int err = 0;
+
+ err = self->aq_hw_ops->hw_soft_reset(self->aq_hw);
+ if (err)
+ goto exit;
+
+ err = self->aq_hw_ops->hw_prepare(self->aq_hw, &self->aq_fw_ops);
+
+exit:
+ return err;
+}
+
+static bool aq_nic_is_valid_ether_addr(const u8 *addr)
+{
+ /* Some engineering samples of Aquantia NICs are provisioned with a
+ * partially populated MAC, which is still invalid.
+ */
+ return !(addr[0] == 0 && addr[1] == 0 && addr[2] == 0);
+}
+
+int aq_nic_ndev_register(struct aq_nic_s *self)
+{
+ u8 addr[ETH_ALEN];
+ int err = 0;
+
+ if (!self->ndev) {
+ err = -EINVAL;
+ goto err_exit;
+ }
+
+ err = aq_nic_hw_prepare(self);
+ if (err)
+ goto err_exit;
+
+#if IS_ENABLED(CONFIG_MACSEC)
+ aq_macsec_init(self);
+#endif
+
+ if (platform_get_ethdev_address(&self->pdev->dev, self->ndev) != 0) {
+		/* If DT has none or an invalid one, ask device for MAC address */
+ mutex_lock(&self->fwreq_mutex);
+ err = self->aq_fw_ops->get_mac_permanent(self->aq_hw, addr);
+ mutex_unlock(&self->fwreq_mutex);
+
+ if (err)
+ goto err_exit;
+
+ if (is_valid_ether_addr(addr) &&
+ aq_nic_is_valid_ether_addr(addr)) {
+ eth_hw_addr_set(self->ndev, addr);
+ } else {
+ netdev_warn(self->ndev, "MAC is invalid, will use random.");
+ eth_hw_addr_random(self->ndev);
+ }
+ }
+
+#if defined(AQ_CFG_MAC_ADDR_PERMANENT)
+ {
+ static u8 mac_addr_permanent[] = AQ_CFG_MAC_ADDR_PERMANENT;
+
+ eth_hw_addr_set(self->ndev, mac_addr_permanent);
+ }
+#endif
+
+ for (self->aq_vecs = 0; self->aq_vecs < aq_nic_get_cfg(self)->vecs;
+ self->aq_vecs++) {
+ self->aq_vec[self->aq_vecs] =
+ aq_vec_alloc(self, self->aq_vecs, aq_nic_get_cfg(self));
+ if (!self->aq_vec[self->aq_vecs]) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+ }
+
+ netif_carrier_off(self->ndev);
+
+ netif_tx_disable(self->ndev);
+
+ err = register_netdev(self->ndev);
+ if (err)
+ goto err_exit;
+
+err_exit:
+#if IS_ENABLED(CONFIG_MACSEC)
+ if (err)
+ aq_macsec_free(self);
+#endif
+ return err;
+}
+
+void aq_nic_ndev_init(struct aq_nic_s *self)
+{
+ const struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps;
+ struct aq_nic_cfg_s *aq_nic_cfg = &self->aq_nic_cfg;
+
+ self->ndev->hw_features |= aq_hw_caps->hw_features;
+ self->ndev->features = aq_hw_caps->hw_features;
+ self->ndev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_RXHASH | NETIF_F_SG |
+ NETIF_F_LRO | NETIF_F_TSO | NETIF_F_TSO6;
+ self->ndev->gso_partial_features = NETIF_F_GSO_UDP_L4;
+ self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
+ self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+
+ self->msg_enable = NETIF_MSG_DRV | NETIF_MSG_LINK;
+ self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
+ self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN;
+}
+
+void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
+ struct aq_ring_s *ring)
+{
+ self->aq_ring_tx[idx] = ring;
+}
+
+struct net_device *aq_nic_get_ndev(struct aq_nic_s *self)
+{
+ return self->ndev;
+}
+
+int aq_nic_init(struct aq_nic_s *self)
+{
+ struct aq_vec_s *aq_vec = NULL;
+ unsigned int i = 0U;
+ int err = 0;
+
+ self->power_state = AQ_HW_POWER_STATE_D0;
+ mutex_lock(&self->fwreq_mutex);
+ err = self->aq_hw_ops->hw_reset(self->aq_hw);
+ mutex_unlock(&self->fwreq_mutex);
+ if (err < 0)
+ goto err_exit;
+ /* Restore default settings */
+ aq_nic_set_downshift(self, self->aq_nic_cfg.downshift_counter);
+ aq_nic_set_media_detect(self, self->aq_nic_cfg.is_media_detect ?
+ AQ_HW_MEDIA_DETECT_CNT : 0);
+
+ err = self->aq_hw_ops->hw_init(self->aq_hw,
+ aq_nic_get_ndev(self)->dev_addr);
+ if (err < 0)
+ goto err_exit;
+
+ if (ATL_HW_IS_CHIP_FEATURE(self->aq_hw, ATLANTIC) &&
+ self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_TP) {
+ self->aq_hw->phy_id = HW_ATL_PHY_ID_MAX;
+ err = aq_phy_init(self->aq_hw);
+
+ /* Disable the PTP on NICs where it's known to cause datapath
+ * problems.
+ * Ideally this should have been done by PHY provisioning, but
+ * many units have been shipped with enabled PTP block already.
+ */
+ if (self->aq_nic_cfg.aq_hw_caps->quirks & AQ_NIC_QUIRK_BAD_PTP)
+ if (self->aq_hw->phy_id != HW_ATL_PHY_ID_MAX)
+ aq_phy_disable_ptp(self->aq_hw);
+ }
+
+ for (i = 0U; i < self->aq_vecs; i++) {
+ aq_vec = self->aq_vec[i];
+ err = aq_vec_ring_alloc(aq_vec, self, i,
+ aq_nic_get_cfg(self));
+ if (err)
+ goto err_exit;
+
+ aq_vec_init(aq_vec, self->aq_hw_ops, self->aq_hw);
+ }
+
+ if (aq_nic_get_cfg(self)->is_ptp) {
+ err = aq_ptp_init(self, self->irqvecs - 1);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_ptp_ring_alloc(self);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_ptp_ring_init(self);
+ if (err < 0)
+ goto err_exit;
+ }
+
+ netif_carrier_off(self->ndev);
+
+err_exit:
+ return err;
+}
+
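+/* Start the datapath: program the address filters, start every vector and
+ * the PTP rings, then either arm the polling timer or request the
+ * per-vector, PTP and link-state IRQs, and finally publish the real queue
+ * counts and wake the TX queues.
+ */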
+int aq_nic_start(struct aq_nic_s *self)
+{
+ struct aq_vec_s *aq_vec = NULL;
+ struct aq_nic_cfg_s *cfg;
+ unsigned int i = 0U;
+ int err = 0;
+
+ cfg = aq_nic_get_cfg(self);
+
+ err = self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
+ self->mc_list.ar,
+ self->mc_list.count);
+ if (err < 0)
+ goto err_exit;
+
+ err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw,
+ self->packet_filter);
+ if (err < 0)
+ goto err_exit;
+
+ for (i = 0U; self->aq_vecs > i; ++i) {
+ aq_vec = self->aq_vec[i];
+ err = aq_vec_start(aq_vec);
+ if (err < 0)
+ goto err_exit;
+ }
+
+ err = aq_ptp_ring_start(self);
+ if (err < 0)
+ goto err_exit;
+
+ aq_nic_set_loopback(self);
+
+ err = self->aq_hw_ops->hw_start(self->aq_hw);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_nic_update_interrupt_moderation_settings(self);
+ if (err)
+ goto err_exit;
+
+ INIT_WORK(&self->service_task, aq_nic_service_task);
+
+ timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0);
+ aq_nic_service_timer_cb(&self->service_timer);
+
+ if (cfg->is_polling) {
+ timer_setup(&self->polling_timer, aq_nic_polling_timer_cb, 0);
+ mod_timer(&self->polling_timer, jiffies +
+ AQ_CFG_POLLING_TIMER_INTERVAL);
+ } else {
+ for (i = 0U; self->aq_vecs > i; ++i) {
+ aq_vec = self->aq_vec[i];
+ err = aq_pci_func_alloc_irq(self, i, self->ndev->name,
+ aq_vec_isr, aq_vec,
+ aq_vec_get_affinity_mask(aq_vec));
+ if (err < 0)
+ goto err_exit;
+ }
+
+ err = aq_ptp_irq_alloc(self);
+ if (err < 0)
+ goto err_exit;
+
+ if (cfg->link_irq_vec) {
+ int irqvec = pci_irq_vector(self->pdev,
+ cfg->link_irq_vec);
+ err = request_threaded_irq(irqvec, NULL,
+ aq_linkstate_threaded_isr,
+ IRQF_SHARED | IRQF_ONESHOT,
+ self->ndev->name, self);
+ if (err < 0)
+ goto err_exit;
+ self->msix_entry_mask |= (1 << cfg->link_irq_vec);
+ }
+
+ err = self->aq_hw_ops->hw_irq_enable(self->aq_hw,
+ AQ_CFG_IRQ_MASK);
+ if (err < 0)
+ goto err_exit;
+ }
+
+ err = netif_set_real_num_tx_queues(self->ndev,
+ self->aq_vecs * cfg->tcs);
+ if (err < 0)
+ goto err_exit;
+
+ err = netif_set_real_num_rx_queues(self->ndev,
+ self->aq_vecs * cfg->tcs);
+ if (err < 0)
+ goto err_exit;
+
+ for (i = 0; i < cfg->tcs; i++) {
+ u16 offset = self->aq_vecs * i;
+
+ netdev_set_tc_queue(self->ndev, i, self->aq_vecs, offset);
+ }
+ netif_tx_start_all_queues(self->ndev);
+
+err_exit:
+ return err;
+}
+
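+/* Map an XDP frame (head buffer plus any fragments, each split at
+ * AQ_CFG_TX_FRAME_MAX) into TX descriptors. Returns the number of
+ * descriptors used, or 0 if a DMA mapping failed, in which case everything
+ * mapped so far is unwound.
+ */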
+static unsigned int aq_nic_map_xdp(struct aq_nic_s *self,
+ struct xdp_frame *xdpf,
+ struct aq_ring_s *ring)
+{
+ struct device *dev = aq_nic_get_dev(self);
+ struct aq_ring_buff_s *first = NULL;
+ unsigned int dx = ring->sw_tail;
+ struct aq_ring_buff_s *dx_buff;
+ struct skb_shared_info *sinfo;
+ unsigned int frag_count = 0U;
+ unsigned int nr_frags = 0U;
+ unsigned int ret = 0U;
+ u16 total_len;
+
+ dx_buff = &ring->buff_ring[dx];
+ dx_buff->flags = 0U;
+
+ sinfo = xdp_get_shared_info_from_frame(xdpf);
+ total_len = xdpf->len;
+ dx_buff->len = total_len;
+ if (xdp_frame_has_frags(xdpf)) {
+ nr_frags = sinfo->nr_frags;
+ total_len += sinfo->xdp_frags_size;
+ }
+ dx_buff->pa = dma_map_single(dev, xdpf->data, dx_buff->len,
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(dev, dx_buff->pa)))
+ goto exit;
+
+ first = dx_buff;
+ dx_buff->len_pkt = total_len;
+ dx_buff->is_sop = 1U;
+ dx_buff->is_mapped = 1U;
+ ++ret;
+
+ for (; nr_frags--; ++frag_count) {
+ skb_frag_t *frag = &sinfo->frags[frag_count];
+ unsigned int frag_len = skb_frag_size(frag);
+ unsigned int buff_offset = 0U;
+ unsigned int buff_size = 0U;
+ dma_addr_t frag_pa;
+
+ while (frag_len) {
+ if (frag_len > AQ_CFG_TX_FRAME_MAX)
+ buff_size = AQ_CFG_TX_FRAME_MAX;
+ else
+ buff_size = frag_len;
+
+ frag_pa = skb_frag_dma_map(dev, frag, buff_offset,
+ buff_size, DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(dev, frag_pa)))
+ goto mapping_error;
+
+ dx = aq_ring_next_dx(ring, dx);
+ dx_buff = &ring->buff_ring[dx];
+
+ dx_buff->flags = 0U;
+ dx_buff->len = buff_size;
+ dx_buff->pa = frag_pa;
+ dx_buff->is_mapped = 1U;
+ dx_buff->eop_index = 0xffffU;
+
+ frag_len -= buff_size;
+ buff_offset += buff_size;
+
+ ++ret;
+ }
+ }
+
+ first->eop_index = dx;
+ dx_buff->is_eop = 1U;
+ dx_buff->skb = NULL;
+ dx_buff->xdpf = xdpf;
+ goto exit;
+
+mapping_error:
+ for (dx = ring->sw_tail;
+ ret > 0;
+ --ret, dx = aq_ring_next_dx(ring, dx)) {
+ dx_buff = &ring->buff_ring[dx];
+
+ if (!dx_buff->pa)
+ continue;
+ if (unlikely(dx_buff->is_sop))
+ dma_unmap_single(dev, dx_buff->pa, dx_buff->len,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_page(dev, dx_buff->pa, dx_buff->len,
+ DMA_TO_DEVICE);
+ }
+
+exit:
+ return ret;
+}
+
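+/* Map an skb into TX descriptors: an optional context descriptor carries
+ * GSO and/or VLAN metadata, followed by the linear part and each paged
+ * fragment chunked at AQ_CFG_TX_FRAME_MAX. Returns the number of
+ * descriptors consumed, or 0 if a DMA mapping failed (anything already
+ * mapped is unmapped again).
+ */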
+unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
+ struct aq_ring_s *ring)
+{
+ unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
+ struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self);
+ struct device *dev = aq_nic_get_dev(self);
+ struct aq_ring_buff_s *first = NULL;
+ u8 ipver = ip_hdr(skb)->version;
+ struct aq_ring_buff_s *dx_buff;
+ bool need_context_tag = false;
+ unsigned int frag_count = 0U;
+ unsigned int ret = 0U;
+ unsigned int dx;
+ u8 l4proto = 0;
+
+ if (ipver == 4)
+ l4proto = ip_hdr(skb)->protocol;
+ else if (ipver == 6)
+ l4proto = ipv6_hdr(skb)->nexthdr;
+
+ dx = ring->sw_tail;
+ dx_buff = &ring->buff_ring[dx];
+ dx_buff->flags = 0U;
+
+ if (unlikely(skb_is_gso(skb))) {
+ dx_buff->mss = skb_shinfo(skb)->gso_size;
+ if (l4proto == IPPROTO_TCP) {
+ dx_buff->is_gso_tcp = 1U;
+ dx_buff->len_l4 = tcp_hdrlen(skb);
+ } else if (l4proto == IPPROTO_UDP) {
+ dx_buff->is_gso_udp = 1U;
+ dx_buff->len_l4 = sizeof(struct udphdr);
+			/* For UDP GSO the hardware does not replace the
+			 * packet length.
+			 */
+ udp_hdr(skb)->len = htons(dx_buff->mss +
+ dx_buff->len_l4);
+ } else {
+ WARN_ONCE(true, "Bad GSO mode");
+ goto exit;
+ }
+ dx_buff->len_pkt = skb->len;
+ dx_buff->len_l2 = ETH_HLEN;
+ dx_buff->len_l3 = skb_network_header_len(skb);
+ dx_buff->eop_index = 0xffffU;
+ dx_buff->is_ipv6 = (ipver == 6);
+ need_context_tag = true;
+ }
+
+ if (cfg->is_vlan_tx_insert && skb_vlan_tag_present(skb)) {
+ dx_buff->vlan_tx_tag = skb_vlan_tag_get(skb);
+ dx_buff->len_pkt = skb->len;
+ dx_buff->is_vlan = 1U;
+ need_context_tag = true;
+ }
+
+ if (need_context_tag) {
+ dx = aq_ring_next_dx(ring, dx);
+ dx_buff = &ring->buff_ring[dx];
+ dx_buff->flags = 0U;
+ ++ret;
+ }
+
+ dx_buff->len = skb_headlen(skb);
+ dx_buff->pa = dma_map_single(dev,
+ skb->data,
+ dx_buff->len,
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(dev, dx_buff->pa))) {
+ ret = 0;
+ goto exit;
+ }
+
+ first = dx_buff;
+ dx_buff->len_pkt = skb->len;
+ dx_buff->is_sop = 1U;
+ dx_buff->is_mapped = 1U;
+ ++ret;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol);
+ dx_buff->is_tcp_cso = (l4proto == IPPROTO_TCP);
+ dx_buff->is_udp_cso = (l4proto == IPPROTO_UDP);
+ }
+
+ for (; nr_frags--; ++frag_count) {
+ unsigned int frag_len = 0U;
+ unsigned int buff_offset = 0U;
+ unsigned int buff_size = 0U;
+ dma_addr_t frag_pa;
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count];
+
+ frag_len = skb_frag_size(frag);
+
+ while (frag_len) {
+ if (frag_len > AQ_CFG_TX_FRAME_MAX)
+ buff_size = AQ_CFG_TX_FRAME_MAX;
+ else
+ buff_size = frag_len;
+
+ frag_pa = skb_frag_dma_map(dev,
+ frag,
+ buff_offset,
+ buff_size,
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(dev,
+ frag_pa)))
+ goto mapping_error;
+
+ dx = aq_ring_next_dx(ring, dx);
+ dx_buff = &ring->buff_ring[dx];
+
+ dx_buff->flags = 0U;
+ dx_buff->len = buff_size;
+ dx_buff->pa = frag_pa;
+ dx_buff->is_mapped = 1U;
+ dx_buff->eop_index = 0xffffU;
+
+ frag_len -= buff_size;
+ buff_offset += buff_size;
+
+ ++ret;
+ }
+ }
+
+ first->eop_index = dx;
+ dx_buff->is_eop = 1U;
+ dx_buff->skb = skb;
+ dx_buff->xdpf = NULL;
+ goto exit;
+
+mapping_error:
+ for (dx = ring->sw_tail;
+ ret > 0;
+ --ret, dx = aq_ring_next_dx(ring, dx)) {
+ dx_buff = &ring->buff_ring[dx];
+
+ if (!(dx_buff->is_gso_tcp || dx_buff->is_gso_udp) &&
+ !dx_buff->is_vlan && dx_buff->pa) {
+ if (unlikely(dx_buff->is_sop)) {
+ dma_unmap_single(dev,
+ dx_buff->pa,
+ dx_buff->len,
+ DMA_TO_DEVICE);
+ } else {
+ dma_unmap_page(dev,
+ dx_buff->pa,
+ dx_buff->len,
+ DMA_TO_DEVICE);
+ }
+ }
+ }
+
+exit:
+ return ret;
+}
+
+int aq_nic_xmit_xdpf(struct aq_nic_s *aq_nic, struct aq_ring_s *tx_ring,
+ struct xdp_frame *xdpf)
+{
+ u16 queue_index = AQ_NIC_RING2QMAP(aq_nic, tx_ring->idx);
+ struct net_device *ndev = aq_nic_get_ndev(aq_nic);
+ struct skb_shared_info *sinfo;
+ int cpu = smp_processor_id();
+ int err = NETDEV_TX_BUSY;
+ struct netdev_queue *nq;
+ unsigned int frags = 1;
+
+ if (xdp_frame_has_frags(xdpf)) {
+ sinfo = xdp_get_shared_info_from_frame(xdpf);
+ frags += sinfo->nr_frags;
+ }
+
+ if (frags > AQ_CFG_SKB_FRAGS_MAX)
+ return err;
+
+ nq = netdev_get_tx_queue(ndev, tx_ring->idx);
+ __netif_tx_lock(nq, cpu);
+
+ aq_ring_update_queue_state(tx_ring);
+
+ /* Above status update may stop the queue. Check this. */
+ if (__netif_subqueue_stopped(aq_nic_get_ndev(aq_nic), queue_index))
+ goto out;
+
+ frags = aq_nic_map_xdp(aq_nic, xdpf, tx_ring);
+ if (likely(frags))
+ err = aq_nic->aq_hw_ops->hw_ring_tx_xmit(aq_nic->aq_hw, tx_ring,
+ frags);
+out:
+ __netif_tx_unlock(nq);
+
+ return err;
+}
+
+int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
+{
+ struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self);
+ unsigned int vec = skb->queue_mapping % cfg->vecs;
+ unsigned int tc = skb->queue_mapping / cfg->vecs;
+ struct aq_ring_s *ring = NULL;
+ unsigned int frags = 0U;
+ int err = NETDEV_TX_OK;
+
+ frags = skb_shinfo(skb)->nr_frags + 1;
+
+ ring = self->aq_ring_tx[AQ_NIC_CFG_TCVEC2RING(cfg, tc, vec)];
+
+ if (frags > AQ_CFG_SKB_FRAGS_MAX) {
+ dev_kfree_skb_any(skb);
+ goto err_exit;
+ }
+
+ aq_ring_update_queue_state(ring);
+
+ if (cfg->priv_flags & BIT(AQ_HW_LOOPBACK_DMA_NET)) {
+ err = NETDEV_TX_BUSY;
+ goto err_exit;
+ }
+
+ /* Above status update may stop the queue. Check this. */
+ if (__netif_subqueue_stopped(self->ndev,
+ AQ_NIC_RING2QMAP(self, ring->idx))) {
+ err = NETDEV_TX_BUSY;
+ goto err_exit;
+ }
+
+ frags = aq_nic_map_skb(self, skb, ring);
+
+ if (likely(frags)) {
+ err = self->aq_hw_ops->hw_ring_tx_xmit(self->aq_hw,
+ ring, frags);
+ } else {
+ err = NETDEV_TX_BUSY;
+ }
+
+err_exit:
+ return err;
+}
+
+int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self)
+{
+ return self->aq_hw_ops->hw_interrupt_moderation_set(self->aq_hw);
+}
+
+int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags)
+{
+ int err = 0;
+
+ err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw, flags);
+ if (err < 0)
+ goto err_exit;
+
+ self->packet_filter = flags;
+
+err_exit:
+ return err;
+}
+
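+/* Sync the unicast and multicast address lists into the hardware filter.
+ * If the unicast list overflows the table the device falls back to
+ * promiscuous mode; a multicast overflow falls back to all-multicast.
+ */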
+int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
+{
+ const struct aq_hw_ops *hw_ops = self->aq_hw_ops;
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+ unsigned int packet_filter = ndev->flags;
+ struct netdev_hw_addr *ha = NULL;
+ unsigned int i = 0U;
+ int err = 0;
+
+ self->mc_list.count = 0;
+ if (netdev_uc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
+ packet_filter |= IFF_PROMISC;
+ } else {
+ netdev_for_each_uc_addr(ha, ndev) {
+ ether_addr_copy(self->mc_list.ar[i++], ha->addr);
+ }
+ }
+
+ cfg->is_mc_list_enabled = !!(packet_filter & IFF_MULTICAST);
+ if (cfg->is_mc_list_enabled) {
+ if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
+ packet_filter |= IFF_ALLMULTI;
+ } else {
+ netdev_for_each_mc_addr(ha, ndev) {
+ ether_addr_copy(self->mc_list.ar[i++],
+ ha->addr);
+ }
+ }
+ }
+
+ if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) {
+ self->mc_list.count = i;
+ err = hw_ops->hw_multicast_list_set(self->aq_hw,
+ self->mc_list.ar,
+ self->mc_list.count);
+ if (err < 0)
+ return err;
+ }
+
+ return aq_nic_set_packet_filter(self, packet_filter);
+}
+
+int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
+{
+ self->aq_nic_cfg.mtu = new_mtu;
+
+ return 0;
+}
+
+int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev)
+{
+ return self->aq_hw_ops->hw_set_mac_address(self->aq_hw, ndev->dev_addr);
+}
+
+unsigned int aq_nic_get_link_speed(struct aq_nic_s *self)
+{
+ return self->link_status.mbps;
+}
+
+int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p)
+{
+ u32 *regs_buff = p;
+ int err = 0;
+
+ if (unlikely(!self->aq_hw_ops->hw_get_regs))
+ return -EOPNOTSUPP;
+
+ regs->version = 1;
+
+ err = self->aq_hw_ops->hw_get_regs(self->aq_hw,
+ self->aq_nic_cfg.aq_hw_caps,
+ regs_buff);
+
+ return err;
+}
+
+int aq_nic_get_regs_count(struct aq_nic_s *self)
+{
+ if (unlikely(!self->aq_hw_ops->hw_get_regs))
+ return 0;
+
+ return self->aq_nic_cfg.aq_hw_caps->mac_regs_count;
+}
+
+u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
+{
+ struct aq_stats_s *stats;
+ unsigned int count = 0U;
+ unsigned int i = 0U;
+ unsigned int tc;
+
+ if (self->aq_fw_ops->update_stats) {
+ mutex_lock(&self->fwreq_mutex);
+ self->aq_fw_ops->update_stats(self->aq_hw);
+ mutex_unlock(&self->fwreq_mutex);
+ }
+ stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);
+
+ if (!stats)
+ goto err_exit;
+
+ data[i] = stats->uprc + stats->mprc + stats->bprc;
+ data[++i] = stats->uprc;
+ data[++i] = stats->mprc;
+ data[++i] = stats->bprc;
+ data[++i] = stats->erpt;
+ data[++i] = stats->uptc + stats->mptc + stats->bptc;
+ data[++i] = stats->uptc;
+ data[++i] = stats->mptc;
+ data[++i] = stats->bptc;
+ data[++i] = stats->ubrc;
+ data[++i] = stats->ubtc;
+ data[++i] = stats->mbrc;
+ data[++i] = stats->mbtc;
+ data[++i] = stats->bbrc;
+ data[++i] = stats->bbtc;
+ if (stats->brc)
+ data[++i] = stats->brc;
+ else
+ data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
+ if (stats->btc)
+ data[++i] = stats->btc;
+ else
+ data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
+ data[++i] = stats->dma_pkt_rc;
+ data[++i] = stats->dma_pkt_tc;
+ data[++i] = stats->dma_oct_rc;
+ data[++i] = stats->dma_oct_tc;
+ data[++i] = stats->dpc;
+
+ i++;
+
+ data += i;
+
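+ /* Append per-TC software stats after the MAC counters filled above.
+ * Note that "data" is advanced by the previous vector's count before
+ * the next vector's stats are written, so "count" lags one iteration.
+ */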
+ for (tc = 0U; tc < self->aq_nic_cfg.tcs; tc++) {
+ for (i = 0U; self->aq_vecs > i; ++i) {
+ if (!self->aq_vec[i])
+ break;
+ data += count;
+ count = aq_vec_get_sw_stats(self->aq_vec[i], tc, data);
+ }
+ }
+
+ data += count;
+
+err_exit:
+ return data;
+}
+
+static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
+{
+ struct aq_stats_s *stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);
+ struct net_device *ndev = self->ndev;
+
+ ndev->stats.rx_packets = stats->dma_pkt_rc;
+ ndev->stats.rx_bytes = stats->dma_oct_rc;
+ ndev->stats.rx_errors = stats->erpr;
+ ndev->stats.rx_dropped = stats->dpc;
+ ndev->stats.tx_packets = stats->dma_pkt_tc;
+ ndev->stats.tx_bytes = stats->dma_oct_tc;
+ ndev->stats.tx_errors = stats->erpt;
+ ndev->stats.multicast = stats->mprc;
+}
+
+void aq_nic_get_link_ksettings(struct aq_nic_s *self,
+ struct ethtool_link_ksettings *cmd)
+{
+ u32 lp_link_speed_msk;
+
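+ /* "supported" is built from the hardware capability mask,
+ * "advertising" from the currently configured link_speed_msk, and
+ * "lp_advertising" from the link partner state reported in
+ * aq_link_status.
+ */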
+ if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
+ cmd->base.port = PORT_FIBRE;
+ else
+ cmd->base.port = PORT_TP;
+
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ if (self->link_status.mbps)
+ cmd->base.duplex = self->link_status.full_duplex ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ cmd->base.autoneg = self->aq_nic_cfg.is_autoneg;
+
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+
+ if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10G)
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 10000baseT_Full);
+
+ if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_5G)
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 5000baseT_Full);
+
+ if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_2G5)
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 2500baseT_Full);
+
+ if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_1G)
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 1000baseT_Full);
+
+ if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_1G_HALF)
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 1000baseT_Half);
+
+ if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_100M)
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 100baseT_Full);
+
+ if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_100M_HALF)
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 100baseT_Half);
+
+ if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10M)
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 10baseT_Full);
+
+ if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10M_HALF)
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 10baseT_Half);
+
+ if (self->aq_nic_cfg.aq_hw_caps->flow_control) {
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ Pause);
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ Asym_Pause);
+ }
+
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
+
+ if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+ else
+ ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
+
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+
+ if (self->aq_nic_cfg.is_autoneg)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
+
+ if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10G)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 10000baseT_Full);
+
+ if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_5G)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 5000baseT_Full);
+
+ if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_2G5)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 2500baseT_Full);
+
+ if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 1000baseT_Full);
+
+ if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G_HALF)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 1000baseT_Half);
+
+ if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 100baseT_Full);
+
+ if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M_HALF)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 100baseT_Half);
+
+ if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10M)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 10baseT_Full);
+
+ if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10M_HALF)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 10baseT_Half);
+
+ if (self->aq_nic_cfg.fc.cur & AQ_NIC_FC_RX)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Pause);
+
+ /* Asym is when either RX or TX, but not both */
+ if (!!(self->aq_nic_cfg.fc.cur & AQ_NIC_FC_TX) ^
+ !!(self->aq_nic_cfg.fc.cur & AQ_NIC_FC_RX))
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Asym_Pause);
+
+ if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
+ else
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
+
+ ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising);
+ lp_link_speed_msk = self->aq_hw->aq_link_status.lp_link_speed_msk;
+
+ if (lp_link_speed_msk & AQ_NIC_RATE_10G)
+ ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
+ 10000baseT_Full);
+
+ if (lp_link_speed_msk & AQ_NIC_RATE_5G)
+ ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
+ 5000baseT_Full);
+
+ if (lp_link_speed_msk & AQ_NIC_RATE_2G5)
+ ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
+ 2500baseT_Full);
+
+ if (lp_link_speed_msk & AQ_NIC_RATE_1G)
+ ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
+ 1000baseT_Full);
+
+ if (lp_link_speed_msk & AQ_NIC_RATE_1G_HALF)
+ ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
+ 1000baseT_Half);
+
+ if (lp_link_speed_msk & AQ_NIC_RATE_100M)
+ ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
+ 100baseT_Full);
+
+ if (lp_link_speed_msk & AQ_NIC_RATE_100M_HALF)
+ ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
+ 100baseT_Half);
+
+ if (lp_link_speed_msk & AQ_NIC_RATE_10M)
+ ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
+ 10baseT_Full);
+
+ if (lp_link_speed_msk & AQ_NIC_RATE_10M_HALF)
+ ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
+ 10baseT_Half);
+
+ if (self->aq_hw->aq_link_status.lp_flow_control & AQ_NIC_FC_RX)
+ ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
+ Pause);
+ if (!!(self->aq_hw->aq_link_status.lp_flow_control & AQ_NIC_FC_TX) ^
+ !!(self->aq_hw->aq_link_status.lp_flow_control & AQ_NIC_FC_RX))
+ ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
+ Asym_Pause);
+}
+
+int aq_nic_set_link_ksettings(struct aq_nic_s *self,
+ const struct ethtool_link_ksettings *cmd)
+{
+ int fduplex = (cmd->base.duplex == DUPLEX_FULL);
+ u32 speed = cmd->base.speed;
+ u32 rate = 0U;
+ int err = 0;
+
+ if (!fduplex && speed > SPEED_1000) {
+ err = -EINVAL;
+ goto err_exit;
+ }
+
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
+ rate = self->aq_nic_cfg.aq_hw_caps->link_speed_msk;
+ self->aq_nic_cfg.is_autoneg = true;
+ } else {
+ switch (speed) {
+ case SPEED_10:
+ rate = fduplex ? AQ_NIC_RATE_10M : AQ_NIC_RATE_10M_HALF;
+ break;
+
+ case SPEED_100:
+ rate = fduplex ? AQ_NIC_RATE_100M
+ : AQ_NIC_RATE_100M_HALF;
+ break;
+
+ case SPEED_1000:
+ rate = fduplex ? AQ_NIC_RATE_1G : AQ_NIC_RATE_1G_HALF;
+ break;
+
+ case SPEED_2500:
+ rate = AQ_NIC_RATE_2G5;
+ break;
+
+ case SPEED_5000:
+ rate = AQ_NIC_RATE_5G;
+ break;
+
+ case SPEED_10000:
+ rate = AQ_NIC_RATE_10G;
+ break;
+
+ default:
+ err = -EINVAL;
+ goto err_exit;
+ }
+ if (!(self->aq_nic_cfg.aq_hw_caps->link_speed_msk & rate)) {
+ err = -EINVAL;
+ goto err_exit;
+ }
+
+ self->aq_nic_cfg.is_autoneg = false;
+ }
+
+ mutex_lock(&self->fwreq_mutex);
+ err = self->aq_fw_ops->set_link_speed(self->aq_hw, rate);
+ mutex_unlock(&self->fwreq_mutex);
+ if (err < 0)
+ goto err_exit;
+
+ self->aq_nic_cfg.link_speed_msk = rate;
+
+err_exit:
+ return err;
+}
+
+struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self)
+{
+ return &self->aq_nic_cfg;
+}
+
+u32 aq_nic_get_fw_version(struct aq_nic_s *self)
+{
+ return self->aq_hw_ops->hw_get_fw_version(self->aq_hw);
+}
+
+int aq_nic_set_loopback(struct aq_nic_s *self)
+{
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+
+ if (!self->aq_hw_ops->hw_set_loopback ||
+ !self->aq_fw_ops->set_phyloopback)
+ return -EOPNOTSUPP;
+
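+ /* Each loopback point follows its priv_flags bit: the DMA and
+ * packet-buffer loopbacks are toggled directly through the hw ops,
+ * while the PHY internal/external loopbacks are configured via the
+ * firmware interface.
+ */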
+ mutex_lock(&self->fwreq_mutex);
+ self->aq_hw_ops->hw_set_loopback(self->aq_hw,
+ AQ_HW_LOOPBACK_DMA_SYS,
+ !!(cfg->priv_flags &
+ BIT(AQ_HW_LOOPBACK_DMA_SYS)));
+
+ self->aq_hw_ops->hw_set_loopback(self->aq_hw,
+ AQ_HW_LOOPBACK_PKT_SYS,
+ !!(cfg->priv_flags &
+ BIT(AQ_HW_LOOPBACK_PKT_SYS)));
+
+ self->aq_hw_ops->hw_set_loopback(self->aq_hw,
+ AQ_HW_LOOPBACK_DMA_NET,
+ !!(cfg->priv_flags &
+ BIT(AQ_HW_LOOPBACK_DMA_NET)));
+
+ self->aq_fw_ops->set_phyloopback(self->aq_hw,
+ AQ_HW_LOOPBACK_PHYINT_SYS,
+ !!(cfg->priv_flags &
+ BIT(AQ_HW_LOOPBACK_PHYINT_SYS)));
+
+ self->aq_fw_ops->set_phyloopback(self->aq_hw,
+ AQ_HW_LOOPBACK_PHYEXT_SYS,
+ !!(cfg->priv_flags &
+ BIT(AQ_HW_LOOPBACK_PHYEXT_SYS)));
+ mutex_unlock(&self->fwreq_mutex);
+
+ return 0;
+}
+
+int aq_nic_stop(struct aq_nic_s *self)
+{
+ unsigned int i = 0U;
+
+ netif_tx_disable(self->ndev);
+ netif_carrier_off(self->ndev);
+
+ del_timer_sync(&self->service_timer);
+ cancel_work_sync(&self->service_task);
+
+ self->aq_hw_ops->hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK);
+
+ if (self->aq_nic_cfg.is_polling)
+ del_timer_sync(&self->polling_timer);
+ else
+ aq_pci_func_free_irqs(self);
+
+ aq_ptp_irq_free(self);
+
+ for (i = 0U; self->aq_vecs > i; ++i)
+ aq_vec_stop(self->aq_vec[i]);
+
+ aq_ptp_ring_stop(self);
+
+ return self->aq_hw_ops->hw_stop(self->aq_hw);
+}
+
+void aq_nic_set_power(struct aq_nic_s *self)
+{
+ if (self->power_state != AQ_HW_POWER_STATE_D0 ||
+ self->aq_hw->aq_nic_cfg->wol)
+ if (likely(self->aq_fw_ops->set_power)) {
+ mutex_lock(&self->fwreq_mutex);
+ self->aq_fw_ops->set_power(self->aq_hw,
+ self->power_state,
+ self->ndev->dev_addr);
+ mutex_unlock(&self->fwreq_mutex);
+ }
+}
+
+void aq_nic_deinit(struct aq_nic_s *self, bool link_down)
+{
+ struct aq_vec_s *aq_vec = NULL;
+ unsigned int i = 0U;
+
+ if (!self)
+ goto err_exit;
+
+ for (i = 0U; i < self->aq_vecs; i++) {
+ aq_vec = self->aq_vec[i];
+ aq_vec_deinit(aq_vec);
+ aq_vec_ring_free(aq_vec);
+ }
+
+ aq_ptp_unregister(self);
+ aq_ptp_ring_deinit(self);
+ aq_ptp_ring_free(self);
+ aq_ptp_free(self);
+
+ if (likely(self->aq_fw_ops->deinit) && link_down) {
+ mutex_lock(&self->fwreq_mutex);
+ self->aq_fw_ops->deinit(self->aq_hw);
+ mutex_unlock(&self->fwreq_mutex);
+ }
+
+err_exit:;
+}
+
+void aq_nic_free_vectors(struct aq_nic_s *self)
+{
+ unsigned int i = 0U;
+
+ if (!self)
+ goto err_exit;
+
+ for (i = ARRAY_SIZE(self->aq_vec); i--;) {
+ if (self->aq_vec[i]) {
+ aq_vec_free(self->aq_vec[i]);
+ self->aq_vec[i] = NULL;
+ }
+ }
+
+err_exit:;
+}
+
+int aq_nic_realloc_vectors(struct aq_nic_s *self)
+{
+ struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self);
+
+ aq_nic_free_vectors(self);
+
+ for (self->aq_vecs = 0; self->aq_vecs < cfg->vecs; self->aq_vecs++) {
+ self->aq_vec[self->aq_vecs] = aq_vec_alloc(self, self->aq_vecs,
+ cfg);
+ if (unlikely(!self->aq_vec[self->aq_vecs]))
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void aq_nic_shutdown(struct aq_nic_s *self)
+{
+ int err = 0;
+
+ if (!self->ndev)
+ return;
+
+ rtnl_lock();
+
+ netif_device_detach(self->ndev);
+
+ if (netif_running(self->ndev)) {
+ err = aq_nic_stop(self);
+ if (err < 0)
+ goto err_exit;
+ }
+ aq_nic_deinit(self, !self->aq_hw->aq_nic_cfg->wol);
+ aq_nic_set_power(self);
+
+err_exit:
+ rtnl_unlock();
+}
+
+u8 aq_nic_reserve_filter(struct aq_nic_s *self, enum aq_rx_filter_type type)
+{
+ u8 location = 0xFF;
+ u32 fltr_cnt;
+ u32 n_bit;
+
+ switch (type) {
+ case aq_rx_filter_ethertype:
+ location = AQ_RX_LAST_LOC_FETHERT - AQ_RX_FIRST_LOC_FETHERT -
+ self->aq_hw_rx_fltrs.fet_reserved_count;
+ self->aq_hw_rx_fltrs.fet_reserved_count++;
+ break;
+ case aq_rx_filter_l3l4:
+ fltr_cnt = AQ_RX_LAST_LOC_FL3L4 - AQ_RX_FIRST_LOC_FL3L4;
+ n_bit = fltr_cnt - self->aq_hw_rx_fltrs.fl3l4.reserved_count;
+
+ self->aq_hw_rx_fltrs.fl3l4.active_ipv4 |= BIT(n_bit);
+ self->aq_hw_rx_fltrs.fl3l4.reserved_count++;
+ location = n_bit;
+ break;
+ default:
+ break;
+ }
+
+ return location;
+}
+
+void aq_nic_release_filter(struct aq_nic_s *self, enum aq_rx_filter_type type,
+ u32 location)
+{
+ switch (type) {
+ case aq_rx_filter_ethertype:
+ self->aq_hw_rx_fltrs.fet_reserved_count--;
+ break;
+ case aq_rx_filter_l3l4:
+ self->aq_hw_rx_fltrs.fl3l4.reserved_count--;
+ self->aq_hw_rx_fltrs.fl3l4.active_ipv4 &= ~BIT(location);
+ break;
+ default:
+ break;
+ }
+}
+
+int aq_nic_set_downshift(struct aq_nic_s *self, int val)
+{
+ int err = 0;
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+
+ if (!self->aq_fw_ops->set_downshift)
+ return -EOPNOTSUPP;
+
+ if (val > 15) {
+ netdev_err(self->ndev, "downshift counter should be <= 15\n");
+ return -EINVAL;
+ }
+ cfg->downshift_counter = val;
+
+ mutex_lock(&self->fwreq_mutex);
+ err = self->aq_fw_ops->set_downshift(self->aq_hw, cfg->downshift_counter);
+ mutex_unlock(&self->fwreq_mutex);
+
+ return err;
+}
+
+int aq_nic_set_media_detect(struct aq_nic_s *self, int val)
+{
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+ int err = 0;
+
+ if (!self->aq_fw_ops->set_media_detect)
+ return -EOPNOTSUPP;
+
+ if (val > 0 && val != AQ_HW_MEDIA_DETECT_CNT) {
+ netdev_err(self->ndev, "EDPD on this device could have only fixed value of %d\n",
+ AQ_HW_MEDIA_DETECT_CNT);
+ return -EINVAL;
+ }
+
+ mutex_lock(&self->fwreq_mutex);
+ err = self->aq_fw_ops->set_media_detect(self->aq_hw, !!val);
+ mutex_unlock(&self->fwreq_mutex);
+
+ /* The msecs value plays no role here - the configuration is always fixed in the PHY */
+ if (!err)
+ cfg->is_media_detect = !!val;
+
+ return err;
+}
+
+int aq_nic_setup_tc_mqprio(struct aq_nic_s *self, u32 tcs, u8 *prio_tc_map)
+{
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+ const unsigned int prev_vecs = cfg->vecs;
+ bool ndev_running;
+ int err = 0;
+ int i;
+
+ /* Nothing to do if the configuration is unchanged, or if this is
+ * a disable request (tcs == 0) and QoS is already disabled.
+ */
+ if (tcs == cfg->tcs || (tcs == 0 && !cfg->is_qos))
+ return 0;
+
+ ndev_running = netif_running(self->ndev);
+ if (ndev_running)
+ dev_close(self->ndev);
+
+ cfg->tcs = tcs;
+ if (cfg->tcs == 0)
+ cfg->tcs = 1;
+ if (prio_tc_map)
+ memcpy(cfg->prio_tc_map, prio_tc_map, sizeof(cfg->prio_tc_map));
+ else
+ for (i = 0; i < sizeof(cfg->prio_tc_map); i++)
+ cfg->prio_tc_map[i] = cfg->tcs * i / 8;
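+ /* e.g., tcs == 4 maps priorities 0..7 to TCs 0,0,1,1,2,2,3,3 */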
+
+ cfg->is_qos = !!tcs;
+ cfg->is_ptp = (cfg->tcs <= AQ_HW_PTP_TC);
+ if (!cfg->is_ptp)
+ netdev_warn(self->ndev, "%s\n",
+ "PTP is auto disabled due to requested TC count.");
+
+ netdev_set_num_tc(self->ndev, cfg->tcs);
+
+ /* Changing the number of TCs might change the number of vectors */
+ aq_nic_cfg_update_num_vecs(self);
+ if (prev_vecs != cfg->vecs) {
+ err = aq_nic_realloc_vectors(self);
+ if (err)
+ goto err_exit;
+ }
+
+ if (ndev_running)
+ err = dev_open(self->ndev, NULL);
+
+err_exit:
+ return err;
+}
+
+int aq_nic_setup_tc_max_rate(struct aq_nic_s *self, const unsigned int tc,
+ const u32 max_rate)
+{
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+
+ if (tc >= AQ_CFG_TCS_MAX)
+ return -EINVAL;
+
+ if (max_rate && max_rate < 10) {
+ netdev_warn(self->ndev,
+ "Setting %s to the minimum usable value of %dMbps.\n",
+ "max rate", 10);
+ cfg->tc_max_rate[tc] = 10;
+ } else {
+ cfg->tc_max_rate[tc] = max_rate;
+ }
+
+ return 0;
+}
+
+int aq_nic_setup_tc_min_rate(struct aq_nic_s *self, const unsigned int tc,
+ const u32 min_rate)
+{
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+
+ if (tc >= AQ_CFG_TCS_MAX)
+ return -EINVAL;
+
+ if (min_rate)
+ set_bit(tc, &cfg->tc_min_rate_msk);
+ else
+ clear_bit(tc, &cfg->tc_min_rate_msk);
+
+ if (min_rate && min_rate < 20) {
+ netdev_warn(self->ndev,
+ "Setting %s to the minimum usable value of %dMbps.\n",
+ "min rate", 20);
+ cfg->tc_min_rate[tc] = 20;
+ } else {
+ cfg->tc_min_rate[tc] = min_rate;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
new file mode 100644
index 000000000..ad33f8586
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
+ */
+
+/* File aq_nic.h: Declaration of common code for NIC. */
+
+#ifndef AQ_NIC_H
+#define AQ_NIC_H
+
+#include <linux/ethtool.h>
+#include <net/xdp.h>
+#include <linux/bpf.h>
+
+#include "aq_common.h"
+#include "aq_rss.h"
+#include "aq_hw.h"
+
+struct aq_ring_s;
+struct aq_hw_ops;
+struct aq_fw_s;
+struct aq_vec_s;
+struct aq_macsec_cfg;
+struct aq_ptp_s;
+enum aq_rx_filter_type;
+
+enum aq_fc_mode {
+ AQ_NIC_FC_OFF = 0,
+ AQ_NIC_FC_TX,
+ AQ_NIC_FC_RX,
+ AQ_NIC_FC_FULL,
+};
+
+struct aq_fc_info {
+ enum aq_fc_mode req;
+ enum aq_fc_mode cur;
+};
+
+struct aq_nic_cfg_s {
+ const struct aq_hw_caps_s *aq_hw_caps;
+ u64 features;
+ u32 rxds; /* rx ring size, descriptors # */
+ u32 txds; /* tx ring size, descriptors # */
+ u32 vecs; /* allocated rx/tx vectors */
+ u32 link_irq_vec;
+ u32 irq_type;
+ u32 itr;
+ u16 rx_itr;
+ u16 tx_itr;
+ u32 rxpageorder;
+ u32 num_rss_queues;
+ u32 mtu;
+ struct aq_fc_info fc;
+ u32 link_speed_msk;
+ u32 wol;
+ u8 is_vlan_rx_strip;
+ u8 is_vlan_tx_insert;
+ bool is_vlan_force_promisc;
+ u16 is_mc_list_enabled;
+ u16 mc_list_count;
+ bool is_autoneg;
+ bool is_polling;
+ bool is_rss;
+ bool is_lro;
+ bool is_qos;
+ bool is_ptp;
+ bool is_media_detect;
+ int downshift_counter;
+ enum aq_tc_mode tc_mode;
+ u32 priv_flags;
+ u8 tcs;
+ u8 prio_tc_map[8];
+ u32 tc_max_rate[AQ_CFG_TCS_MAX];
+ unsigned long tc_min_rate_msk;
+ u32 tc_min_rate[AQ_CFG_TCS_MAX];
+ struct aq_rss_parameters aq_rss;
+ u32 eee_speeds;
+};
+
+#define AQ_NIC_FLAG_STARTED 0x00000004U
+#define AQ_NIC_FLAG_STOPPING 0x00000008U
+#define AQ_NIC_FLAG_RESETTING 0x00000010U
+#define AQ_NIC_FLAG_CLOSING 0x00000020U
+#define AQ_NIC_PTP_DPATH_UP 0x02000000U
+#define AQ_NIC_LINK_DOWN 0x04000000U
+#define AQ_NIC_FLAG_ERR_UNPLUG 0x40000000U
+#define AQ_NIC_FLAG_ERR_HW 0x80000000U
+
+#define AQ_NIC_QUIRK_BAD_PTP BIT(0)
+
+#define AQ_NIC_WOL_MODES (WAKE_MAGIC |\
+ WAKE_PHY)
+
+#define AQ_NIC_CFG_RING_PER_TC(_NIC_CFG_) \
+ (((_NIC_CFG_)->tc_mode == AQ_TC_MODE_4TCS) ? 8 : 4)
+
+#define AQ_NIC_CFG_TCVEC2RING(_NIC_CFG_, _TC_, _VEC_) \
+ ((_TC_) * AQ_NIC_CFG_RING_PER_TC(_NIC_CFG_) + (_VEC_))
+
+#define AQ_NIC_RING2QMAP(_NIC_, _ID_) \
+ ((_ID_) / AQ_NIC_CFG_RING_PER_TC(&(_NIC_)->aq_nic_cfg) * \
+ (_NIC_)->aq_vecs + \
+ ((_ID_) % AQ_NIC_CFG_RING_PER_TC(&(_NIC_)->aq_nic_cfg)))
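+
+/* Example: in AQ_TC_MODE_4TCS there are 8 rings per TC, so with
+ * aq_vecs == 4, ring 10 maps to queue 10 / 8 * 4 + 10 % 8 == 6,
+ * i.e. vec 2 of TC 1 - the ring produced by
+ * AQ_NIC_CFG_TCVEC2RING(cfg, 1, 2) == 10.
+ */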
+
+struct aq_hw_rx_fl2 {
+ struct aq_rx_filter_vlan aq_vlans[AQ_VLAN_MAX_FILTERS];
+};
+
+struct aq_hw_rx_fl3l4 {
+ u8 active_ipv4;
+ u8 active_ipv6:2;
+ u8 is_ipv6;
+ u8 reserved_count;
+};
+
+struct aq_hw_rx_fltrs_s {
+ struct hlist_head filter_list;
+ u16 active_filters;
+ struct aq_hw_rx_fl2 fl2;
+ struct aq_hw_rx_fl3l4 fl3l4;
+ /* filter ether type */
+ u8 fet_reserved_count;
+};
+
+struct aq_nic_s {
+ atomic_t flags;
+ u32 msg_enable;
+ struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX];
+ struct aq_ring_s *aq_ring_tx[AQ_HW_QUEUES_MAX];
+ struct aq_hw_s *aq_hw;
+ struct bpf_prog *xdp_prog;
+ struct net_device *ndev;
+ unsigned int aq_vecs;
+ unsigned int packet_filter;
+ unsigned int power_state;
+ u8 port;
+ const struct aq_hw_ops *aq_hw_ops;
+ const struct aq_fw_ops *aq_fw_ops;
+ struct aq_nic_cfg_s aq_nic_cfg;
+ struct timer_list service_timer;
+ struct work_struct service_task;
+ struct timer_list polling_timer;
+ struct aq_hw_link_status_s link_status;
+ struct {
+ u32 count;
+ u8 ar[AQ_HW_MULTICAST_ADDRESS_MAX][ETH_ALEN];
+ } mc_list;
+ /* Bitmask of currently assigned vlans from linux */
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+
+ struct pci_dev *pdev;
+ unsigned int msix_entry_mask;
+ u32 irqvecs;
+ /* mutex to serialize FW interface access operations */
+ struct mutex fwreq_mutex;
+#if IS_ENABLED(CONFIG_MACSEC)
+ struct aq_macsec_cfg *macsec_cfg;
+ /* mutex to protect data in macsec_cfg */
+ struct mutex macsec_mutex;
+#endif
+ /* PTP support */
+ struct aq_ptp_s *aq_ptp;
+ struct aq_hw_rx_fltrs_s aq_hw_rx_fltrs;
+};
+
+static inline struct device *aq_nic_get_dev(struct aq_nic_s *self)
+{
+ return self->ndev->dev.parent;
+}
+
+void aq_nic_ndev_init(struct aq_nic_s *self);
+struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev);
+void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
+ struct aq_ring_s *ring);
+struct net_device *aq_nic_get_ndev(struct aq_nic_s *self);
+int aq_nic_init(struct aq_nic_s *self);
+void aq_nic_cfg_start(struct aq_nic_s *self);
+int aq_nic_ndev_register(struct aq_nic_s *self);
+void aq_nic_ndev_free(struct aq_nic_s *self);
+int aq_nic_start(struct aq_nic_s *self);
+unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
+ struct aq_ring_s *ring);
+int aq_nic_xmit_xdpf(struct aq_nic_s *aq_nic, struct aq_ring_s *tx_ring,
+ struct xdp_frame *xdpf);
+int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb);
+int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p);
+int aq_nic_get_regs_count(struct aq_nic_s *self);
+u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data);
+int aq_nic_stop(struct aq_nic_s *self);
+void aq_nic_deinit(struct aq_nic_s *self, bool link_down);
+void aq_nic_set_power(struct aq_nic_s *self);
+void aq_nic_free_hot_resources(struct aq_nic_s *self);
+void aq_nic_free_vectors(struct aq_nic_s *self);
+int aq_nic_realloc_vectors(struct aq_nic_s *self);
+int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu);
+int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev);
+int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags);
+int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev);
+unsigned int aq_nic_get_link_speed(struct aq_nic_s *self);
+void aq_nic_get_link_ksettings(struct aq_nic_s *self,
+ struct ethtool_link_ksettings *cmd);
+int aq_nic_set_link_ksettings(struct aq_nic_s *self,
+ const struct ethtool_link_ksettings *cmd);
+struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self);
+u32 aq_nic_get_fw_version(struct aq_nic_s *self);
+int aq_nic_set_loopback(struct aq_nic_s *self);
+int aq_nic_set_downshift(struct aq_nic_s *self, int val);
+int aq_nic_set_media_detect(struct aq_nic_s *self, int val);
+int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self);
+void aq_nic_shutdown(struct aq_nic_s *self);
+u8 aq_nic_reserve_filter(struct aq_nic_s *self, enum aq_rx_filter_type type);
+void aq_nic_release_filter(struct aq_nic_s *self, enum aq_rx_filter_type type,
+ u32 location);
+int aq_nic_setup_tc_mqprio(struct aq_nic_s *self, u32 tcs, u8 *prio_tc_map);
+int aq_nic_setup_tc_max_rate(struct aq_nic_s *self, const unsigned int tc,
+ const u32 max_rate);
+int aq_nic_setup_tc_min_rate(struct aq_nic_s *self, const unsigned int tc,
+ const u32 min_rate);
+#endif /* AQ_NIC_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
new file mode 100644
index 000000000..8647125d6
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -0,0 +1,487 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
+ */
+
+/* File aq_pci_func.c: Definition of PCI functions. */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+
+#include "aq_main.h"
+#include "aq_nic.h"
+#include "aq_vec.h"
+#include "aq_hw.h"
+#include "aq_pci_func.h"
+#include "hw_atl/hw_atl_a0.h"
+#include "hw_atl/hw_atl_b0.h"
+#include "hw_atl2/hw_atl2.h"
+#include "aq_filters.h"
+#include "aq_drvinfo.h"
+#include "aq_macsec.h"
+
+static const struct pci_device_id aq_pci_tbl[] = {
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_0001), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D100), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D107), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D108), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D109), },
+
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC100), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC107), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC108), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC109), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112), },
+
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC100S), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC107S), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC108S), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC109S), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111S), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112S), },
+
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113DEV), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113CS), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC114CS), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113C), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC115C), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113CA), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC116C), },
+
+ {}
+};
+
+static const struct aq_board_revision_s hw_atl_boards[] = {
+ { AQ_DEVICE_ID_0001, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc107, },
+ { AQ_DEVICE_ID_D100, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc100, },
+ { AQ_DEVICE_ID_D107, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc107, },
+ { AQ_DEVICE_ID_D108, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc108, },
+ { AQ_DEVICE_ID_D109, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc109, },
+
+ { AQ_DEVICE_ID_0001, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc107, },
+ { AQ_DEVICE_ID_D100, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc100, },
+ { AQ_DEVICE_ID_D107, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc107, },
+ { AQ_DEVICE_ID_D108, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc108, },
+ { AQ_DEVICE_ID_D109, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc109, },
+
+ { AQ_DEVICE_ID_AQC100, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc100, },
+ { AQ_DEVICE_ID_AQC107, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, },
+ { AQ_DEVICE_ID_AQC108, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc108, },
+ { AQ_DEVICE_ID_AQC109, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109, },
+ { AQ_DEVICE_ID_AQC111, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc111, },
+ { AQ_DEVICE_ID_AQC112, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc112, },
+
+ { AQ_DEVICE_ID_AQC100S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc100s, },
+ { AQ_DEVICE_ID_AQC107S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107s, },
+ { AQ_DEVICE_ID_AQC108S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc108s, },
+ { AQ_DEVICE_ID_AQC109S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109s, },
+ { AQ_DEVICE_ID_AQC111S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc111s, },
+ { AQ_DEVICE_ID_AQC112S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc112s, },
+
+ { AQ_DEVICE_ID_AQC113DEV, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
+ { AQ_DEVICE_ID_AQC113, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
+ { AQ_DEVICE_ID_AQC113CS, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
+ { AQ_DEVICE_ID_AQC114CS, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
+ { AQ_DEVICE_ID_AQC113C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
+ { AQ_DEVICE_ID_AQC115C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc115c, },
+ { AQ_DEVICE_ID_AQC113CA, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
+ { AQ_DEVICE_ID_AQC116C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc116c, },
+
+};
+
+MODULE_DEVICE_TABLE(pci, aq_pci_tbl);
+
+static int aq_pci_probe_get_hw_by_id(struct pci_dev *pdev,
+ const struct aq_hw_ops **ops,
+ const struct aq_hw_caps_s **caps)
+{
+ int i;
+
+ if (pdev->vendor != PCI_VENDOR_ID_AQUANTIA)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(hw_atl_boards); i++) {
+ if (hw_atl_boards[i].devid == pdev->device &&
+ (hw_atl_boards[i].revision == AQ_HWREV_ANY ||
+ hw_atl_boards[i].revision == pdev->revision)) {
+ *ops = hw_atl_boards[i].ops;
+ *caps = hw_atl_boards[i].caps;
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(hw_atl_boards))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int aq_pci_func_init(struct pci_dev *pdev)
+{
+ int err;
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err)
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ err = -ENOSR;
+ goto err_exit;
+ }
+
+ err = pci_request_regions(pdev, AQ_CFG_DRV_NAME "_mmio");
+ if (err < 0)
+ goto err_exit;
+
+ pci_set_master(pdev);
+
+ return 0;
+
+err_exit:
+ return err;
+}
+
+int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
+ char *name, irq_handler_t irq_handler,
+ void *irq_arg, cpumask_t *affinity_mask)
+{
+ struct pci_dev *pdev = self->pdev;
+ int err;
+
+ if (pdev->msix_enabled || pdev->msi_enabled)
+ err = request_irq(pci_irq_vector(pdev, i), irq_handler, 0,
+ name, irq_arg);
+ else
+ err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr_legacy,
+ IRQF_SHARED, name, irq_arg);
+
+ if (err >= 0) {
+ self->msix_entry_mask |= (1 << i);
+
+ if (pdev->msix_enabled && affinity_mask)
+ irq_set_affinity_hint(pci_irq_vector(pdev, i),
+ affinity_mask);
+ }
+
+ return err;
+}
+
+void aq_pci_func_free_irqs(struct aq_nic_s *self)
+{
+ struct pci_dev *pdev = self->pdev;
+ unsigned int i;
+ void *irq_data;
+
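+ /* Walk all 32 possible vectors from the highest down and release
+ * only those flagged as allocated in msix_entry_mask.
+ */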
+ for (i = 32U; i--;) {
+ if (!((1U << i) & self->msix_entry_mask))
+ continue;
+ if (self->aq_nic_cfg.link_irq_vec &&
+ i == self->aq_nic_cfg.link_irq_vec)
+ irq_data = self;
+ else if (i < AQ_CFG_VECS_MAX)
+ irq_data = self->aq_vec[i];
+ else
+ continue;
+
+ if (pdev->msix_enabled)
+ irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL);
+ free_irq(pci_irq_vector(pdev, i), irq_data);
+ self->msix_entry_mask &= ~(1U << i);
+ }
+}
+
+unsigned int aq_pci_func_get_irq_type(struct aq_nic_s *self)
+{
+ if (self->pdev->msix_enabled)
+ return AQ_HW_IRQ_MSIX;
+ if (self->pdev->msi_enabled)
+ return AQ_HW_IRQ_MSI;
+
+ return AQ_HW_IRQ_LEGACY;
+}
+
+static void aq_pci_free_irq_vectors(struct aq_nic_s *self)
+{
+ pci_free_irq_vectors(self->pdev);
+}
+
+static int aq_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_id)
+{
+ struct net_device *ndev;
+ resource_size_t mmio_pa;
+ struct aq_nic_s *self;
+ u32 numvecs;
+ u32 bar;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ err = aq_pci_func_init(pdev);
+ if (err)
+ goto err_pci_func;
+
+ ndev = aq_ndev_alloc();
+ if (!ndev) {
+ err = -ENOMEM;
+ goto err_ndev;
+ }
+
+ self = netdev_priv(ndev);
+ self->pdev = pdev;
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ pci_set_drvdata(pdev, self);
+
+ mutex_init(&self->fwreq_mutex);
+
+ err = aq_pci_probe_get_hw_by_id(pdev, &self->aq_hw_ops,
+ &aq_nic_get_cfg(self)->aq_hw_caps);
+ if (err)
+ goto err_ioremap;
+
+ self->aq_hw = kzalloc(sizeof(*self->aq_hw), GFP_KERNEL);
+ if (!self->aq_hw) {
+ err = -ENOMEM;
+ goto err_ioremap;
+ }
+ self->aq_hw->aq_nic_cfg = aq_nic_get_cfg(self);
+ if (self->aq_hw->aq_nic_cfg->aq_hw_caps->priv_data_len) {
+ int len = self->aq_hw->aq_nic_cfg->aq_hw_caps->priv_data_len;
+
+ self->aq_hw->priv = kzalloc(len, GFP_KERNEL);
+ if (!self->aq_hw->priv) {
+ err = -ENOMEM;
+ goto err_free_aq_hw;
+ }
+ }
+
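+ /* Map the first memory BAR that is present and large enough to
+ * plausibly hold the register file.
+ */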
+ for (bar = 0; bar < 4; ++bar) {
+ if (IORESOURCE_MEM & pci_resource_flags(pdev, bar)) {
+ resource_size_t reg_sz;
+
+ mmio_pa = pci_resource_start(pdev, bar);
+ if (mmio_pa == 0U) {
+ err = -EIO;
+ goto err_free_aq_hw_priv;
+ }
+
+ reg_sz = pci_resource_len(pdev, bar);
+ if (reg_sz <= 24 /*ATL_REGS_SIZE*/) {
+ err = -EIO;
+ goto err_free_aq_hw_priv;
+ }
+
+ self->aq_hw->mmio = ioremap(mmio_pa, reg_sz);
+ if (!self->aq_hw->mmio) {
+ err = -EIO;
+ goto err_free_aq_hw_priv;
+ }
+ break;
+ }
+ }
+
+ if (bar == 4) {
+ err = -EIO;
+ goto err_free_aq_hw_priv;
+ }
+
+ numvecs = min((u8)AQ_CFG_VECS_DEF,
+ aq_nic_get_cfg(self)->aq_hw_caps->msix_irqs);
+ numvecs = min(numvecs, num_online_cpus());
+ /* Request IRQ vector for PTP */
+ numvecs += 1;
+
+ numvecs += AQ_HW_SERVICE_IRQS;
+ /* enable interrupts */
+#if !AQ_CFG_FORCE_LEGACY_INT
+ err = pci_alloc_irq_vectors(self->pdev, 1, numvecs,
+ PCI_IRQ_MSIX | PCI_IRQ_MSI |
+ PCI_IRQ_LEGACY);
+
+ if (err < 0)
+ goto err_hwinit;
+ numvecs = err;
+#endif
+ self->irqvecs = numvecs;
+
+ /* net device init */
+ aq_nic_cfg_start(self);
+
+ aq_nic_ndev_init(self);
+
+ err = aq_nic_ndev_register(self);
+ if (err < 0)
+ goto err_register;
+
+ aq_drvinfo_init(ndev);
+
+ return 0;
+
+err_register:
+ aq_nic_free_vectors(self);
+ aq_pci_free_irq_vectors(self);
+err_hwinit:
+ iounmap(self->aq_hw->mmio);
+err_free_aq_hw_priv:
+ kfree(self->aq_hw->priv);
+err_free_aq_hw:
+ kfree(self->aq_hw);
+err_ioremap:
+ free_netdev(ndev);
+err_ndev:
+ pci_release_regions(pdev);
+err_pci_func:
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+static void aq_pci_remove(struct pci_dev *pdev)
+{
+ struct aq_nic_s *self = pci_get_drvdata(pdev);
+
+ if (self->ndev) {
+ aq_clear_rxnfc_all_rules(self);
+ if (self->ndev->reg_state == NETREG_REGISTERED)
+ unregister_netdev(self->ndev);
+
+#if IS_ENABLED(CONFIG_MACSEC)
+ aq_macsec_free(self);
+#endif
+ aq_nic_free_vectors(self);
+ aq_pci_free_irq_vectors(self);
+ iounmap(self->aq_hw->mmio);
+ kfree(self->aq_hw->priv);
+ kfree(self->aq_hw);
+ pci_release_regions(pdev);
+ free_netdev(self->ndev);
+ }
+
+ pci_disable_device(pdev);
+}
+
+static void aq_pci_shutdown(struct pci_dev *pdev)
+{
+ struct aq_nic_s *self = pci_get_drvdata(pdev);
+
+ aq_nic_shutdown(self);
+
+ pci_disable_device(pdev);
+
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, false);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
+
+static int aq_suspend_common(struct device *dev)
+{
+ struct aq_nic_s *nic = pci_get_drvdata(to_pci_dev(dev));
+
+ rtnl_lock();
+
+ nic->power_state = AQ_HW_POWER_STATE_D3;
+ netif_device_detach(nic->ndev);
+ netif_tx_stop_all_queues(nic->ndev);
+
+ if (netif_running(nic->ndev))
+ aq_nic_stop(nic);
+
+ aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
+ aq_nic_set_power(nic);
+
+ rtnl_unlock();
+
+ return 0;
+}
+
+static int atl_resume_common(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct aq_nic_s *nic;
+ int ret = 0;
+
+ nic = pci_get_drvdata(pdev);
+
+ rtnl_lock();
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ if (netif_running(nic->ndev)) {
+ ret = aq_nic_init(nic);
+ if (ret)
+ goto err_exit;
+
+ ret = aq_nic_start(nic);
+ if (ret)
+ goto err_exit;
+ }
+
+ netif_device_attach(nic->ndev);
+ netif_tx_start_all_queues(nic->ndev);
+
+err_exit:
+ if (ret < 0)
+ aq_nic_deinit(nic, true);
+
+ rtnl_unlock();
+
+ return ret;
+}
+
+static int aq_pm_freeze(struct device *dev)
+{
+ return aq_suspend_common(dev);
+}
+
+static int aq_pm_suspend_poweroff(struct device *dev)
+{
+ return aq_suspend_common(dev);
+}
+
+static int aq_pm_thaw(struct device *dev)
+{
+ return atl_resume_common(dev);
+}
+
+static int aq_pm_resume_restore(struct device *dev)
+{
+ return atl_resume_common(dev);
+}
+
+static const struct dev_pm_ops aq_pm_ops = {
+ .suspend = aq_pm_suspend_poweroff,
+ .poweroff = aq_pm_suspend_poweroff,
+ .freeze = aq_pm_freeze,
+ .resume = aq_pm_resume_restore,
+ .restore = aq_pm_resume_restore,
+ .thaw = aq_pm_thaw,
+};
+
+static struct pci_driver aq_pci_ops = {
+ .name = AQ_CFG_DRV_NAME,
+ .id_table = aq_pci_tbl,
+ .probe = aq_pci_probe,
+ .remove = aq_pci_remove,
+ .shutdown = aq_pci_shutdown,
+#ifdef CONFIG_PM
+ .driver.pm = &aq_pm_ops,
+#endif
+};
+
+int aq_pci_func_register_driver(void)
+{
+ return pci_register_driver(&aq_pci_ops);
+}
+
+void aq_pci_func_unregister_driver(void)
+{
+ pci_unregister_driver(&aq_pci_ops);
+}
+
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h
new file mode 100644
index 000000000..3fa5f7a73
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
+ */
+
+/* File aq_pci_func.h: Declaration of PCI functions. */
+
+#ifndef AQ_PCI_FUNC_H
+#define AQ_PCI_FUNC_H
+
+#include "aq_common.h"
+#include "aq_nic.h"
+
+struct aq_board_revision_s {
+ unsigned short devid;
+ unsigned short revision;
+ const struct aq_hw_ops *ops;
+ const struct aq_hw_caps_s *caps;
+};
+
+int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
+ char *name, irq_handler_t irq_handler,
+ void *irq_arg, cpumask_t *affinity_mask);
+void aq_pci_func_free_irqs(struct aq_nic_s *self);
+unsigned int aq_pci_func_get_irq_type(struct aq_nic_s *self);
+
+int aq_pci_func_register_driver(void);
+void aq_pci_func_unregister_driver(void);
+
+#endif /* AQ_PCI_FUNC_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_phy.c b/drivers/net/ethernet/aquantia/atlantic/aq_phy.c
new file mode 100644
index 000000000..949ac2351
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_phy.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2018-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
+ */
+
+#include "aq_phy.h"
+
+#define HW_ATL_PTP_DISABLE_MSK BIT(10)
+
+bool aq_mdio_busy_wait(struct aq_hw_s *aq_hw)
+{
+ int err = 0;
+ u32 val;
+
+ err = readx_poll_timeout_atomic(hw_atl_mdio_busy_get, aq_hw,
+ val, val == 0U, 10U, 100000U);
+
+ if (err < 0)
+ return false;
+
+ return true;
+}
+
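+/* The MDIO interface registers implement an indirect access scheme:
+ * iface4 holds the register address, iface3 the write data, iface5 the
+ * read result, and iface2 issues the command (op mode 3 appears to
+ * latch the address, 1 to read and 2 to write).
+ */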
+u16 aq_mdio_read_word(struct aq_hw_s *aq_hw, u16 mmd, u16 addr)
+{
+ u16 phy_addr = aq_hw->phy_id << 5 | mmd;
+
+ /* Set Address register. */
+ hw_atl_glb_mdio_iface4_set(aq_hw, (addr & HW_ATL_MDIO_ADDRESS_MSK) <<
+ HW_ATL_MDIO_ADDRESS_SHIFT);
+ /* Send Address command. */
+ hw_atl_glb_mdio_iface2_set(aq_hw, HW_ATL_MDIO_EXECUTE_OPERATION_MSK |
+ (3 << HW_ATL_MDIO_OP_MODE_SHIFT) |
+ ((phy_addr & HW_ATL_MDIO_PHY_ADDRESS_MSK) <<
+ HW_ATL_MDIO_PHY_ADDRESS_SHIFT));
+
+ aq_mdio_busy_wait(aq_hw);
+
+ /* Send Read command. */
+ hw_atl_glb_mdio_iface2_set(aq_hw, HW_ATL_MDIO_EXECUTE_OPERATION_MSK |
+ (1 << HW_ATL_MDIO_OP_MODE_SHIFT) |
+ ((phy_addr & HW_ATL_MDIO_PHY_ADDRESS_MSK) <<
+ HW_ATL_MDIO_PHY_ADDRESS_SHIFT));
+ /* Read result. */
+ aq_mdio_busy_wait(aq_hw);
+
+ return (u16)hw_atl_glb_mdio_iface5_get(aq_hw);
+}
+
+void aq_mdio_write_word(struct aq_hw_s *aq_hw, u16 mmd, u16 addr, u16 data)
+{
+ u16 phy_addr = aq_hw->phy_id << 5 | mmd;
+
+ /* Set Address register. */
+ hw_atl_glb_mdio_iface4_set(aq_hw, (addr & HW_ATL_MDIO_ADDRESS_MSK) <<
+ HW_ATL_MDIO_ADDRESS_SHIFT);
+ /* Send Address command. */
+ hw_atl_glb_mdio_iface2_set(aq_hw, HW_ATL_MDIO_EXECUTE_OPERATION_MSK |
+ (3 << HW_ATL_MDIO_OP_MODE_SHIFT) |
+ ((phy_addr & HW_ATL_MDIO_PHY_ADDRESS_MSK) <<
+ HW_ATL_MDIO_PHY_ADDRESS_SHIFT));
+
+ aq_mdio_busy_wait(aq_hw);
+
+ hw_atl_glb_mdio_iface3_set(aq_hw, (data & HW_ATL_MDIO_WRITE_DATA_MSK) <<
+ HW_ATL_MDIO_WRITE_DATA_SHIFT);
+ /* Send Write command. */
+ hw_atl_glb_mdio_iface2_set(aq_hw, HW_ATL_MDIO_EXECUTE_OPERATION_MSK |
+ (2 << HW_ATL_MDIO_OP_MODE_SHIFT) |
+ ((phy_addr & HW_ATL_MDIO_PHY_ADDRESS_MSK) <<
+ HW_ATL_MDIO_PHY_ADDRESS_SHIFT));
+
+ aq_mdio_busy_wait(aq_hw);
+}
+
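+/* PHY register accesses below are guarded by the HW_ATL_FW_SM_MDIO
+ * semaphore shared with firmware: poll hw_atl_sem_mdio_get until it
+ * reads 1 (acquired) and release it afterwards via
+ * hw_atl_reg_glb_cpu_sem_set.
+ */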
+u16 aq_phy_read_reg(struct aq_hw_s *aq_hw, u16 mmd, u16 address)
+{
+ int err = 0;
+ u32 val;
+
+ err = readx_poll_timeout_atomic(hw_atl_sem_mdio_get, aq_hw,
+ val, val == 1U, 10U, 100000U);
+
+ if (err < 0) {
+ err = 0xffff;
+ goto err_exit;
+ }
+
+ err = aq_mdio_read_word(aq_hw, mmd, address);
+
+ hw_atl_reg_glb_cpu_sem_set(aq_hw, 1U, HW_ATL_FW_SM_MDIO);
+
+err_exit:
+ return err;
+}
+
+void aq_phy_write_reg(struct aq_hw_s *aq_hw, u16 mmd, u16 address, u16 data)
+{
+ int err = 0;
+ u32 val;
+
+ err = readx_poll_timeout_atomic(hw_atl_sem_mdio_get, aq_hw,
+ val, val == 1U, 10U, 100000U);
+ if (err < 0)
+ return;
+
+ aq_mdio_write_word(aq_hw, mmd, address, data);
+ hw_atl_reg_glb_cpu_sem_set(aq_hw, 1U, HW_ATL_FW_SM_MDIO);
+}
+
+bool aq_phy_init_phy_id(struct aq_hw_s *aq_hw)
+{
+ u16 val;
+
+ for (aq_hw->phy_id = 0; aq_hw->phy_id < HW_ATL_PHY_ID_MAX;
+ ++aq_hw->phy_id) {
+ /* PMA Standard Device Identifier 2: Address 1.3 */
+ val = aq_phy_read_reg(aq_hw, MDIO_MMD_PMAPMD, 3);
+
+ if (val != 0xffff)
+ return true;
+ }
+
+ return false;
+}
+
+bool aq_phy_init(struct aq_hw_s *aq_hw)
+{
+ u32 dev_id;
+
+ if (aq_hw->phy_id == HW_ATL_PHY_ID_MAX)
+ if (!aq_phy_init_phy_id(aq_hw))
+ return false;
+
+ /* PMA Standard Device Identifier:
+ * Address 1.2 = MSW,
+ * Address 1.3 = LSW
+ */
+ dev_id = aq_phy_read_reg(aq_hw, MDIO_MMD_PMAPMD, 2);
+ dev_id <<= 16;
+ dev_id |= aq_phy_read_reg(aq_hw, MDIO_MMD_PMAPMD, 3);
+
+ if (dev_id == 0xffffffff) {
+ aq_hw->phy_id = HW_ATL_PHY_ID_MAX;
+ return false;
+ }
+
+ return true;
+}
+
+void aq_phy_disable_ptp(struct aq_hw_s *aq_hw)
+{
+ static const u16 ptp_registers[] = {
+ 0x031e,
+ 0x031d,
+ 0x031c,
+ 0x031b,
+ };
+ u16 val;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ptp_registers); i++) {
+ val = aq_phy_read_reg(aq_hw, MDIO_MMD_VEND1,
+ ptp_registers[i]);
+
+ aq_phy_write_reg(aq_hw, MDIO_MMD_VEND1,
+ ptp_registers[i],
+ val & ~HW_ATL_PTP_DISABLE_MSK);
+ }
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_phy.h b/drivers/net/ethernet/aquantia/atlantic/aq_phy.h
new file mode 100644
index 000000000..86cc1ee83
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_phy.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2018-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
+ */
+
+#ifndef AQ_PHY_H
+#define AQ_PHY_H
+
+#include <linux/mdio.h>
+
+#include "hw_atl/hw_atl_llh.h"
+#include "hw_atl/hw_atl_llh_internal.h"
+#include "aq_hw_utils.h"
+#include "aq_hw.h"
+
+#define HW_ATL_PHY_ID_MAX 32U
+
+bool aq_mdio_busy_wait(struct aq_hw_s *aq_hw);
+
+u16 aq_mdio_read_word(struct aq_hw_s *aq_hw, u16 mmd, u16 addr);
+
+void aq_mdio_write_word(struct aq_hw_s *aq_hw, u16 mmd, u16 addr, u16 data);
+
+u16 aq_phy_read_reg(struct aq_hw_s *aq_hw, u16 mmd, u16 address);
+
+void aq_phy_write_reg(struct aq_hw_s *aq_hw, u16 mmd, u16 address, u16 data);
+
+bool aq_phy_init_phy_id(struct aq_hw_s *aq_hw);
+
+bool aq_phy_init(struct aq_hw_s *aq_hw);
+
+void aq_phy_disable_ptp(struct aq_hw_s *aq_hw);
+
+#endif /* AQ_PHY_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
new file mode 100644
index 000000000..28c9b6f1a
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
@@ -0,0 +1,1423 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
+ */
+
+/* File aq_ptp.c:
+ * Definition of functions for Linux PTP support.
+ */
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/ptp_classify.h>
+#include <linux/interrupt.h>
+#include <linux/clocksource.h>
+
+#include "aq_nic.h"
+#include "aq_ptp.h"
+#include "aq_ring.h"
+#include "aq_phy.h"
+#include "aq_filters.h"
+
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
+
+#define AQ_PTP_TX_TIMEOUT (HZ * 10)
+
+#define POLL_SYNC_TIMER_MS 15
+
+enum ptp_speed_offsets {
+ ptp_offset_idx_10 = 0,
+ ptp_offset_idx_100,
+ ptp_offset_idx_1000,
+ ptp_offset_idx_2500,
+ ptp_offset_idx_5000,
+ ptp_offset_idx_10000,
+};
+
+struct ptp_skb_ring {
+ struct sk_buff **buff;
+ spinlock_t lock;
+ unsigned int size;
+ unsigned int head;
+ unsigned int tail;
+};
+
+struct ptp_tx_timeout {
+ spinlock_t lock;
+ bool active;
+ unsigned long tx_start;
+};
+
+struct aq_ptp_s {
+ struct aq_nic_s *aq_nic;
+ struct hwtstamp_config hwtstamp_config;
+ spinlock_t ptp_lock;
+ spinlock_t ptp_ring_lock;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_info;
+
+ atomic_t offset_egress;
+ atomic_t offset_ingress;
+
+ struct aq_ring_param_s ptp_ring_param;
+
+ struct ptp_tx_timeout ptp_tx_timeout;
+
+ unsigned int idx_vector;
+ struct napi_struct napi;
+
+ struct aq_ring_s ptp_tx;
+ struct aq_ring_s ptp_rx;
+ struct aq_ring_s hwts_rx;
+
+ struct ptp_skb_ring skb_ring;
+
+ struct aq_rx_filter_l3l4 udp_filter;
+ struct aq_rx_filter_l2 eth_type_filter;
+
+ struct delayed_work poll_sync;
+ u32 poll_timeout_ms;
+
+ bool extts_pin_enabled;
+ u64 last_sync1588_ts;
+
+ bool a1_ptp;
+};
+
+struct ptp_tm_offset {
+ unsigned int mbps;
+ int egress;
+ int ingress;
+};
+
+static struct ptp_tm_offset ptp_offset[6];
+
+void aq_ptp_tm_offset_set(struct aq_nic_s *aq_nic, unsigned int mbps)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ int i, egress, ingress;
+
+ if (!aq_ptp)
+ return;
+
+ egress = 0;
+ ingress = 0;
+
+ for (i = 0; i < ARRAY_SIZE(ptp_offset); i++) {
+ if (mbps == ptp_offset[i].mbps) {
+ egress = ptp_offset[i].egress;
+ ingress = ptp_offset[i].ingress;
+ break;
+ }
+ }
+
+ atomic_set(&aq_ptp->offset_egress, egress);
+ atomic_set(&aq_ptp->offset_ingress, ingress);
+}
+
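+/* Simple head/tail skb FIFO; one slot is always left empty so that
+ * head == tail unambiguously means "empty", giving a usable capacity
+ * of size - 1.
+ */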
+static int __aq_ptp_skb_put(struct ptp_skb_ring *ring, struct sk_buff *skb)
+{
+ unsigned int next_head = (ring->head + 1) % ring->size;
+
+ if (next_head == ring->tail)
+ return -ENOMEM;
+
+ ring->buff[ring->head] = skb_get(skb);
+ ring->head = next_head;
+
+ return 0;
+}
+
+static int aq_ptp_skb_put(struct ptp_skb_ring *ring, struct sk_buff *skb)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&ring->lock, flags);
+ ret = __aq_ptp_skb_put(ring, skb);
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ return ret;
+}
+
+static struct sk_buff *__aq_ptp_skb_get(struct ptp_skb_ring *ring)
+{
+ struct sk_buff *skb;
+
+ if (ring->tail == ring->head)
+ return NULL;
+
+ skb = ring->buff[ring->tail];
+ ring->tail = (ring->tail + 1) % ring->size;
+
+ return skb;
+}
+
+static struct sk_buff *aq_ptp_skb_get(struct ptp_skb_ring *ring)
+{
+ unsigned long flags;
+ struct sk_buff *skb;
+
+ spin_lock_irqsave(&ring->lock, flags);
+ skb = __aq_ptp_skb_get(ring);
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ return skb;
+}
+
+static unsigned int aq_ptp_skb_buf_len(struct ptp_skb_ring *ring)
+{
+ unsigned long flags;
+ unsigned int len;
+
+ spin_lock_irqsave(&ring->lock, flags);
+ len = (ring->head >= ring->tail) ?
+ ring->head - ring->tail :
+ ring->size - ring->tail + ring->head;
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ return len;
+}
+
+static int aq_ptp_skb_ring_init(struct ptp_skb_ring *ring, unsigned int size)
+{
+ struct sk_buff **buff = kmalloc_array(size, sizeof(*buff), GFP_KERNEL);
+
+ if (!buff)
+ return -ENOMEM;
+
+ spin_lock_init(&ring->lock);
+
+ ring->buff = buff;
+ ring->size = size;
+ ring->head = 0;
+ ring->tail = 0;
+
+ return 0;
+}
+
+static void aq_ptp_skb_ring_clean(struct ptp_skb_ring *ring)
+{
+ struct sk_buff *skb;
+
+ while ((skb = aq_ptp_skb_get(ring)) != NULL)
+ dev_kfree_skb_any(skb);
+}
+
+static void aq_ptp_skb_ring_release(struct ptp_skb_ring *ring)
+{
+ if (ring->buff) {
+ aq_ptp_skb_ring_clean(ring);
+ kfree(ring->buff);
+ ring->buff = NULL;
+ }
+}
+
+static void aq_ptp_tx_timeout_init(struct ptp_tx_timeout *timeout)
+{
+ spin_lock_init(&timeout->lock);
+ timeout->active = false;
+}
+
+static void aq_ptp_tx_timeout_start(struct aq_ptp_s *aq_ptp)
+{
+ struct ptp_tx_timeout *timeout = &aq_ptp->ptp_tx_timeout;
+ unsigned long flags;
+
+ spin_lock_irqsave(&timeout->lock, flags);
+ timeout->active = true;
+ timeout->tx_start = jiffies;
+ spin_unlock_irqrestore(&timeout->lock, flags);
+}
+
+static void aq_ptp_tx_timeout_update(struct aq_ptp_s *aq_ptp)
+{
+ if (!aq_ptp_skb_buf_len(&aq_ptp->skb_ring)) {
+ struct ptp_tx_timeout *timeout = &aq_ptp->ptp_tx_timeout;
+ unsigned long flags;
+
+ spin_lock_irqsave(&timeout->lock, flags);
+ timeout->active = false;
+ spin_unlock_irqrestore(&timeout->lock, flags);
+ }
+}
+
+static void aq_ptp_tx_timeout_check(struct aq_ptp_s *aq_ptp)
+{
+ struct ptp_tx_timeout *timeout = &aq_ptp->ptp_tx_timeout;
+ unsigned long flags;
+ bool timeout_flag;
+
+ timeout_flag = false;
+
+ spin_lock_irqsave(&timeout->lock, flags);
+ if (timeout->active) {
+ timeout_flag = time_is_before_jiffies(timeout->tx_start +
+ AQ_PTP_TX_TIMEOUT);
+ /* reset active flag if timeout detected */
+ if (timeout_flag)
+ timeout->active = false;
+ }
+ spin_unlock_irqrestore(&timeout->lock, flags);
+
+ if (timeout_flag) {
+ aq_ptp_skb_ring_clean(&aq_ptp->skb_ring);
+ netdev_err(aq_ptp->aq_nic->ndev,
+ "PTP Timeout. Clearing Tx Timestamp SKBs\n");
+ }
+}
+
+/* aq_ptp_adjfine
+ * @ptp: the ptp clock structure
+ * @scaled_ppm: parts per million adjustment, with a 16-bit binary fraction
+ *
+ * Adjust the frequency of the PTP cycle counter by the indicated amount,
+ * converted to parts per billion before being handed to the hardware.
+ */
+static int aq_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+
+ mutex_lock(&aq_nic->fwreq_mutex);
+ aq_nic->aq_hw_ops->hw_adj_clock_freq(aq_nic->aq_hw,
+ scaled_ppm_to_ppb(scaled_ppm));
+ mutex_unlock(&aq_nic->fwreq_mutex);
+
+ return 0;
+}
+
+/* aq_ptp_adjtime
+ * @ptp: the ptp clock structure
+ * @delta: offset to adjust the cycle counter by
+ *
+ * adjust the hardware clock by the requested delta.
+ */
+static int aq_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+ unsigned long flags;
+
+ spin_lock_irqsave(&aq_ptp->ptp_lock, flags);
+ aq_nic->aq_hw_ops->hw_adj_sys_clock(aq_nic->aq_hw, delta);
+ spin_unlock_irqrestore(&aq_ptp->ptp_lock, flags);
+
+ return 0;
+}
+
+/* aq_ptp_gettime
+ * @ptp: the ptp clock structure
+ * @ts: timespec structure to hold the current time value
+ *
+ * read the timecounter and return the correct value in ns,
+ * after converting it into a struct timespec.
+ */
+static int aq_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+ unsigned long flags;
+ u64 ns;
+
+ spin_lock_irqsave(&aq_ptp->ptp_lock, flags);
+ aq_nic->aq_hw_ops->hw_get_ptp_ts(aq_nic->aq_hw, &ns);
+ spin_unlock_irqrestore(&aq_ptp->ptp_lock, flags);
+
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+/* aq_ptp_settime
+ * @ptp: the ptp clock structure
+ * @ts: the timespec containing the new time for the cycle counter
+ *
+ * reset the timecounter to use a new base value instead of the kernel
+ * wall timer value.
+ */
+static int aq_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+ unsigned long flags;
+ u64 ns = timespec64_to_ns(ts);
+ u64 now;
+
+ spin_lock_irqsave(&aq_ptp->ptp_lock, flags);
+ aq_nic->aq_hw_ops->hw_get_ptp_ts(aq_nic->aq_hw, &now);
+ aq_nic->aq_hw_ops->hw_adj_sys_clock(aq_nic->aq_hw, (s64)ns - (s64)now);
+
+ spin_unlock_irqrestore(&aq_ptp->ptp_lock, flags);
+
+ return 0;
+}
+
+static void aq_ptp_convert_to_hwtstamp(struct aq_ptp_s *aq_ptp,
+ struct skb_shared_hwtstamps *hwtstamp,
+ u64 timestamp)
+{
+ memset(hwtstamp, 0, sizeof(*hwtstamp));
+ hwtstamp->hwtstamp = ns_to_ktime(timestamp);
+}
+
+static int aq_ptp_hw_pin_conf(struct aq_nic_s *aq_nic, u32 pin_index, u64 start,
+ u64 period)
+{
+ if (period)
+ netdev_dbg(aq_nic->ndev,
+ "Enable GPIO %d pulsing, start time %llu, period %u\n",
+ pin_index, start, (u32)period);
+ else
+ netdev_dbg(aq_nic->ndev,
+ "Disable GPIO %d pulsing, start time %llu, period %u\n",
+ pin_index, start, (u32)period);
+
+ /* Notify hardware of the request to begin sending pulses.
+ * If period is zero, pulsing is disabled.
+ */
+ mutex_lock(&aq_nic->fwreq_mutex);
+ aq_nic->aq_hw_ops->hw_gpio_pulse(aq_nic->aq_hw, pin_index,
+ start, (u32)period);
+ mutex_unlock(&aq_nic->fwreq_mutex);
+
+ return 0;
+}
+
+static int aq_ptp_perout_pin_configure(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
+ struct ptp_clock_time *t = &rq->perout.period;
+ struct ptp_clock_time *s = &rq->perout.start;
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+ u64 start, period;
+ u32 pin_index = rq->perout.index;
+
+ /* verify the request channel is there */
+ if (pin_index >= ptp->n_per_out)
+ return -EINVAL;
+
+ /* we cannot support periods greater than 4 seconds
+ * because the period register holds only 32 bits of nanoseconds
+ */
+ if (t->sec > 4 || t->sec < 0)
+ return -ERANGE;
+
+ /* convert to unsigned 64b ns,
+ * verify we can put it in a 32b register
+ */
+ period = on ? t->sec * NSEC_PER_SEC + t->nsec : 0;
+
+ /* verify the value is in range supported by hardware */
+ if (period > U32_MAX)
+ return -ERANGE;
+ /* convert to unsigned 64b ns */
+ /* TODO convert to AQ time */
+ start = on ? s->sec * NSEC_PER_SEC + s->nsec : 0;
+
+ aq_ptp_hw_pin_conf(aq_nic, pin_index, start, period);
+
+ return 0;
+}
+
+static int aq_ptp_pps_pin_configure(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+ u64 start, period;
+ u32 pin_index = 0;
+ u32 rest = 0;
+
+ /* verify the request channel is there */
+ if (pin_index >= ptp->n_per_out)
+ return -EINVAL;
+
+ aq_nic->aq_hw_ops->hw_get_ptp_ts(aq_nic->aq_hw, &start);
+ div_u64_rem(start, NSEC_PER_SEC, &rest);
+ period = on ? NSEC_PER_SEC : 0; /* PPS - pulse per second */
+ start = on ? start - rest + NSEC_PER_SEC *
+ (rest > 990000000LL ? 2 : 1) : 0;
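+ /* Align the first pulse to the next full second boundary; if we are
+ * within 10 ms of it, skip ahead to the one after (e.g., now = 5.991 s
+ * gives rest = 0.991 s > 0.990 s, so start = 7.0 s).
+ */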
+
+ aq_ptp_hw_pin_conf(aq_nic, pin_index, start, period);
+
+ return 0;
+}
+
+static void aq_ptp_extts_pin_ctrl(struct aq_ptp_s *aq_ptp)
+{
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+ u32 enable = aq_ptp->extts_pin_enabled;
+
+ if (aq_nic->aq_hw_ops->hw_extts_gpio_enable)
+ aq_nic->aq_hw_ops->hw_extts_gpio_enable(aq_nic->aq_hw, 0,
+ enable);
+}
+
+static int aq_ptp_extts_pin_configure(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
+
+ u32 pin_index = rq->extts.index;
+
+ if (pin_index >= ptp->n_ext_ts)
+ return -EINVAL;
+
+ aq_ptp->extts_pin_enabled = !!on;
+ if (on) {
+ aq_ptp->poll_timeout_ms = POLL_SYNC_TIMER_MS;
+ cancel_delayed_work_sync(&aq_ptp->poll_sync);
+ schedule_delayed_work(&aq_ptp->poll_sync,
+ msecs_to_jiffies(aq_ptp->poll_timeout_ms));
+ }
+
+ aq_ptp_extts_pin_ctrl(aq_ptp);
+ return 0;
+}
+
+/* aq_ptp_gpio_feature_enable
+ * @ptp: the ptp clock structure
+ * @rq: the requested feature to change
+ * @on: whether to enable or disable the feature
+ */
+static int aq_ptp_gpio_feature_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ return aq_ptp_extts_pin_configure(ptp, rq, on);
+ case PTP_CLK_REQ_PEROUT:
+ return aq_ptp_perout_pin_configure(ptp, rq, on);
+ case PTP_CLK_REQ_PPS:
+ return aq_ptp_pps_pin_configure(ptp, rq, on);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/* aq_ptp_verify
+ * @ptp: the ptp clock structure
+ * @pin: index of the pin in question
+ * @func: the desired function to use
+ * @chan: the function channel index to use
+ */
+static int aq_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ /* verify the requested pin is there */
+ if (!ptp->pin_config || pin >= ptp->n_pins)
+ return -EINVAL;
+
+ /* enforce locked channels, no changing them */
+ if (chan != ptp->pin_config[pin].chan)
+ return -EINVAL;
+
+ /* we want to keep the functions locked as well */
+ if (func != ptp->pin_config[pin].func)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* aq_ptp_tx_hwtstamp - utility function which checks for a TX timestamp
+ * @aq_nic: the private nic struct
+ * @timestamp: the raw hardware timestamp in ns
+ *
+ * If the timestamp is valid, we convert it into the timecounter ns
+ * value, then store that result into the hwtstamps structure which
+ * is passed up the network stack.
+ */
+void aq_ptp_tx_hwtstamp(struct aq_nic_s *aq_nic, u64 timestamp)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ struct sk_buff *skb = aq_ptp_skb_get(&aq_ptp->skb_ring);
+ struct skb_shared_hwtstamps hwtstamp;
+
+ if (!skb) {
+ netdev_err(aq_nic->ndev, "got a TX timestamp, but the skb queue is empty\n");
+ return;
+ }
+
+ timestamp += atomic_read(&aq_ptp->offset_egress);
+ aq_ptp_convert_to_hwtstamp(aq_ptp, &hwtstamp, timestamp);
+ skb_tstamp_tx(skb, &hwtstamp);
+ dev_kfree_skb_any(skb);
+
+ aq_ptp_tx_timeout_update(aq_ptp);
+}
+
+/* aq_ptp_rx_hwtstamp - utility function which checks for an RX timestamp
+ * @aq_ptp: pointer to the PTP private struct
+ * @shhwtstamps: the skb_shared_hwtstamps in which to save the timestamp
+ * @timestamp: the raw hardware timestamp in ns
+ *
+ * If the timestamp is valid, we convert it into the timecounter ns
+ * value, then store that result into the hwtstamps structure which
+ * is passed up the network stack.
+ */
+static void aq_ptp_rx_hwtstamp(struct aq_ptp_s *aq_ptp, struct skb_shared_hwtstamps *shhwtstamps,
+ u64 timestamp)
+{
+ timestamp -= atomic_read(&aq_ptp->offset_ingress);
+ aq_ptp_convert_to_hwtstamp(aq_ptp, shhwtstamps, timestamp);
+}
+
+void aq_ptp_hwtstamp_config_get(struct aq_ptp_s *aq_ptp,
+ struct hwtstamp_config *config)
+{
+ *config = aq_ptp->hwtstamp_config;
+}
+
+static void aq_ptp_prepare_filters(struct aq_ptp_s *aq_ptp)
+{
+ aq_ptp->udp_filter.cmd = HW_ATL_RX_ENABLE_FLTR_L3L4 |
+ HW_ATL_RX_ENABLE_CMP_PROT_L4 |
+ HW_ATL_RX_UDP |
+ HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4 |
+ HW_ATL_RX_HOST << HW_ATL_RX_ACTION_FL3F4_SHIFT |
+ HW_ATL_RX_ENABLE_QUEUE_L3L4 |
+ aq_ptp->ptp_rx.idx << HW_ATL_RX_QUEUE_FL3L4_SHIFT;
+ aq_ptp->udp_filter.p_dst = PTP_EV_PORT;
+
+ aq_ptp->eth_type_filter.ethertype = ETH_P_1588;
+ aq_ptp->eth_type_filter.queue = aq_ptp->ptp_rx.idx;
+}
+
+int aq_ptp_hwtstamp_config_set(struct aq_ptp_s *aq_ptp,
+ struct hwtstamp_config *config)
+{
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+ const struct aq_hw_ops *hw_ops;
+ int err = 0;
+
+ hw_ops = aq_nic->aq_hw_ops;
+ if (config->tx_type == HWTSTAMP_TX_ON ||
+ config->rx_filter == HWTSTAMP_FILTER_PTP_V2_EVENT) {
+ aq_ptp_prepare_filters(aq_ptp);
+ if (hw_ops->hw_filter_l3l4_set) {
+ err = hw_ops->hw_filter_l3l4_set(aq_nic->aq_hw,
+ &aq_ptp->udp_filter);
+ }
+ if (!err && hw_ops->hw_filter_l2_set) {
+ err = hw_ops->hw_filter_l2_set(aq_nic->aq_hw,
+ &aq_ptp->eth_type_filter);
+ }
+ aq_utils_obj_set(&aq_nic->flags, AQ_NIC_PTP_DPATH_UP);
+ } else {
+ aq_ptp->udp_filter.cmd &= ~HW_ATL_RX_ENABLE_FLTR_L3L4;
+ if (hw_ops->hw_filter_l3l4_set) {
+ err = hw_ops->hw_filter_l3l4_set(aq_nic->aq_hw,
+ &aq_ptp->udp_filter);
+ }
+ if (!err && hw_ops->hw_filter_l2_clear) {
+ err = hw_ops->hw_filter_l2_clear(aq_nic->aq_hw,
+ &aq_ptp->eth_type_filter);
+ }
+ aq_utils_obj_clear(&aq_nic->flags, AQ_NIC_PTP_DPATH_UP);
+ }
+
+ if (err)
+ return -EREMOTEIO;
+
+ aq_ptp->hwtstamp_config = *config;
+
+ return 0;
+}
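+
+/* User space reaches this function through the SIOCSHWTSTAMP ioctl. A
+ * hedged sketch (the interface name "eth0" is an assumption):
+ *
+ *	#include <string.h>
+ *	#include <sys/ioctl.h>
+ *	#include <sys/socket.h>
+ *	#include <net/if.h>
+ *	#include <linux/sockios.h>
+ *	#include <linux/net_tstamp.h>
+ *
+ *	struct hwtstamp_config cfg = { 0 };
+ *	struct ifreq ifr = { 0 };
+ *
+ *	cfg.tx_type = HWTSTAMP_TX_ON;
+ *	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
+ *	ifr.ifr_data = (void *)&cfg;
+ *	ioctl(socket(AF_INET, SOCK_DGRAM, 0), SIOCSHWTSTAMP, &ifr);
+ */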
+
+bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+
+ if (!aq_ptp)
+ return false;
+
+ return &aq_ptp->ptp_tx == ring ||
+ &aq_ptp->ptp_rx == ring || &aq_ptp->hwts_rx == ring;
+}
+
+u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct skb_shared_hwtstamps *shhwtstamps, u8 *p,
+ unsigned int len)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ u64 timestamp = 0;
+ u16 ret = aq_nic->aq_hw_ops->rx_extract_ts(aq_nic->aq_hw,
+ p, len, &timestamp);
+
+ if (ret > 0)
+ aq_ptp_rx_hwtstamp(aq_ptp, shhwtstamps, timestamp);
+
+ return ret;
+}
+
+static int aq_ptp_poll(struct napi_struct *napi, int budget)
+{
+ struct aq_ptp_s *aq_ptp = container_of(napi, struct aq_ptp_s, napi);
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+ bool was_cleaned = false;
+ int work_done = 0;
+ int err;
+
+ /* Processing PTP TX traffic */
+ err = aq_nic->aq_hw_ops->hw_ring_tx_head_update(aq_nic->aq_hw,
+ &aq_ptp->ptp_tx);
+ if (err < 0)
+ goto err_exit;
+
+ if (aq_ptp->ptp_tx.sw_head != aq_ptp->ptp_tx.hw_head) {
+ aq_ring_tx_clean(&aq_ptp->ptp_tx);
+
+ was_cleaned = true;
+ }
+
+ /* Processing HW_TIMESTAMP RX traffic */
+ err = aq_nic->aq_hw_ops->hw_ring_hwts_rx_receive(aq_nic->aq_hw,
+ &aq_ptp->hwts_rx);
+ if (err < 0)
+ goto err_exit;
+
+ if (aq_ptp->hwts_rx.sw_head != aq_ptp->hwts_rx.hw_head) {
+ aq_ring_hwts_rx_clean(&aq_ptp->hwts_rx, aq_nic);
+
+ err = aq_nic->aq_hw_ops->hw_ring_hwts_rx_fill(aq_nic->aq_hw,
+ &aq_ptp->hwts_rx);
+ if (err < 0)
+ goto err_exit;
+
+ was_cleaned = true;
+ }
+
+ /* Processing PTP RX traffic */
+ err = aq_nic->aq_hw_ops->hw_ring_rx_receive(aq_nic->aq_hw,
+ &aq_ptp->ptp_rx);
+ if (err < 0)
+ goto err_exit;
+
+ if (aq_ptp->ptp_rx.sw_head != aq_ptp->ptp_rx.hw_head) {
+ unsigned int sw_tail_old;
+
+ err = aq_ring_rx_clean(&aq_ptp->ptp_rx, napi, &work_done, budget);
+ if (err < 0)
+ goto err_exit;
+
+ sw_tail_old = aq_ptp->ptp_rx.sw_tail;
+ err = aq_ring_rx_fill(&aq_ptp->ptp_rx);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_nic->aq_hw_ops->hw_ring_rx_fill(aq_nic->aq_hw,
+ &aq_ptp->ptp_rx,
+ sw_tail_old);
+ if (err < 0)
+ goto err_exit;
+ }
+
+ if (was_cleaned)
+ work_done = budget;
+
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ aq_nic->aq_hw_ops->hw_irq_enable(aq_nic->aq_hw,
+ BIT_ULL(aq_ptp->ptp_ring_param.vec_idx));
+ }
+
+err_exit:
+ return work_done;
+}
+
+static irqreturn_t aq_ptp_isr(int irq, void *private)
+{
+ struct aq_ptp_s *aq_ptp = private;
+ int err = 0;
+
+ if (!aq_ptp) {
+ err = -EINVAL;
+ goto err_exit;
+ }
+ napi_schedule(&aq_ptp->napi);
+
+err_exit:
+ return err >= 0 ? IRQ_HANDLED : IRQ_NONE;
+}
+
+int aq_ptp_xmit(struct aq_nic_s *aq_nic, struct sk_buff *skb)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ struct aq_ring_s *ring = &aq_ptp->ptp_tx;
+ unsigned long irq_flags;
+ int err = NETDEV_TX_OK;
+ unsigned int frags;
+
+ if (skb->len <= 0) {
+ dev_kfree_skb_any(skb);
+ goto err_exit;
+ }
+
+ frags = skb_shinfo(skb)->nr_frags + 1;
+ /* Fragments cannot be bigger than 16KB because PTP
+ * usually works without jumbo frames, even in the
+ * background
+ */
+ if (frags > AQ_CFG_SKB_FRAGS_MAX || frags > aq_ring_avail_dx(ring)) {
+ /* Drop the packet: it makes no sense to delay it */
+ dev_kfree_skb_any(skb);
+ goto err_exit;
+ }
+
+ err = aq_ptp_skb_put(&aq_ptp->skb_ring, skb);
+ if (err) {
+ netdev_err(aq_nic->ndev, "SKB Ring is overflow (%u)!\n",
+ ring->size);
+ return NETDEV_TX_BUSY;
+ }
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ aq_ptp_tx_timeout_start(aq_ptp);
+ skb_tx_timestamp(skb);
+
+ spin_lock_irqsave(&aq_nic->aq_ptp->ptp_ring_lock, irq_flags);
+ frags = aq_nic_map_skb(aq_nic, skb, ring);
+
+ if (likely(frags)) {
+ err = aq_nic->aq_hw_ops->hw_ring_tx_xmit(aq_nic->aq_hw,
+ ring, frags);
+ if (err >= 0) {
+ u64_stats_update_begin(&ring->stats.tx.syncp);
+ ++ring->stats.tx.packets;
+ ring->stats.tx.bytes += skb->len;
+ u64_stats_update_end(&ring->stats.tx.syncp);
+ }
+ } else {
+ err = NETDEV_TX_BUSY;
+ }
+ spin_unlock_irqrestore(&aq_nic->aq_ptp->ptp_ring_lock, irq_flags);
+
+err_exit:
+ return err;
+}
+
+void aq_ptp_service_task(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+
+ if (!aq_ptp)
+ return;
+
+ aq_ptp_tx_timeout_check(aq_ptp);
+}
+
+int aq_ptp_irq_alloc(struct aq_nic_s *aq_nic)
+{
+ struct pci_dev *pdev = aq_nic->pdev;
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ int err = 0;
+
+ if (!aq_ptp)
+ return 0;
+
+ if (pdev->msix_enabled || pdev->msi_enabled) {
+ err = request_irq(pci_irq_vector(pdev, aq_ptp->idx_vector),
+ aq_ptp_isr, 0, aq_nic->ndev->name, aq_ptp);
+ } else {
+ err = -EINVAL;
+ goto err_exit;
+ }
+
+err_exit:
+ return err;
+}
+
+void aq_ptp_irq_free(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ struct pci_dev *pdev = aq_nic->pdev;
+
+ if (!aq_ptp)
+ return;
+
+ free_irq(pci_irq_vector(pdev, aq_ptp->idx_vector), aq_ptp);
+}
+
+int aq_ptp_ring_init(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ int err = 0;
+
+ if (!aq_ptp)
+ return 0;
+
+ err = aq_ring_init(&aq_ptp->ptp_tx, ATL_RING_TX);
+ if (err < 0)
+ goto err_exit;
+ err = aq_nic->aq_hw_ops->hw_ring_tx_init(aq_nic->aq_hw,
+ &aq_ptp->ptp_tx,
+ &aq_ptp->ptp_ring_param);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_ring_init(&aq_ptp->ptp_rx, ATL_RING_RX);
+ if (err < 0)
+ goto err_exit;
+ err = aq_nic->aq_hw_ops->hw_ring_rx_init(aq_nic->aq_hw,
+ &aq_ptp->ptp_rx,
+ &aq_ptp->ptp_ring_param);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_ring_rx_fill(&aq_ptp->ptp_rx);
+ if (err < 0)
+ goto err_rx_free;
+ err = aq_nic->aq_hw_ops->hw_ring_rx_fill(aq_nic->aq_hw,
+ &aq_ptp->ptp_rx,
+ 0U);
+ if (err < 0)
+ goto err_rx_free;
+
+ err = aq_ring_init(&aq_ptp->hwts_rx, ATL_RING_RX);
+ if (err < 0)
+ goto err_rx_free;
+ err = aq_nic->aq_hw_ops->hw_ring_rx_init(aq_nic->aq_hw,
+ &aq_ptp->hwts_rx,
+ &aq_ptp->ptp_ring_param);
+ if (err < 0)
+ goto err_exit;
+ err = aq_nic->aq_hw_ops->hw_ring_hwts_rx_fill(aq_nic->aq_hw,
+ &aq_ptp->hwts_rx);
+ if (err < 0)
+ goto err_exit;
+
+ return err;
+
+err_rx_free:
+ aq_ring_rx_deinit(&aq_ptp->ptp_rx);
+err_exit:
+ return err;
+}
+
+int aq_ptp_ring_start(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ int err = 0;
+
+ if (!aq_ptp)
+ return 0;
+
+ err = aq_nic->aq_hw_ops->hw_ring_tx_start(aq_nic->aq_hw, &aq_ptp->ptp_tx);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_nic->aq_hw_ops->hw_ring_rx_start(aq_nic->aq_hw, &aq_ptp->ptp_rx);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_nic->aq_hw_ops->hw_ring_rx_start(aq_nic->aq_hw,
+ &aq_ptp->hwts_rx);
+ if (err < 0)
+ goto err_exit;
+
+ napi_enable(&aq_ptp->napi);
+
+err_exit:
+ return err;
+}
+
+void aq_ptp_ring_stop(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+
+ if (!aq_ptp)
+ return;
+
+ aq_nic->aq_hw_ops->hw_ring_tx_stop(aq_nic->aq_hw, &aq_ptp->ptp_tx);
+ aq_nic->aq_hw_ops->hw_ring_rx_stop(aq_nic->aq_hw, &aq_ptp->ptp_rx);
+
+ aq_nic->aq_hw_ops->hw_ring_rx_stop(aq_nic->aq_hw, &aq_ptp->hwts_rx);
+
+ napi_disable(&aq_ptp->napi);
+}
+
+void aq_ptp_ring_deinit(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+
+ if (!aq_ptp || !aq_ptp->ptp_tx.aq_nic || !aq_ptp->ptp_rx.aq_nic)
+ return;
+
+ aq_ring_tx_clean(&aq_ptp->ptp_tx);
+ aq_ring_rx_deinit(&aq_ptp->ptp_rx);
+}
+
+int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ unsigned int tx_ring_idx, rx_ring_idx;
+ struct aq_ring_s *hwts;
+ struct aq_ring_s *ring;
+ int err;
+
+ if (!aq_ptp)
+ return 0;
+
+ tx_ring_idx = aq_ptp_ring_idx(aq_nic->aq_nic_cfg.tc_mode);
+
+ ring = aq_ring_tx_alloc(&aq_ptp->ptp_tx, aq_nic,
+ tx_ring_idx, &aq_nic->aq_nic_cfg);
+ if (!ring) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+ rx_ring_idx = aq_ptp_ring_idx(aq_nic->aq_nic_cfg.tc_mode);
+
+ ring = aq_ring_rx_alloc(&aq_ptp->ptp_rx, aq_nic,
+ rx_ring_idx, &aq_nic->aq_nic_cfg);
+ if (!ring) {
+ err = -ENOMEM;
+ goto err_exit_ptp_tx;
+ }
+
+ hwts = aq_ring_hwts_rx_alloc(&aq_ptp->hwts_rx, aq_nic, PTP_HWST_RING_IDX,
+ aq_nic->aq_nic_cfg.rxds,
+ aq_nic->aq_nic_cfg.aq_hw_caps->rxd_size);
+ if (!hwts) {
+ err = -ENOMEM;
+ goto err_exit_ptp_rx;
+ }
+
+ err = aq_ptp_skb_ring_init(&aq_ptp->skb_ring, aq_nic->aq_nic_cfg.rxds);
+ if (err != 0) {
+ err = -ENOMEM;
+ goto err_exit_hwts_rx;
+ }
+
+ aq_ptp->ptp_ring_param.vec_idx = aq_ptp->idx_vector;
+ aq_ptp->ptp_ring_param.cpu = aq_ptp->ptp_ring_param.vec_idx +
+ aq_nic_get_cfg(aq_nic)->aq_rss.base_cpu_number;
+ cpumask_set_cpu(aq_ptp->ptp_ring_param.cpu,
+ &aq_ptp->ptp_ring_param.affinity_mask);
+
+ return 0;
+
+err_exit_hwts_rx:
+ aq_ring_free(&aq_ptp->hwts_rx);
+err_exit_ptp_rx:
+ aq_ring_free(&aq_ptp->ptp_rx);
+err_exit_ptp_tx:
+ aq_ring_free(&aq_ptp->ptp_tx);
+err_exit:
+ return err;
+}
+
+void aq_ptp_ring_free(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+
+ if (!aq_ptp)
+ return;
+
+ aq_ring_free(&aq_ptp->ptp_tx);
+ aq_ring_free(&aq_ptp->ptp_rx);
+ aq_ring_free(&aq_ptp->hwts_rx);
+
+ aq_ptp_skb_ring_release(&aq_ptp->skb_ring);
+}
+
+#define MAX_PTP_GPIO_COUNT 4
+
+static struct ptp_clock_info aq_ptp_clock = {
+ .owner = THIS_MODULE,
+ .name = "atlantic ptp",
+ .max_adj = 999999999,
+ .n_ext_ts = 0,
+ .pps = 0,
+ .adjfine = aq_ptp_adjfine,
+ .adjtime = aq_ptp_adjtime,
+ .gettime64 = aq_ptp_gettime,
+ .settime64 = aq_ptp_settime,
+ .n_per_out = 0,
+ .enable = aq_ptp_gpio_feature_enable,
+ .n_pins = 0,
+ .verify = aq_ptp_verify,
+ .pin_config = NULL,
+};
+
+#define ptp_offset_init(__idx, __mbps, __egress, __ingress) do { \
+ ptp_offset[__idx].mbps = (__mbps); \
+ ptp_offset[__idx].egress = (__egress); \
+ ptp_offset[__idx].ingress = (__ingress); \
+} while (0)
+
+static void aq_ptp_offset_init_from_fw(const struct hw_atl_ptp_offset *offsets)
+{
+ int i;
+
+ /* Load offsets for PTP */
+ for (i = 0; i < ARRAY_SIZE(ptp_offset); i++) {
+ switch (i) {
+ /* 100M */
+ case ptp_offset_idx_100:
+ ptp_offset_init(i, 100,
+ offsets->egress_100,
+ offsets->ingress_100);
+ break;
+ /* 1G */
+ case ptp_offset_idx_1000:
+ ptp_offset_init(i, 1000,
+ offsets->egress_1000,
+ offsets->ingress_1000);
+ break;
+ /* 2.5G */
+ case ptp_offset_idx_2500:
+ ptp_offset_init(i, 2500,
+ offsets->egress_2500,
+ offsets->ingress_2500);
+ break;
+ /* 5G */
+ case ptp_offset_idx_5000:
+ ptp_offset_init(i, 5000,
+ offsets->egress_5000,
+ offsets->ingress_5000);
+ break;
+ /* 10G */
+ case ptp_offset_idx_10000:
+ ptp_offset_init(i, 10000,
+ offsets->egress_10000,
+ offsets->ingress_10000);
+ break;
+ }
+ }
+}
+
+static void aq_ptp_offset_init(const struct hw_atl_ptp_offset *offsets)
+{
+ memset(ptp_offset, 0, sizeof(ptp_offset));
+
+ aq_ptp_offset_init_from_fw(offsets);
+}
+
+static void aq_ptp_gpio_init(struct ptp_clock_info *info,
+ struct hw_atl_info *hw_info)
+{
+ struct ptp_pin_desc pin_desc[MAX_PTP_GPIO_COUNT];
+ u32 extts_pin_cnt = 0;
+ u32 out_pin_cnt = 0;
+ u32 i;
+
+ memset(pin_desc, 0, sizeof(pin_desc));
+
+ for (i = 0; i < MAX_PTP_GPIO_COUNT - 1; i++) {
+ if (hw_info->gpio_pin[i] ==
+ (GPIO_PIN_FUNCTION_PTP0 + out_pin_cnt)) {
+ snprintf(pin_desc[out_pin_cnt].name,
+ sizeof(pin_desc[out_pin_cnt].name),
+ "AQ_GPIO%d", i);
+ pin_desc[out_pin_cnt].index = out_pin_cnt;
+ pin_desc[out_pin_cnt].chan = out_pin_cnt;
+ pin_desc[out_pin_cnt++].func = PTP_PF_PEROUT;
+ }
+ }
+
+ info->n_per_out = out_pin_cnt;
+
+ if (hw_info->caps_ex & BIT(CAPS_EX_PHY_CTRL_TS_PIN)) {
+ extts_pin_cnt += 1;
+
+ snprintf(pin_desc[out_pin_cnt].name,
+ sizeof(pin_desc[out_pin_cnt].name),
+ "AQ_GPIO%d", out_pin_cnt);
+ pin_desc[out_pin_cnt].index = out_pin_cnt;
+ pin_desc[out_pin_cnt].chan = 0;
+ pin_desc[out_pin_cnt].func = PTP_PF_EXTTS;
+ }
+
+ info->n_pins = out_pin_cnt + extts_pin_cnt;
+ info->n_ext_ts = extts_pin_cnt;
+
+ if (!info->n_pins)
+ return;
+
+ info->pin_config = kcalloc(info->n_pins, sizeof(struct ptp_pin_desc),
+ GFP_KERNEL);
+
+ if (!info->pin_config)
+ return;
+
+ memcpy(info->pin_config, &pin_desc,
+ sizeof(struct ptp_pin_desc) * info->n_pins);
+}
+
+void aq_ptp_clock_init(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ struct timespec64 ts;
+
+ ktime_get_real_ts64(&ts);
+ aq_ptp_settime(&aq_ptp->ptp_info, &ts);
+}
+
+static void aq_ptp_poll_sync_work_cb(struct work_struct *w);
+
+int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec)
+{
+ bool a1_ptp = ATL_HW_IS_CHIP_FEATURE(aq_nic->aq_hw, ATLANTIC);
+ struct hw_atl_utils_mbox mbox;
+ struct ptp_clock *clock;
+ struct aq_ptp_s *aq_ptp;
+ int err = 0;
+
+ if (!a1_ptp) {
+ aq_nic->aq_ptp = NULL;
+ return 0;
+ }
+
+ if (!aq_nic->aq_hw_ops->hw_get_ptp_ts) {
+ aq_nic->aq_ptp = NULL;
+ return 0;
+ }
+
+ if (!aq_nic->aq_fw_ops->enable_ptp) {
+ aq_nic->aq_ptp = NULL;
+ return 0;
+ }
+
+ hw_atl_utils_mpi_read_stats(aq_nic->aq_hw, &mbox);
+
+ if (!(mbox.info.caps_ex & BIT(CAPS_EX_PHY_PTP_EN))) {
+ aq_nic->aq_ptp = NULL;
+ return 0;
+ }
+
+ aq_ptp_offset_init(&mbox.info.ptp_offset);
+
+ aq_ptp = kzalloc(sizeof(*aq_ptp), GFP_KERNEL);
+ if (!aq_ptp) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+ aq_ptp->aq_nic = aq_nic;
+ aq_ptp->a1_ptp = a1_ptp;
+
+ spin_lock_init(&aq_ptp->ptp_lock);
+ spin_lock_init(&aq_ptp->ptp_ring_lock);
+
+ aq_ptp->ptp_info = aq_ptp_clock;
+ aq_ptp_gpio_init(&aq_ptp->ptp_info, &mbox.info);
+ clock = ptp_clock_register(&aq_ptp->ptp_info, &aq_nic->ndev->dev);
+ if (IS_ERR(clock)) {
+ netdev_err(aq_nic->ndev, "ptp_clock_register failed\n");
+ err = PTR_ERR(clock);
+ goto err_exit;
+ }
+ aq_ptp->ptp_clock = clock;
+ aq_ptp_tx_timeout_init(&aq_ptp->ptp_tx_timeout);
+
+ atomic_set(&aq_ptp->offset_egress, 0);
+ atomic_set(&aq_ptp->offset_ingress, 0);
+
+ netif_napi_add(aq_nic_get_ndev(aq_nic), &aq_ptp->napi, aq_ptp_poll);
+
+ aq_ptp->idx_vector = idx_vec;
+
+ aq_nic->aq_ptp = aq_ptp;
+
+ /* enable ptp counter */
+ aq_utils_obj_set(&aq_nic->aq_hw->flags, AQ_HW_PTP_AVAILABLE);
+ mutex_lock(&aq_nic->fwreq_mutex);
+ aq_nic->aq_fw_ops->enable_ptp(aq_nic->aq_hw, 1);
+ aq_ptp_clock_init(aq_nic);
+ mutex_unlock(&aq_nic->fwreq_mutex);
+
+ INIT_DELAYED_WORK(&aq_ptp->poll_sync, &aq_ptp_poll_sync_work_cb);
+ aq_ptp->eth_type_filter.location =
+ aq_nic_reserve_filter(aq_nic, aq_rx_filter_ethertype);
+ aq_ptp->udp_filter.location =
+ aq_nic_reserve_filter(aq_nic, aq_rx_filter_l3l4);
+
+ return 0;
+
+err_exit:
+ if (aq_ptp)
+ kfree(aq_ptp->ptp_info.pin_config);
+ kfree(aq_ptp);
+ aq_nic->aq_ptp = NULL;
+ return err;
+}
+
+void aq_ptp_unregister(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+
+ if (!aq_ptp)
+ return;
+
+ ptp_clock_unregister(aq_ptp->ptp_clock);
+}
+
+void aq_ptp_free(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+
+ if (!aq_ptp)
+ return;
+
+ aq_nic_release_filter(aq_nic, aq_rx_filter_ethertype,
+ aq_ptp->eth_type_filter.location);
+ aq_nic_release_filter(aq_nic, aq_rx_filter_l3l4,
+ aq_ptp->udp_filter.location);
+ cancel_delayed_work_sync(&aq_ptp->poll_sync);
+ /* disable ptp */
+ mutex_lock(&aq_nic->fwreq_mutex);
+ aq_nic->aq_fw_ops->enable_ptp(aq_nic->aq_hw, 0);
+ mutex_unlock(&aq_nic->fwreq_mutex);
+
+ kfree(aq_ptp->ptp_info.pin_config);
+
+ netif_napi_del(&aq_ptp->napi);
+ kfree(aq_ptp);
+ aq_nic->aq_ptp = NULL;
+}
+
+struct ptp_clock *aq_ptp_get_ptp_clock(struct aq_ptp_s *aq_ptp)
+{
+ return aq_ptp->ptp_clock;
+}
+
+/* PTP external GPIO nanoseconds count */
+static u64 aq_ptp_get_sync1588_ts(struct aq_nic_s *aq_nic)
+{
+ u64 ts = 0;
+
+ if (aq_nic->aq_hw_ops->hw_get_sync_ts)
+ aq_nic->aq_hw_ops->hw_get_sync_ts(aq_nic->aq_hw, &ts);
+
+ return ts;
+}
+
+static void aq_ptp_start_work(struct aq_ptp_s *aq_ptp)
+{
+ if (aq_ptp->extts_pin_enabled) {
+ aq_ptp->poll_timeout_ms = POLL_SYNC_TIMER_MS;
+ aq_ptp->last_sync1588_ts =
+ aq_ptp_get_sync1588_ts(aq_ptp->aq_nic);
+ schedule_delayed_work(&aq_ptp->poll_sync,
+ msecs_to_jiffies(aq_ptp->poll_timeout_ms));
+ }
+}
+
+int aq_ptp_link_change(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+
+ if (!aq_ptp)
+ return 0;
+
+ if (aq_nic->aq_hw->aq_link_status.mbps)
+ aq_ptp_start_work(aq_ptp);
+ else
+ cancel_delayed_work_sync(&aq_ptp->poll_sync);
+
+ return 0;
+}
+
+static bool aq_ptp_sync_ts_updated(struct aq_ptp_s *aq_ptp, u64 *new_ts)
+{
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+ u64 sync_ts2;
+ u64 sync_ts;
+
+ sync_ts = aq_ptp_get_sync1588_ts(aq_nic);
+
+ if (sync_ts != aq_ptp->last_sync1588_ts) {
+ sync_ts2 = aq_ptp_get_sync1588_ts(aq_nic);
+ if (sync_ts != sync_ts2) {
+ sync_ts = sync_ts2;
+ sync_ts2 = aq_ptp_get_sync1588_ts(aq_nic);
+ if (sync_ts != sync_ts2) {
+ netdev_err(aq_nic->ndev,
+ "%s: Unable to get correct GPIO TS",
+ __func__);
+ sync_ts = 0;
+ }
+ }
+
+ *new_ts = sync_ts;
+ return true;
+ }
+ return false;
+}
+
+static int aq_ptp_check_sync1588(struct aq_ptp_s *aq_ptp)
+{
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+ u64 sync_ts;
+
+ /* Sync1588 pin was triggered */
+ if (aq_ptp_sync_ts_updated(aq_ptp, &sync_ts)) {
+ if (aq_ptp->extts_pin_enabled) {
+ struct ptp_clock_event ptp_event;
+ u64 time = 0;
+
+ aq_nic->aq_hw_ops->hw_ts_to_sys_clock(aq_nic->aq_hw,
+ sync_ts, &time);
+ ptp_event.index = aq_ptp->ptp_info.n_pins - 1;
+ ptp_event.timestamp = time;
+
+ ptp_event.type = PTP_CLOCK_EXTTS;
+ ptp_clock_event(aq_ptp->ptp_clock, &ptp_event);
+ }
+
+ aq_ptp->last_sync1588_ts = sync_ts;
+ }
+
+ return 0;
+}
+
+static void aq_ptp_poll_sync_work_cb(struct work_struct *w)
+{
+ struct delayed_work *dw = to_delayed_work(w);
+ struct aq_ptp_s *aq_ptp = container_of(dw, struct aq_ptp_s, poll_sync);
+
+ aq_ptp_check_sync1588(aq_ptp);
+
+ if (aq_ptp->extts_pin_enabled) {
+ unsigned long timeout = msecs_to_jiffies(aq_ptp->poll_timeout_ms);
+
+ schedule_delayed_work(&aq_ptp->poll_sync, timeout);
+ }
+}
+
+int aq_ptp_get_ring_cnt(struct aq_nic_s *aq_nic, const enum atl_ring_type ring_type)
+{
+ if (!aq_nic->aq_ptp)
+ return 0;
+
+ /* Additional RX ring is allocated for PTP HWTS on A1 */
+ return (aq_nic->aq_ptp->a1_ptp && ring_type == ATL_RING_RX) ? 2 : 1;
+}
+
+u64 *aq_ptp_get_stats(struct aq_nic_s *aq_nic, u64 *data)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ unsigned int count = 0U;
+
+ if (!aq_ptp)
+ return data;
+
+ count = aq_ring_fill_stats_data(&aq_ptp->ptp_rx, data);
+ data += count;
+ count = aq_ring_fill_stats_data(&aq_ptp->ptp_tx, data);
+ data += count;
+
+ if (aq_ptp->a1_ptp) {
+ /* Only Receive ring for HWTS */
+ count = aq_ring_fill_stats_data(&aq_ptp->hwts_rx, data);
+ data += count;
+ }
+
+ return data;
+}
+
+#endif
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h
new file mode 100644
index 000000000..210b723f2
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
+ */
+
+/* File aq_ptp.h: Declaration of PTP functions.
+ */
+#ifndef AQ_PTP_H
+#define AQ_PTP_H
+
+#include <linux/net_tstamp.h>
+
+#include "aq_ring.h"
+
+#define PTP_8TC_RING_IDX 8
+#define PTP_4TC_RING_IDX 16
+#define PTP_HWST_RING_IDX 31
+
+/* Index must be 8 (8 TCs) or 16 (4 TCs).
+ * It depends on the Traffic Class mode.
+ */
+static inline unsigned int aq_ptp_ring_idx(const enum aq_tc_mode tc_mode)
+{
+ if (tc_mode == AQ_TC_MODE_8TCS)
+ return PTP_8TC_RING_IDX;
+
+ return PTP_4TC_RING_IDX;
+}
+
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
+
+/* Common functions */
+int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec);
+
+void aq_ptp_unregister(struct aq_nic_s *aq_nic);
+void aq_ptp_free(struct aq_nic_s *aq_nic);
+
+int aq_ptp_irq_alloc(struct aq_nic_s *aq_nic);
+void aq_ptp_irq_free(struct aq_nic_s *aq_nic);
+
+int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic);
+void aq_ptp_ring_free(struct aq_nic_s *aq_nic);
+
+int aq_ptp_ring_init(struct aq_nic_s *aq_nic);
+int aq_ptp_ring_start(struct aq_nic_s *aq_nic);
+void aq_ptp_ring_stop(struct aq_nic_s *aq_nic);
+void aq_ptp_ring_deinit(struct aq_nic_s *aq_nic);
+
+void aq_ptp_service_task(struct aq_nic_s *aq_nic);
+
+void aq_ptp_tm_offset_set(struct aq_nic_s *aq_nic, unsigned int mbps);
+
+void aq_ptp_clock_init(struct aq_nic_s *aq_nic);
+
+/* Traffic processing functions */
+int aq_ptp_xmit(struct aq_nic_s *aq_nic, struct sk_buff *skb);
+void aq_ptp_tx_hwtstamp(struct aq_nic_s *aq_nic, u64 timestamp);
+
+/* PTP availability must be checked before calling these */
+void aq_ptp_hwtstamp_config_get(struct aq_ptp_s *aq_ptp,
+ struct hwtstamp_config *config);
+int aq_ptp_hwtstamp_config_set(struct aq_ptp_s *aq_ptp,
+ struct hwtstamp_config *config);
+
+/* Return whether the ring belongs to PTP or not */
+bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring);
+
+u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct skb_shared_hwtstamps *shhwtstamps, u8 *p,
+ unsigned int len);
+
+struct ptp_clock *aq_ptp_get_ptp_clock(struct aq_ptp_s *aq_ptp);
+
+int aq_ptp_link_change(struct aq_nic_s *aq_nic);
+
+/* PTP ring statistics */
+int aq_ptp_get_ring_cnt(struct aq_nic_s *aq_nic, const enum atl_ring_type ring_type);
+u64 *aq_ptp_get_stats(struct aq_nic_s *aq_nic, u64 *data);
+
+#else
+
+static inline int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec)
+{
+ return 0;
+}
+
+static inline void aq_ptp_unregister(struct aq_nic_s *aq_nic) {}
+
+static inline void aq_ptp_free(struct aq_nic_s *aq_nic)
+{
+}
+
+static inline int aq_ptp_irq_alloc(struct aq_nic_s *aq_nic)
+{
+ return 0;
+}
+
+static inline void aq_ptp_irq_free(struct aq_nic_s *aq_nic)
+{
+}
+
+static inline int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
+{
+ return 0;
+}
+
+static inline void aq_ptp_ring_free(struct aq_nic_s *aq_nic) {}
+
+static inline int aq_ptp_ring_init(struct aq_nic_s *aq_nic)
+{
+ return 0;
+}
+
+static inline int aq_ptp_ring_start(struct aq_nic_s *aq_nic)
+{
+ return 0;
+}
+
+static inline void aq_ptp_ring_stop(struct aq_nic_s *aq_nic) {}
+static inline void aq_ptp_ring_deinit(struct aq_nic_s *aq_nic) {}
+static inline void aq_ptp_service_task(struct aq_nic_s *aq_nic) {}
+static inline void aq_ptp_tm_offset_set(struct aq_nic_s *aq_nic,
+ unsigned int mbps) {}
+static inline void aq_ptp_clock_init(struct aq_nic_s *aq_nic) {}
+static inline int aq_ptp_xmit(struct aq_nic_s *aq_nic, struct sk_buff *skb)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void aq_ptp_tx_hwtstamp(struct aq_nic_s *aq_nic, u64 timestamp) {}
+static inline void aq_ptp_hwtstamp_config_get(struct aq_ptp_s *aq_ptp,
+ struct hwtstamp_config *config) {}
+static inline int aq_ptp_hwtstamp_config_set(struct aq_ptp_s *aq_ptp,
+ struct hwtstamp_config *config)
+{
+ return 0;
+}
+
+static inline bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring)
+{
+ return false;
+}
+
+static inline u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic,
+ struct skb_shared_hwtstamps *shhwtstamps, u8 *p,
+ unsigned int len)
+{
+ return 0;
+}
+
+static inline struct ptp_clock *aq_ptp_get_ptp_clock(struct aq_ptp_s *aq_ptp)
+{
+ return NULL;
+}
+
+static inline int aq_ptp_link_change(struct aq_nic_s *aq_nic)
+{
+ return 0;
+}
+#endif
+
+#endif /* AQ_PTP_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
new file mode 100644
index 000000000..4d9d7d1ed
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -0,0 +1,989 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
+ */
+
+/* File aq_ring.c: Definition of functions for Rx/Tx rings. */
+
+#include "aq_nic.h"
+#include "aq_hw.h"
+#include "aq_hw_utils.h"
+#include "aq_ptp.h"
+#include "aq_vec.h"
+#include "aq_main.h"
+
+#include <net/xdp.h>
+#include <linux/filter.h>
+#include <linux/bpf_trace.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+static void aq_get_rxpages_xdp(struct aq_ring_buff_s *buff,
+ struct xdp_buff *xdp)
+{
+ struct skb_shared_info *sinfo;
+ int i;
+
+ if (xdp_buff_has_frags(xdp)) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+
+ for (i = 0; i < sinfo->nr_frags; i++) {
+ skb_frag_t *frag = &sinfo->frags[i];
+
+ page_ref_inc(skb_frag_page(frag));
+ }
+ }
+ page_ref_inc(buff->rxdata.page);
+}
+
+static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev)
+{
+ unsigned int len = PAGE_SIZE << rxpage->order;
+
+ dma_unmap_page(dev, rxpage->daddr, len, DMA_FROM_DEVICE);
+
+ /* Drop the ref for being in the ring. */
+ __free_pages(rxpage->page, rxpage->order);
+ rxpage->page = NULL;
+}
+
+static int aq_alloc_rxpages(struct aq_rxpage *rxpage, struct aq_ring_s *rx_ring)
+{
+ struct device *dev = aq_nic_get_dev(rx_ring->aq_nic);
+ unsigned int order = rx_ring->page_order;
+ struct page *page;
+ int ret = -ENOMEM;
+ dma_addr_t daddr;
+
+ page = dev_alloc_pages(order);
+ if (unlikely(!page))
+ goto err_exit;
+
+ daddr = dma_map_page(dev, page, 0, PAGE_SIZE << order,
+ DMA_FROM_DEVICE);
+
+ if (unlikely(dma_mapping_error(dev, daddr)))
+ goto free_page;
+
+ rxpage->page = page;
+ rxpage->daddr = daddr;
+ rxpage->order = order;
+ rxpage->pg_off = rx_ring->page_offset;
+
+ return 0;
+
+free_page:
+ __free_pages(page, order);
+
+err_exit:
+ return ret;
+}
+
+static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf)
+{
+ unsigned int order = self->page_order;
+ u16 page_offset = self->page_offset;
+ u16 frame_max = self->frame_max;
+ u16 tail_size = self->tail_size;
+ int ret;
+
+ if (rxbuf->rxdata.page) {
+ /* A page refcount of one means the ring is the only user and can reuse it */
+ if (page_ref_count(rxbuf->rxdata.page) > 1) {
+ /* Try reuse buffer */
+ rxbuf->rxdata.pg_off += frame_max + page_offset +
+ tail_size;
+ if (rxbuf->rxdata.pg_off + frame_max + tail_size <=
+ (PAGE_SIZE << order)) {
+ u64_stats_update_begin(&self->stats.rx.syncp);
+ self->stats.rx.pg_flips++;
+ u64_stats_update_end(&self->stats.rx.syncp);
+
+ } else {
+ /* Buffer exhausted. Other users still hold the
+ * page, so release it and reallocate
+ */
+ aq_free_rxpage(&rxbuf->rxdata,
+ aq_nic_get_dev(self->aq_nic));
+ u64_stats_update_begin(&self->stats.rx.syncp);
+ self->stats.rx.pg_losts++;
+ u64_stats_update_end(&self->stats.rx.syncp);
+ }
+ } else {
+ rxbuf->rxdata.pg_off = page_offset;
+ u64_stats_update_begin(&self->stats.rx.syncp);
+ self->stats.rx.pg_reuses++;
+ u64_stats_update_end(&self->stats.rx.syncp);
+ }
+ }
+
+ if (!rxbuf->rxdata.page) {
+ ret = aq_alloc_rxpages(&rxbuf->rxdata, self);
+ if (ret) {
+ u64_stats_update_begin(&self->stats.rx.syncp);
+ self->stats.rx.alloc_fails++;
+ u64_stats_update_end(&self->stats.rx.syncp);
+ }
+ return ret;
+ }
+
+ return 0;
+}
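+
+/* Worked example of the page-flipping scheme above (order-0 page, 4 KiB
+ * PAGE_SIZE, frame_max = 2048, page_offset = 0, tail_size = 0): the first
+ * buffer sits at pg_off 0; on reuse with other page users present, pg_off
+ * flips to 2048, which still fits (2048 + 2048 <= 4096) and counts a
+ * pg_flip. The flip after that would need pg_off 4096 and fails the
+ * bound, so the page is released (pg_losts); if instead the refcount has
+ * dropped back to one, pg_off is simply rewound to 0 (pg_reuses).
+ */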
+
+static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
+ struct aq_nic_s *aq_nic)
+{
+ int err = 0;
+
+ self->buff_ring =
+ kcalloc(self->size, sizeof(struct aq_ring_buff_s), GFP_KERNEL);
+
+ if (!self->buff_ring) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+ self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic),
+ self->size * self->dx_size,
+ &self->dx_ring_pa, GFP_KERNEL);
+ if (!self->dx_ring) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+err_exit:
+ if (err < 0) {
+ aq_ring_free(self);
+ self = NULL;
+ }
+
+ return self;
+}
+
+struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self,
+ struct aq_nic_s *aq_nic,
+ unsigned int idx,
+ struct aq_nic_cfg_s *aq_nic_cfg)
+{
+ int err = 0;
+
+ self->aq_nic = aq_nic;
+ self->idx = idx;
+ self->size = aq_nic_cfg->txds;
+ self->dx_size = aq_nic_cfg->aq_hw_caps->txd_size;
+
+ self = aq_ring_alloc(self, aq_nic);
+ if (!self) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+err_exit:
+ if (err < 0) {
+ aq_ring_free(self);
+ self = NULL;
+ }
+
+ return self;
+}
+
+struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
+ struct aq_nic_s *aq_nic,
+ unsigned int idx,
+ struct aq_nic_cfg_s *aq_nic_cfg)
+{
+ int err = 0;
+
+ self->aq_nic = aq_nic;
+ self->idx = idx;
+ self->size = aq_nic_cfg->rxds;
+ self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size;
+ self->xdp_prog = aq_nic->xdp_prog;
+ self->frame_max = AQ_CFG_RX_FRAME_MAX;
+
+ /* Only order-2 is allowed if XDP is enabled */
+ if (READ_ONCE(self->xdp_prog)) {
+ self->page_offset = AQ_XDP_HEADROOM;
+ self->page_order = AQ_CFG_XDP_PAGEORDER;
+ self->tail_size = AQ_XDP_TAILROOM;
+ } else {
+ self->page_offset = 0;
+ self->page_order = fls(self->frame_max / PAGE_SIZE +
+ (self->frame_max % PAGE_SIZE ? 1 : 0)) - 1;
+ if (aq_nic_cfg->rxpageorder > self->page_order)
+ self->page_order = aq_nic_cfg->rxpageorder;
+ self->tail_size = 0;
+ }
+
+ self = aq_ring_alloc(self, aq_nic);
+ if (!self) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+err_exit:
+ if (err < 0) {
+ aq_ring_free(self);
+ self = NULL;
+ }
+
+ return self;
+}
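+
+/* Worked example of the non-XDP page_order math above: with 4 KiB pages
+ * and frame_max = 2048, 2048 / 4096 = 0 with a remainder, so
+ * fls(1) - 1 = 0 and a single order-0 page suffices. With
+ * frame_max = 16384, fls(4) - 1 = 2, i.e. an order-2 (16 KiB)
+ * allocation. A larger rxpageorder from the NIC config only ever raises
+ * the order.
+ */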
+
+struct aq_ring_s *
+aq_ring_hwts_rx_alloc(struct aq_ring_s *self, struct aq_nic_s *aq_nic,
+ unsigned int idx, unsigned int size, unsigned int dx_size)
+{
+ struct device *dev = aq_nic_get_dev(aq_nic);
+ size_t sz = size * dx_size + AQ_CFG_RXDS_DEF;
+
+ memset(self, 0, sizeof(*self));
+
+ self->aq_nic = aq_nic;
+ self->idx = idx;
+ self->size = size;
+ self->dx_size = dx_size;
+
+ self->dx_ring = dma_alloc_coherent(dev, sz, &self->dx_ring_pa,
+ GFP_KERNEL);
+ if (!self->dx_ring) {
+ aq_ring_free(self);
+ return NULL;
+ }
+
+ return self;
+}
+
+int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type)
+{
+ self->hw_head = 0;
+ self->sw_head = 0;
+ self->sw_tail = 0;
+ self->ring_type = ring_type;
+
+ if (self->ring_type == ATL_RING_RX)
+ u64_stats_init(&self->stats.rx.syncp);
+ else
+ u64_stats_init(&self->stats.tx.syncp);
+
+ return 0;
+}
+
+static inline bool aq_ring_dx_in_range(unsigned int h, unsigned int i,
+ unsigned int t)
+{
+ return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
+}
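+
+/* Worked example: for h = 6, t = 2 the window has wrapped (h >= t), so an
+ * index i is in range when i > 6 or i < 2; for h = 2, t = 6 (no wrap)
+ * only 2 < i < 6 qualifies. Both bounds are exclusive.
+ */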
+
+void aq_ring_update_queue_state(struct aq_ring_s *ring)
+{
+ if (aq_ring_avail_dx(ring) <= AQ_CFG_SKB_FRAGS_MAX)
+ aq_ring_queue_stop(ring);
+ else if (aq_ring_avail_dx(ring) > AQ_CFG_RESTART_DESC_THRES)
+ aq_ring_queue_wake(ring);
+}
+
+void aq_ring_queue_wake(struct aq_ring_s *ring)
+{
+ struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);
+
+ if (__netif_subqueue_stopped(ndev,
+ AQ_NIC_RING2QMAP(ring->aq_nic,
+ ring->idx))) {
+ netif_wake_subqueue(ndev,
+ AQ_NIC_RING2QMAP(ring->aq_nic, ring->idx));
+ u64_stats_update_begin(&ring->stats.tx.syncp);
+ ring->stats.tx.queue_restarts++;
+ u64_stats_update_end(&ring->stats.tx.syncp);
+ }
+}
+
+void aq_ring_queue_stop(struct aq_ring_s *ring)
+{
+ struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);
+
+ if (!__netif_subqueue_stopped(ndev,
+ AQ_NIC_RING2QMAP(ring->aq_nic,
+ ring->idx)))
+ netif_stop_subqueue(ndev,
+ AQ_NIC_RING2QMAP(ring->aq_nic, ring->idx));
+}
+
+bool aq_ring_tx_clean(struct aq_ring_s *self)
+{
+ struct device *dev = aq_nic_get_dev(self->aq_nic);
+ unsigned int budget;
+
+ for (budget = AQ_CFG_TX_CLEAN_BUDGET;
+ budget && self->sw_head != self->hw_head; budget--) {
+ struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
+
+ if (likely(buff->is_mapped)) {
+ if (unlikely(buff->is_sop)) {
+ if (!buff->is_eop &&
+ buff->eop_index != 0xffffU &&
+ (!aq_ring_dx_in_range(self->sw_head,
+ buff->eop_index,
+ self->hw_head)))
+ break;
+
+ dma_unmap_single(dev, buff->pa, buff->len,
+ DMA_TO_DEVICE);
+ } else {
+ dma_unmap_page(dev, buff->pa, buff->len,
+ DMA_TO_DEVICE);
+ }
+ }
+
+ if (likely(!buff->is_eop))
+ goto out;
+
+ if (buff->skb) {
+ u64_stats_update_begin(&self->stats.tx.syncp);
+ ++self->stats.tx.packets;
+ self->stats.tx.bytes += buff->skb->len;
+ u64_stats_update_end(&self->stats.tx.syncp);
+ dev_kfree_skb_any(buff->skb);
+ } else if (buff->xdpf) {
+ u64_stats_update_begin(&self->stats.tx.syncp);
+ ++self->stats.tx.packets;
+ self->stats.tx.bytes += xdp_get_frame_len(buff->xdpf);
+ u64_stats_update_end(&self->stats.tx.syncp);
+ xdp_return_frame_rx_napi(buff->xdpf);
+ }
+
+out:
+ buff->skb = NULL;
+ buff->xdpf = NULL;
+ buff->pa = 0U;
+ buff->eop_index = 0xffffU;
+ self->sw_head = aq_ring_next_dx(self, self->sw_head);
+ }
+
+ return !!budget;
+}
+
+static void aq_rx_checksum(struct aq_ring_s *self,
+ struct aq_ring_buff_s *buff,
+ struct sk_buff *skb)
+{
+ if (!(self->aq_nic->ndev->features & NETIF_F_RXCSUM))
+ return;
+
+ if (unlikely(buff->is_cso_err)) {
+ u64_stats_update_begin(&self->stats.rx.syncp);
+ ++self->stats.rx.errors;
+ u64_stats_update_end(&self->stats.rx.syncp);
+ skb->ip_summed = CHECKSUM_NONE;
+ return;
+ }
+ if (buff->is_ip_cso) {
+ __skb_incr_checksum_unnecessary(skb);
+ } else {
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+
+ if (buff->is_udp_cso || buff->is_tcp_cso)
+ __skb_incr_checksum_unnecessary(skb);
+}
+
+int aq_xdp_xmit(struct net_device *dev, int num_frames,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(dev);
+ unsigned int vec, i, drop = 0;
+ int cpu = smp_processor_id();
+ struct aq_nic_cfg_s *aq_cfg;
+ struct aq_ring_s *ring;
+
+ aq_cfg = aq_nic_get_cfg(aq_nic);
+ vec = cpu % aq_cfg->vecs;
+ ring = aq_nic->aq_ring_tx[AQ_NIC_CFG_TCVEC2RING(aq_cfg, 0, vec)];
+
+ for (i = 0; i < num_frames; i++) {
+ struct xdp_frame *xdpf = frames[i];
+
+ if (aq_nic_xmit_xdpf(aq_nic, ring, xdpf) == NETDEV_TX_BUSY)
+ drop++;
+ }
+
+ return num_frames - drop;
+}
+
+static struct sk_buff *aq_xdp_build_skb(struct xdp_buff *xdp,
+ struct net_device *dev,
+ struct aq_ring_buff_s *buff)
+{
+ struct xdp_frame *xdpf;
+ struct sk_buff *skb;
+
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf))
+ return NULL;
+
+ skb = xdp_build_skb_from_frame(xdpf, dev);
+ if (!skb)
+ return NULL;
+
+ aq_get_rxpages_xdp(buff, xdp);
+ return skb;
+}
+
+static struct sk_buff *aq_xdp_run_prog(struct aq_nic_s *aq_nic,
+ struct xdp_buff *xdp,
+ struct aq_ring_s *rx_ring,
+ struct aq_ring_buff_s *buff)
+{
+ int result = NETDEV_TX_BUSY;
+ struct aq_ring_s *tx_ring;
+ struct xdp_frame *xdpf;
+ struct bpf_prog *prog;
+ u32 act = XDP_ABORTED;
+ struct sk_buff *skb;
+
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.packets;
+ rx_ring->stats.rx.bytes += xdp_get_buff_len(xdp);
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+
+ prog = READ_ONCE(rx_ring->xdp_prog);
+ if (!prog)
+ return aq_xdp_build_skb(xdp, aq_nic->ndev, buff);
+
+ prefetchw(xdp->data_hard_start); /* xdp_frame write */
+
+ /* single-buffer XDP program, but the packet is multi-buffer: abort */
+ if (xdp_buff_has_frags(xdp) && !prog->aux->xdp_has_frags)
+ goto out_aborted;
+
+ act = bpf_prog_run_xdp(prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ skb = aq_xdp_build_skb(xdp, aq_nic->ndev, buff);
+ if (!skb)
+ goto out_aborted;
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.xdp_pass;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ return skb;
+ case XDP_TX:
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf))
+ goto out_aborted;
+ tx_ring = aq_nic->aq_ring_tx[rx_ring->idx];
+ result = aq_nic_xmit_xdpf(aq_nic, tx_ring, xdpf);
+ if (result == NETDEV_TX_BUSY)
+ goto out_aborted;
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.xdp_tx;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ aq_get_rxpages_xdp(buff, xdp);
+ break;
+ case XDP_REDIRECT:
+ if (xdp_do_redirect(aq_nic->ndev, xdp, prog) < 0)
+ goto out_aborted;
+ xdp_do_flush();
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.xdp_redirect;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ aq_get_rxpages_xdp(buff, xdp);
+ break;
+ default:
+ fallthrough;
+ case XDP_ABORTED:
+out_aborted:
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.xdp_aborted;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ trace_xdp_exception(aq_nic->ndev, prog, act);
+ bpf_warn_invalid_xdp_action(aq_nic->ndev, prog, act);
+ break;
+ case XDP_DROP:
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.xdp_drop;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ break;
+ }
+
+ return ERR_PTR(-result);
+}
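+
+/* The verdicts handled above come from an attached eBPF program. A
+ * minimal sketch of one (restricted C built with clang and libbpf; not
+ * part of this driver):
+ *
+ *	#include <linux/bpf.h>
+ *	#include <bpf/bpf_helpers.h>
+ *
+ *	SEC("xdp")
+ *	int xdp_pass_all(struct xdp_md *ctx)
+ *	{
+ *		return XDP_PASS;	// hand every frame to the stack
+ *	}
+ *
+ *	char _license[] SEC("license") = "GPL";
+ */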
+
+static bool aq_add_rx_fragment(struct device *dev,
+ struct aq_ring_s *ring,
+ struct aq_ring_buff_s *buff,
+ struct xdp_buff *xdp)
+{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+ struct aq_ring_buff_s *buff_ = buff;
+
+ memset(sinfo, 0, sizeof(*sinfo));
+ do {
+ skb_frag_t *frag;
+
+ if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS))
+ return true;
+
+ frag = &sinfo->frags[sinfo->nr_frags++];
+ buff_ = &ring->buff_ring[buff_->next];
+ dma_sync_single_range_for_cpu(dev,
+ buff_->rxdata.daddr,
+ buff_->rxdata.pg_off,
+ buff_->len,
+ DMA_FROM_DEVICE);
+ skb_frag_off_set(frag, buff_->rxdata.pg_off);
+ skb_frag_size_set(frag, buff_->len);
+ sinfo->xdp_frags_size += buff_->len;
+ __skb_frag_set_page(frag, buff_->rxdata.page);
+
+ buff_->is_cleaned = 1;
+
+ buff->is_ip_cso &= buff_->is_ip_cso;
+ buff->is_udp_cso &= buff_->is_udp_cso;
+ buff->is_tcp_cso &= buff_->is_tcp_cso;
+ buff->is_cso_err |= buff_->is_cso_err;
+
+ if (page_is_pfmemalloc(buff_->rxdata.page))
+ xdp_buff_set_frag_pfmemalloc(xdp);
+
+ } while (!buff_->is_eop);
+
+ xdp_buff_set_frags_flag(xdp);
+
+ return false;
+}
+
+static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
+ int *work_done, int budget)
+{
+ struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
+ int err = 0;
+
+ for (; (self->sw_head != self->hw_head) && budget;
+ self->sw_head = aq_ring_next_dx(self, self->sw_head),
+ --budget, ++(*work_done)) {
+ struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
+ bool is_ptp_ring = aq_ptp_ring(self->aq_nic, self);
+ struct aq_ring_buff_s *buff_ = NULL;
+ struct sk_buff *skb = NULL;
+ unsigned int next_ = 0U;
+ unsigned int i = 0U;
+ u16 hdr_len;
+
+ if (buff->is_cleaned)
+ continue;
+
+ if (!buff->is_eop) {
+ unsigned int frag_cnt = 0U;
+
+ buff_ = buff;
+ do {
+ bool is_rsc_completed = true;
+
+ if (buff_->next >= self->size) {
+ err = -EIO;
+ goto err_exit;
+ }
+
+ frag_cnt++;
+ next_ = buff_->next;
+ buff_ = &self->buff_ring[next_];
+ is_rsc_completed =
+ aq_ring_dx_in_range(self->sw_head,
+ next_,
+ self->hw_head);
+
+ if (unlikely(!is_rsc_completed) ||
+ frag_cnt > MAX_SKB_FRAGS) {
+ err = 0;
+ goto err_exit;
+ }
+
+ buff->is_error |= buff_->is_error;
+ buff->is_cso_err |= buff_->is_cso_err;
+
+ } while (!buff_->is_eop);
+
+ if (buff->is_error ||
+ (buff->is_lro && buff->is_cso_err)) {
+ buff_ = buff;
+ do {
+ if (buff_->next >= self->size) {
+ err = -EIO;
+ goto err_exit;
+ }
+ next_ = buff_->next;
+ buff_ = &self->buff_ring[next_];
+
+ buff_->is_cleaned = true;
+ } while (!buff_->is_eop);
+
+ u64_stats_update_begin(&self->stats.rx.syncp);
+ ++self->stats.rx.errors;
+ u64_stats_update_end(&self->stats.rx.syncp);
+ continue;
+ }
+ }
+
+ if (buff->is_error) {
+ u64_stats_update_begin(&self->stats.rx.syncp);
+ ++self->stats.rx.errors;
+ u64_stats_update_end(&self->stats.rx.syncp);
+ continue;
+ }
+
+ dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic),
+ buff->rxdata.daddr,
+ buff->rxdata.pg_off,
+ buff->len, DMA_FROM_DEVICE);
+
+ skb = napi_alloc_skb(napi, AQ_CFG_RX_HDR_SIZE);
+ if (unlikely(!skb)) {
+ u64_stats_update_begin(&self->stats.rx.syncp);
+ self->stats.rx.skb_alloc_fails++;
+ u64_stats_update_end(&self->stats.rx.syncp);
+ err = -ENOMEM;
+ goto err_exit;
+ }
+ if (is_ptp_ring)
+ buff->len -=
+ aq_ptp_extract_ts(self->aq_nic, skb_hwtstamps(skb),
+ aq_buf_vaddr(&buff->rxdata),
+ buff->len);
+
+ hdr_len = buff->len;
+ if (hdr_len > AQ_CFG_RX_HDR_SIZE)
+ hdr_len = eth_get_headlen(skb->dev,
+ aq_buf_vaddr(&buff->rxdata),
+ AQ_CFG_RX_HDR_SIZE);
+
+ memcpy(__skb_put(skb, hdr_len), aq_buf_vaddr(&buff->rxdata),
+ ALIGN(hdr_len, sizeof(long)));
+
+ if (buff->len - hdr_len > 0) {
+ skb_add_rx_frag(skb, i++, buff->rxdata.page,
+ buff->rxdata.pg_off + hdr_len,
+ buff->len - hdr_len,
+ self->frame_max);
+ page_ref_inc(buff->rxdata.page);
+ }
+
+ if (!buff->is_eop) {
+ buff_ = buff;
+ do {
+ next_ = buff_->next;
+ buff_ = &self->buff_ring[next_];
+
+ dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic),
+ buff_->rxdata.daddr,
+ buff_->rxdata.pg_off,
+ buff_->len,
+ DMA_FROM_DEVICE);
+ skb_add_rx_frag(skb, i++,
+ buff_->rxdata.page,
+ buff_->rxdata.pg_off,
+ buff_->len,
+ self->frame_max);
+ page_ref_inc(buff_->rxdata.page);
+ buff_->is_cleaned = 1;
+
+ buff->is_ip_cso &= buff_->is_ip_cso;
+ buff->is_udp_cso &= buff_->is_udp_cso;
+ buff->is_tcp_cso &= buff_->is_tcp_cso;
+ buff->is_cso_err |= buff_->is_cso_err;
+
+ } while (!buff_->is_eop);
+ }
+
+ if (buff->is_vlan)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ buff->vlan_rx_tag);
+
+ skb->protocol = eth_type_trans(skb, ndev);
+
+ aq_rx_checksum(self, buff, skb);
+
+ skb_set_hash(skb, buff->rss_hash,
+ buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
+ PKT_HASH_TYPE_NONE);
+ /* Send all PTP traffic to queue 0 */
+ skb_record_rx_queue(skb,
+ is_ptp_ring ? 0
+ : AQ_NIC_RING2QMAP(self->aq_nic,
+ self->idx));
+
+ u64_stats_update_begin(&self->stats.rx.syncp);
+ ++self->stats.rx.packets;
+ self->stats.rx.bytes += skb->len;
+ u64_stats_update_end(&self->stats.rx.syncp);
+
+ napi_gro_receive(napi, skb);
+ }
+
+err_exit:
+ return err;
+}
+
+static int __aq_ring_xdp_clean(struct aq_ring_s *rx_ring,
+ struct napi_struct *napi, int *work_done,
+ int budget)
+{
+ int frame_sz = rx_ring->page_offset + rx_ring->frame_max +
+ rx_ring->tail_size;
+ struct aq_nic_s *aq_nic = rx_ring->aq_nic;
+ bool is_rsc_completed = true;
+ struct device *dev;
+ int err = 0;
+
+ dev = aq_nic_get_dev(aq_nic);
+ for (; (rx_ring->sw_head != rx_ring->hw_head) && budget;
+ rx_ring->sw_head = aq_ring_next_dx(rx_ring, rx_ring->sw_head),
+ --budget, ++(*work_done)) {
+ struct aq_ring_buff_s *buff = &rx_ring->buff_ring[rx_ring->sw_head];
+ bool is_ptp_ring = aq_ptp_ring(rx_ring->aq_nic, rx_ring);
+ struct aq_ring_buff_s *buff_ = NULL;
+ u16 ptp_hwtstamp_len = 0;
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct sk_buff *skb = NULL;
+ unsigned int next_ = 0U;
+ struct xdp_buff xdp;
+ void *hard_start;
+
+ if (buff->is_cleaned)
+ continue;
+
+ if (!buff->is_eop) {
+ buff_ = buff;
+ do {
+ if (buff_->next >= rx_ring->size) {
+ err = -EIO;
+ goto err_exit;
+ }
+ next_ = buff_->next;
+ buff_ = &rx_ring->buff_ring[next_];
+ is_rsc_completed =
+ aq_ring_dx_in_range(rx_ring->sw_head,
+ next_,
+ rx_ring->hw_head);
+
+ if (unlikely(!is_rsc_completed))
+ break;
+
+ buff->is_error |= buff_->is_error;
+ buff->is_cso_err |= buff_->is_cso_err;
+ } while (!buff_->is_eop);
+
+ if (!is_rsc_completed) {
+ err = 0;
+ goto err_exit;
+ }
+ if (buff->is_error ||
+ (buff->is_lro && buff->is_cso_err)) {
+ buff_ = buff;
+ do {
+ if (buff_->next >= rx_ring->size) {
+ err = -EIO;
+ goto err_exit;
+ }
+ next_ = buff_->next;
+ buff_ = &rx_ring->buff_ring[next_];
+
+ buff_->is_cleaned = true;
+ } while (!buff_->is_eop);
+
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.errors;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ continue;
+ }
+ }
+
+ if (buff->is_error) {
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.errors;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ continue;
+ }
+
+ dma_sync_single_range_for_cpu(dev,
+ buff->rxdata.daddr,
+ buff->rxdata.pg_off,
+ buff->len, DMA_FROM_DEVICE);
+ hard_start = page_address(buff->rxdata.page) +
+ buff->rxdata.pg_off - rx_ring->page_offset;
+
+ if (is_ptp_ring) {
+ ptp_hwtstamp_len = aq_ptp_extract_ts(rx_ring->aq_nic, &shhwtstamps,
+ aq_buf_vaddr(&buff->rxdata),
+ buff->len);
+ buff->len -= ptp_hwtstamp_len;
+ }
+
+ xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
+ xdp_prepare_buff(&xdp, hard_start, rx_ring->page_offset,
+ buff->len, false);
+ if (!buff->is_eop) {
+ if (aq_add_rx_fragment(dev, rx_ring, buff, &xdp)) {
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.packets;
+ rx_ring->stats.rx.bytes += xdp_get_buff_len(&xdp);
+ ++rx_ring->stats.rx.xdp_aborted;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ continue;
+ }
+ }
+
+ skb = aq_xdp_run_prog(aq_nic, &xdp, rx_ring, buff);
+ if (IS_ERR(skb) || !skb)
+ continue;
+
+ if (ptp_hwtstamp_len > 0)
+ *skb_hwtstamps(skb) = shhwtstamps;
+
+ if (buff->is_vlan)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ buff->vlan_rx_tag);
+
+ aq_rx_checksum(rx_ring, buff, skb);
+
+ skb_set_hash(skb, buff->rss_hash,
+ buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
+ PKT_HASH_TYPE_NONE);
+ /* Send all PTP traffic to queue 0 */
+ skb_record_rx_queue(skb,
+ is_ptp_ring ? 0
+ : AQ_NIC_RING2QMAP(rx_ring->aq_nic,
+ rx_ring->idx));
+
+ napi_gro_receive(napi, skb);
+ }
+
+err_exit:
+ return err;
+}
+
+int aq_ring_rx_clean(struct aq_ring_s *self,
+ struct napi_struct *napi,
+ int *work_done,
+ int budget)
+{
+ if (static_branch_unlikely(&aq_xdp_locking_key))
+ return __aq_ring_xdp_clean(self, napi, work_done, budget);
+ else
+ return __aq_ring_rx_clean(self, napi, work_done, budget);
+}
+
+void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic)
+{
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
+ while (self->sw_head != self->hw_head) {
+ u64 ns;
+
+ aq_nic->aq_hw_ops->extract_hwts(aq_nic->aq_hw,
+ self->dx_ring +
+ (self->sw_head * self->dx_size),
+ self->dx_size, &ns);
+ aq_ptp_tx_hwtstamp(aq_nic, ns);
+
+ self->sw_head = aq_ring_next_dx(self, self->sw_head);
+ }
+#endif
+}
+
+int aq_ring_rx_fill(struct aq_ring_s *self)
+{
+ struct aq_ring_buff_s *buff = NULL;
+ int err = 0;
+ int i = 0;
+
+ if (aq_ring_avail_dx(self) < min_t(unsigned int, AQ_CFG_RX_REFILL_THRES,
+ self->size / 2))
+ return err;
+
+ for (i = aq_ring_avail_dx(self); i--;
+ self->sw_tail = aq_ring_next_dx(self, self->sw_tail)) {
+ buff = &self->buff_ring[self->sw_tail];
+
+ buff->flags = 0U;
+ buff->len = self->frame_max;
+
+ err = aq_get_rxpages(self, buff);
+ if (err)
+ goto err_exit;
+
+ buff->pa = aq_buf_daddr(&buff->rxdata);
+ buff = NULL;
+ }
+
+err_exit:
+ return err;
+}
+
+void aq_ring_rx_deinit(struct aq_ring_s *self)
+{
+ if (!self)
+ return;
+
+ for (; self->sw_head != self->sw_tail;
+ self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
+ struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
+
+ aq_free_rxpage(&buff->rxdata, aq_nic_get_dev(self->aq_nic));
+ }
+}
+
+void aq_ring_free(struct aq_ring_s *self)
+{
+ if (!self)
+ return;
+
+ kfree(self->buff_ring);
+ self->buff_ring = NULL;
+
+ if (self->dx_ring) {
+ dma_free_coherent(aq_nic_get_dev(self->aq_nic),
+ self->size * self->dx_size, self->dx_ring,
+ self->dx_ring_pa);
+ self->dx_ring = NULL;
+ }
+}
+
+unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data)
+{
+ unsigned int count;
+ unsigned int start;
+
+ if (self->ring_type == ATL_RING_RX) {
+ /* This data should mimic aq_ethtool_queue_rx_stat_names structure */
+ do {
+ count = 0;
+ start = u64_stats_fetch_begin_irq(&self->stats.rx.syncp);
+ data[count] = self->stats.rx.packets;
+ data[++count] = self->stats.rx.jumbo_packets;
+ data[++count] = self->stats.rx.lro_packets;
+ data[++count] = self->stats.rx.errors;
+ data[++count] = self->stats.rx.alloc_fails;
+ data[++count] = self->stats.rx.skb_alloc_fails;
+ data[++count] = self->stats.rx.polls;
+ data[++count] = self->stats.rx.pg_flips;
+ data[++count] = self->stats.rx.pg_reuses;
+ data[++count] = self->stats.rx.pg_losts;
+ data[++count] = self->stats.rx.xdp_aborted;
+ data[++count] = self->stats.rx.xdp_drop;
+ data[++count] = self->stats.rx.xdp_pass;
+ data[++count] = self->stats.rx.xdp_tx;
+ data[++count] = self->stats.rx.xdp_invalid;
+ data[++count] = self->stats.rx.xdp_redirect;
+ } while (u64_stats_fetch_retry_irq(&self->stats.rx.syncp, start));
+ } else {
+ /* This data should mimic aq_ethtool_queue_tx_stat_names structure */
+ do {
+ count = 0;
+ start = u64_stats_fetch_begin_irq(&self->stats.tx.syncp);
+ data[count] = self->stats.tx.packets;
+ data[++count] = self->stats.tx.queue_restarts;
+ } while (u64_stats_fetch_retry_irq(&self->stats.tx.syncp, start));
+ }
+
+ return ++count;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
new file mode 100644
index 000000000..0a6c34438
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -0,0 +1,217 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
+ */
+
+/* File aq_ring.h: Declaration of functions for Rx/Tx rings. */
+
+#ifndef AQ_RING_H
+#define AQ_RING_H
+
+#include "aq_common.h"
+#include "aq_vec.h"
+
+#define AQ_XDP_HEADROOM ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8)
+#define AQ_XDP_TAILROOM SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+
+struct page;
+struct aq_nic_cfg_s;
+
+struct aq_rxpage {
+ struct page *page;
+ dma_addr_t daddr;
+ unsigned int order;
+ unsigned int pg_off;
+};
+
+/* TxC SOP DX EOP
+ * +----------+----------+----------+-----------
+ * 8bytes|len l3,l4 | pa | pa | pa
+ * +----------+----------+----------+-----------
+ * 4/8bytes|len pkt |len pkt | | skb
+ * +----------+----------+----------+-----------
+ * 4/8bytes|is_gso |len,flags |len |len,is_eop
+ * +----------+----------+----------+-----------
+ *
+ * This aq_ring_buff_s doesn't have an endianness dependency.
+ * It is __packed for cache line optimizations.
+ */
+struct __packed aq_ring_buff_s {
+ union {
+ /* RX/TX */
+ dma_addr_t pa;
+ /* RX */
+ struct {
+ u32 rss_hash;
+ u16 next;
+ u8 is_hash_l4;
+ u8 rsvd1;
+ struct aq_rxpage rxdata;
+ u16 vlan_rx_tag;
+ };
+ /* EOP */
+ struct {
+ dma_addr_t pa_eop;
+ struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+ };
+ /* TxC */
+ struct {
+ u32 mss;
+ u8 len_l2;
+ u8 len_l3;
+ u8 len_l4;
+ u8 is_ipv6:1;
+ u8 rsvd2:7;
+ u32 len_pkt;
+ u16 vlan_tx_tag;
+ };
+ };
+ union {
+ struct {
+ u32 len:16;
+ u32 is_ip_cso:1;
+ u32 is_udp_cso:1;
+ u32 is_tcp_cso:1;
+ u32 is_cso_err:1;
+ u32 is_sop:1;
+ u32 is_eop:1;
+ u32 is_gso_tcp:1;
+ u32 is_gso_udp:1;
+ u32 is_mapped:1;
+ u32 is_cleaned:1;
+ u32 is_error:1;
+ u32 is_vlan:1;
+ u32 is_lro:1;
+ u32 rsvd3:3;
+ u16 eop_index;
+ u16 rsvd4;
+ };
+ u64 flags;
+ };
+};
+
+struct aq_ring_stats_rx_s {
+ struct u64_stats_sync syncp; /* must be first */
+ u64 errors;
+ u64 packets;
+ u64 bytes;
+ u64 lro_packets;
+ u64 jumbo_packets;
+ u64 alloc_fails;
+ u64 skb_alloc_fails;
+ u64 polls;
+ u64 pg_losts;
+ u64 pg_flips;
+ u64 pg_reuses;
+ u64 xdp_aborted;
+ u64 xdp_drop;
+ u64 xdp_pass;
+ u64 xdp_tx;
+ u64 xdp_invalid;
+ u64 xdp_redirect;
+};
+
+struct aq_ring_stats_tx_s {
+ struct u64_stats_sync syncp; /* must be first */
+ u64 errors;
+ u64 packets;
+ u64 bytes;
+ u64 queue_restarts;
+};
+
+union aq_ring_stats_s {
+ struct aq_ring_stats_rx_s rx;
+ struct aq_ring_stats_tx_s tx;
+};
+
+enum atl_ring_type {
+ ATL_RING_TX,
+ ATL_RING_RX,
+};
+
+struct aq_ring_s {
+ struct aq_ring_buff_s *buff_ring;
+ u8 *dx_ring; /* descriptors ring, dma shared mem */
+ struct aq_nic_s *aq_nic;
+ unsigned int idx; /* for HW layer registers operations */
+ unsigned int hw_head;
+ unsigned int sw_head;
+ unsigned int sw_tail;
+ unsigned int size; /* number of descriptors */
+ unsigned int dx_size; /* TX or RX descriptor size, */
+ /* stored here for faster math */
+ u16 page_order;
+ u16 page_offset;
+ u16 frame_max;
+ u16 tail_size;
+ union aq_ring_stats_s stats;
+ dma_addr_t dx_ring_pa;
+ struct bpf_prog *xdp_prog;
+ enum atl_ring_type ring_type;
+ struct xdp_rxq_info xdp_rxq;
+};
+
+struct aq_ring_param_s {
+ unsigned int vec_idx;
+ unsigned int cpu;
+ cpumask_t affinity_mask;
+};
+
+static inline void *aq_buf_vaddr(struct aq_rxpage *rxpage)
+{
+ return page_to_virt(rxpage->page) + rxpage->pg_off;
+}
+
+static inline dma_addr_t aq_buf_daddr(struct aq_rxpage *rxpage)
+{
+ return rxpage->daddr + rxpage->pg_off;
+}
+
+static inline unsigned int aq_ring_next_dx(struct aq_ring_s *self,
+ unsigned int dx)
+{
+ return (++dx >= self->size) ? 0U : dx;
+}
+
+static inline unsigned int aq_ring_avail_dx(struct aq_ring_s *self)
+{
+ return (((self->sw_tail >= self->sw_head)) ?
+ (self->size - 1) - self->sw_tail + self->sw_head :
+ self->sw_head - self->sw_tail - 1);
+}
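+
+/* Worked example: with size = 8, sw_head = 6 and sw_tail = 2, slots 2..5
+ * hold in-flight descriptors and aq_ring_avail_dx() returns
+ * 6 - 2 - 1 = 3. One slot is always sacrificed so that
+ * sw_head == sw_tail unambiguously means "empty" rather than "full".
+ */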
+
+struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self,
+ struct aq_nic_s *aq_nic,
+ unsigned int idx,
+ struct aq_nic_cfg_s *aq_nic_cfg);
+struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
+ struct aq_nic_s *aq_nic,
+ unsigned int idx,
+ struct aq_nic_cfg_s *aq_nic_cfg);
+
+int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type);
+void aq_ring_rx_deinit(struct aq_ring_s *self);
+void aq_ring_free(struct aq_ring_s *self);
+void aq_ring_update_queue_state(struct aq_ring_s *ring);
+void aq_ring_queue_wake(struct aq_ring_s *ring);
+void aq_ring_queue_stop(struct aq_ring_s *ring);
+bool aq_ring_tx_clean(struct aq_ring_s *self);
+int aq_xdp_xmit(struct net_device *dev, int num_frames,
+ struct xdp_frame **frames, u32 flags);
+int aq_ring_rx_clean(struct aq_ring_s *self,
+ struct napi_struct *napi,
+ int *work_done,
+ int budget);
+int aq_ring_rx_fill(struct aq_ring_s *self);
+
+struct aq_ring_s *aq_ring_hwts_rx_alloc(struct aq_ring_s *self,
+ struct aq_nic_s *aq_nic, unsigned int idx,
+ unsigned int size, unsigned int dx_size);
+void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic);
+
+unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data);
+
+#endif /* AQ_RING_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_rss.h b/drivers/net/ethernet/aquantia/atlantic/aq_rss.h
new file mode 100644
index 000000000..39b1f43c8
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_rss.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ */
+
+/* File aq_rss.h: Receive Side Scaling definitions. */
+
+#ifndef AQ_RSS_H
+#define AQ_RSS_H
+
+#include "aq_common.h"
+#include "aq_cfg.h"
+
+struct aq_rss_parameters {
+ u16 base_cpu_number;
+ u16 indirection_table_size;
+ u16 hash_secret_key_size;
+ u32 hash_secret_key[AQ_CFG_RSS_HASHKEY_SIZE / sizeof(u32)];
+ u8 indirection_table[AQ_CFG_RSS_INDIRECTION_TABLE_MAX];
+};
+
+#endif /* AQ_RSS_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
new file mode 100644
index 000000000..d3a0b2ec0
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ */
+
+/* File aq_utils.h: Useful macros and structures used in all layers of the driver. */
+
+#ifndef AQ_UTILS_H
+#define AQ_UTILS_H
+
+#include "aq_common.h"
+
+static inline void aq_utils_obj_set(atomic_t *flags, u32 mask)
+{
+ unsigned long flags_old, flags_new;
+
+ do {
+ flags_old = atomic_read(flags);
+ flags_new = flags_old | (mask);
+ } while (atomic_cmpxchg(flags, flags_old, flags_new) != flags_old);
+}
+
+static inline void aq_utils_obj_clear(atomic_t *flags, u32 mask)
+{
+ unsigned long flags_old, flags_new;
+
+ do {
+ flags_old = atomic_read(flags);
+ flags_new = flags_old & ~(mask);
+ } while (atomic_cmpxchg(flags, flags_old, flags_new) != flags_old);
+}
+
+static inline bool aq_utils_obj_test(atomic_t *flags, u32 mask)
+{
+ return atomic_read(flags) & mask;
+}
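+
+/* Usage sketch, using the AQ_HW_FLAG_ERR_UNPLUG bit that the hw_atl
+ * layers test through this helper:
+ *
+ *	aq_utils_obj_set(&self->flags, AQ_HW_FLAG_ERR_UNPLUG);
+ *	if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG))
+ *		aq_utils_obj_clear(&self->flags, AQ_HW_FLAG_ERR_UNPLUG);
+ *
+ * The cmpxchg loop simply retries until the read-modify-write lands
+ * with no concurrent writer in between, so no lock is needed.
+ */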
+
+#endif /* AQ_UTILS_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
new file mode 100644
index 000000000..f5db1c44e
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
+ */
+
+/* File aq_vec.c: Definition of the common structure for a vector of Rx and
+ * Tx rings, and of the functions that service them. Helper module for aq_nic.
+ */
+
+#include "aq_vec.h"
+
+struct aq_vec_s {
+ const struct aq_hw_ops *aq_hw_ops;
+ struct aq_hw_s *aq_hw;
+ struct aq_nic_s *aq_nic;
+ unsigned int tx_rings;
+ unsigned int rx_rings;
+ struct aq_ring_param_s aq_ring_param;
+ struct napi_struct napi;
+ struct aq_ring_s ring[AQ_CFG_TCS_MAX][2];
+};
+
+#define AQ_VEC_TX_ID 0
+#define AQ_VEC_RX_ID 1
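+
+/* Each vector owns one Tx/Rx ring pair per traffic class: the pair for
+ * traffic class tc is addressed as self->ring[tc][AQ_VEC_TX_ID] and
+ * self->ring[tc][AQ_VEC_RX_ID] throughout this file.
+ */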
+
+static int aq_vec_poll(struct napi_struct *napi, int budget)
+{
+ struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi);
+ unsigned int sw_tail_old = 0U;
+ struct aq_ring_s *ring = NULL;
+ bool was_tx_cleaned = true;
+ unsigned int i = 0U;
+ int work_done = 0;
+ int err = 0;
+
+ if (!self) {
+ err = -EINVAL;
+ } else {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
+ u64_stats_update_begin(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
+ ring[AQ_VEC_RX_ID].stats.rx.polls++;
+ u64_stats_update_end(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
+ if (self->aq_hw_ops->hw_ring_tx_head_update) {
+ err = self->aq_hw_ops->hw_ring_tx_head_update(
+ self->aq_hw,
+ &ring[AQ_VEC_TX_ID]);
+ if (err < 0)
+ goto err_exit;
+ }
+
+ if (ring[AQ_VEC_TX_ID].sw_head !=
+ ring[AQ_VEC_TX_ID].hw_head) {
+ was_tx_cleaned = aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
+ aq_ring_update_queue_state(&ring[AQ_VEC_TX_ID]);
+ }
+
+ err = self->aq_hw_ops->hw_ring_rx_receive(self->aq_hw,
+ &ring[AQ_VEC_RX_ID]);
+ if (err < 0)
+ goto err_exit;
+
+ if (ring[AQ_VEC_RX_ID].sw_head !=
+ ring[AQ_VEC_RX_ID].hw_head) {
+ err = aq_ring_rx_clean(&ring[AQ_VEC_RX_ID],
+ napi,
+ &work_done,
+ budget - work_done);
+ if (err < 0)
+ goto err_exit;
+
+ sw_tail_old = ring[AQ_VEC_RX_ID].sw_tail;
+
+ err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
+ if (err < 0)
+ goto err_exit;
+
+ err = self->aq_hw_ops->hw_ring_rx_fill(
+ self->aq_hw,
+ &ring[AQ_VEC_RX_ID], sw_tail_old);
+ if (err < 0)
+ goto err_exit;
+ }
+ }
+
+err_exit:
+ if (!was_tx_cleaned)
+ work_done = budget;
+
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ self->aq_hw_ops->hw_irq_enable(self->aq_hw,
+ 1U << self->aq_ring_param.vec_idx);
+ }
+ }
+
+ return work_done;
+}
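+
+/* aq_vec_poll() follows the usual NAPI contract: returning less than
+ * budget completes polling and re-arms this vector's interrupt via
+ * napi_complete_done(), while reporting the full budget (also forced
+ * above whenever a Tx clean pass did not finish) keeps the poller
+ * scheduled for another round.
+ */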
+
+struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
+ struct aq_nic_cfg_s *aq_nic_cfg)
+{
+ struct aq_vec_s *self = NULL;
+
+ self = kzalloc(sizeof(*self), GFP_KERNEL);
+ if (!self)
+ goto err_exit;
+
+ self->aq_nic = aq_nic;
+ self->aq_ring_param.vec_idx = idx;
+ self->aq_ring_param.cpu =
+ idx + aq_nic_cfg->aq_rss.base_cpu_number;
+
+ cpumask_set_cpu(self->aq_ring_param.cpu,
+ &self->aq_ring_param.affinity_mask);
+
+ self->tx_rings = 0;
+ self->rx_rings = 0;
+
+ netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi, aq_vec_poll);
+
+err_exit:
+ return self;
+}
+
+int aq_vec_ring_alloc(struct aq_vec_s *self, struct aq_nic_s *aq_nic,
+ unsigned int idx, struct aq_nic_cfg_s *aq_nic_cfg)
+{
+ struct aq_ring_s *ring = NULL;
+ unsigned int i = 0U;
+ int err = 0;
+
+ for (i = 0; i < aq_nic_cfg->tcs; ++i) {
+ const unsigned int idx_ring = AQ_NIC_CFG_TCVEC2RING(aq_nic_cfg,
+ i, idx);
+
+ ring = aq_ring_tx_alloc(&self->ring[i][AQ_VEC_TX_ID], aq_nic,
+ idx_ring, aq_nic_cfg);
+ if (!ring) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+ ++self->tx_rings;
+
+ aq_nic_set_tx_ring(aq_nic, idx_ring, ring);
+
+ if (xdp_rxq_info_reg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq,
+ aq_nic->ndev, idx,
+ self->napi.napi_id) < 0) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+ if (xdp_rxq_info_reg_mem_model(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq,
+ MEM_TYPE_PAGE_SHARED, NULL) < 0) {
+ xdp_rxq_info_unreg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq);
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+ ring = aq_ring_rx_alloc(&self->ring[i][AQ_VEC_RX_ID], aq_nic,
+ idx_ring, aq_nic_cfg);
+ if (!ring) {
+ xdp_rxq_info_unreg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq);
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+ ++self->rx_rings;
+ }
+
+err_exit:
+ if (err < 0) {
+ aq_vec_ring_free(self);
+ self = NULL;
+ }
+
+ return err;
+}
+
+int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
+ struct aq_hw_s *aq_hw)
+{
+ struct aq_ring_s *ring = NULL;
+ unsigned int i = 0U;
+ int err = 0;
+
+ self->aq_hw_ops = aq_hw_ops;
+ self->aq_hw = aq_hw;
+
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
+ err = aq_ring_init(&ring[AQ_VEC_TX_ID], ATL_RING_TX);
+ if (err < 0)
+ goto err_exit;
+
+ err = self->aq_hw_ops->hw_ring_tx_init(self->aq_hw,
+ &ring[AQ_VEC_TX_ID],
+ &self->aq_ring_param);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_ring_init(&ring[AQ_VEC_RX_ID], ATL_RING_RX);
+ if (err < 0)
+ goto err_exit;
+
+ err = self->aq_hw_ops->hw_ring_rx_init(self->aq_hw,
+ &ring[AQ_VEC_RX_ID],
+ &self->aq_ring_param);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
+ if (err < 0)
+ goto err_exit;
+
+ err = self->aq_hw_ops->hw_ring_rx_fill(self->aq_hw,
+ &ring[AQ_VEC_RX_ID], 0U);
+ if (err < 0)
+ goto err_exit;
+ }
+
+err_exit:
+ return err;
+}
+
+int aq_vec_start(struct aq_vec_s *self)
+{
+ struct aq_ring_s *ring = NULL;
+ unsigned int i = 0U;
+ int err = 0;
+
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
+ err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw,
+ &ring[AQ_VEC_TX_ID]);
+ if (err < 0)
+ goto err_exit;
+
+ err = self->aq_hw_ops->hw_ring_rx_start(self->aq_hw,
+ &ring[AQ_VEC_RX_ID]);
+ if (err < 0)
+ goto err_exit;
+ }
+
+ napi_enable(&self->napi);
+
+err_exit:
+ return err;
+}
+
+void aq_vec_stop(struct aq_vec_s *self)
+{
+ struct aq_ring_s *ring = NULL;
+ unsigned int i = 0U;
+
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
+ self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw,
+ &ring[AQ_VEC_TX_ID]);
+
+ self->aq_hw_ops->hw_ring_rx_stop(self->aq_hw,
+ &ring[AQ_VEC_RX_ID]);
+ }
+
+ napi_disable(&self->napi);
+}
+
+void aq_vec_deinit(struct aq_vec_s *self)
+{
+ struct aq_ring_s *ring = NULL;
+ unsigned int i = 0U;
+
+ if (!self)
+ goto err_exit;
+
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
+ aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
+ aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]);
+ }
+
+err_exit:;
+}
+
+void aq_vec_free(struct aq_vec_s *self)
+{
+ if (!self)
+ goto err_exit;
+
+ netif_napi_del(&self->napi);
+
+ kfree(self);
+
+err_exit:;
+}
+
+void aq_vec_ring_free(struct aq_vec_s *self)
+{
+ struct aq_ring_s *ring = NULL;
+ unsigned int i = 0U;
+
+ if (!self)
+ goto err_exit;
+
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
+ aq_ring_free(&ring[AQ_VEC_TX_ID]);
+ if (i < self->rx_rings) {
+ xdp_rxq_info_unreg(&ring[AQ_VEC_RX_ID].xdp_rxq);
+ aq_ring_free(&ring[AQ_VEC_RX_ID]);
+ }
+ }
+
+ self->tx_rings = 0;
+ self->rx_rings = 0;
+err_exit:;
+}
+
+irqreturn_t aq_vec_isr(int irq, void *private)
+{
+ struct aq_vec_s *self = private;
+ int err = 0;
+
+ if (!self) {
+ err = -EINVAL;
+ goto err_exit;
+ }
+ napi_schedule(&self->napi);
+
+err_exit:
+ return err >= 0 ? IRQ_HANDLED : IRQ_NONE;
+}
+
+irqreturn_t aq_vec_isr_legacy(int irq, void *private)
+{
+ struct aq_vec_s *self = private;
+ u64 irq_mask = 0U;
+ int err;
+
+ if (!self)
+ return IRQ_NONE;
+ err = self->aq_hw_ops->hw_irq_read(self->aq_hw, &irq_mask);
+ if (err < 0)
+ return IRQ_NONE;
+
+ if (irq_mask) {
+ self->aq_hw_ops->hw_irq_disable(self->aq_hw,
+ 1U << self->aq_ring_param.vec_idx);
+ napi_schedule(&self->napi);
+ } else {
+ self->aq_hw_ops->hw_irq_enable(self->aq_hw, 1U);
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+}
+
+cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self)
+{
+ return &self->aq_ring_param.affinity_mask;
+}
+
+bool aq_vec_is_valid_tc(struct aq_vec_s *self, const unsigned int tc)
+{
+ return tc < self->rx_rings && tc < self->tx_rings;
+}
+
+unsigned int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u64 *data)
+{
+ unsigned int count;
+
+ if (!aq_vec_is_valid_tc(self, tc))
+ return 0;
+
+ count = aq_ring_fill_stats_data(&self->ring[tc][AQ_VEC_RX_ID], data);
+ count += aq_ring_fill_stats_data(&self->ring[tc][AQ_VEC_TX_ID], data + count);
+
+ return count;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
new file mode 100644
index 000000000..78fac609b
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
+ */
+
+/* File aq_vec.h: Definition of common structures for a vector of Rx and Tx
+ * rings, and declarations of the functions that operate on them.
+ */
+
+#ifndef AQ_VEC_H
+#define AQ_VEC_H
+
+#include "aq_common.h"
+#include "aq_nic.h"
+#include "aq_ring.h"
+#include "aq_hw.h"
+
+#include <linux/irqreturn.h>
+#include <linux/filter.h>
+#include <linux/netdevice.h>
+
+struct aq_hw_s;
+struct aq_hw_ops;
+struct aq_nic_s;
+struct aq_nic_cfg_s;
+struct aq_ring_stats_rx_s;
+struct aq_ring_stats_tx_s;
+
+irqreturn_t aq_vec_isr(int irq, void *private);
+irqreturn_t aq_vec_isr_legacy(int irq, void *private);
+struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
+ struct aq_nic_cfg_s *aq_nic_cfg);
+int aq_vec_ring_alloc(struct aq_vec_s *self, struct aq_nic_s *aq_nic,
+ unsigned int idx, struct aq_nic_cfg_s *aq_nic_cfg);
+int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
+ struct aq_hw_s *aq_hw);
+void aq_vec_deinit(struct aq_vec_s *self);
+void aq_vec_free(struct aq_vec_s *self);
+void aq_vec_ring_free(struct aq_vec_s *self);
+int aq_vec_start(struct aq_vec_s *self);
+void aq_vec_stop(struct aq_vec_s *self);
+cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self);
+bool aq_vec_is_valid_tc(struct aq_vec_s *self, const unsigned int tc);
+unsigned int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u64 *data);
+
+#endif /* AQ_VEC_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
new file mode 100644
index 000000000..9dfd68f0f
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -0,0 +1,981 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
+ */
+
+/* File hw_atl_a0.c: Definition of Atlantic hardware-specific functions. */
+
+#include "../aq_hw.h"
+#include "../aq_hw_utils.h"
+#include "../aq_ring.h"
+#include "../aq_nic.h"
+#include "hw_atl_a0.h"
+#include "hw_atl_utils.h"
+#include "hw_atl_llh.h"
+#include "hw_atl_a0_internal.h"
+
+#define DEFAULT_A0_BOARD_BASIC_CAPABILITIES \
+ .is_64_dma = true, \
+ .op64bit = false, \
+ .msix_irqs = 4U, \
+ .irq_mask = ~0U, \
+ .vecs = HW_ATL_A0_RSS_MAX, \
+ .tcs_max = HW_ATL_A0_TC_MAX, \
+ .rxd_alignment = 1U, \
+ .rxd_size = HW_ATL_A0_RXD_SIZE, \
+ .rxds_max = HW_ATL_A0_MAX_RXD, \
+ .rxds_min = HW_ATL_A0_MIN_RXD, \
+ .txd_alignment = 1U, \
+ .txd_size = HW_ATL_A0_TXD_SIZE, \
+ .txds_max = HW_ATL_A0_MAX_TXD, \
+ .txds_min = HW_ATL_A0_MIN_TXD, \
+ .txhwb_alignment = 4096U, \
+ .tx_rings = HW_ATL_A0_TX_RINGS, \
+ .rx_rings = HW_ATL_A0_RX_RINGS, \
+ .hw_features = NETIF_F_HW_CSUM | \
+ NETIF_F_RXHASH | \
+ NETIF_F_RXCSUM | \
+ NETIF_F_SG | \
+ NETIF_F_TSO | \
+ NETIF_F_NTUPLE | \
+ NETIF_F_HW_VLAN_CTAG_FILTER, \
+ .hw_priv_flags = IFF_UNICAST_FLT, \
+ .flow_control = true, \
+ .mtu = HW_ATL_A0_MTU_JUMBO, \
+ .mac_regs_count = 88, \
+ .hw_alive_check_addr = 0x10U
+
+const struct aq_hw_caps_s hw_atl_a0_caps_aqc100 = {
+ DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_FIBRE,
+ .link_speed_msk = AQ_NIC_RATE_5G |
+ AQ_NIC_RATE_2G5 |
+ AQ_NIC_RATE_1G |
+ AQ_NIC_RATE_100M,
+};
+
+const struct aq_hw_caps_s hw_atl_a0_caps_aqc107 = {
+ DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = AQ_NIC_RATE_10G |
+ AQ_NIC_RATE_5G |
+ AQ_NIC_RATE_2G5 |
+ AQ_NIC_RATE_1G |
+ AQ_NIC_RATE_100M,
+};
+
+const struct aq_hw_caps_s hw_atl_a0_caps_aqc108 = {
+ DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = AQ_NIC_RATE_5G |
+ AQ_NIC_RATE_2G5 |
+ AQ_NIC_RATE_1G |
+ AQ_NIC_RATE_100M,
+};
+
+const struct aq_hw_caps_s hw_atl_a0_caps_aqc109 = {
+ DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = AQ_NIC_RATE_2G5 |
+ AQ_NIC_RATE_1G |
+ AQ_NIC_RATE_100M,
+};
+
+static int hw_atl_a0_hw_reset(struct aq_hw_s *self)
+{
+ int err = 0;
+ u32 val;
+
+ hw_atl_glb_glb_reg_res_dis_set(self, 1U);
+ hw_atl_pci_pci_reg_res_dis_set(self, 0U);
+ hw_atl_rx_rx_reg_res_dis_set(self, 0U);
+ hw_atl_tx_tx_reg_res_dis_set(self, 0U);
+
+ HW_ATL_FLUSH();
+ hw_atl_glb_soft_res_set(self, 1);
+
+ /* poll every 1 ms, up to 10 times */
+ err = readx_poll_timeout_atomic(hw_atl_glb_soft_res_get,
+ self, val, val == 0,
+ 1000U, 10000U);
+ if (err < 0)
+ goto err_exit;
+
+ hw_atl_itr_irq_reg_res_dis_set(self, 0U);
+ hw_atl_itr_res_irq_set(self, 1U);
+
+ /* poll every 1 ms, up to 10 times */
+ err = readx_poll_timeout_atomic(hw_atl_itr_res_irq_get,
+ self, val, val == 0,
+ 1000U, 10000U);
+ if (err < 0)
+ goto err_exit;
+
+ self->aq_fw_ops->set_state(self, MPI_RESET);
+
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self)
+{
+ bool is_rx_flow_control = false;
+ unsigned int i_priority = 0U;
+ u32 buff_size = 0U;
+ u32 tc = 0U;
+
+ /* TPS Descriptor rate init */
+ hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
+ hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
+
+ /* TPS VM init */
+ hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
+
+ /* TPS TC credits init */
+ hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
+ hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
+
+ hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0U, 0xFFF);
+ hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0U, 0x64);
+ hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0U, 0x50);
+ hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0U, 0x1E);
+
+ /* Tx buf size */
+ buff_size = HW_ATL_A0_TXBUF_MAX;
+
+ hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
+ hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self,
+ (buff_size *
+ (1024 / 32U) * 66U) /
+ 100U, tc);
+ hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self,
+ (buff_size *
+ (1024 / 32U) * 50U) /
+ 100U, tc);
+
+ /* QoS Rx buf size per TC */
+ tc = 0;
+ is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->fc.req);
+ buff_size = HW_ATL_A0_RXBUF_MAX;
+
+ hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
+ hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self,
+ (buff_size *
+ (1024U / 32U) * 66U) /
+ 100U, tc);
+ hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self,
+ (buff_size *
+ (1024U / 32U) * 50U) /
+ 100U, tc);
+ hw_atl_rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
+
+ /* QoS 802.1p priority -> TC mapping */
+ for (i_priority = 8U; i_priority--;)
+ hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_rss_hash_set(struct aq_hw_s *self,
+ struct aq_rss_parameters *rss_params)
+{
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
+ unsigned int addr = 0U;
+ unsigned int i = 0U;
+ int err = 0;
+ u32 val;
+
+ for (i = 10, addr = 0U; i--; ++addr) {
+ u32 key_data = cfg->is_rss ?
+ __swab32(rss_params->hash_secret_key[i]) : 0U;
+ hw_atl_rpf_rss_key_wr_data_set(self, key_data);
+ hw_atl_rpf_rss_key_addr_set(self, addr);
+ hw_atl_rpf_rss_key_wr_en_set(self, 1U);
+ err = readx_poll_timeout_atomic(hw_atl_rpf_rss_key_wr_en_get,
+ self, val, val == 0,
+ 1000U, 10000U);
+ if (err < 0)
+ goto err_exit;
+ }
+
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_a0_hw_rss_set(struct aq_hw_s *self,
+ struct aq_rss_parameters *rss_params)
+{
+ u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
+ u8 *indirection_table = rss_params->indirection_table;
+ u16 bitary[1 + (HW_ATL_A0_RSS_REDIRECTION_MAX *
+ HW_ATL_A0_RSS_REDIRECTION_BITS / 16U)];
+ int err = 0;
+ u32 i = 0U;
+ u32 val;
+
+ memset(bitary, 0, sizeof(bitary));
+
+ for (i = HW_ATL_A0_RSS_REDIRECTION_MAX; i--; ) {
+ (*(u32 *)(bitary + ((i * 3U) / 16U))) |=
+ ((indirection_table[i] % num_rss_queues) <<
+ ((i * 3U) & 0xFU));
+ }
+
+ for (i = ARRAY_SIZE(bitary); i--;) {
+ hw_atl_rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
+ hw_atl_rpf_rss_redir_tbl_addr_set(self, i);
+ hw_atl_rpf_rss_redir_wr_en_set(self, 1U);
+ err = readx_poll_timeout_atomic(hw_atl_rpf_rss_redir_wr_en_get,
+ self, val, val == 0,
+ 1000U, 10000U);
+ if (err < 0)
+ goto err_exit;
+ }
+
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
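+
+/* The bitary[] packing above stores one 3-bit queue number per
+ * indirection-table entry. A short worked example: with
+ * num_rss_queues = 4 and indirection_table[5] = 6, entry 5 maps to
+ * queue 6 % 4 = 2, and that value is OR-ed in at bit offset
+ * (5 * 3) & 0xF = 15 of the 32-bit word overlaying bitary[0].
+ */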
+
+static int hw_atl_a0_hw_offload_set(struct aq_hw_s *self,
+ struct aq_nic_cfg_s *aq_nic_cfg)
+{
+ /* TX checksum offloads */
+ hw_atl_tpo_ipv4header_crc_offload_en_set(self, 1);
+ hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);
+
+ /* RX checksum offloads */
+ hw_atl_rpo_ipv4header_crc_offload_en_set(self, 1);
+ hw_atl_rpo_tcp_udp_crc_offload_en_set(self, 1);
+
+ /* LSO offloads */
+ hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_init_tx_path(struct aq_hw_s *self)
+{
+ hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
+ hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
+ hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
+
+ /* Tx interrupts */
+ hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
+
+ /* misc */
+ aq_hw_write_reg(self, 0x00007040U, 0x00000000U);
+ hw_atl_tdm_tx_dca_en_set(self, 0U);
+ hw_atl_tdm_tx_dca_mode_set(self, 0U);
+
+ hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_init_rx_path(struct aq_hw_s *self)
+{
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
+ int i;
+
+ /* Rx TC/RSS number config */
+ hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U);
+
+ /* Rx flow control */
+ hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);
+
+ /* RSS Ring selection */
+ hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ?
+ 0xB3333333U : 0x00000000U);
+
+ /* Multicast filters */
+ for (i = HW_ATL_A0_MAC_MAX; i--;) {
+ hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
+ hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);
+ }
+
+ hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
+ hw_atl_reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);
+
+ /* Vlan filters */
+ hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
+ hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);
+ hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
+
+ /* Rx Interrupts */
+ hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
+
+ /* misc */
+ hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
+ hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
+
+ hw_atl_rdm_rx_dca_en_set(self, 0U);
+ hw_atl_rdm_rx_dca_mode_set(self, 0U);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, const u8 *mac_addr)
+{
+ unsigned int h = 0U;
+ unsigned int l = 0U;
+ int err = 0;
+
+ if (!mac_addr) {
+ err = -EINVAL;
+ goto err_exit;
+ }
+
+ h = (mac_addr[0] << 8) | (mac_addr[1]);
+ l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
+ (mac_addr[4] << 8) | mac_addr[5];
+
+ hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC);
+ hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_A0_MAC);
+ hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_A0_MAC);
+ hw_atl_rpfl2_uc_flr_en_set(self, 1U, HW_ATL_A0_MAC);
+
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_a0_hw_init(struct aq_hw_s *self, const u8 *mac_addr)
+{
+ static u32 aq_hw_atl_igcr_table_[4][2] = {
+ [AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
+ [AQ_HW_IRQ_LEGACY] = { 0x20000080U, 0x20000080U },
+ [AQ_HW_IRQ_MSI] = { 0x20000021U, 0x20000025U },
+ [AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U },
+ };
+ struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
+ int err = 0;
+
+ hw_atl_a0_hw_init_tx_path(self);
+ hw_atl_a0_hw_init_rx_path(self);
+
+ hw_atl_a0_hw_mac_addr_set(self, mac_addr);
+
+ self->aq_fw_ops->set_link_speed(self, aq_nic_cfg->link_speed_msk);
+ self->aq_fw_ops->set_state(self, MPI_INIT);
+
+ hw_atl_reg_tx_dma_debug_ctl_set(self, 0x800000b8U);
+ hw_atl_reg_tx_dma_debug_ctl_set(self, 0x000000b8U);
+
+ hw_atl_a0_hw_qos_set(self);
+ hw_atl_a0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
+ hw_atl_a0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);
+
+ /* Reset link status and read out initial hardware counters */
+ self->aq_link_status.mbps = 0;
+ self->aq_fw_ops->update_stats(self);
+
+ err = aq_hw_err_from_flags(self);
+ if (err < 0)
+ goto err_exit;
+
+ /* Interrupts */
+ hw_atl_reg_irq_glb_ctl_set(self,
+ aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
+ [(aq_nic_cfg->vecs > 1U) ? 1 : 0]);
+
+ hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
+
+ /* Interrupts */
+ hw_atl_reg_gen_irq_map_set(self,
+ ((HW_ATL_A0_ERR_INT << 0x18) | (1U << 0x1F)) |
+ ((HW_ATL_A0_ERR_INT << 0x10) | (1U << 0x17)) |
+ ((HW_ATL_A0_ERR_INT << 8) | (1U << 0xF)) |
+ ((HW_ATL_A0_ERR_INT) | (1U << 0x7)), 0U);
+
+ hw_atl_a0_hw_offload_set(self, aq_nic_cfg);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_a0_hw_ring_tx_start(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_ring_rx_start(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_start(struct aq_hw_s *self)
+{
+ hw_atl_tpb_tx_buff_en_set(self, 1);
+ hw_atl_rpb_rx_buff_en_set(self, 1);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_tx_ring_tail_update(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
+
+ return 0;
+}
+
+static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self,
+ struct aq_ring_s *ring,
+ unsigned int frags)
+{
+ struct aq_ring_buff_s *buff = NULL;
+ struct hw_atl_txd_s *txd = NULL;
+ unsigned int buff_pa_len = 0U;
+ unsigned int frag_count = 0U;
+ unsigned int pkt_len = 0U;
+ bool is_gso = false;
+
+ buff = &ring->buff_ring[ring->sw_tail];
+ pkt_len = (buff->is_eop && buff->is_sop) ? buff->len : buff->len_pkt;
+
+ for (frag_count = 0; frag_count < frags; frag_count++) {
+ txd = (struct hw_atl_txd_s *)&ring->dx_ring[ring->sw_tail *
+ HW_ATL_A0_TXD_SIZE];
+ txd->ctl = 0;
+ txd->ctl2 = 0;
+ txd->buf_addr = 0;
+
+ buff = &ring->buff_ring[ring->sw_tail];
+
+ if (buff->is_gso_tcp) {
+ txd->ctl |= (buff->len_l3 << 31) |
+ (buff->len_l2 << 24) |
+ HW_ATL_A0_TXD_CTL_CMD_TCP |
+ HW_ATL_A0_TXD_CTL_DESC_TYPE_TXC;
+ txd->ctl2 |= (buff->mss << 16) |
+ (buff->len_l4 << 8) |
+ (buff->len_l3 >> 1);
+
+ pkt_len -= (buff->len_l4 +
+ buff->len_l3 +
+ buff->len_l2);
+ is_gso = true;
+
+ if (buff->is_ipv6)
+ txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_IPV6;
+ } else {
+ buff_pa_len = buff->len;
+
+ txd->buf_addr = buff->pa;
+ txd->ctl |= (HW_ATL_A0_TXD_CTL_BLEN &
+ ((u32)buff_pa_len << 4));
+ txd->ctl |= HW_ATL_A0_TXD_CTL_DESC_TYPE_TXD;
+ /* PAY_LEN */
+ txd->ctl2 |= HW_ATL_A0_TXD_CTL2_LEN & (pkt_len << 14);
+
+ if (is_gso) {
+ txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_LSO;
+ txd->ctl2 |= HW_ATL_A0_TXD_CTL2_CTX_EN;
+ }
+
+ /* Tx checksum offloads */
+ if (buff->is_ip_cso)
+ txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_IPCSO;
+
+ if (buff->is_udp_cso || buff->is_tcp_cso)
+ txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_TUCSO;
+
+ if (unlikely(buff->is_eop)) {
+ txd->ctl |= HW_ATL_A0_TXD_CTL_EOP;
+ txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_WB;
+ is_gso = false;
+ }
+ }
+
+ ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail);
+ }
+
+ hw_atl_a0_hw_tx_ring_tail_update(self, ring);
+
+ return aq_hw_err_from_flags(self);
+}
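+
+/* Descriptor sequence produced above for a hypothetical TSO skb with
+ * two data fragments: first a TXC context descriptor carrying the MSS
+ * and L2/L3/L4 header lengths, then two TXD data descriptors with
+ * buffer address and length, the last of which also gets the EOP and
+ * writeback command bits.
+ */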
+
+static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring,
+ struct aq_ring_param_s *aq_ring_param)
+{
+ u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
+ u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
+
+ hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);
+
+ hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
+
+ hw_atl_reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
+ aq_ring->idx);
+
+ hw_atl_reg_rx_dma_desc_base_addressmswset(self,
+ dma_desc_addr_msw,
+ aq_ring->idx);
+
+ hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
+
+ hw_atl_rdm_rx_desc_data_buff_size_set(self,
+ aq_ring->frame_max / 1024U,
+ aq_ring->idx);
+
+ hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
+ hw_atl_rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);
+
+ /* Rx ring set mode */
+
+ /* Mapping interrupt vector */
+ hw_atl_itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
+ hw_atl_itr_irq_map_en_rx_set(self, true, aq_ring->idx);
+
+ hw_atl_rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
+ hw_atl_rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_ring_tx_init(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring,
+ struct aq_ring_param_s *aq_ring_param)
+{
+ u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
+ u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
+
+ hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
+ aq_ring->idx);
+
+ hw_atl_reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
+ aq_ring->idx);
+
+ hw_atl_tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
+
+ hw_atl_a0_hw_tx_ring_tail_update(self, aq_ring);
+
+ /* Set Tx threshold */
+ hw_atl_tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);
+
+ /* Mapping interrupt vector */
+ hw_atl_itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
+ hw_atl_itr_irq_map_en_tx_set(self, true, aq_ring->idx);
+
+ hw_atl_tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
+ hw_atl_tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_ring_rx_fill(struct aq_hw_s *self,
+ struct aq_ring_s *ring,
+ unsigned int sw_tail_old)
+{
+ for (; sw_tail_old != ring->sw_tail;
+ sw_tail_old = aq_ring_next_dx(ring, sw_tail_old)) {
+ struct hw_atl_rxd_s *rxd =
+ (struct hw_atl_rxd_s *)&ring->dx_ring[sw_tail_old *
+ HW_ATL_A0_RXD_SIZE];
+
+ struct aq_ring_buff_s *buff = &ring->buff_ring[sw_tail_old];
+
+ rxd->buf_addr = buff->pa;
+ rxd->hdr_addr = 0U;
+ }
+
+ hw_atl_reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_ring_tx_head_update(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ unsigned int hw_head = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);
+ int err = 0;
+
+ if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
+ err = -ENXIO;
+ goto err_exit;
+ }
+ ring->hw_head = hw_head;
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ for (; ring->hw_head != ring->sw_tail;
+ ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
+ struct aq_ring_buff_s *buff = NULL;
+ struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *)
+ &ring->dx_ring[ring->hw_head * HW_ATL_A0_RXD_SIZE];
+
+ unsigned int is_err = 1U;
+ unsigned int is_rx_check_sum_enabled = 0U;
+ unsigned int pkt_type = 0U;
+
+ if (!(rxd_wb->status & 0x5U)) { /* RxD is not done */
+ if ((1U << 4) &
+ hw_atl_reg_rx_dma_desc_status_get(self, ring->idx)) {
+ hw_atl_rdm_rx_desc_en_set(self, false, ring->idx);
+ hw_atl_rdm_rx_desc_res_set(self, true, ring->idx);
+ hw_atl_rdm_rx_desc_res_set(self, false, ring->idx);
+ hw_atl_rdm_rx_desc_en_set(self, true, ring->idx);
+ }
+
+ if (ring->hw_head ||
+ (hw_atl_rdm_rx_desc_head_ptr_get(self,
+ ring->idx) < 2U)) {
+ break;
+ } else if (!(rxd_wb->status & 0x1U)) {
+ struct hw_atl_rxd_wb_s *rxd_wb1 =
+ (struct hw_atl_rxd_wb_s *)
+ (&ring->dx_ring[(1U) *
+ HW_ATL_A0_RXD_SIZE]);
+
+ if ((rxd_wb1->status & 0x1U)) {
+ rxd_wb->pkt_len = 1514U;
+ rxd_wb->status = 3U;
+ } else {
+ break;
+ }
+ }
+ }
+
+ buff = &ring->buff_ring[ring->hw_head];
+
+ if (0x3U != (rxd_wb->status & 0x3U))
+ rxd_wb->status |= 4;
+
+ is_err = (0x0000001CU & rxd_wb->status);
+ is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19);
+ pkt_type = 0xFFU & (rxd_wb->type >> 4);
+
+ if (is_rx_check_sum_enabled) {
+ if (0x0U == (pkt_type & 0x3U))
+ buff->is_ip_cso = (is_err & 0x08U) ? 0 : 1;
+
+ if (0x4U == (pkt_type & 0x1CU))
+ buff->is_udp_cso = (is_err & 0x10U) ? 0 : 1;
+ else if (0x0U == (pkt_type & 0x1CU))
+ buff->is_tcp_cso = (is_err & 0x10U) ? 0 : 1;
+
+ /* Checksum offload workaround for small packets */
+ if (rxd_wb->pkt_len <= 60) {
+ buff->is_ip_cso = 0U;
+ buff->is_cso_err = 0U;
+ }
+ }
+
+ is_err &= ~0x18U;
+ is_err &= ~0x04U;
+
+ if (is_err || rxd_wb->type & 0x1000U) {
+ /* status error or DMA error */
+ buff->is_error = 1U;
+ } else {
+ if (self->aq_nic_cfg->is_rss) {
+ /* rss_type: lowest 4 bits of the type field */
+ u16 rss_type = rxd_wb->type & 0xFU;
+
+ if (rss_type && rss_type < 0x8U) {
+ buff->is_hash_l4 = (rss_type == 0x4 ||
+ rss_type == 0x5);
+ buff->rss_hash = rxd_wb->rss_hash;
+ }
+ }
+
+ if (HW_ATL_A0_RXD_WB_STAT2_EOP & rxd_wb->status) {
+ buff->len = rxd_wb->pkt_len %
+ ring->frame_max;
+ buff->len = buff->len ?
+ buff->len : ring->frame_max;
+ buff->next = 0U;
+ buff->is_eop = 1U;
+ } else {
+ /* jumbo */
+ buff->next = aq_ring_next_dx(ring,
+ ring->hw_head);
+ ++ring->stats.rx.jumbo_packets;
+ }
+ }
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
+{
+ hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask) |
+ (1U << HW_ATL_A0_ERR_INT));
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
+{
+ hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask));
+ hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));
+
+ if ((1U << 16) & hw_atl_reg_gen_irq_status_get(self))
+ atomic_inc(&self->dpc);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
+{
+ *mask = hw_atl_itr_irq_statuslsw_get(self);
+
+ return aq_hw_err_from_flags(self);
+}
+
+#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)
+
+static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self,
+ unsigned int packet_filter)
+{
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
+ unsigned int i = 0U;
+
+ hw_atl_rpfl2promiscuous_mode_en_set(self,
+ IS_FILTER_ENABLED(IFF_PROMISC));
+ hw_atl_rpfl2multicast_flr_en_set(self,
+ IS_FILTER_ENABLED(IFF_MULTICAST), 0);
+ hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
+
+ cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST);
+
+ for (i = HW_ATL_A0_MAC_MIN; i < HW_ATL_A0_MAC_MAX; ++i)
+ hw_atl_rpfl2_uc_flr_en_set(self,
+ (cfg->is_mc_list_enabled &&
+ (i <= cfg->mc_list_count)) ? 1U : 0U,
+ i);
+
+ return aq_hw_err_from_flags(self);
+}
+
+#undef IS_FILTER_ENABLED
+
+static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self,
+ u8 ar_mac
+ [AQ_HW_MULTICAST_ADDRESS_MAX]
+ [ETH_ALEN],
+ u32 count)
+{
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
+ int err = 0;
+
+ if (count > (HW_ATL_A0_MAC_MAX - HW_ATL_A0_MAC_MIN)) {
+ err = -EBADRQC;
+ goto err_exit;
+ }
+ for (cfg->mc_list_count = 0U; cfg->mc_list_count < count; ++cfg->mc_list_count) {
+ u32 i = cfg->mc_list_count;
+ u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]);
+ u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
+ (ar_mac[i][4] << 8) | ar_mac[i][5];
+
+ hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC_MIN + i);
+
+ hw_atl_rpfl2unicast_dest_addresslsw_set(self,
+ l,
+ HW_ATL_A0_MAC_MIN + i);
+
+ hw_atl_rpfl2unicast_dest_addressmsw_set(self,
+ h,
+ HW_ATL_A0_MAC_MIN + i);
+
+ hw_atl_rpfl2_uc_flr_en_set(self,
+ (cfg->is_mc_list_enabled),
+ HW_ATL_A0_MAC_MIN + i);
+ }
+
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self)
+{
+ unsigned int i = 0U;
+ u32 itr_rx;
+
+ if (self->aq_nic_cfg->itr) {
+ if (self->aq_nic_cfg->itr != AQ_CFG_INTERRUPT_MODERATION_AUTO) {
+ u32 itr_ = (self->aq_nic_cfg->itr >> 1);
+
+ itr_ = min(AQ_CFG_IRQ_MASK, itr_);
+
+ itr_rx = 0x80000000U | (itr_ << 0x10);
+ } else {
+ u32 n = 0xFFFFU & aq_hw_read_reg(self, 0x00002A00U);
+
+ if (n < self->aq_link_status.mbps) {
+ itr_rx = 0U;
+ } else {
+ static unsigned int hw_timers_tbl_[] = {
+ 0x01CU, /* 10Gbit */
+ 0x039U, /* 5Gbit */
+ 0x039U, /* 5Gbit 5GS */
+ 0x073U, /* 2.5Gbit */
+ 0x120U, /* 1Gbit */
+ 0x1FFU, /* 100Mbit */
+ };
+
+ unsigned int speed_index =
+ hw_atl_utils_mbps_2_speed_index(
+ self->aq_link_status.mbps);
+
+ itr_rx = 0x80000000U |
+ (hw_timers_tbl_[speed_index] << 0x10U);
+ }
+
+ aq_hw_write_reg(self, 0x00002A00U, 0x40000000U);
+ aq_hw_write_reg(self, 0x00002A00U, 0x8D000000U);
+ }
+ } else {
+ itr_rx = 0U;
+ }
+
+ for (i = HW_ATL_A0_RINGS_MAX; i--;)
+ hw_atl_reg_irq_thr_set(self, itr_rx, i);
+
+ return aq_hw_err_from_flags(self);
+}
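+
+/* In the itr_rx values programmed above, bit 31 enables moderation and
+ * the timer sits in the field at bit 16; the 1Gbit table entry 0x120,
+ * for instance, is written as 0x80000000U | (0x120U << 0x10).
+ */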
+
+static int hw_atl_a0_hw_stop(struct aq_hw_s *self)
+{
+ hw_atl_a0_hw_irq_disable(self, HW_ATL_A0_INT_MASK);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_ring_tx_stop(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_fl3l4_clear(struct aq_hw_s *self,
+ struct aq_rx_filter_l3l4 *data)
+{
+ u8 location = data->location;
+
+ if (!data->is_ipv6) {
+ hw_atl_rpfl3l4_cmd_clear(self, location);
+ hw_atl_rpf_l4_spd_set(self, 0U, location);
+ hw_atl_rpf_l4_dpd_set(self, 0U, location);
+ hw_atl_rpfl3l4_ipv4_src_addr_clear(self, location);
+ hw_atl_rpfl3l4_ipv4_dest_addr_clear(self, location);
+ } else {
+ int i;
+
+ for (i = 0; i < HW_ATL_RX_CNT_REG_ADDR_IPV6; ++i) {
+ hw_atl_rpfl3l4_cmd_clear(self, location + i);
+ hw_atl_rpf_l4_spd_set(self, 0U, location + i);
+ hw_atl_rpf_l4_dpd_set(self, 0U, location + i);
+ }
+ hw_atl_rpfl3l4_ipv6_src_addr_clear(self, location);
+ hw_atl_rpfl3l4_ipv6_dest_addr_clear(self, location);
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_fl3l4_set(struct aq_hw_s *self,
+ struct aq_rx_filter_l3l4 *data)
+{
+ u8 location = data->location;
+
+ hw_atl_a0_hw_fl3l4_clear(self, data);
+
+ if (data->cmd) {
+ if (!data->is_ipv6) {
+ hw_atl_rpfl3l4_ipv4_dest_addr_set(self,
+ location,
+ data->ip_dst[0]);
+ hw_atl_rpfl3l4_ipv4_src_addr_set(self,
+ location,
+ data->ip_src[0]);
+ } else {
+ hw_atl_rpfl3l4_ipv6_dest_addr_set(self,
+ location,
+ data->ip_dst);
+ hw_atl_rpfl3l4_ipv6_src_addr_set(self,
+ location,
+ data->ip_src);
+ }
+ }
+ hw_atl_rpf_l4_dpd_set(self, data->p_dst, location);
+ hw_atl_rpf_l4_spd_set(self, data->p_src, location);
+ hw_atl_rpfl3l4_cmd_set(self, location, data->cmd);
+
+ return aq_hw_err_from_flags(self);
+}
+
+const struct aq_hw_ops hw_atl_ops_a0 = {
+ .hw_soft_reset = hw_atl_utils_soft_reset,
+ .hw_prepare = hw_atl_utils_initfw,
+ .hw_set_mac_address = hw_atl_a0_hw_mac_addr_set,
+ .hw_init = hw_atl_a0_hw_init,
+ .hw_reset = hw_atl_a0_hw_reset,
+ .hw_start = hw_atl_a0_hw_start,
+ .hw_ring_tx_start = hw_atl_a0_hw_ring_tx_start,
+ .hw_ring_tx_stop = hw_atl_a0_hw_ring_tx_stop,
+ .hw_ring_rx_start = hw_atl_a0_hw_ring_rx_start,
+ .hw_ring_rx_stop = hw_atl_a0_hw_ring_rx_stop,
+ .hw_stop = hw_atl_a0_hw_stop,
+
+ .hw_ring_tx_xmit = hw_atl_a0_hw_ring_tx_xmit,
+ .hw_ring_tx_head_update = hw_atl_a0_hw_ring_tx_head_update,
+
+ .hw_ring_rx_receive = hw_atl_a0_hw_ring_rx_receive,
+ .hw_ring_rx_fill = hw_atl_a0_hw_ring_rx_fill,
+
+ .hw_irq_enable = hw_atl_a0_hw_irq_enable,
+ .hw_irq_disable = hw_atl_a0_hw_irq_disable,
+ .hw_irq_read = hw_atl_a0_hw_irq_read,
+
+ .hw_ring_rx_init = hw_atl_a0_hw_ring_rx_init,
+ .hw_ring_tx_init = hw_atl_a0_hw_ring_tx_init,
+ .hw_packet_filter_set = hw_atl_a0_hw_packet_filter_set,
+ .hw_filter_l3l4_set = hw_atl_a0_hw_fl3l4_set,
+ .hw_multicast_list_set = hw_atl_a0_hw_multicast_list_set,
+ .hw_interrupt_moderation_set = hw_atl_a0_hw_interrupt_moderation_set,
+ .hw_rss_set = hw_atl_a0_hw_rss_set,
+ .hw_rss_hash_set = hw_atl_a0_hw_rss_hash_set,
+ .hw_get_regs = hw_atl_utils_hw_get_regs,
+ .hw_get_hw_stats = hw_atl_utils_get_hw_stats,
+ .hw_get_fw_version = hw_atl_utils_get_fw_version,
+};
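+
+/* This ops table is the only surface the generic layers see: aq_vec
+ * and aq_nic never call hw_atl_a0_* directly, they dispatch through
+ * self->aq_hw_ops->hw_ring_rx_receive() and friends, which lets the
+ * different hardware generations coexist behind one driver core.
+ */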
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h
new file mode 100644
index 000000000..1a294ad4d
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ */
+
+/* File hw_atl_a0.h: Declaration of the abstract interface for Atlantic
+ * hardware-specific functions.
+ */
+
+#ifndef HW_ATL_A0_H
+#define HW_ATL_A0_H
+
+#include "../aq_common.h"
+
+extern const struct aq_hw_caps_s hw_atl_a0_caps_aqc100;
+extern const struct aq_hw_caps_s hw_atl_a0_caps_aqc107;
+extern const struct aq_hw_caps_s hw_atl_a0_caps_aqc108;
+extern const struct aq_hw_caps_s hw_atl_a0_caps_aqc109;
+
+extern const struct aq_hw_ops hw_atl_ops_a0;
+
+#endif /* HW_ATL_A0_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
new file mode 100644
index 000000000..1b9b2e560
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ */
+
+/* File hw_atl_a0_internal.h: Definition of Atlantic A0 chip-specific
+ * constants.
+ */
+
+#ifndef HW_ATL_A0_INTERNAL_H
+#define HW_ATL_A0_INTERNAL_H
+
+#include "../aq_common.h"
+
+#define HW_ATL_A0_MTU_JUMBO 9014U
+
+#define HW_ATL_A0_TX_RINGS 4U
+#define HW_ATL_A0_RX_RINGS 4U
+
+#define HW_ATL_A0_RINGS_MAX 32U
+#define HW_ATL_A0_TXD_SIZE 16U
+#define HW_ATL_A0_RXD_SIZE 16U
+
+#define HW_ATL_A0_MAC 0U
+#define HW_ATL_A0_MAC_MIN 1U
+#define HW_ATL_A0_MAC_MAX 33U
+
+/* interrupts */
+#define HW_ATL_A0_ERR_INT 8U
+#define HW_ATL_A0_INT_MASK 0xFFFFFFFFU
+
+#define HW_ATL_A0_TXD_CTL2_LEN 0xFFFFC000U
+#define HW_ATL_A0_TXD_CTL2_CTX_EN 0x00002000U
+#define HW_ATL_A0_TXD_CTL2_CTX_IDX 0x00001000U
+
+#define HW_ATL_A0_TXD_CTL_DESC_TYPE_TXD 0x00000001U
+#define HW_ATL_A0_TXD_CTL_DESC_TYPE_TXC 0x00000002U
+#define HW_ATL_A0_TXD_CTL_BLEN 0x000FFFF0U
+#define HW_ATL_A0_TXD_CTL_DD 0x00100000U
+#define HW_ATL_A0_TXD_CTL_EOP 0x00200000U
+
+#define HW_ATL_A0_TXD_CTL_CMD_X 0x3FC00000U
+
+#define HW_ATL_A0_TXD_CTL_CMD_VLAN BIT(22)
+#define HW_ATL_A0_TXD_CTL_CMD_FCS BIT(23)
+#define HW_ATL_A0_TXD_CTL_CMD_IPCSO BIT(24)
+#define HW_ATL_A0_TXD_CTL_CMD_TUCSO BIT(25)
+#define HW_ATL_A0_TXD_CTL_CMD_LSO BIT(26)
+#define HW_ATL_A0_TXD_CTL_CMD_WB BIT(27)
+#define HW_ATL_A0_TXD_CTL_CMD_VXLAN BIT(28)
+
+#define HW_ATL_A0_TXD_CTL_CMD_IPV6 BIT(21)
+#define HW_ATL_A0_TXD_CTL_CMD_TCP BIT(22)
+
+#define HW_ATL_A0_MPI_CONTROL_ADR 0x0368U
+#define HW_ATL_A0_MPI_STATE_ADR 0x036CU
+
+#define HW_ATL_A0_MPI_SPEED_MSK 0xFFFFU
+#define HW_ATL_A0_MPI_SPEED_SHIFT 16U
+
+#define HW_ATL_A0_TXBUF_MAX 160U
+#define HW_ATL_A0_RXBUF_MAX 320U
+
+#define HW_ATL_A0_RSS_REDIRECTION_MAX 64U
+#define HW_ATL_A0_RSS_REDIRECTION_BITS 3U
+
+#define HW_ATL_A0_TC_MAX 1U
+#define HW_ATL_A0_RSS_MAX 8U
+
+#define HW_ATL_A0_FW_SEMA_RAM 0x2U
+
+#define HW_ATL_A0_RXD_DD 0x1U
+#define HW_ATL_A0_RXD_NCEA0 0x1U
+
+#define HW_ATL_A0_RXD_WB_STAT2_EOP 0x0002U
+
+#define HW_ATL_A0_UCP_0X370_REG 0x370U
+
+#define HW_ATL_A0_FW_VER_EXPECTED 0x01050006U
+
+#define HW_ATL_A0_MIN_RXD \
+ (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_RXD_MULTIPLE))
+#define HW_ATL_A0_MIN_TXD \
+ (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_TXD_MULTIPLE))
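+
+/* Assuming AQ_CFG_SKB_FRAGS_MAX = 32U and an 8-descriptor multiple, as
+ * the common headers define them, the minimums above work out to
+ * ALIGN(33, 8) = 40 descriptors.
+ */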
+
+#define HW_ATL_A0_MAX_RXD 8184U
+#define HW_ATL_A0_MAX_TXD 8184U
+
+#endif /* HW_ATL_A0_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
new file mode 100644
index 000000000..54e70f07b
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -0,0 +1,1715 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
+ */
+
+/* File hw_atl_b0.c: Definition of Atlantic hardware-specific functions. */
+
+#include "../aq_hw.h"
+#include "../aq_hw_utils.h"
+#include "../aq_ring.h"
+#include "../aq_nic.h"
+#include "../aq_phy.h"
+#include "hw_atl_b0.h"
+#include "hw_atl_utils.h"
+#include "hw_atl_llh.h"
+#include "hw_atl_b0_internal.h"
+#include "hw_atl_llh_internal.h"
+
+#define DEFAULT_B0_BOARD_BASIC_CAPABILITIES \
+ .is_64_dma = true, \
+ .op64bit = false, \
+ .msix_irqs = 8U, \
+ .irq_mask = ~0U, \
+ .vecs = HW_ATL_B0_RSS_MAX, \
+ .tcs_max = HW_ATL_B0_TC_MAX, \
+ .rxd_alignment = 1U, \
+ .rxd_size = HW_ATL_B0_RXD_SIZE, \
+ .rxds_max = HW_ATL_B0_MAX_RXD, \
+ .rxds_min = HW_ATL_B0_MIN_RXD, \
+ .txd_alignment = 1U, \
+ .txd_size = HW_ATL_B0_TXD_SIZE, \
+ .txds_max = HW_ATL_B0_MAX_TXD, \
+ .txds_min = HW_ATL_B0_MIN_TXD, \
+ .txhwb_alignment = 4096U, \
+ .tx_rings = HW_ATL_B0_TX_RINGS, \
+ .rx_rings = HW_ATL_B0_RX_RINGS, \
+ .hw_features = NETIF_F_HW_CSUM | \
+ NETIF_F_RXCSUM | \
+ NETIF_F_RXHASH | \
+ NETIF_F_SG | \
+ NETIF_F_TSO | \
+ NETIF_F_TSO6 | \
+ NETIF_F_LRO | \
+ NETIF_F_NTUPLE | \
+ NETIF_F_HW_VLAN_CTAG_FILTER | \
+ NETIF_F_HW_VLAN_CTAG_RX | \
+ NETIF_F_HW_VLAN_CTAG_TX | \
+ NETIF_F_GSO_UDP_L4 | \
+ NETIF_F_GSO_PARTIAL | \
+ NETIF_F_HW_TC, \
+ .hw_priv_flags = IFF_UNICAST_FLT, \
+ .flow_control = true, \
+ .mtu = HW_ATL_B0_MTU_JUMBO, \
+ .mac_regs_count = 88, \
+ .hw_alive_check_addr = 0x10U
+
+const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = {
+ DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_FIBRE,
+ .link_speed_msk = AQ_NIC_RATE_10G |
+ AQ_NIC_RATE_5G |
+ AQ_NIC_RATE_2G5 |
+ AQ_NIC_RATE_1G |
+ AQ_NIC_RATE_100M,
+};
+
+const struct aq_hw_caps_s hw_atl_b0_caps_aqc107 = {
+ DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = AQ_NIC_RATE_10G |
+ AQ_NIC_RATE_5G |
+ AQ_NIC_RATE_2G5 |
+ AQ_NIC_RATE_1G |
+ AQ_NIC_RATE_100M,
+};
+
+const struct aq_hw_caps_s hw_atl_b0_caps_aqc108 = {
+ DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = AQ_NIC_RATE_5G |
+ AQ_NIC_RATE_2G5 |
+ AQ_NIC_RATE_1G |
+ AQ_NIC_RATE_100M,
+};
+
+const struct aq_hw_caps_s hw_atl_b0_caps_aqc109 = {
+ DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = AQ_NIC_RATE_2G5 |
+ AQ_NIC_RATE_1G |
+ AQ_NIC_RATE_100M,
+};
+
+const struct aq_hw_caps_s hw_atl_b0_caps_aqc111 = {
+ DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = AQ_NIC_RATE_5G |
+ AQ_NIC_RATE_2G5 |
+ AQ_NIC_RATE_1G |
+ AQ_NIC_RATE_100M,
+ .quirks = AQ_NIC_QUIRK_BAD_PTP,
+};
+
+const struct aq_hw_caps_s hw_atl_b0_caps_aqc112 = {
+ DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = AQ_NIC_RATE_2G5 |
+ AQ_NIC_RATE_1G |
+ AQ_NIC_RATE_100M,
+ .quirks = AQ_NIC_QUIRK_BAD_PTP,
+};
+
+static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
+{
+ int err = 0;
+
+ err = hw_atl_utils_soft_reset(self);
+ if (err)
+ return err;
+
+ self->aq_fw_ops->set_state(self, MPI_RESET);
+
+ err = aq_hw_err_from_flags(self);
+
+ return err;
+}
+
+int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc)
+{
+ hw_atl_rpb_rx_xoff_en_per_tc_set(self, !!(fc & AQ_NIC_FC_RX), tc);
+
+ return 0;
+}
+
+static int hw_atl_b0_tc_ptp_set(struct aq_hw_s *self)
+{
+ /* Init TC2 for PTP_TX */
+ hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, HW_ATL_B0_PTP_TXBUF_SIZE,
+ AQ_HW_PTP_TC);
+
+ /* Init TC2 for PTP_RX */
+ hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, HW_ATL_B0_PTP_RXBUF_SIZE,
+ AQ_HW_PTP_TC);
+ /* No flow control for PTP */
+ hw_atl_rpb_rx_xoff_en_per_tc_set(self, 0U, AQ_HW_PTP_TC);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
+{
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
+ u32 tx_buff_size = HW_ATL_B0_TXBUF_MAX;
+ u32 rx_buff_size = HW_ATL_B0_RXBUF_MAX;
+ unsigned int prio = 0U;
+ u32 tc = 0U;
+
+ if (cfg->is_ptp) {
+ tx_buff_size -= HW_ATL_B0_PTP_TXBUF_SIZE;
+ rx_buff_size -= HW_ATL_B0_PTP_RXBUF_SIZE;
+ }
+
+ /* TPS Descriptor rate init */
+ hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
+ hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
+
+ /* TPS VM init */
+ hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
+
+ tx_buff_size /= cfg->tcs;
+ rx_buff_size /= cfg->tcs;
+ for (tc = 0; tc < cfg->tcs; tc++) {
+ u32 threshold = 0U;
+
+ /* Tx buf size TC0 */
+ hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, tx_buff_size, tc);
+
+ threshold = (tx_buff_size * (1024 / 32U) * 66U) / 100U;
+ hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self, threshold, tc);
+
+ threshold = (tx_buff_size * (1024 / 32U) * 50U) / 100U;
+ hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self, threshold, tc);
+
+ /* QoS Rx buf size per TC */
+ hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, rx_buff_size, tc);
+
+ threshold = (rx_buff_size * (1024U / 32U) * 66U) / 100U;
+ hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self, threshold, tc);
+
+ threshold = (rx_buff_size * (1024U / 32U) * 50U) / 100U;
+ hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self, threshold, tc);
+
+ hw_atl_b0_set_fc(self, self->aq_nic_cfg->fc.req, tc);
+ }
+
+ if (cfg->is_ptp)
+ hw_atl_b0_tc_ptp_set(self);
+
+ /* QoS 802.1p priority -> TC mapping */
+ for (prio = 0; prio < 8; ++prio)
+ hw_atl_rpf_rpb_user_priority_tc_map_set(self, prio,
+ cfg->prio_tc_map[prio]);
+
+ return aq_hw_err_from_flags(self);
+}
+
+int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
+ struct aq_rss_parameters *rss_params)
+{
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
+ unsigned int addr = 0U;
+ unsigned int i = 0U;
+ int err = 0;
+ u32 val;
+
+ for (i = 10, addr = 0U; i--; ++addr) {
+ u32 key_data = cfg->is_rss ?
+ __swab32(rss_params->hash_secret_key[i]) : 0U;
+ hw_atl_rpf_rss_key_wr_data_set(self, key_data);
+ hw_atl_rpf_rss_key_addr_set(self, addr);
+ hw_atl_rpf_rss_key_wr_en_set(self, 1U);
+ err = readx_poll_timeout_atomic(hw_atl_rpf_rss_key_wr_en_get,
+ self, val, val == 0,
+ 1000U, 10000U);
+ if (err < 0)
+ goto err_exit;
+ }
+
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
+ struct aq_rss_parameters *rss_params)
+{
+ u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
+ u8 *indirection_table = rss_params->indirection_table;
+ u16 bitary[1 + (HW_ATL_B0_RSS_REDIRECTION_MAX *
+ HW_ATL_B0_RSS_REDIRECTION_BITS / 16U)];
+ int err = 0;
+ u32 i = 0U;
+ u32 val;
+
+ memset(bitary, 0, sizeof(bitary));
+
+ for (i = HW_ATL_B0_RSS_REDIRECTION_MAX; i--;) {
+ (*(u32 *)(bitary + ((i * 3U) / 16U))) |=
+ ((indirection_table[i] % num_rss_queues) <<
+ ((i * 3U) & 0xFU));
+ }
+
+ for (i = ARRAY_SIZE(bitary); i--;) {
+ hw_atl_rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
+ hw_atl_rpf_rss_redir_tbl_addr_set(self, i);
+ hw_atl_rpf_rss_redir_wr_en_set(self, 1U);
+ err = readx_poll_timeout_atomic(hw_atl_rpf_rss_redir_wr_en_get,
+ self, val, val == 0,
+ 1000U, 10000U);
+ if (err < 0)
+ goto err_exit;
+ }
+
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
+ struct aq_nic_cfg_s *aq_nic_cfg)
+{
+ u64 rxcsum = !!(aq_nic_cfg->features & NETIF_F_RXCSUM);
+ unsigned int i;
+
+ /* TX checksum offloads */
+ hw_atl_tpo_ipv4header_crc_offload_en_set(self, 1);
+ hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);
+
+ /* RX checksum offloads */
+ hw_atl_rpo_ipv4header_crc_offload_en_set(self, rxcsum);
+ hw_atl_rpo_tcp_udp_crc_offload_en_set(self, rxcsum);
+
+ /* LSO offloads */
+ hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
+
+ /* Outer VLAN tag offload */
+ hw_atl_rpo_outer_vlan_tag_mode_set(self, 1U);
+
+ /* LRO offloads */
+ {
+ unsigned int val = (8U < HW_ATL_B0_LRO_RXD_MAX) ? 0x3U :
+ ((4U < HW_ATL_B0_LRO_RXD_MAX) ? 0x2U :
+ ((2U < HW_ATL_B0_LRO_RXD_MAX) ? 0x1U : 0x0));
+
+ for (i = 0; i < HW_ATL_B0_RINGS_MAX; i++)
+ hw_atl_rpo_lro_max_num_of_descriptors_set(self, val, i);
+
+ hw_atl_rpo_lro_time_base_divider_set(self, 0x61AU);
+ hw_atl_rpo_lro_inactive_interval_set(self, 0);
+ /* the LRO timebase divider is 5 uS (0x61a),
+ * which is multiplied by 50 (0x32)
+ * to get a maximum coalescing interval of 250 uS,
+ * which is the default value
+ */
+ hw_atl_rpo_lro_max_coalescing_interval_set(self, 50);
+
+ hw_atl_rpo_lro_qsessions_lim_set(self, 1U);
+
+ hw_atl_rpo_lro_total_desc_lim_set(self, 2U);
+
+ hw_atl_rpo_lro_patch_optimization_en_set(self, 1U);
+
+ hw_atl_rpo_lro_min_pay_of_first_pkt_set(self, 10U);
+
+ hw_atl_rpo_lro_pkt_lim_set(self, 1U);
+
+ hw_atl_rpo_lro_en_set(self,
+ aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
+ hw_atl_itr_rsc_en_set(self,
+ aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
+
+ hw_atl_itr_rsc_delay_set(self, 1U);
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_init_tx_tc_rate_limit(struct aq_hw_s *self)
+{
+ static const u32 max_weight = BIT(HW_ATL_TPS_DATA_TCTWEIGHT_WIDTH) - 1;
+ /* Scale factor is based on the number of bits in fractional portion */
+ static const u32 scale = BIT(HW_ATL_TPS_DESC_RATE_Y_WIDTH);
+ static const u32 frac_msk = HW_ATL_TPS_DESC_RATE_Y_MSK >>
+ HW_ATL_TPS_DESC_RATE_Y_SHIFT;
+ const u32 link_speed = self->aq_link_status.mbps;
+ struct aq_nic_cfg_s *nic_cfg = self->aq_nic_cfg;
+ unsigned long num_min_rated_tcs = 0;
+ u32 tc_weight[AQ_CFG_TCS_MAX];
+ u32 fixed_max_credit;
+ u8 min_rate_msk = 0;
+ u32 sum_weight = 0;
+ int tc;
+
+ /* By default max_credit is based upon the MTU (in units of 64 bytes) */
+ fixed_max_credit = nic_cfg->aq_hw_caps->mtu / 64;
+
+ if (link_speed) {
+ min_rate_msk = nic_cfg->tc_min_rate_msk &
+ (BIT(nic_cfg->tcs) - 1);
+ num_min_rated_tcs = hweight8(min_rate_msk);
+ }
+
+ /* First, calculate weights where min_rate is specified */
+ if (num_min_rated_tcs) {
+ for (tc = 0; tc != nic_cfg->tcs; tc++) {
+ if (!nic_cfg->tc_min_rate[tc]) {
+ tc_weight[tc] = 0;
+ continue;
+ }
+
+ tc_weight[tc] = (-1L + link_speed +
+ nic_cfg->tc_min_rate[tc] *
+ max_weight) /
+ link_speed;
+ tc_weight[tc] = min(tc_weight[tc], max_weight);
+ sum_weight += tc_weight[tc];
+ }
+ }
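+
+ /* The tc_weight[] expression above is a ceiling division, i.e.
+ * DIV_ROUND_UP(min_rate * max_weight, link_speed), so any TC with
+ * a non-zero min_rate is granted at least weight 1.
+ */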
+
+ /* WSP, if min_rate is set for at least one TC.
+ * RR otherwise.
+ *
+ * NB! MAC FW sets arb mode itself if PTP is enabled. We shouldn't
+ * overwrite it here in that case.
+ */
+ if (!nic_cfg->is_ptp)
+ hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, min_rate_msk ? 1U : 0U);
+
+ /* Data TC Arbiter takes precedence over Descriptor TC Arbiter,
+ * leave Descriptor TC Arbiter as RR.
+ */
+ hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
+
+ hw_atl_tps_tx_desc_rate_mode_set(self, nic_cfg->is_qos ? 1U : 0U);
+
+ for (tc = 0; tc != nic_cfg->tcs; tc++) {
+ const u32 en = (nic_cfg->tc_max_rate[tc] != 0) ? 1U : 0U;
+ const u32 desc = AQ_NIC_CFG_TCVEC2RING(nic_cfg, tc, 0);
+ u32 weight, max_credit;
+
+ hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, tc,
+ fixed_max_credit);
+ hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, tc, 0x1E);
+
+ if (num_min_rated_tcs) {
+ weight = tc_weight[tc];
+
+ if (!weight && sum_weight < max_weight)
+ weight = (max_weight - sum_weight) /
+ (nic_cfg->tcs - num_min_rated_tcs);
+ else if (!weight)
+ weight = 0x64;
+
+ max_credit = max(8 * weight, fixed_max_credit);
+ } else {
+ weight = 0x64;
+ max_credit = 0xFFF;
+ }
+
+ hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, tc, weight);
+ hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, tc,
+ max_credit);
+
+ hw_atl_tps_tx_desc_rate_en_set(self, desc, en);
+
+ if (en) {
+ /* Nominal rate is always 10G */
+ const u32 rate = 10000U * scale /
+ nic_cfg->tc_max_rate[tc];
+ const u32 rate_int = rate >>
+ HW_ATL_TPS_DESC_RATE_Y_WIDTH;
+ const u32 rate_frac = rate & frac_msk;
+
+ hw_atl_tps_tx_desc_rate_x_set(self, desc, rate_int);
+ hw_atl_tps_tx_desc_rate_y_set(self, desc, rate_frac);
+ } else {
+ /* A value of 1 indicates the queue is not
+ * rate controlled.
+ */
+ hw_atl_tps_tx_desc_rate_x_set(self, desc, 1U);
+ hw_atl_tps_tx_desc_rate_y_set(self, desc, 0U);
+ }
+ }
+ for (tc = nic_cfg->tcs; tc != AQ_CFG_TCS_MAX; tc++) {
+ const u32 desc = AQ_NIC_CFG_TCVEC2RING(nic_cfg, tc, 0);
+
+ hw_atl_tps_tx_desc_rate_en_set(self, desc, 0U);
+ hw_atl_tps_tx_desc_rate_x_set(self, desc, 1U);
+ hw_atl_tps_tx_desc_rate_y_set(self, desc, 0U);
+ }
+
+ return aq_hw_err_from_flags(self);
+}
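+
+/* The rate divider programmed above is fixed point, scaled by
+ * 2^HW_ATL_TPS_DESC_RATE_Y_WIDTH. A worked example: capping a TC at
+ * tc_max_rate = 2500 Mbps against the nominal 10G link gives
+ * rate = 10000 * scale / 2500 = 4 * scale, i.e. rate_int = 4 and
+ * rate_frac = 0.
+ */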
+
+static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
+{
+ struct aq_nic_cfg_s *nic_cfg = self->aq_nic_cfg;
+
+ /* Tx TC/Queue number config */
+ hw_atl_tpb_tps_tx_tc_mode_set(self, nic_cfg->tc_mode);
+
+ hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
+ hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
+ hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
+
+ /* Tx interrupts */
+ hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
+
+ /* misc */
+ aq_hw_write_reg(self, 0x00007040U, ATL_HW_IS_CHIP_FEATURE(self, TPO2) ?
+ 0x00010000U : 0x00000000U);
+ hw_atl_tdm_tx_dca_en_set(self, 0U);
+ hw_atl_tdm_tx_dca_mode_set(self, 0U);
+
+ hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);
+
+ return aq_hw_err_from_flags(self);
+}
+
+void hw_atl_b0_hw_init_rx_rss_ctrl1(struct aq_hw_s *self)
+{
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
+ u32 rss_ctrl1 = HW_ATL_RSS_DISABLED;
+
+ if (cfg->is_rss)
+ rss_ctrl1 = (cfg->tc_mode == AQ_TC_MODE_8TCS) ?
+ HW_ATL_RSS_ENABLED_8TCS_2INDEX_BITS :
+ HW_ATL_RSS_ENABLED_4TCS_3INDEX_BITS;
+
+ hw_atl_reg_rx_flr_rss_control1set(self, rss_ctrl1);
+}
+
+static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
+{
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
+ int i;
+
+ /* Rx TC/RSS number config */
+ hw_atl_rpb_rpf_rx_traf_class_mode_set(self, cfg->tc_mode);
+
+ /* Rx flow control */
+ hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);
+
+ /* RSS Ring selection */
+ hw_atl_b0_hw_init_rx_rss_ctrl1(self);
+
+ /* Multicast filters */
+ for (i = HW_ATL_B0_MAC_MAX; i--;) {
+ hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
+ hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);
+ }
+
+ hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
+ hw_atl_reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);
+
+ /* Vlan filters */
+ hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
+ hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);
+
+ hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
+
+ /* Always accept untagged packets */
+ hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
+ hw_atl_rpf_vlan_untagged_act_set(self, 1U);
+
+ /* Rx Interrupts */
+ hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
+
+ /* misc */
+ aq_hw_write_reg(self, 0x00005040U, ATL_HW_IS_CHIP_FEATURE(self, RPF2) ?
+ 0x000F0000U : 0x00000000U);
+
+ hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
+ hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
+
+ hw_atl_rdm_rx_dca_en_set(self, 0U);
+ hw_atl_rdm_rx_dca_mode_set(self, 0U);
+
+ return aq_hw_err_from_flags(self);
+}
+
+int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, const u8 *mac_addr)
+{
+ unsigned int h = 0U;
+ unsigned int l = 0U;
+ int err = 0;
+
+ if (!mac_addr) {
+ err = -EINVAL;
+ goto err_exit;
+ }
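+ /* Split the MAC address into the 16-bit MSW (bytes 0-1) and the
+ * 32-bit LSW (bytes 2-5) expected by the L2 filter registers.
+ */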
+ h = (mac_addr[0] << 8) | (mac_addr[1]);
+ l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
+ (mac_addr[4] << 8) | mac_addr[5];
+
+ hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC);
+ hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC);
+ hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC);
+ hw_atl_rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC);
+
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_b0_hw_init(struct aq_hw_s *self, const u8 *mac_addr)
+{
+ static u32 aq_hw_atl_igcr_table_[4][2] = {
+ [AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
+ [AQ_HW_IRQ_LEGACY] = { 0x20000080U, 0x20000080U },
+ [AQ_HW_IRQ_MSI] = { 0x20000021U, 0x20000025U },
+ [AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U },
+ };
+ struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
+ int err = 0;
+ u32 val;
+
+
+ hw_atl_b0_hw_init_tx_path(self);
+ hw_atl_b0_hw_init_rx_path(self);
+
+ hw_atl_b0_hw_mac_addr_set(self, mac_addr);
+
+ self->aq_fw_ops->set_link_speed(self, aq_nic_cfg->link_speed_msk);
+ self->aq_fw_ops->set_state(self, MPI_INIT);
+
+ hw_atl_b0_hw_qos_set(self);
+ hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
+ hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);
+
+ /* Force limit MRRS on RDM/TDM to 2K */
+ val = aq_hw_read_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR);
+ aq_hw_write_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR,
+ (val & ~0x707) | 0x404);
+
+ /* TX DMA total request limit. B0 hardware cannot handle more
+ * than (8K - MRRS) bytes of in-flight DMA data.
+ * The value 24 is in 256-byte units (i.e. 6K).
+ */
+ aq_hw_write_reg(self, HW_ATL_TX_DMA_TOTAL_REQ_LIMIT_ADR, 24);
+
+ /* Reset link status and read out initial hardware counters */
+ self->aq_link_status.mbps = 0;
+ self->aq_fw_ops->update_stats(self);
+
+ err = aq_hw_err_from_flags(self);
+ if (err < 0)
+ goto err_exit;
+
+ /* Interrupts */
+ hw_atl_reg_irq_glb_ctl_set(self,
+ aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
+ [(aq_nic_cfg->vecs > 1U) ?
+ 1 : 0]);
+
+ hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
+
+ /* Interrupts */
+ hw_atl_reg_gen_irq_map_set(self,
+ ((HW_ATL_B0_ERR_INT << 0x18) |
+ (1U << 0x1F)) |
+ ((HW_ATL_B0_ERR_INT << 0x10) |
+ (1U << 0x17)), 0U);
+
+ /* Enable link interrupt */
+ if (aq_nic_cfg->link_irq_vec)
+ hw_atl_reg_gen_irq_map_set(self, BIT(7) |
+ aq_nic_cfg->link_irq_vec, 3U);
+
+ hw_atl_b0_hw_offload_set(self, aq_nic_cfg);
+
+err_exit:
+ return err;
+}
+
+int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self, struct aq_ring_s *ring)
+{
+ hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);
+
+ return aq_hw_err_from_flags(self);
+}
+
+int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self, struct aq_ring_s *ring)
+{
+ hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);
+
+ return aq_hw_err_from_flags(self);
+}
+
+int hw_atl_b0_hw_start(struct aq_hw_s *self)
+{
+ hw_atl_tpb_tx_buff_en_set(self, 1);
+ hw_atl_rpb_rx_buff_en_set(self, 1);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
+
+ return 0;
+}
+
+int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self, struct aq_ring_s *ring,
+ unsigned int frags)
+{
+ struct aq_ring_buff_s *buff = NULL;
+ struct hw_atl_txd_s *txd = NULL;
+ unsigned int buff_pa_len = 0U;
+ unsigned int frag_count = 0U;
+ unsigned int pkt_len = 0U;
+ bool is_vlan = false;
+ bool is_gso = false;
+
+ buff = &ring->buff_ring[ring->sw_tail];
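+ /* A single-fragment packet carries its full length in ->len;
+ * multi-fragment packets keep the total in ->len_pkt.
+ */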
+ pkt_len = (buff->is_eop && buff->is_sop) ? buff->len : buff->len_pkt;
+
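+ /* Offload requests are described by a context descriptor (TXC)
+ * carrying MSS, header lengths and/or the VLAN tag; the data
+ * descriptors (TXD) that follow reference it via CTX_EN.
+ */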
+ for (frag_count = 0; frag_count < frags; frag_count++) {
+ txd = (struct hw_atl_txd_s *)&ring->dx_ring[ring->sw_tail *
+ HW_ATL_B0_TXD_SIZE];
+ txd->ctl = 0;
+ txd->ctl2 = 0;
+ txd->buf_addr = 0;
+
+ buff = &ring->buff_ring[ring->sw_tail];
+
+ if (buff->is_gso_tcp || buff->is_gso_udp) {
+ if (buff->is_gso_tcp)
+ txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TCP;
+ txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC;
+ txd->ctl |= (buff->len_l3 << 31) |
+ (buff->len_l2 << 24);
+ txd->ctl2 |= (buff->mss << 16);
+ is_gso = true;
+
+ pkt_len -= (buff->len_l4 +
+ buff->len_l3 +
+ buff->len_l2);
+ if (buff->is_ipv6)
+ txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPV6;
+ txd->ctl2 |= (buff->len_l4 << 8) |
+ (buff->len_l3 >> 1);
+ }
+ if (buff->is_vlan) {
+ txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC;
+ txd->ctl |= buff->vlan_tx_tag << 4;
+ is_vlan = true;
+ }
+ if (!buff->is_gso_tcp && !buff->is_gso_udp && !buff->is_vlan) {
+ buff_pa_len = buff->len;
+
+ txd->buf_addr = buff->pa;
+ txd->ctl |= (HW_ATL_B0_TXD_CTL_BLEN &
+ ((u32)buff_pa_len << 4));
+ txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXD;
+
+ /* PAY_LEN */
+ txd->ctl2 |= HW_ATL_B0_TXD_CTL2_LEN & (pkt_len << 14);
+
+ if (is_gso || is_vlan) {
+ /* enable tx context */
+ txd->ctl2 |= HW_ATL_B0_TXD_CTL2_CTX_EN;
+ }
+ if (is_gso)
+ txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_LSO;
+
+ /* Tx checksum offloads */
+ if (buff->is_ip_cso)
+ txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPCSO;
+
+ if (buff->is_udp_cso || buff->is_tcp_cso)
+ txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TUCSO;
+
+ if (is_vlan)
+ txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_VLAN;
+
+ if (unlikely(buff->is_eop)) {
+ txd->ctl |= HW_ATL_B0_TXD_CTL_EOP;
+ txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_WB;
+ is_gso = false;
+ is_vlan = false;
+ }
+ }
+ ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail);
+ }
+
+ hw_atl_b0_hw_tx_ring_tail_update(self, ring);
+
+ return aq_hw_err_from_flags(self);
+}
+
+int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
+ struct aq_ring_param_s *aq_ring_param)
+{
+ u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
+ u32 vlan_rx_stripping = self->aq_nic_cfg->is_vlan_rx_strip;
+ u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
+
+ hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);
+
+ hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
+
+ hw_atl_reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
+ aq_ring->idx);
+
+ hw_atl_reg_rx_dma_desc_base_addressmswset(self,
+ dma_desc_addr_msw, aq_ring->idx);
+
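+ /* The ring length register counts 8-descriptor units; the data
+ * buffer size below is programmed in 1KB units.
+ */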
+ hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
+
+ hw_atl_rdm_rx_desc_data_buff_size_set(self,
+ aq_ring->frame_max / 1024U,
+ aq_ring->idx);
+
+ hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
+ hw_atl_rpo_rx_desc_vlan_stripping_set(self, !!vlan_rx_stripping,
+ aq_ring->idx);
+
+ /* Rx ring set mode */
+
+ /* Mapping interrupt vector */
+ hw_atl_itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
+ hw_atl_itr_irq_map_en_rx_set(self, true, aq_ring->idx);
+
+ hw_atl_rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
+ hw_atl_rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);
+
+ return aq_hw_err_from_flags(self);
+}
+
+int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
+ struct aq_ring_param_s *aq_ring_param)
+{
+ u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
+ u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
+
+ hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
+ aq_ring->idx);
+
+ hw_atl_reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
+ aq_ring->idx);
+
+ hw_atl_tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
+
+ hw_atl_b0_hw_tx_ring_tail_update(self, aq_ring);
+
+ /* Set Tx threshold */
+ hw_atl_tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);
+
+ /* Mapping interrupt vector */
+ hw_atl_itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
+ hw_atl_itr_irq_map_en_tx_set(self, true, aq_ring->idx);
+
+ hw_atl_tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
+ hw_atl_tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);
+
+ return aq_hw_err_from_flags(self);
+}
+
+int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self, struct aq_ring_s *ring,
+ unsigned int sw_tail_old)
+{
+ for (; sw_tail_old != ring->sw_tail;
+ sw_tail_old = aq_ring_next_dx(ring, sw_tail_old)) {
+ struct hw_atl_rxd_s *rxd =
+ (struct hw_atl_rxd_s *)&ring->dx_ring[sw_tail_old *
+ HW_ATL_B0_RXD_SIZE];
+
+ struct aq_ring_buff_s *buff = &ring->buff_ring[sw_tail_old];
+
+ rxd->buf_addr = buff->pa;
+ rxd->hdr_addr = 0U;
+ }
+
+ hw_atl_reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_ring_hwts_rx_fill(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ unsigned int i;
+
+ for (i = aq_ring_avail_dx(ring); i--;
+ ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail)) {
+ struct hw_atl_rxd_s *rxd =
+ (struct hw_atl_rxd_s *)
+ &ring->dx_ring[ring->sw_tail * HW_ATL_B0_RXD_SIZE];
+
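+ /* Every buffer pointer aliases the area just past the descriptor
+ * ring: only the timestamp writeback is consumed on this ring.
+ */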
+ rxd->buf_addr = ring->dx_ring_pa + ring->size * ring->dx_size;
+ rxd->hdr_addr = 0U;
+ }
+ /* Make sure descriptors are updated before bumping the tail */
+ wmb();
+
+ hw_atl_reg_rx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_ring_hwts_rx_receive(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ while (ring->hw_head != ring->sw_tail) {
+ struct hw_atl_rxd_hwts_wb_s *hwts_wb =
+ (struct hw_atl_rxd_hwts_wb_s *)
+ (ring->dx_ring + (ring->hw_head * HW_ATL_B0_RXD_SIZE));
+
+ /* RxD is not done */
+ if (!(hwts_wb->sec_lw0 & 0x1U))
+ break;
+
+ ring->hw_head = aq_ring_next_dx(ring, ring->hw_head);
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ unsigned int hw_head_;
+ int err = 0;
+
+ hw_head_ = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);
+
+ if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
+ err = -ENXIO;
+ goto err_exit;
+ }
+
+ /* Validate that the new hw_head_ is reasonable. */
+ if (hw_head_ >= ring->size) {
+ err = -ENXIO;
+ goto err_exit;
+ }
+
+ ring->hw_head = hw_head_;
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, struct aq_ring_s *ring)
+{
+ for (; ring->hw_head != ring->sw_tail;
+ ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
+ struct aq_ring_buff_s *buff = NULL;
+ struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *)
+ &ring->dx_ring[ring->hw_head * HW_ATL_B0_RXD_SIZE];
+
+ unsigned int is_rx_check_sum_enabled = 0U;
+ unsigned int pkt_type = 0U;
+ u8 rx_stat = 0U;
+
+ if (!(rxd_wb->status & 0x1U)) { /* RxD is not done */
+ break;
+ }
+
+ buff = &ring->buff_ring[ring->hw_head];
+
+ buff->flags = 0U;
+ buff->is_hash_l4 = 0U;
+
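+ /* rx_stat bits: 0 - MAC error, 1 - IPv4 csum error,
+ * 2 - L4 csum error, 3 - L4 csum checked
+ */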
+ rx_stat = (0x0000003CU & rxd_wb->status) >> 2;
+
+ is_rx_check_sum_enabled = (rxd_wb->type >> 19) & 0x3U;
+
+ pkt_type = (rxd_wb->type & HW_ATL_B0_RXD_WB_STAT_PKTTYPE) >>
+ HW_ATL_B0_RXD_WB_STAT_PKTTYPE_SHIFT;
+
+ if (is_rx_check_sum_enabled & BIT(0) &&
+ (0x0U == (pkt_type & 0x3U)))
+ buff->is_ip_cso = (rx_stat & BIT(1)) ? 0U : 1U;
+
+ if (is_rx_check_sum_enabled & BIT(1)) {
+ if (0x4U == (pkt_type & 0x1CU))
+ buff->is_udp_cso = (rx_stat & BIT(2)) ? 0U :
+ !!(rx_stat & BIT(3));
+ else if (0x0U == (pkt_type & 0x1CU))
+ buff->is_tcp_cso = (rx_stat & BIT(2)) ? 0U :
+ !!(rx_stat & BIT(3));
+ }
+ buff->is_cso_err = !!(rx_stat & 0x6);
+ /* Checksum offload workaround for small packets */
+ if (unlikely(rxd_wb->pkt_len <= 60)) {
+ buff->is_ip_cso = 0U;
+ buff->is_cso_err = 0U;
+ }
+
+ if (self->aq_nic_cfg->is_vlan_rx_strip &&
+ ((pkt_type & HW_ATL_B0_RXD_WB_PKTTYPE_VLAN) ||
+ (pkt_type & HW_ATL_B0_RXD_WB_PKTTYPE_VLAN_DOUBLE))) {
+ buff->is_vlan = 1;
+ buff->vlan_rx_tag = le16_to_cpu(rxd_wb->vlan);
+ }
+
+ if ((rx_stat & BIT(0)) || rxd_wb->type & 0x1000U) {
+ /* MAC error or DMA error */
+ buff->is_error = 1U;
+ }
+ if (self->aq_nic_cfg->is_rss) {
+ /* RSS type lives in the low 4 bits of the type field */
+ u16 rss_type = rxd_wb->type & 0xFU;
+
+ if (rss_type && rss_type < 0x8U) {
+ buff->is_hash_l4 = (rss_type == 0x4 ||
+ rss_type == 0x5);
+ buff->rss_hash = rxd_wb->rss_hash;
+ }
+ }
+
+ buff->is_lro = !!(HW_ATL_B0_RXD_WB_STAT2_RSCCNT &
+ rxd_wb->status);
+ if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) {
+ buff->len = rxd_wb->pkt_len %
+ ring->frame_max;
+ buff->len = buff->len ?
+ buff->len : ring->frame_max;
+ buff->next = 0U;
+ buff->is_eop = 1U;
+ } else {
+ buff->len =
+ rxd_wb->pkt_len > ring->frame_max ?
+ ring->frame_max : rxd_wb->pkt_len;
+
+ if (buff->is_lro) {
+ /* LRO */
+ buff->next = rxd_wb->next_desc_ptr;
+ ++ring->stats.rx.lro_packets;
+ } else {
+ /* jumbo */
+ buff->next =
+ aq_ring_next_dx(ring,
+ ring->hw_head);
+ ++ring->stats.rx.jumbo_packets;
+ }
+ }
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
+{
+ hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask));
+
+ return aq_hw_err_from_flags(self);
+}
+
+int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
+{
+ hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask));
+ hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));
+
+ atomic_inc(&self->dpc);
+
+ return aq_hw_err_from_flags(self);
+}
+
+int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
+{
+ *mask = hw_atl_itr_irq_statuslsw_get(self);
+
+ return aq_hw_err_from_flags(self);
+}
+
+#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)
+
+int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
+ unsigned int packet_filter)
+{
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
+ unsigned int i = 0U;
+ u32 vlan_promisc;
+ u32 l2_promisc;
+
+ l2_promisc = IS_FILTER_ENABLED(IFF_PROMISC) ||
+ !!(cfg->priv_flags & BIT(AQ_HW_LOOPBACK_DMA_NET));
+ vlan_promisc = l2_promisc || cfg->is_vlan_force_promisc;
+
+ hw_atl_rpfl2promiscuous_mode_en_set(self, l2_promisc);
+
+ hw_atl_rpf_vlan_prom_mode_en_set(self, vlan_promisc);
+
+ hw_atl_rpfl2multicast_flr_en_set(self,
+ IS_FILTER_ENABLED(IFF_ALLMULTI) &&
+ IS_FILTER_ENABLED(IFF_MULTICAST), 0);
+
+ hw_atl_rpfl2_accept_all_mc_packets_set(self,
+ IS_FILTER_ENABLED(IFF_ALLMULTI) &&
+ IS_FILTER_ENABLED(IFF_MULTICAST));
+
+ hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
+
+
+ for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i)
+ hw_atl_rpfl2_uc_flr_en_set(self,
+ (cfg->is_mc_list_enabled &&
+ (i <= cfg->mc_list_count)) ?
+ 1U : 0U, i);
+
+ return aq_hw_err_from_flags(self);
+}
+
+#undef IS_FILTER_ENABLED
+
+static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
+ u8 ar_mac
+ [AQ_HW_MULTICAST_ADDRESS_MAX]
+ [ETH_ALEN],
+ u32 count)
+{
+ int err = 0;
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
+
+ if (count > (HW_ATL_B0_MAC_MAX - HW_ATL_B0_MAC_MIN)) {
+ err = -EBADRQC;
+ goto err_exit;
+ }
+ for (cfg->mc_list_count = 0U;
+ cfg->mc_list_count < count;
+ ++cfg->mc_list_count) {
+ u32 i = cfg->mc_list_count;
+ u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]);
+ u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
+ (ar_mac[i][4] << 8) | ar_mac[i][5];
+
+ hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i);
+
+ hw_atl_rpfl2unicast_dest_addresslsw_set(self, l,
+ HW_ATL_B0_MAC_MIN + i);
+
+ hw_atl_rpfl2unicast_dest_addressmsw_set(self, h,
+ HW_ATL_B0_MAC_MIN + i);
+
+ hw_atl_rpfl2_uc_flr_en_set(self,
+ (cfg->is_mc_list_enabled),
+ HW_ATL_B0_MAC_MIN + i);
+ }
+
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
+{
+ unsigned int i = 0U;
+ u32 itr_tx = 2U;
+ u32 itr_rx = 2U;
+
+ switch (self->aq_nic_cfg->itr) {
+ case AQ_CFG_INTERRUPT_MODERATION_ON:
+ case AQ_CFG_INTERRUPT_MODERATION_AUTO:
+ hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
+ hw_atl_tdm_tdm_intr_moder_en_set(self, 1U);
+ hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
+ hw_atl_rdm_rdm_intr_moder_en_set(self, 1U);
+
+ if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) {
+ /* HW timers are in 2us units */
+ int tx_max_timer = self->aq_nic_cfg->tx_itr / 2;
+ int tx_min_timer = tx_max_timer / 2;
+
+ int rx_max_timer = self->aq_nic_cfg->rx_itr / 2;
+ int rx_min_timer = rx_max_timer / 2;
+
+ tx_max_timer = min(HW_ATL_INTR_MODER_MAX, tx_max_timer);
+ tx_min_timer = min(HW_ATL_INTR_MODER_MIN, tx_min_timer);
+ rx_max_timer = min(HW_ATL_INTR_MODER_MAX, rx_max_timer);
+ rx_min_timer = min(HW_ATL_INTR_MODER_MIN, rx_min_timer);
+
+ itr_tx |= tx_min_timer << 0x8U;
+ itr_tx |= tx_max_timer << 0x10U;
+ itr_rx |= rx_min_timer << 0x8U;
+ itr_rx |= rx_max_timer << 0x10U;
+ } else {
+ static unsigned int hw_atl_b0_timers_table_tx_[][2] = {
+ {0xfU, 0xffU}, /* 10Gbit */
+ {0xfU, 0x1ffU}, /* 5Gbit */
+ {0xfU, 0x1ffU}, /* 5Gbit 5GS */
+ {0xfU, 0x1ffU}, /* 2.5Gbit */
+ {0xfU, 0x1ffU}, /* 1Gbit */
+ {0xfU, 0x1ffU}, /* 100Mbit */
+ };
+
+ static unsigned int hw_atl_b0_timers_table_rx_[][2] = {
+ {0x6U, 0x38U},/* 10Gbit */
+ {0xCU, 0x70U},/* 5Gbit */
+ {0xCU, 0x70U},/* 5Gbit 5GS */
+ {0x18U, 0xE0U},/* 2.5Gbit */
+ {0x30U, 0x80U},/* 1Gbit */
+ {0x4U, 0x50U},/* 100Mbit */
+ };
+
+ unsigned int speed_index =
+ hw_atl_utils_mbps_2_speed_index(
+ self->aq_link_status.mbps);
+
+ /* Update user visible ITR settings */
+ self->aq_nic_cfg->tx_itr = hw_atl_b0_timers_table_tx_
+ [speed_index][1] * 2;
+ self->aq_nic_cfg->rx_itr = hw_atl_b0_timers_table_rx_
+ [speed_index][1] * 2;
+
+ itr_tx |= hw_atl_b0_timers_table_tx_
+ [speed_index][0] << 0x8U;
+ itr_tx |= hw_atl_b0_timers_table_tx_
+ [speed_index][1] << 0x10U;
+
+ itr_rx |= hw_atl_b0_timers_table_rx_
+ [speed_index][0] << 0x8U;
+ itr_rx |= hw_atl_b0_timers_table_rx_
+ [speed_index][1] << 0x10U;
+ }
+ break;
+ case AQ_CFG_INTERRUPT_MODERATION_OFF:
+ hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
+ hw_atl_tdm_tdm_intr_moder_en_set(self, 0U);
+ hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
+ hw_atl_rdm_rdm_intr_moder_en_set(self, 0U);
+ itr_tx = 0U;
+ itr_rx = 0U;
+ break;
+ }
+
+ for (i = HW_ATL_B0_RINGS_MAX; i--;) {
+ hw_atl_reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
+ hw_atl_reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
+{
+ int err;
+ u32 val;
+
+ hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);
+
+ /* Invalidate Descriptor Cache to prevent writing to the cached
+ * descriptors and to the data pointer of those descriptors
+ */
+ hw_atl_rdm_rx_dma_desc_cache_init_tgl(self);
+
+ err = aq_hw_err_from_flags(self);
+
+ if (err)
+ goto err_exit;
+
+ readx_poll_timeout_atomic(hw_atl_rdm_rx_dma_desc_cache_init_done_get,
+ self, val, val == 1, 1000U, 10000U);
+
+err_exit:
+ return err;
+}
+
+int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self, struct aq_ring_s *ring)
+{
+ hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);
+
+ return aq_hw_err_from_flags(self);
+}
+
+int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, struct aq_ring_s *ring)
+{
+ hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);
+
+ return aq_hw_err_from_flags(self);
+}
+
+#define get_ptp_ts_val_u64(self, indx) \
+ ((u64)(hw_atl_pcs_ptp_clock_get(self, indx) & 0xffff))
+
+static void hw_atl_b0_get_ptp_ts(struct aq_hw_s *self, u64 *stamp)
+{
+ u64 ns;
+
+ hw_atl_pcs_ptp_clock_read_enable(self, 1);
+ hw_atl_pcs_ptp_clock_read_enable(self, 0);
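+ /* The toggle above latches the clock; seconds come from 16-bit
+ * words 0-1, nanoseconds from words 3-4.
+ */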
+ ns = (get_ptp_ts_val_u64(self, 0) +
+ (get_ptp_ts_val_u64(self, 1) << 16)) * NSEC_PER_SEC +
+ (get_ptp_ts_val_u64(self, 3) +
+ (get_ptp_ts_val_u64(self, 4) << 16));
+
+ *stamp = ns + self->ptp_clk_offset;
+}
+
+static void hw_atl_b0_adj_params_get(u64 freq, s64 adj, u32 *ns, u32 *fns)
+{
+ /* Scale up before dividing to preserve precision */
+ s64 base_ns = ((adj + NSEC_PER_SEC) * NSEC_PER_SEC);
+ u64 nsi_frac = 0;
+ u64 nsi;
+
+ base_ns = div64_s64(base_ns, freq);
+ nsi = div64_u64(base_ns, NSEC_PER_SEC);
+
+ if (base_ns != nsi * NSEC_PER_SEC) {
+ s64 divisor = div64_s64((s64)NSEC_PER_SEC * NSEC_PER_SEC,
+ base_ns - nsi * NSEC_PER_SEC);
+ nsi_frac = div64_s64(AQ_FRAC_PER_NS * NSEC_PER_SEC, divisor);
+ }
+
+ *ns = (u32)nsi;
+ *fns = (u32)nsi_frac;
+}
+
+static void
+hw_atl_b0_mac_adj_param_calc(struct hw_fw_request_ptp_adj_freq *ptp_adj_freq,
+ u64 phyfreq, u64 macfreq)
+{
+ s64 adj_fns_val;
+ s64 fns_in_sec_phy = phyfreq * (ptp_adj_freq->fns_phy +
+ AQ_FRAC_PER_NS * ptp_adj_freq->ns_phy);
+ s64 fns_in_sec_mac = macfreq * (ptp_adj_freq->fns_mac +
+ AQ_FRAC_PER_NS * ptp_adj_freq->ns_mac);
+ s64 fault_in_sec_phy = AQ_FRAC_PER_NS * NSEC_PER_SEC - fns_in_sec_phy;
+ s64 fault_in_sec_mac = AQ_FRAC_PER_NS * NSEC_PER_SEC - fns_in_sec_mac;
+ /* MAC MCP counter freq is macfreq / 4 */
+ s64 diff_in_mcp_overflow = (fault_in_sec_mac - fault_in_sec_phy) *
+ 4 * AQ_FRAC_PER_NS;
+
+ diff_in_mcp_overflow = div64_s64(diff_in_mcp_overflow,
+ AQ_HW_MAC_COUNTER_HZ);
+ adj_fns_val = (ptp_adj_freq->fns_mac + AQ_FRAC_PER_NS *
+ ptp_adj_freq->ns_mac) + diff_in_mcp_overflow;
+
+ ptp_adj_freq->mac_ns_adj = div64_s64(adj_fns_val, AQ_FRAC_PER_NS);
+ ptp_adj_freq->mac_fns_adj = adj_fns_val - ptp_adj_freq->mac_ns_adj *
+ AQ_FRAC_PER_NS;
+}
+
+static int hw_atl_b0_adj_sys_clock(struct aq_hw_s *self, s64 delta)
+{
+ self->ptp_clk_offset += delta;
+
+ self->aq_fw_ops->adjust_ptp(self, self->ptp_clk_offset);
+
+ return 0;
+}
+
+static int hw_atl_b0_set_sys_clock(struct aq_hw_s *self, u64 time, u64 ts)
+{
+ s64 delta = time - (self->ptp_clk_offset + ts);
+
+ return hw_atl_b0_adj_sys_clock(self, delta);
+}
+
+static int hw_atl_b0_ts_to_sys_clock(struct aq_hw_s *self, u64 ts, u64 *time)
+{
+ *time = self->ptp_clk_offset + ts;
+ return 0;
+}
+
+static int hw_atl_b0_adj_clock_freq(struct aq_hw_s *self, s32 ppb)
+{
+ struct hw_fw_request_iface fwreq;
+ size_t size;
+
+ memset(&fwreq, 0, sizeof(fwreq));
+
+ fwreq.msg_id = HW_AQ_FW_REQUEST_PTP_ADJ_FREQ;
+ hw_atl_b0_adj_params_get(AQ_HW_MAC_COUNTER_HZ, ppb,
+ &fwreq.ptp_adj_freq.ns_mac,
+ &fwreq.ptp_adj_freq.fns_mac);
+ hw_atl_b0_adj_params_get(AQ_HW_PHY_COUNTER_HZ, ppb,
+ &fwreq.ptp_adj_freq.ns_phy,
+ &fwreq.ptp_adj_freq.fns_phy);
+ hw_atl_b0_mac_adj_param_calc(&fwreq.ptp_adj_freq,
+ AQ_HW_PHY_COUNTER_HZ,
+ AQ_HW_MAC_COUNTER_HZ);
+
+ size = sizeof(fwreq.msg_id) + sizeof(fwreq.ptp_adj_freq);
+ return self->aq_fw_ops->send_fw_request(self, &fwreq, size);
+}
+
+static int hw_atl_b0_gpio_pulse(struct aq_hw_s *self, u32 index,
+ u64 start, u32 period)
+{
+ struct hw_fw_request_iface fwreq;
+ size_t size;
+
+ memset(&fwreq, 0, sizeof(fwreq));
+
+ fwreq.msg_id = HW_AQ_FW_REQUEST_PTP_GPIO_CTRL;
+ fwreq.ptp_gpio_ctrl.index = index;
+ fwreq.ptp_gpio_ctrl.period = period;
+ /* Apply time offset */
+ fwreq.ptp_gpio_ctrl.start = start;
+
+ size = sizeof(fwreq.msg_id) + sizeof(fwreq.ptp_gpio_ctrl);
+ return self->aq_fw_ops->send_fw_request(self, &fwreq, size);
+}
+
+static int hw_atl_b0_extts_gpio_enable(struct aq_hw_s *self, u32 index,
+ u32 enable)
+{
+ /* Enable/disable Sync1588 GPIO Timestamping */
+ aq_phy_write_reg(self, MDIO_MMD_PCS, 0xc611, enable ? 0x71 : 0);
+
+ return 0;
+}
+
+static int hw_atl_b0_get_sync_ts(struct aq_hw_s *self, u64 *ts)
+{
+ u64 sec_l;
+ u64 sec_h;
+ u64 nsec_l;
+ u64 nsec_h;
+
+ if (!ts)
+ return -1;
+
+ /* PTP external GPIO clock seconds count 15:0 */
+ sec_l = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc914);
+ /* PTP external GPIO clock seconds count 31:16 */
+ sec_h = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc915);
+ /* PTP external GPIO clock nanoseconds count 15:0 */
+ nsec_l = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc916);
+ /* PTP external GPIO clock nanoseconds count 31:16 */
+ nsec_h = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc917);
+
+ *ts = (nsec_h << 16) + nsec_l + ((sec_h << 16) + sec_l) * NSEC_PER_SEC;
+
+ return 0;
+}
+
+static u16 hw_atl_b0_rx_extract_ts(struct aq_hw_s *self, u8 *p,
+ unsigned int len, u64 *timestamp)
+{
+ unsigned int offset = 14;
+ struct ethhdr *eth;
+ __be64 sec;
+ __be32 ns;
+ u8 *ptr;
+
+ if (len <= offset || !timestamp)
+ return 0;
+
+ /* The TIMESTAMP appended to the end of the packet has the
+ * following big-endian layout:
+ * struct {
+ * uint64_t sec;
+ * uint32_t ns;
+ * uint16_t stream_id;
+ * };
+ */
+ ptr = p + (len - offset);
+ memcpy(&sec, ptr, sizeof(sec));
+ ptr += sizeof(sec);
+ memcpy(&ns, ptr, sizeof(ns));
+
+ *timestamp = (be64_to_cpu(sec) & 0xffffffffffffllu) * NSEC_PER_SEC +
+ be32_to_cpu(ns) + self->ptp_clk_offset;
+
+ eth = (struct ethhdr *)p;
+
+ return (eth->h_proto == htons(ETH_P_1588)) ? 12 : 14;
+}
+
+static int hw_atl_b0_extract_hwts(struct aq_hw_s *self, u8 *p, unsigned int len,
+ u64 *timestamp)
+{
+ struct hw_atl_rxd_hwts_wb_s *hwts_wb = (struct hw_atl_rxd_hwts_wb_s *)p;
+ u64 tmp, sec, ns;
+
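+ /* The 48-bit seconds value is scattered across the writeback
+ * descriptor: sec_lw0[11:2], sec_lw1[31:16], sec_hw[11:0] and
+ * sec_hw[31:22].
+ */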
+ sec = 0;
+ tmp = (hwts_wb->sec_lw0 >> 2) & 0x3ff;
+ sec += tmp;
+ tmp = (u64)((hwts_wb->sec_lw1 >> 16) & 0xffff) << 10;
+ sec += tmp;
+ tmp = (u64)(hwts_wb->sec_hw & 0xfff) << 26;
+ sec += tmp;
+ tmp = (u64)((hwts_wb->sec_hw >> 22) & 0x3ff) << 38;
+ sec += tmp;
+ ns = sec * NSEC_PER_SEC + hwts_wb->ns;
+ if (timestamp)
+ *timestamp = ns + self->ptp_clk_offset;
+ return 0;
+}
+
+static int hw_atl_b0_hw_fl3l4_clear(struct aq_hw_s *self,
+ struct aq_rx_filter_l3l4 *data)
+{
+ u8 location = data->location;
+
+ if (!data->is_ipv6) {
+ hw_atl_rpfl3l4_cmd_clear(self, location);
+ hw_atl_rpf_l4_spd_set(self, 0U, location);
+ hw_atl_rpf_l4_dpd_set(self, 0U, location);
+ hw_atl_rpfl3l4_ipv4_src_addr_clear(self, location);
+ hw_atl_rpfl3l4_ipv4_dest_addr_clear(self, location);
+ } else {
+ int i;
+
+ for (i = 0; i < HW_ATL_RX_CNT_REG_ADDR_IPV6; ++i) {
+ hw_atl_rpfl3l4_cmd_clear(self, location + i);
+ hw_atl_rpf_l4_spd_set(self, 0U, location + i);
+ hw_atl_rpf_l4_dpd_set(self, 0U, location + i);
+ }
+ hw_atl_rpfl3l4_ipv6_src_addr_clear(self, location);
+ hw_atl_rpfl3l4_ipv6_dest_addr_clear(self, location);
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_fl3l4_set(struct aq_hw_s *self,
+ struct aq_rx_filter_l3l4 *data)
+{
+ u8 location = data->location;
+
+ hw_atl_b0_hw_fl3l4_clear(self, data);
+
+ if (data->cmd & (HW_ATL_RX_ENABLE_CMP_DEST_ADDR_L3 |
+ HW_ATL_RX_ENABLE_CMP_SRC_ADDR_L3)) {
+ if (!data->is_ipv6) {
+ hw_atl_rpfl3l4_ipv4_dest_addr_set(self,
+ location,
+ data->ip_dst[0]);
+ hw_atl_rpfl3l4_ipv4_src_addr_set(self,
+ location,
+ data->ip_src[0]);
+ } else {
+ hw_atl_rpfl3l4_ipv6_dest_addr_set(self,
+ location,
+ data->ip_dst);
+ hw_atl_rpfl3l4_ipv6_src_addr_set(self,
+ location,
+ data->ip_src);
+ }
+ }
+
+ if (data->cmd & (HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4 |
+ HW_ATL_RX_ENABLE_CMP_SRC_PORT_L4)) {
+ hw_atl_rpf_l4_dpd_set(self, data->p_dst, location);
+ hw_atl_rpf_l4_spd_set(self, data->p_src, location);
+ }
+
+ hw_atl_rpfl3l4_cmd_set(self, location, data->cmd);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_fl2_set(struct aq_hw_s *self,
+ struct aq_rx_filter_l2 *data)
+{
+ hw_atl_rpf_etht_flr_en_set(self, 1U, data->location);
+ hw_atl_rpf_etht_flr_set(self, data->ethertype, data->location);
+ hw_atl_rpf_etht_user_priority_en_set(self,
+ !!data->user_priority_en,
+ data->location);
+ if (data->user_priority_en)
+ hw_atl_rpf_etht_user_priority_set(self,
+ data->user_priority,
+ data->location);
+
+ if (data->queue < 0) {
+ hw_atl_rpf_etht_flr_act_set(self, 0U, data->location);
+ hw_atl_rpf_etht_rx_queue_en_set(self, 0U, data->location);
+ } else {
+ hw_atl_rpf_etht_flr_act_set(self, 1U, data->location);
+ hw_atl_rpf_etht_rx_queue_en_set(self, 1U, data->location);
+ hw_atl_rpf_etht_rx_queue_set(self, data->queue, data->location);
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_fl2_clear(struct aq_hw_s *self,
+ struct aq_rx_filter_l2 *data)
+{
+ hw_atl_rpf_etht_flr_en_set(self, 0U, data->location);
+ hw_atl_rpf_etht_flr_set(self, 0U, data->location);
+ hw_atl_rpf_etht_user_priority_en_set(self, 0U, data->location);
+
+ return aq_hw_err_from_flags(self);
+}
+
+/*
+ * @brief Set VLAN filter table
+ * @details Configure the VLAN filter table to accept traffic (and assign
+ * the receive queue) for particular VLAN IDs.
+ * Note: use this function under VLAN promiscuous mode so that traffic
+ * is not lost.
+ *
+ * @param self hardware context (struct aq_hw_s)
+ * @param aq_vlans VLAN filter configuration
+ * @return 0 - OK, <0 - error
+ */
+static int hw_atl_b0_hw_vlan_set(struct aq_hw_s *self,
+ struct aq_rx_filter_vlan *aq_vlans)
+{
+ int i;
+
+ for (i = 0; i < AQ_VLAN_MAX_FILTERS; i++) {
+ hw_atl_rpf_vlan_flr_en_set(self, 0U, i);
+ hw_atl_rpf_vlan_rxq_en_flr_set(self, 0U, i);
+ if (aq_vlans[i].enable) {
+ hw_atl_rpf_vlan_id_flr_set(self,
+ aq_vlans[i].vlan_id,
+ i);
+ hw_atl_rpf_vlan_flr_act_set(self, 1U, i);
+ hw_atl_rpf_vlan_flr_en_set(self, 1U, i);
+ if (aq_vlans[i].queue != 0xFF) {
+ hw_atl_rpf_vlan_rxq_flr_set(self,
+ aq_vlans[i].queue,
+ i);
+ hw_atl_rpf_vlan_rxq_en_flr_set(self, 1U, i);
+ }
+ }
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_vlan_ctrl(struct aq_hw_s *self, bool enable)
+{
+ /* Set promiscuous mode when disabling the VLAN filter */
+ hw_atl_rpf_vlan_prom_mode_en_set(self, !enable);
+
+ return aq_hw_err_from_flags(self);
+}
+
+int hw_atl_b0_set_loopback(struct aq_hw_s *self, u32 mode, bool enable)
+{
+ switch (mode) {
+ case AQ_HW_LOOPBACK_DMA_SYS:
+ hw_atl_tpb_tx_dma_sys_lbk_en_set(self, enable);
+ hw_atl_rpb_dma_sys_lbk_set(self, enable);
+ break;
+ case AQ_HW_LOOPBACK_PKT_SYS:
+ hw_atl_tpo_tx_pkt_sys_lbk_en_set(self, enable);
+ hw_atl_rpf_tpo_to_rpf_sys_lbk_set(self, enable);
+ break;
+ case AQ_HW_LOOPBACK_DMA_NET:
+ hw_atl_rpf_vlan_prom_mode_en_set(self, enable);
+ hw_atl_rpfl2promiscuous_mode_en_set(self, enable);
+ hw_atl_tpb_tx_tx_clk_gate_en_set(self, !enable);
+ hw_atl_tpb_tx_dma_net_lbk_en_set(self, enable);
+ hw_atl_rpb_dma_net_lbk_set(self, enable);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static u32 hw_atl_b0_ts_ready_and_latch_high_get(struct aq_hw_s *self)
+{
+ if (hw_atl_ts_ready_get(self) && hw_atl_ts_ready_latch_high_get(self))
+ return 1;
+
+ return 0;
+}
+
+static int hw_atl_b0_get_mac_temp(struct aq_hw_s *self, u32 *temp)
+{
+ bool ts_disabled;
+ int err;
+ u32 val;
+ u32 ts;
+
+ ts_disabled = (hw_atl_ts_power_down_get(self) == 1U);
+
+ if (ts_disabled) {
+ /* Set AFE Temperature Sensor to on (off by default) */
+ hw_atl_ts_power_down_set(self, 0U);
+
+ /* Reset internal capacitors, biasing, and counters */
+ hw_atl_ts_reset_set(self, 1);
+ hw_atl_ts_reset_set(self, 0);
+ }
+
+ err = readx_poll_timeout(hw_atl_b0_ts_ready_and_latch_high_get, self,
+ val, val == 1, 10000U, 500000U);
+ if (err)
+ return err;
+
+ ts = hw_atl_ts_data_get(self);
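+ /* Convert the raw reading with the quadratic calibration curve;
+ * the result is in millidegrees Celsius.
+ */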
+ *temp = ts * ts * 16 / 100000 + 60 * ts - 83410;
+
+ if (ts_disabled) {
+ /* Set AFE Temperature Sensor back to off */
+ hw_atl_ts_power_down_set(self, 1U);
+ }
+
+ return 0;
+}
+
+const struct aq_hw_ops hw_atl_ops_b0 = {
+ .hw_soft_reset = hw_atl_utils_soft_reset,
+ .hw_prepare = hw_atl_utils_initfw,
+ .hw_set_mac_address = hw_atl_b0_hw_mac_addr_set,
+ .hw_init = hw_atl_b0_hw_init,
+ .hw_reset = hw_atl_b0_hw_reset,
+ .hw_start = hw_atl_b0_hw_start,
+ .hw_ring_tx_start = hw_atl_b0_hw_ring_tx_start,
+ .hw_ring_tx_stop = hw_atl_b0_hw_ring_tx_stop,
+ .hw_ring_rx_start = hw_atl_b0_hw_ring_rx_start,
+ .hw_ring_rx_stop = hw_atl_b0_hw_ring_rx_stop,
+ .hw_stop = hw_atl_b0_hw_stop,
+
+ .hw_ring_tx_xmit = hw_atl_b0_hw_ring_tx_xmit,
+ .hw_ring_tx_head_update = hw_atl_b0_hw_ring_tx_head_update,
+
+ .hw_ring_rx_receive = hw_atl_b0_hw_ring_rx_receive,
+ .hw_ring_rx_fill = hw_atl_b0_hw_ring_rx_fill,
+
+ .hw_irq_enable = hw_atl_b0_hw_irq_enable,
+ .hw_irq_disable = hw_atl_b0_hw_irq_disable,
+ .hw_irq_read = hw_atl_b0_hw_irq_read,
+
+ .hw_ring_rx_init = hw_atl_b0_hw_ring_rx_init,
+ .hw_ring_tx_init = hw_atl_b0_hw_ring_tx_init,
+ .hw_packet_filter_set = hw_atl_b0_hw_packet_filter_set,
+ .hw_filter_l2_set = hw_atl_b0_hw_fl2_set,
+ .hw_filter_l2_clear = hw_atl_b0_hw_fl2_clear,
+ .hw_filter_l3l4_set = hw_atl_b0_hw_fl3l4_set,
+ .hw_filter_vlan_set = hw_atl_b0_hw_vlan_set,
+ .hw_filter_vlan_ctrl = hw_atl_b0_hw_vlan_ctrl,
+ .hw_multicast_list_set = hw_atl_b0_hw_multicast_list_set,
+ .hw_interrupt_moderation_set = hw_atl_b0_hw_interrupt_moderation_set,
+ .hw_rss_set = hw_atl_b0_hw_rss_set,
+ .hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set,
+ .hw_tc_rate_limit_set = hw_atl_b0_hw_init_tx_tc_rate_limit,
+ .hw_get_regs = hw_atl_utils_hw_get_regs,
+ .hw_get_hw_stats = hw_atl_utils_get_hw_stats,
+ .hw_get_fw_version = hw_atl_utils_get_fw_version,
+
+ .hw_ring_hwts_rx_fill = hw_atl_b0_hw_ring_hwts_rx_fill,
+ .hw_ring_hwts_rx_receive = hw_atl_b0_hw_ring_hwts_rx_receive,
+
+ .hw_get_ptp_ts = hw_atl_b0_get_ptp_ts,
+ .hw_adj_sys_clock = hw_atl_b0_adj_sys_clock,
+ .hw_set_sys_clock = hw_atl_b0_set_sys_clock,
+ .hw_ts_to_sys_clock = hw_atl_b0_ts_to_sys_clock,
+ .hw_adj_clock_freq = hw_atl_b0_adj_clock_freq,
+ .hw_gpio_pulse = hw_atl_b0_gpio_pulse,
+ .hw_extts_gpio_enable = hw_atl_b0_extts_gpio_enable,
+ .hw_get_sync_ts = hw_atl_b0_get_sync_ts,
+ .rx_extract_ts = hw_atl_b0_rx_extract_ts,
+ .extract_hwts = hw_atl_b0_extract_hwts,
+ .hw_set_offload = hw_atl_b0_hw_offload_set,
+ .hw_set_loopback = hw_atl_b0_set_loopback,
+ .hw_set_fc = hw_atl_b0_set_fc,
+
+ .hw_get_mac_temp = hw_atl_b0_get_mac_temp,
+};
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
new file mode 100644
index 000000000..5298846dd
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
+ */
+
+/* File hw_atl_b0.h: Declaration of abstract interface for Atlantic hardware
+ * specific functions.
+ */
+
+#ifndef HW_ATL_B0_H
+#define HW_ATL_B0_H
+
+#include "../aq_common.h"
+
+extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc100;
+extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc107;
+extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc108;
+extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc109;
+extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc111;
+extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc112;
+
+#define hw_atl_b0_caps_aqc100s hw_atl_b0_caps_aqc100
+#define hw_atl_b0_caps_aqc107s hw_atl_b0_caps_aqc107
+#define hw_atl_b0_caps_aqc108s hw_atl_b0_caps_aqc108
+#define hw_atl_b0_caps_aqc109s hw_atl_b0_caps_aqc109
+#define hw_atl_b0_caps_aqc111s hw_atl_b0_caps_aqc111
+#define hw_atl_b0_caps_aqc112s hw_atl_b0_caps_aqc112
+
+extern const struct aq_hw_ops hw_atl_ops_b0;
+
+#define hw_atl_ops_b1 hw_atl_ops_b0
+
+int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
+ struct aq_rss_parameters *rss_params);
+int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
+ struct aq_nic_cfg_s *aq_nic_cfg);
+
+int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self, struct aq_ring_s *ring);
+int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self, struct aq_ring_s *ring);
+
+int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
+ struct aq_ring_param_s *aq_ring_param);
+int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self, struct aq_ring_s *ring,
+ unsigned int sw_tail_old);
+int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, struct aq_ring_s *ring);
+
+int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
+ struct aq_ring_param_s *aq_ring_param);
+int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self, struct aq_ring_s *ring,
+ unsigned int frags);
+int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
+ struct aq_ring_s *ring);
+
+int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self, struct aq_ring_s *ring);
+int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, struct aq_ring_s *ring);
+
+void hw_atl_b0_hw_init_rx_rss_ctrl1(struct aq_hw_s *self);
+
+int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, const u8 *mac_addr);
+
+int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc);
+int hw_atl_b0_set_loopback(struct aq_hw_s *self, u32 mode, bool enable);
+
+int hw_atl_b0_hw_start(struct aq_hw_s *self);
+
+int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask);
+int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask);
+int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask);
+
+int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
+ unsigned int packet_filter);
+
+#endif /* HW_ATL_B0_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
new file mode 100644
index 000000000..cf460d61a
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
+ */
+
+/* File hw_atl_b0_internal.h: Definition of Atlantic B0 chip specific
+ * constants.
+ */
+
+#ifndef HW_ATL_B0_INTERNAL_H
+#define HW_ATL_B0_INTERNAL_H
+
+#include "../aq_common.h"
+
+#define HW_ATL_B0_MTU_JUMBO 16352U
+#define HW_ATL_B0_MTU 1514U
+
+#define HW_ATL_B0_TX_RINGS 4U
+#define HW_ATL_B0_RX_RINGS 4U
+
+#define HW_ATL_B0_RINGS_MAX 32U
+#define HW_ATL_B0_TXD_SIZE (16U)
+#define HW_ATL_B0_RXD_SIZE (16U)
+
+#define HW_ATL_B0_MAC 0U
+#define HW_ATL_B0_MAC_MIN 1U
+#define HW_ATL_B0_MAC_MAX 33U
+
+/* UCAST/MCAST filters */
+#define HW_ATL_B0_UCAST_FILTERS_MAX 38
+#define HW_ATL_B0_MCAST_FILTERS_MAX 8
+
+/* interrupts */
+#define HW_ATL_B0_ERR_INT 8U
+#define HW_ATL_B0_INT_MASK (0xFFFFFFFFU)
+
+#define HW_ATL_B0_TXD_CTL2_LEN (0xFFFFC000)
+#define HW_ATL_B0_TXD_CTL2_CTX_EN (0x00002000)
+#define HW_ATL_B0_TXD_CTL2_CTX_IDX (0x00001000)
+
+#define HW_ATL_B0_TXD_CTL_DESC_TYPE_TXD (0x00000001)
+#define HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC (0x00000002)
+#define HW_ATL_B0_TXD_CTL_BLEN (0x000FFFF0)
+#define HW_ATL_B0_TXD_CTL_DD (0x00100000)
+#define HW_ATL_B0_TXD_CTL_EOP (0x00200000)
+
+#define HW_ATL_B0_TXD_CTL_CMD_X (0x3FC00000)
+
+#define HW_ATL_B0_TXD_CTL_CMD_VLAN BIT(22)
+#define HW_ATL_B0_TXD_CTL_CMD_FCS BIT(23)
+#define HW_ATL_B0_TXD_CTL_CMD_IPCSO BIT(24)
+#define HW_ATL_B0_TXD_CTL_CMD_TUCSO BIT(25)
+#define HW_ATL_B0_TXD_CTL_CMD_LSO BIT(26)
+#define HW_ATL_B0_TXD_CTL_CMD_WB BIT(27)
+#define HW_ATL_B0_TXD_CTL_CMD_VXLAN BIT(28)
+
+#define HW_ATL_B0_TXD_CTL_CMD_IPV6 BIT(21)
+#define HW_ATL_B0_TXD_CTL_CMD_TCP BIT(22)
+
+#define HW_ATL_B0_MPI_CONTROL_ADR 0x0368U
+#define HW_ATL_B0_MPI_STATE_ADR 0x036CU
+
+#define HW_ATL_B0_MPI_SPEED_MSK 0xFFFFU
+#define HW_ATL_B0_MPI_SPEED_SHIFT 16U
+
+#define HW_ATL_B0_TXBUF_MAX 160U
+#define HW_ATL_B0_PTP_TXBUF_SIZE 8U
+
+#define HW_ATL_B0_RXBUF_MAX 320U
+#define HW_ATL_B0_PTP_RXBUF_SIZE 16U
+
+#define HW_ATL_B0_RSS_REDIRECTION_MAX 64U
+#define HW_ATL_B0_RSS_REDIRECTION_BITS 3U
+#define HW_ATL_B0_RSS_HASHKEY_BITS 320U
+
+#define HW_ATL_B0_TCRSS_4_8 1
+#define HW_ATL_B0_TC_MAX 8U
+#define HW_ATL_B0_RSS_MAX 8U
+
+#define HW_ATL_B0_LRO_RXD_MAX 16U
+#define HW_ATL_B0_RS_SLIP_ENABLED 0U
+
+/* (256K - 1 (max pay_len) - 54 (header)) */
+#define HAL_ATL_B0_LSO_MAX_SEGMENT_SIZE 262089U
+
+/* (256K - 1 (max pay_len) - 74 (header)) */
+#define HAL_ATL_B0_LSO_IPV6_MAX_SEGMENT_SIZE 262069U
+
+#define HW_ATL_B0_CHIP_REVISION_B0 0xA0U
+#define HW_ATL_B0_CHIP_REVISION_UNKNOWN 0xFFU
+
+#define HW_ATL_B0_FW_SEMA_RAM 0x2U
+
+#define HW_ATL_B0_TXC_LEN_TUNLEN (0x0000FF00)
+#define HW_ATL_B0_TXC_LEN_OUTLEN (0xFFFF0000)
+
+#define HW_ATL_B0_TXC_CTL_DESC_TYPE (0x00000007)
+#define HW_ATL_B0_TXC_CTL_CTX_ID (0x00000008)
+#define HW_ATL_B0_TXC_CTL_VLAN (0x000FFFF0)
+#define HW_ATL_B0_TXC_CTL_CMD (0x00F00000)
+#define HW_ATL_B0_TXC_CTL_L2LEN (0x7F000000)
+
+#define HW_ATL_B0_TXC_CTL_L3LEN (0x80000000) /* L3LEN lsb */
+#define HW_ATL_B0_TXC_LEN2_L3LEN (0x000000FF) /* L3LEN upper bits */
+#define HW_ATL_B0_TXC_LEN2_L4LEN (0x0000FF00)
+#define HW_ATL_B0_TXC_LEN2_MSSLEN (0xFFFF0000)
+
+#define HW_ATL_B0_RXD_DD (0x1)
+#define HW_ATL_B0_RXD_NCEA0 (0x1)
+
+#define HW_ATL_B0_RXD_WB_STAT_RSSTYPE (0x0000000F)
+#define HW_ATL_B0_RXD_WB_STAT_RSSTYPE_SHIFT (0x0)
+#define HW_ATL_B0_RXD_WB_STAT_PKTTYPE (0x00000FF0)
+#define HW_ATL_B0_RXD_WB_STAT_PKTTYPE_SHIFT (0x4)
+#define HW_ATL_B0_RXD_WB_STAT_RXCTRL (0x00180000)
+#define HW_ATL_B0_RXD_WB_STAT_RXCTRL_SHIFT (0x13)
+#define HW_ATL_B0_RXD_WB_STAT_SPLHDR (0x00200000)
+#define HW_ATL_B0_RXD_WB_STAT_HDRLEN (0xFFC00000)
+#define HW_ATL_B0_RXD_WB_STAT_HDRLEN_SHIFT (0x16)
+
+#define HW_ATL_B0_RXD_WB_PKTTYPE_VLAN BIT(5)
+#define HW_ATL_B0_RXD_WB_PKTTYPE_VLAN_DOUBLE BIT(6)
+
+#define HW_ATL_B0_RXD_WB_STAT2_DD (0x0001)
+#define HW_ATL_B0_RXD_WB_STAT2_EOP (0x0002)
+#define HW_ATL_B0_RXD_WB_STAT2_RXSTAT (0x003C)
+#define HW_ATL_B0_RXD_WB_STAT2_MACERR (0x0004)
+#define HW_ATL_B0_RXD_WB_STAT2_IP4ERR (0x0008)
+#define HW_ATL_B0_RXD_WB_STAT2_TCPUPDERR (0x0010)
+#define HW_ATL_B0_RXD_WB_STAT2_RXESTAT (0x0FC0)
+#define HW_ATL_B0_RXD_WB_STAT2_RSCCNT (0xF000)
+
+#define L2_FILTER_ACTION_DISCARD (0x0)
+#define L2_FILTER_ACTION_HOST (0x1)
+
+#define HW_ATL_B0_UCP_0X370_REG (0x370)
+
+#define HW_ATL_B0_FLUSH() AQ_HW_READ_REG(self, 0x10)
+
+#define HW_ATL_B0_FW_VER_EXPECTED 0x01050006U
+
+#define HW_ATL_INTR_MODER_MAX 0x1FF
+#define HW_ATL_INTR_MODER_MIN 0xFF
+
+#define HW_ATL_B0_MIN_RXD \
+ (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_RXD_MULTIPLE))
+#define HW_ATL_B0_MIN_TXD \
+ (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_TXD_MULTIPLE))
+
+#define HW_ATL_B0_MAX_RXD 8184U
+#define HW_ATL_B0_MAX_TXD 8184U
+
+#define HW_ATL_RSS_DISABLED 0x00000000U
+#define HW_ATL_RSS_ENABLED_8TCS_2INDEX_BITS 0xA2222222U
+#define HW_ATL_RSS_ENABLED_4TCS_3INDEX_BITS 0x80003333U
+
+/* HW layer capabilities */
+
+#endif /* HW_ATL_B0_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
new file mode 100644
index 000000000..7b67bdd8a
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
@@ -0,0 +1,1853 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
+ */
+
+/* File hw_atl_llh.c: Definitions of bitfield and register access functions for
+ * Atlantic registers.
+ */
+
+#include "hw_atl_llh.h"
+#include "hw_atl_llh_internal.h"
+#include "../aq_hw_utils.h"
+
+void hw_atl_ts_reset_set(struct aq_hw_s *aq_hw, u32 val)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TS_RESET_ADR,
+ HW_ATL_TS_RESET_MSK,
+ HW_ATL_TS_RESET_SHIFT,
+ val);
+}
+
+void hw_atl_ts_power_down_set(struct aq_hw_s *aq_hw, u32 val)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TS_POWER_DOWN_ADR,
+ HW_ATL_TS_POWER_DOWN_MSK,
+ HW_ATL_TS_POWER_DOWN_SHIFT,
+ val);
+}
+
+u32 hw_atl_ts_power_down_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_TS_POWER_DOWN_ADR,
+ HW_ATL_TS_POWER_DOWN_MSK,
+ HW_ATL_TS_POWER_DOWN_SHIFT);
+}
+
+u32 hw_atl_ts_ready_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_TS_READY_ADR,
+ HW_ATL_TS_READY_MSK,
+ HW_ATL_TS_READY_SHIFT);
+}
+
+u32 hw_atl_ts_ready_latch_high_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_TS_READY_LATCH_HIGH_ADR,
+ HW_ATL_TS_READY_LATCH_HIGH_MSK,
+ HW_ATL_TS_READY_LATCH_HIGH_SHIFT);
+}
+
+u32 hw_atl_ts_data_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_TS_DATA_OUT_ADR,
+ HW_ATL_TS_DATA_OUT_MSK,
+ HW_ATL_TS_DATA_OUT_SHIFT);
+}
+
+/* global */
+void hw_atl_reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem,
+ u32 semaphore)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_GLB_CPU_SEM_ADR(semaphore), glb_cpu_sem);
+}
+
+u32 hw_atl_reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_GLB_CPU_SEM_ADR(semaphore));
+}
+
+void hw_atl_glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_GLB_REG_RES_DIS_ADR,
+ HW_ATL_GLB_REG_RES_DIS_MSK,
+ HW_ATL_GLB_REG_RES_DIS_SHIFT,
+ glb_reg_res_dis);
+}
+
+void hw_atl_glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_GLB_SOFT_RES_ADR,
+ HW_ATL_GLB_SOFT_RES_MSK,
+ HW_ATL_GLB_SOFT_RES_SHIFT, soft_res);
+}
+
+u32 hw_atl_glb_soft_res_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_GLB_SOFT_RES_ADR,
+ HW_ATL_GLB_SOFT_RES_MSK,
+ HW_ATL_GLB_SOFT_RES_SHIFT);
+}
+
+u32 hw_atl_reg_glb_mif_id_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_GLB_MIF_ID_ADR);
+}
+
+/* stats */
+u32 hw_atl_rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_RPB_RX_DMA_DROP_PKT_CNT_ADR);
+}
+
+u64 hw_atl_stats_rx_dma_good_octet_counter_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg64(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERLSW);
+}
+
+u64 hw_atl_stats_rx_dma_good_pkt_counter_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg64(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERLSW);
+}
+
+u64 hw_atl_stats_tx_dma_good_octet_counter_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg64(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERLSW);
+}
+
+u64 hw_atl_stats_tx_dma_good_pkt_counter_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg64(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERLSW);
+}
+
+/* interrupt */
+void hw_atl_itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw,
+ u32 irq_auto_masklsw)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_ITR_IAMRLSW_ADR, irq_auto_masklsw);
+}
+
+void hw_atl_itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx,
+ u32 rx)
+{
+/* register address for bitfield imr_rx{r}_en */
+ static u32 itr_imr_rxren_adr[32] = {
+ 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
+ 0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU,
+ 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
+ 0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU,
+ 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
+ 0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU,
+ 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
+ 0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU
+ };
+
+/* bitmask for bitfield imr_rx{r}_en */
+ static u32 itr_imr_rxren_msk[32] = {
+ 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+ 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+ 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+ 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+ 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+ 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+ 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+ 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U
+ };
+
+/* lower bit position of bitfield imr_rx{r}_en */
+ static u32 itr_imr_rxren_shift[32] = {
+ 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U,
+ 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U,
+ 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U,
+ 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U
+ };
+
+ aq_hw_write_reg_bit(aq_hw, itr_imr_rxren_adr[rx],
+ itr_imr_rxren_msk[rx],
+ itr_imr_rxren_shift[rx],
+ irq_map_en_rx);
+}
+
+void hw_atl_itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx,
+ u32 tx)
+{
+/* register address for bitfield imr_tx{t}_en */
+ static u32 itr_imr_txten_adr[32] = {
+ 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
+ 0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU,
+ 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
+ 0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU,
+ 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
+ 0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU,
+ 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
+ 0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU
+ };
+
+/* bitmask for bitfield imr_tx{t}_en */
+ static u32 itr_imr_txten_msk[32] = {
+ 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+ 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+ 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+ 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+ 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+ 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+ 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+ 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U
+ };
+
+/* lower bit position of bitfield imr_tx{t}_en */
+ static u32 itr_imr_txten_shift[32] = {
+ 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U,
+ 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U,
+ 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U,
+ 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U
+ };
+
+ aq_hw_write_reg_bit(aq_hw, itr_imr_txten_adr[tx],
+ itr_imr_txten_msk[tx],
+ itr_imr_txten_shift[tx],
+ irq_map_en_tx);
+}
+
+void hw_atl_itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx)
+{
+/* register address for bitfield imr_rx{r}[4:0] */
+ static u32 itr_imr_rxr_adr[32] = {
+ 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
+ 0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU,
+ 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
+ 0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU,
+ 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
+ 0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU,
+ 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
+ 0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU
+ };
+
+/* bitmask for bitfield imr_rx{r}[4:0] */
+ static u32 itr_imr_rxr_msk[32] = {
+ 0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU
+ };
+
+/* lower bit position of bitfield imr_rx{r}[4:0] */
+ static u32 itr_imr_rxr_shift[32] = {
+ 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U,
+ 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U,
+ 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U,
+ 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U
+ };
+
+ aq_hw_write_reg_bit(aq_hw, itr_imr_rxr_adr[rx],
+ itr_imr_rxr_msk[rx],
+ itr_imr_rxr_shift[rx],
+ irq_map_rx);
+}
+
+void hw_atl_itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx)
+{
+/* register address for bitfield imr_tx{t}[4:0] */
+ static u32 itr_imr_txt_adr[32] = {
+ 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
+ 0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU,
+ 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
+ 0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU,
+ 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
+ 0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU,
+ 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
+ 0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU
+ };
+
+/* bitmask for bitfield imr_tx{t}[4:0] */
+ static u32 itr_imr_txt_msk[32] = {
+ 0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U
+ };
+
+/* lower bit position of bitfield imr_tx{t}[4:0] */
+ static u32 itr_imr_txt_shift[32] = {
+ 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U,
+ 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U,
+ 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U,
+ 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U
+ };
+
+ aq_hw_write_reg_bit(aq_hw, itr_imr_txt_adr[tx],
+ itr_imr_txt_msk[tx],
+ itr_imr_txt_shift[tx],
+ irq_map_tx);
+}
+
+void hw_atl_itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw,
+ u32 irq_msk_clearlsw)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_ITR_IMCRLSW_ADR, irq_msk_clearlsw);
+}
+
+void hw_atl_itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_ITR_IMSRLSW_ADR, irq_msk_setlsw);
+}
+
+void hw_atl_itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_ITR_REG_RES_DSBL_ADR,
+ HW_ATL_ITR_REG_RES_DSBL_MSK,
+ HW_ATL_ITR_REG_RES_DSBL_SHIFT, irq_reg_res_dis);
+}
+
+void hw_atl_itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
+ u32 irq_status_clearlsw)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_ITR_ISCRLSW_ADR, irq_status_clearlsw);
+}
+
+u32 hw_atl_itr_irq_statuslsw_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_ITR_ISRLSW_ADR);
+}
+
+u32 hw_atl_itr_res_irq_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_ITR_RES_ADR, HW_ATL_ITR_RES_MSK,
+ HW_ATL_ITR_RES_SHIFT);
+}
+
+void hw_atl_itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_ITR_RES_ADR, HW_ATL_ITR_RES_MSK,
+ HW_ATL_ITR_RES_SHIFT, res_irq);
+}
+
+/* set RSC interrupt */
+void hw_atl_itr_rsc_en_set(struct aq_hw_s *aq_hw, u32 enable)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_ITR_RSC_EN_ADR, enable);
+}
+
+/* set RSC delay */
+void hw_atl_itr_rsc_delay_set(struct aq_hw_s *aq_hw, u32 delay)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_ITR_RSC_DELAY_ADR,
+ HW_ATL_ITR_RSC_DELAY_MSK,
+ HW_ATL_ITR_RSC_DELAY_SHIFT,
+ delay);
+}
+
+/* rdm */
+void hw_atl_rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADCPUID_ADR(dca),
+ HW_ATL_RDM_DCADCPUID_MSK,
+ HW_ATL_RDM_DCADCPUID_SHIFT, cpuid);
+}
+
+void hw_atl_rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCA_EN_ADR, HW_ATL_RDM_DCA_EN_MSK,
+ HW_ATL_RDM_DCA_EN_SHIFT, rx_dca_en);
+}
+
+void hw_atl_rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCA_MODE_ADR,
+ HW_ATL_RDM_DCA_MODE_MSK,
+ HW_ATL_RDM_DCA_MODE_SHIFT, rx_dca_mode);
+}
+
+void hw_atl_rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_data_buff_size,
+ u32 descriptor)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDDATA_SIZE_ADR(descriptor),
+ HW_ATL_RDM_DESCDDATA_SIZE_MSK,
+ HW_ATL_RDM_DESCDDATA_SIZE_SHIFT,
+ rx_desc_data_buff_size);
+}
+
+void hw_atl_rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en,
+ u32 dca)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADDESC_EN_ADR(dca),
+ HW_ATL_RDM_DCADDESC_EN_MSK,
+ HW_ATL_RDM_DCADDESC_EN_SHIFT,
+ rx_desc_dca_en);
+}
+
+void hw_atl_rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en,
+ u32 descriptor)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDEN_ADR(descriptor),
+ HW_ATL_RDM_DESCDEN_MSK,
+ HW_ATL_RDM_DESCDEN_SHIFT,
+ rx_desc_en);
+}
+
+void hw_atl_rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_head_buff_size,
+ u32 descriptor)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDHDR_SIZE_ADR(descriptor),
+ HW_ATL_RDM_DESCDHDR_SIZE_MSK,
+ HW_ATL_RDM_DESCDHDR_SIZE_SHIFT,
+ rx_desc_head_buff_size);
+}
+
+void hw_atl_rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_head_splitting,
+ u32 descriptor)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDHDR_SPLIT_ADR(descriptor),
+ HW_ATL_RDM_DESCDHDR_SPLIT_MSK,
+ HW_ATL_RDM_DESCDHDR_SPLIT_SHIFT,
+ rx_desc_head_splitting);
+}
+
+u32 hw_atl_rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_RDM_DESCDHD_ADR(descriptor),
+ HW_ATL_RDM_DESCDHD_MSK,
+ HW_ATL_RDM_DESCDHD_SHIFT);
+}
+
+void hw_atl_rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len,
+ u32 descriptor)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDLEN_ADR(descriptor),
+ HW_ATL_RDM_DESCDLEN_MSK, HW_ATL_RDM_DESCDLEN_SHIFT,
+ rx_desc_len);
+}
+
+void hw_atl_rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res,
+ u32 descriptor)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDRESET_ADR(descriptor),
+ HW_ATL_RDM_DESCDRESET_MSK,
+ HW_ATL_RDM_DESCDRESET_SHIFT,
+ rx_desc_res);
+}
+
+void hw_atl_rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_wr_wb_irq_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_INT_DESC_WRB_EN_ADR,
+ HW_ATL_RDM_INT_DESC_WRB_EN_MSK,
+ HW_ATL_RDM_INT_DESC_WRB_EN_SHIFT,
+ rx_desc_wr_wb_irq_en);
+}
+
+void hw_atl_rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en,
+ u32 dca)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADHDR_EN_ADR(dca),
+ HW_ATL_RDM_DCADHDR_EN_MSK,
+ HW_ATL_RDM_DCADHDR_EN_SHIFT,
+ rx_head_dca_en);
+}
+
+void hw_atl_rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en,
+ u32 dca)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADPAY_EN_ADR(dca),
+ HW_ATL_RDM_DCADPAY_EN_MSK,
+ HW_ATL_RDM_DCADPAY_EN_SHIFT,
+ rx_pld_dca_en);
+}
+
+void hw_atl_rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
+ u32 rdm_intr_moder_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_INT_RIM_EN_ADR,
+ HW_ATL_RDM_INT_RIM_EN_MSK,
+ HW_ATL_RDM_INT_RIM_EN_SHIFT,
+ rdm_intr_moder_en);
+}
+
+/* reg */
+void hw_atl_reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map,
+ u32 regidx)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_GEN_INTR_MAP_ADR(regidx), gen_intr_map);
+}
+
+u32 hw_atl_reg_gen_irq_status_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_GEN_INTR_STAT_ADR);
+}
+
+void hw_atl_reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_INTR_GLB_CTL_ADR, intr_glb_ctl);
+}
+
+void hw_atl_reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_INTR_THR_ADR(throttle), intr_thr);
+}
+
+void hw_atl_reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_base_addrlsw,
+ u32 descriptor)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor),
+ rx_dma_desc_base_addrlsw);
+}
+
+void hw_atl_reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_base_addrmsw,
+ u32 descriptor)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor),
+ rx_dma_desc_base_addrmsw);
+}
+
+u32 hw_atl_reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_RX_DMA_DESC_STAT_ADR(descriptor));
+}
+
+void hw_atl_reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_tail_ptr,
+ u32 descriptor)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_DMA_DESC_TAIL_PTR_ADR(descriptor),
+ rx_dma_desc_tail_ptr);
+}
+
+void hw_atl_reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw,
+ u32 rx_flr_mcst_flr_msk)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_MCST_FLR_MSK_ADR,
+ rx_flr_mcst_flr_msk);
+}
+
+void hw_atl_reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
+ u32 filter)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_MCST_FLR_ADR(filter),
+ rx_flr_mcst_flr);
+}
+
+void hw_atl_reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw,
+ u32 rx_flr_rss_control1)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_RSS_CONTROL1_ADR,
+ rx_flr_rss_control1);
+}
+
+void hw_atl_reg_rx_flr_control2_set(struct aq_hw_s *aq_hw,
+ u32 rx_filter_control2)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_CONTROL2_ADR, rx_filter_control2);
+}
+
+void hw_atl_reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+ u32 rx_intr_moderation_ctl,
+ u32 queue)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_INTR_MODERATION_CTL_ADR(queue),
+ rx_intr_moderation_ctl);
+}
+
+void hw_atl_reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_debug_ctl)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DEBUG_CTL_ADR, tx_dma_debug_ctl);
+}
+
+void hw_atl_reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_base_addrlsw,
+ u32 descriptor)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor),
+ tx_dma_desc_base_addrlsw);
+}
+
+void hw_atl_reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_base_addrmsw,
+ u32 descriptor)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor),
+ tx_dma_desc_base_addrmsw);
+}
+
+void hw_atl_reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_tail_ptr,
+ u32 descriptor)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DESC_TAIL_PTR_ADR(descriptor),
+ tx_dma_desc_tail_ptr);
+}
+
+void hw_atl_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+ u32 tx_intr_moderation_ctl,
+ u32 queue)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_TX_INTR_MODERATION_CTL_ADR(queue),
+ tx_intr_moderation_ctl);
+}
+
+/* RPB: rx packet buffer */
+void hw_atl_rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_DMA_SYS_LBK_ADR,
+ HW_ATL_RPB_DMA_SYS_LBK_MSK,
+ HW_ATL_RPB_DMA_SYS_LBK_SHIFT, dma_sys_lbk);
+}
+
+void hw_atl_rpb_dma_net_lbk_set(struct aq_hw_s *aq_hw, u32 dma_net_lbk)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_DMA_NET_LBK_ADR,
+ HW_ATL_RPB_DMA_NET_LBK_MSK,
+ HW_ATL_RPB_DMA_NET_LBK_SHIFT, dma_net_lbk);
+}
+
+void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
+ u32 rx_traf_class_mode)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RPF_RX_TC_MODE_ADR,
+ HW_ATL_RPB_RPF_RX_TC_MODE_MSK,
+ HW_ATL_RPB_RPF_RX_TC_MODE_SHIFT,
+ rx_traf_class_mode);
+}
+
+u32 hw_atl_rpb_rpf_rx_traf_class_mode_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_RPB_RPF_RX_TC_MODE_ADR,
+ HW_ATL_RPB_RPF_RX_TC_MODE_MSK,
+ HW_ATL_RPB_RPF_RX_TC_MODE_SHIFT);
+}
+
+void hw_atl_rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RX_BUF_EN_ADR,
+ HW_ATL_RPB_RX_BUF_EN_MSK,
+ HW_ATL_RPB_RX_BUF_EN_SHIFT, rx_buff_en);
+}
+
+void hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_buff_hi_threshold_per_tc,
+ u32 buffer)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBHI_THRESH_ADR(buffer),
+ HW_ATL_RPB_RXBHI_THRESH_MSK,
+ HW_ATL_RPB_RXBHI_THRESH_SHIFT,
+ rx_buff_hi_threshold_per_tc);
+}
+
+void hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_buff_lo_threshold_per_tc,
+ u32 buffer)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBLO_THRESH_ADR(buffer),
+ HW_ATL_RPB_RXBLO_THRESH_MSK,
+ HW_ATL_RPB_RXBLO_THRESH_SHIFT,
+ rx_buff_lo_threshold_per_tc);
+}
+
+void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RX_FC_MODE_ADR,
+ HW_ATL_RPB_RX_FC_MODE_MSK,
+ HW_ATL_RPB_RX_FC_MODE_SHIFT, rx_flow_ctl_mode);
+}
+
+void hw_atl_rdm_rx_dma_desc_cache_init_tgl(struct aq_hw_s *aq_hw)
+{
+ u32 val;
+
+ val = aq_hw_read_reg_bit(aq_hw, HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR,
+ HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK,
+ HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT);
+
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR,
+ HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK,
+ HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT,
+ val ^ 1);
+}
+
+u32 hw_atl_rdm_rx_dma_desc_cache_init_done_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, RDM_RX_DMA_DESC_CACHE_INIT_DONE_ADR,
+ RDM_RX_DMA_DESC_CACHE_INIT_DONE_MSK,
+ RDM_RX_DMA_DESC_CACHE_INIT_DONE_SHIFT);
+}
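+
+/* Editor's sketch (not part of this patch): the two helpers above pair up as
+ * toggle-then-poll -- flip the init request and wait for the "done" flag.
+ * A minimal illustration, assuming "done" reads 1 once the descriptor cache
+ * has been re-initialized and the polling budget is acceptable:
+ */
+#if 0 /* illustrative only */
+static int hw_atl_rx_desc_cache_reinit_example(struct aq_hw_s *aq_hw)
+{
+ int budget = 1000;
+
+ hw_atl_rdm_rx_dma_desc_cache_init_tgl(aq_hw);
+ while (!hw_atl_rdm_rx_dma_desc_cache_init_done_get(aq_hw) && --budget)
+ udelay(10);
+
+ return budget ? 0 : -ETIME;
+}
+#endif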
+
+void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_pkt_buff_size_per_tc, u32 buffer)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBBUF_SIZE_ADR(buffer),
+ HW_ATL_RPB_RXBBUF_SIZE_MSK,
+ HW_ATL_RPB_RXBBUF_SIZE_SHIFT,
+ rx_pkt_buff_size_per_tc);
+}
+
+void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_xoff_en_per_tc, u32 buffer)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBXOFF_EN_ADR(buffer),
+ HW_ATL_RPB_RXBXOFF_EN_MSK,
+ HW_ATL_RPB_RXBXOFF_EN_SHIFT,
+ rx_xoff_en_per_tc);
+}
+
+/* rpf */
+
+void hw_atl_rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
+ u32 l2broadcast_count_threshold)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2BC_THRESH_ADR,
+ HW_ATL_RPFL2BC_THRESH_MSK,
+ HW_ATL_RPFL2BC_THRESH_SHIFT,
+ l2broadcast_count_threshold);
+}
+
+void hw_atl_rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2BC_EN_ADR, HW_ATL_RPFL2BC_EN_MSK,
+ HW_ATL_RPFL2BC_EN_SHIFT, l2broadcast_en);
+}
+
+void hw_atl_rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw,
+ u32 l2broadcast_flr_act)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2BC_ACT_ADR,
+ HW_ATL_RPFL2BC_ACT_MSK,
+ HW_ATL_RPFL2BC_ACT_SHIFT, l2broadcast_flr_act);
+}
+
+void hw_atl_rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw,
+ u32 l2multicast_flr_en,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2MC_ENF_ADR(filter),
+ HW_ATL_RPFL2MC_ENF_MSK,
+ HW_ATL_RPFL2MC_ENF_SHIFT, l2multicast_flr_en);
+}
+
+u32 hw_atl_rpfl2promiscuous_mode_en_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_RPFL2PROMIS_MODE_ADR,
+ HW_ATL_RPFL2PROMIS_MODE_MSK,
+ HW_ATL_RPFL2PROMIS_MODE_SHIFT);
+}
+
+void hw_atl_rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
+ u32 l2promiscuous_mode_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2PROMIS_MODE_ADR,
+ HW_ATL_RPFL2PROMIS_MODE_MSK,
+ HW_ATL_RPFL2PROMIS_MODE_SHIFT,
+ l2promiscuous_mode_en);
+}
+
+void hw_atl_rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_flr_act,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2UC_ACTF_ADR(filter),
+ HW_ATL_RPFL2UC_ACTF_MSK, HW_ATL_RPFL2UC_ACTF_SHIFT,
+ l2unicast_flr_act);
+}
+
+void hw_atl_rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2UC_ENF_ADR(filter),
+ HW_ATL_RPFL2UC_ENF_MSK,
+ HW_ATL_RPFL2UC_ENF_SHIFT, l2unicast_flr_en);
+}
+
+void hw_atl_rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_dest_addresslsw,
+ u32 filter)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RPFL2UC_DAFLSW_ADR(filter),
+ l2unicast_dest_addresslsw);
+}
+
+void hw_atl_rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_dest_addressmsw,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2UC_DAFMSW_ADR(filter),
+ HW_ATL_RPFL2UC_DAFMSW_MSK,
+ HW_ATL_RPFL2UC_DAFMSW_SHIFT,
+ l2unicast_dest_addressmsw);
+}
+
+void hw_atl_rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
+ u32 l2_accept_all_mc_packets)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2MC_ACCEPT_ALL_ADR,
+ HW_ATL_RPFL2MC_ACCEPT_ALL_MSK,
+ HW_ATL_RPFL2MC_ACCEPT_ALL_SHIFT,
+ l2_accept_all_mc_packets);
+}
+
+void hw_atl_rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
+ u32 user_priority, u32 tc)
+{
+/* register address for bitfield rx_tc_up{t}[2:0] */
+ static u32 rpf_rpb_rx_tc_upt_adr[8] = {
+ 0x000054c4U, 0x000054C4U, 0x000054C4U, 0x000054C4U,
+ 0x000054c4U, 0x000054C4U, 0x000054C4U, 0x000054C4U
+ };
+
+/* bitmask for bitfield rx_tc_up{t}[2:0] */
+ static u32 rpf_rpb_rx_tc_upt_msk[8] = {
+ 0x00000007U, 0x00000070U, 0x00000700U, 0x00007000U,
+ 0x00070000U, 0x00700000U, 0x07000000U, 0x70000000U
+ };
+
+/* lower bit position of bitfield rx_tc_up{t}[2:0] */
+ static u32 rpf_rpb_rx_tc_upt_shft[8] = {
+ 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U
+ };
+
+ aq_hw_write_reg_bit(aq_hw, rpf_rpb_rx_tc_upt_adr[user_priority],
+ rpf_rpb_rx_tc_upt_msk[user_priority],
+ rpf_rpb_rx_tc_upt_shft[user_priority], tc);
+}
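+
+/* Editor's note: the three lookup tables above describe a single packed
+ * register -- all eight 3-bit rx_tc_up fields live in 0x54c4, each at
+ * shift 4 * user_priority.  E.g. user_priority = 3 selects mask 0x00007000
+ * and shift 12, i.e. bits [14:12].
+ */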
+
+void hw_atl_rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_KEY_ADDR_ADR,
+ HW_ATL_RPF_RSS_KEY_ADDR_MSK,
+ HW_ATL_RPF_RSS_KEY_ADDR_SHIFT,
+ rss_key_addr);
+}
+
+void hw_atl_rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RPF_RSS_KEY_WR_DATA_ADR,
+ rss_key_wr_data);
+}
+
+u32 hw_atl_rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_RPF_RSS_KEY_WR_ENI_ADR,
+ HW_ATL_RPF_RSS_KEY_WR_ENI_MSK,
+ HW_ATL_RPF_RSS_KEY_WR_ENI_SHIFT);
+}
+
+void hw_atl_rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_KEY_WR_ENI_ADR,
+ HW_ATL_RPF_RSS_KEY_WR_ENI_MSK,
+ HW_ATL_RPF_RSS_KEY_WR_ENI_SHIFT,
+ rss_key_wr_en);
+}
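+
+/* Editor's sketch (not part of this patch): programming one 32-bit word of
+ * the RSS key combines the accessors above -- load the data, select the word
+ * address, strobe write-enable, then poll it back to zero.  Illustrative
+ * only; the polling budget is an assumption:
+ */
+#if 0 /* illustrative only */
+static int hw_atl_rss_key_word_wr_example(struct aq_hw_s *aq_hw, u32 addr,
+ u32 key_word)
+{
+ int budget = 1000;
+
+ hw_atl_rpf_rss_key_wr_data_set(aq_hw, key_word);
+ hw_atl_rpf_rss_key_addr_set(aq_hw, addr);
+ hw_atl_rpf_rss_key_wr_en_set(aq_hw, 1U);
+ while (hw_atl_rpf_rss_key_wr_en_get(aq_hw) && --budget)
+ udelay(10);
+
+ return budget ? 0 : -ETIME;
+}
+#endif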
+
+void hw_atl_rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw,
+ u32 rss_redir_tbl_addr)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_ADDR_ADR,
+ HW_ATL_RPF_RSS_REDIR_ADDR_MSK,
+ HW_ATL_RPF_RSS_REDIR_ADDR_SHIFT,
+ rss_redir_tbl_addr);
+}
+
+void hw_atl_rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
+ u32 rss_redir_tbl_wr_data)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_WR_DATA_ADR,
+ HW_ATL_RPF_RSS_REDIR_WR_DATA_MSK,
+ HW_ATL_RPF_RSS_REDIR_WR_DATA_SHIFT,
+ rss_redir_tbl_wr_data);
+}
+
+u32 hw_atl_rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_WR_ENI_ADR,
+ HW_ATL_RPF_RSS_REDIR_WR_ENI_MSK,
+ HW_ATL_RPF_RSS_REDIR_WR_ENI_SHIFT);
+}
+
+void hw_atl_rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_WR_ENI_ADR,
+ HW_ATL_RPF_RSS_REDIR_WR_ENI_MSK,
+ HW_ATL_RPF_RSS_REDIR_WR_ENI_SHIFT, rss_redir_wr_en);
+}
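+
+/* Editor's note: the redirection-table accessors above follow the same
+ * data/address/write-enable/poll sequence sketched for the RSS key.
+ */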
+
+void hw_atl_rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw,
+ u32 tpo_to_rpf_sys_lbk)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_TPO_RPF_SYS_LBK_ADR,
+ HW_ATL_RPF_TPO_RPF_SYS_LBK_MSK,
+ HW_ATL_RPF_TPO_RPF_SYS_LBK_SHIFT,
+ tpo_to_rpf_sys_lbk);
+}
+
+void hw_atl_rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_INNER_TPID_ADR,
+ HW_ATL_RPF_VL_INNER_TPID_MSK,
+ HW_ATL_RPF_VL_INNER_TPID_SHIFT,
+ vlan_inner_etht);
+}
+
+void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_OUTER_TPID_ADR,
+ HW_ATL_RPF_VL_OUTER_TPID_MSK,
+ HW_ATL_RPF_VL_OUTER_TPID_SHIFT,
+ vlan_outer_etht);
+}
+
+void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw,
+ u32 vlan_prom_mode_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_PROMIS_MODE_ADR,
+ HW_ATL_RPF_VL_PROMIS_MODE_MSK,
+ HW_ATL_RPF_VL_PROMIS_MODE_SHIFT,
+ vlan_prom_mode_en);
+}
+
+u32 hw_atl_rpf_vlan_prom_mode_en_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_RPF_VL_PROMIS_MODE_ADR,
+ HW_ATL_RPF_VL_PROMIS_MODE_MSK,
+ HW_ATL_RPF_VL_PROMIS_MODE_SHIFT);
+}
+
+void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
+ u32 vlan_acc_untagged_packets)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_ADR,
+ HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_MSK,
+ HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_SHIFT,
+ vlan_acc_untagged_packets);
+}
+
+void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw,
+ u32 vlan_untagged_act)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_UNTAGGED_ACT_ADR,
+ HW_ATL_RPF_VL_UNTAGGED_ACT_MSK,
+ HW_ATL_RPF_VL_UNTAGGED_ACT_SHIFT,
+ vlan_untagged_act);
+}
+
+void hw_atl_rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_EN_F_ADR(filter),
+ HW_ATL_RPF_VL_EN_F_MSK,
+ HW_ATL_RPF_VL_EN_F_SHIFT,
+ vlan_flr_en);
+}
+
+void hw_atl_rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_flr_act,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ACT_F_ADR(filter),
+ HW_ATL_RPF_VL_ACT_F_MSK,
+ HW_ATL_RPF_VL_ACT_F_SHIFT,
+ vlan_flr_act);
+}
+
+void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ID_F_ADR(filter),
+ HW_ATL_RPF_VL_ID_F_MSK,
+ HW_ATL_RPF_VL_ID_F_SHIFT,
+ vlan_id_flr);
+}
+
+void hw_atl_rpf_vlan_rxq_en_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq_en,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_RXQ_EN_F_ADR(filter),
+ HW_ATL_RPF_VL_RXQ_EN_F_MSK,
+ HW_ATL_RPF_VL_RXQ_EN_F_SHIFT,
+ vlan_rxq_en);
+}
+
+void hw_atl_rpf_vlan_rxq_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_RXQ_F_ADR(filter),
+ HW_ATL_RPF_VL_RXQ_F_MSK,
+ HW_ATL_RPF_VL_RXQ_F_SHIFT,
+ vlan_rxq);
+}
+
+void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_ENF_ADR(filter),
+ HW_ATL_RPF_ET_ENF_MSK,
+ HW_ATL_RPF_ET_ENF_SHIFT, etht_flr_en);
+}
+
+void hw_atl_rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
+ u32 etht_user_priority_en, u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_UPFEN_ADR(filter),
+ HW_ATL_RPF_ET_UPFEN_MSK, HW_ATL_RPF_ET_UPFEN_SHIFT,
+ etht_user_priority_en);
+}
+
+void hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw,
+ u32 etht_rx_queue_en,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_RXQFEN_ADR(filter),
+ HW_ATL_RPF_ET_RXQFEN_MSK,
+ HW_ATL_RPF_ET_RXQFEN_SHIFT,
+ etht_rx_queue_en);
+}
+
+void hw_atl_rpf_etht_user_priority_set(struct aq_hw_s *aq_hw,
+ u32 etht_user_priority,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_UPF_ADR(filter),
+ HW_ATL_RPF_ET_UPF_MSK,
+ HW_ATL_RPF_ET_UPF_SHIFT, etht_user_priority);
+}
+
+void hw_atl_rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_RXQF_ADR(filter),
+ HW_ATL_RPF_ET_RXQF_MSK,
+ HW_ATL_RPF_ET_RXQF_SHIFT, etht_rx_queue);
+}
+
+void hw_atl_rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_MNG_RXQF_ADR(filter),
+ HW_ATL_RPF_ET_MNG_RXQF_MSK,
+ HW_ATL_RPF_ET_MNG_RXQF_SHIFT,
+ etht_mgt_queue);
+}
+
+void hw_atl_rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_ACTF_ADR(filter),
+ HW_ATL_RPF_ET_ACTF_MSK,
+ HW_ATL_RPF_ET_ACTF_SHIFT, etht_flr_act);
+}
+
+void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_VALF_ADR(filter),
+ HW_ATL_RPF_ET_VALF_MSK,
+ HW_ATL_RPF_ET_VALF_SHIFT, etht_flr);
+}
+
+void hw_atl_rpf_l4_spd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L4_SPD_ADR(filter),
+ HW_ATL_RPF_L4_SPD_MSK,
+ HW_ATL_RPF_L4_SPD_SHIFT, val);
+}
+
+void hw_atl_rpf_l4_dpd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L4_DPD_ADR(filter),
+ HW_ATL_RPF_L4_DPD_MSK,
+ HW_ATL_RPF_L4_DPD_SHIFT, val);
+}
+
+/* RPO: rx packet offload */
+void hw_atl_rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 ipv4header_crc_offload_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_IPV4CHK_EN_ADR,
+ HW_ATL_RPO_IPV4CHK_EN_MSK,
+ HW_ATL_RPO_IPV4CHK_EN_SHIFT,
+ ipv4header_crc_offload_en);
+}
+
+void hw_atl_rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_vlan_stripping,
+ u32 descriptor)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_DESCDVL_STRIP_ADR(descriptor),
+ HW_ATL_RPO_DESCDVL_STRIP_MSK,
+ HW_ATL_RPO_DESCDVL_STRIP_SHIFT,
+ rx_desc_vlan_stripping);
+}
+
+void hw_atl_rpo_outer_vlan_tag_mode_set(void *context,
+ u32 outervlantagmode)
+{
+ aq_hw_write_reg_bit(context, HW_ATL_RPO_OUTER_VL_INS_MODE_ADR,
+ HW_ATL_RPO_OUTER_VL_INS_MODE_MSK,
+ HW_ATL_RPO_OUTER_VL_INS_MODE_SHIFT,
+ outervlantagmode);
+}
+
+u32 hw_atl_rpo_outer_vlan_tag_mode_get(void *context)
+{
+ return aq_hw_read_reg_bit(context, HW_ATL_RPO_OUTER_VL_INS_MODE_ADR,
+ HW_ATL_RPO_OUTER_VL_INS_MODE_MSK,
+ HW_ATL_RPO_OUTER_VL_INS_MODE_SHIFT);
+}
+
+void hw_atl_rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 tcp_udp_crc_offload_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPOL4CHK_EN_ADR,
+ HW_ATL_RPOL4CHK_EN_MSK,
+ HW_ATL_RPOL4CHK_EN_SHIFT, tcp_udp_crc_offload_en);
+}
+
+void hw_atl_rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RPO_LRO_EN_ADR, lro_en);
+}
+
+void hw_atl_rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
+ u32 lro_patch_optimization_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_PTOPT_EN_ADR,
+ HW_ATL_RPO_LRO_PTOPT_EN_MSK,
+ HW_ATL_RPO_LRO_PTOPT_EN_SHIFT,
+ lro_patch_optimization_en);
+}
+
+void hw_atl_rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw,
+ u32 lro_qsessions_lim)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_QSES_LMT_ADR,
+ HW_ATL_RPO_LRO_QSES_LMT_MSK,
+ HW_ATL_RPO_LRO_QSES_LMT_SHIFT,
+ lro_qsessions_lim);
+}
+
+void hw_atl_rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw,
+ u32 lro_total_desc_lim)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_TOT_DSC_LMT_ADR,
+ HW_ATL_RPO_LRO_TOT_DSC_LMT_MSK,
+ HW_ATL_RPO_LRO_TOT_DSC_LMT_SHIFT,
+ lro_total_desc_lim);
+}
+
+void hw_atl_rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lro_min_pld_of_first_pkt)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_PKT_MIN_ADR,
+ HW_ATL_RPO_LRO_PKT_MIN_MSK,
+ HW_ATL_RPO_LRO_PKT_MIN_SHIFT,
+ lro_min_pld_of_first_pkt);
+}
+
+void hw_atl_rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_pkt_lim)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RPO_LRO_RSC_MAX_ADR, lro_pkt_lim);
+}
+
+void hw_atl_rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
+ u32 lro_max_number_of_descriptors,
+ u32 lro)
+{
+/* Register address for bitfield lro{L}_des_max[1:0] */
+ static u32 rpo_lro_ldes_max_adr[32] = {
+ 0x000055A0U, 0x000055A0U, 0x000055A0U, 0x000055A0U,
+ 0x000055A0U, 0x000055A0U, 0x000055A0U, 0x000055A0U,
+ 0x000055A4U, 0x000055A4U, 0x000055A4U, 0x000055A4U,
+ 0x000055A4U, 0x000055A4U, 0x000055A4U, 0x000055A4U,
+ 0x000055A8U, 0x000055A8U, 0x000055A8U, 0x000055A8U,
+ 0x000055A8U, 0x000055A8U, 0x000055A8U, 0x000055A8U,
+ 0x000055ACU, 0x000055ACU, 0x000055ACU, 0x000055ACU,
+ 0x000055ACU, 0x000055ACU, 0x000055ACU, 0x000055ACU
+ };
+
+/* Bitmask for bitfield lro{L}_des_max[1:0] */
+ static u32 rpo_lro_ldes_max_msk[32] = {
+ 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U,
+ 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U,
+ 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U,
+ 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U,
+ 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U,
+ 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U,
+ 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U,
+ 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U
+ };
+
+/* Lower bit position of bitfield lro{L}_des_max[1:0] */
+ static u32 rpo_lro_ldes_max_shift[32] = {
+ 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U,
+ 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U,
+ 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U,
+ 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U
+ };
+
+ aq_hw_write_reg_bit(aq_hw, rpo_lro_ldes_max_adr[lro],
+ rpo_lro_ldes_max_msk[lro],
+ rpo_lro_ldes_max_shift[lro],
+ lro_max_number_of_descriptors);
+}
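+
+/* Editor's note: same packed-table pattern as the UP -> TC map -- 32 two-bit
+ * lro{L}_des_max fields, eight per register (0x55a0/0x55a4/0x55a8/0x55ac),
+ * each at shift 4 * (lro % 8); the register index is lro / 8.
+ */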
+
+void hw_atl_rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
+ u32 lro_time_base_divider)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_TB_DIV_ADR,
+ HW_ATL_RPO_LRO_TB_DIV_MSK,
+ HW_ATL_RPO_LRO_TB_DIV_SHIFT,
+ lro_time_base_divider);
+}
+
+void hw_atl_rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
+ u32 lro_inactive_interval)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_INA_IVAL_ADR,
+ HW_ATL_RPO_LRO_INA_IVAL_MSK,
+ HW_ATL_RPO_LRO_INA_IVAL_SHIFT,
+ lro_inactive_interval);
+}
+
+void hw_atl_rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
+ u32 lro_max_coal_interval)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_MAX_IVAL_ADR,
+ HW_ATL_RPO_LRO_MAX_IVAL_MSK,
+ HW_ATL_RPO_LRO_MAX_IVAL_SHIFT,
+ lro_max_coal_interval);
+}
+
+/* rx */
+void hw_atl_rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RX_REG_RES_DSBL_ADR,
+ HW_ATL_RX_REG_RES_DSBL_MSK,
+ HW_ATL_RX_REG_RES_DSBL_SHIFT,
+ rx_reg_res_dis);
+}
+
+/* tdm */
+void hw_atl_tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCADCPUID_ADR(dca),
+ HW_ATL_TDM_DCADCPUID_MSK,
+ HW_ATL_TDM_DCADCPUID_SHIFT, cpuid);
+}
+
+void hw_atl_tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 large_send_offload_en)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_TDM_LSO_EN_ADR, large_send_offload_en);
+}
+
+void hw_atl_tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCA_EN_ADR, HW_ATL_TDM_DCA_EN_MSK,
+ HW_ATL_TDM_DCA_EN_SHIFT, tx_dca_en);
+}
+
+void hw_atl_tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCA_MODE_ADR,
+ HW_ATL_TDM_DCA_MODE_MSK,
+ HW_ATL_TDM_DCA_MODE_SHIFT, tx_dca_mode);
+}
+
+void hw_atl_tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en,
+ u32 dca)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCADDESC_EN_ADR(dca),
+ HW_ATL_TDM_DCADDESC_EN_MSK,
+ HW_ATL_TDM_DCADDESC_EN_SHIFT,
+ tx_desc_dca_en);
+}
+
+void hw_atl_tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en,
+ u32 descriptor)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DESCDEN_ADR(descriptor),
+ HW_ATL_TDM_DESCDEN_MSK,
+ HW_ATL_TDM_DESCDEN_SHIFT,
+ tx_desc_en);
+}
+
+u32 hw_atl_tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_TDM_DESCDHD_ADR(descriptor),
+ HW_ATL_TDM_DESCDHD_MSK,
+ HW_ATL_TDM_DESCDHD_SHIFT);
+}
+
+void hw_atl_tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
+ u32 descriptor)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DESCDLEN_ADR(descriptor),
+ HW_ATL_TDM_DESCDLEN_MSK,
+ HW_ATL_TDM_DESCDLEN_SHIFT,
+ tx_desc_len);
+}
+
+void hw_atl_tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_desc_wr_wb_irq_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_INT_DESC_WRB_EN_ADR,
+ HW_ATL_TDM_INT_DESC_WRB_EN_MSK,
+ HW_ATL_TDM_INT_DESC_WRB_EN_SHIFT,
+ tx_desc_wr_wb_irq_en);
+}
+
+void hw_atl_tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
+ u32 tx_desc_wr_wb_threshold,
+ u32 descriptor)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DESCDWRB_THRESH_ADR(descriptor),
+ HW_ATL_TDM_DESCDWRB_THRESH_MSK,
+ HW_ATL_TDM_DESCDWRB_THRESH_SHIFT,
+ tx_desc_wr_wb_threshold);
+}
+
+void hw_atl_tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
+ u32 tdm_irq_moderation_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_INT_MOD_EN_ADR,
+ HW_ATL_TDM_INT_MOD_EN_MSK,
+ HW_ATL_TDM_INT_MOD_EN_SHIFT,
+ tdm_irq_moderation_en);
+}
+
+/* thm */
+void hw_atl_thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_first_pkt)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_THM_LSO_TCP_FLAG_FIRST_ADR,
+ HW_ATL_THM_LSO_TCP_FLAG_FIRST_MSK,
+ HW_ATL_THM_LSO_TCP_FLAG_FIRST_SHIFT,
+ lso_tcp_flag_of_first_pkt);
+}
+
+void hw_atl_thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_last_pkt)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_THM_LSO_TCP_FLAG_LAST_ADR,
+ HW_ATL_THM_LSO_TCP_FLAG_LAST_MSK,
+ HW_ATL_THM_LSO_TCP_FLAG_LAST_SHIFT,
+ lso_tcp_flag_of_last_pkt);
+}
+
+void hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_middle_pkt)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_THM_LSO_TCP_FLAG_MID_ADR,
+ HW_ATL_THM_LSO_TCP_FLAG_MID_MSK,
+ HW_ATL_THM_LSO_TCP_FLAG_MID_SHIFT,
+ lso_tcp_flag_of_middle_pkt);
+}
+
+/* TPB: tx packet buffer */
+void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_BUF_EN_ADR,
+ HW_ATL_TPB_TX_BUF_EN_MSK,
+ HW_ATL_TPB_TX_BUF_EN_SHIFT, tx_buff_en);
+}
+
+u32 hw_atl_tpb_tps_tx_tc_mode_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_TPB_TX_TC_MODE_ADDR,
+ HW_ATL_TPB_TX_TC_MODE_MSK,
+ HW_ATL_TPB_TX_TC_MODE_SHIFT);
+}
+
+void hw_atl_tpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
+ u32 tx_traf_class_mode)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_TC_MODE_ADDR,
+ HW_ATL_TPB_TX_TC_MODE_MSK,
+ HW_ATL_TPB_TX_TC_MODE_SHIFT,
+ tx_traf_class_mode);
+}
+
+void hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_buff_hi_threshold_per_tc,
+ u32 buffer)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TXBHI_THRESH_ADR(buffer),
+ HW_ATL_TPB_TXBHI_THRESH_MSK,
+ HW_ATL_TPB_TXBHI_THRESH_SHIFT,
+ tx_buff_hi_threshold_per_tc);
+}
+
+void hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_buff_lo_threshold_per_tc,
+ u32 buffer)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TXBLO_THRESH_ADR(buffer),
+ HW_ATL_TPB_TXBLO_THRESH_MSK,
+ HW_ATL_TPB_TXBLO_THRESH_SHIFT,
+ tx_buff_lo_threshold_per_tc);
+}
+
+void hw_atl_tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_DMA_SYS_LBK_ADR,
+ HW_ATL_TPB_DMA_SYS_LBK_MSK,
+ HW_ATL_TPB_DMA_SYS_LBK_SHIFT,
+ tx_dma_sys_lbk_en);
+}
+
+void hw_atl_tpb_tx_dma_net_lbk_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_net_lbk_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_DMA_NET_LBK_ADR,
+ HW_ATL_TPB_DMA_NET_LBK_MSK,
+ HW_ATL_TPB_DMA_NET_LBK_SHIFT,
+ tx_dma_net_lbk_en);
+}
+
+void hw_atl_tpb_tx_tx_clk_gate_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_clk_gate_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_CLK_GATE_EN_ADR,
+ HW_ATL_TPB_TX_CLK_GATE_EN_MSK,
+ HW_ATL_TPB_TX_CLK_GATE_EN_SHIFT,
+ tx_clk_gate_en);
+}
+
+void hw_atl_tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_buff_size_per_tc, u32 buffer)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TXBBUF_SIZE_ADR(buffer),
+ HW_ATL_TPB_TXBBUF_SIZE_MSK,
+ HW_ATL_TPB_TXBBUF_SIZE_SHIFT,
+ tx_pkt_buff_size_per_tc);
+}
+
+void hw_atl_tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_SCP_INS_EN_ADR,
+ HW_ATL_TPB_TX_SCP_INS_EN_MSK,
+ HW_ATL_TPB_TX_SCP_INS_EN_SHIFT,
+ tx_path_scp_ins_en);
+}
+
+/* TPO: tx packet offload */
+void hw_atl_tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 ipv4header_crc_offload_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPO_IPV4CHK_EN_ADR,
+ HW_ATL_TPO_IPV4CHK_EN_MSK,
+ HW_ATL_TPO_IPV4CHK_EN_SHIFT,
+ ipv4header_crc_offload_en);
+}
+
+void hw_atl_tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 tcp_udp_crc_offload_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPOL4CHK_EN_ADR,
+ HW_ATL_TPOL4CHK_EN_MSK,
+ HW_ATL_TPOL4CHK_EN_SHIFT,
+ tcp_udp_crc_offload_en);
+}
+
+void hw_atl_tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_sys_lbk_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPO_PKT_SYS_LBK_ADR,
+ HW_ATL_TPO_PKT_SYS_LBK_MSK,
+ HW_ATL_TPO_PKT_SYS_LBK_SHIFT,
+ tx_pkt_sys_lbk_en);
+}
+
+/* TPS: tx packet scheduler */
+void hw_atl_tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_data_arb_mode)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TC_ARB_MODE_ADR,
+ HW_ATL_TPS_DATA_TC_ARB_MODE_MSK,
+ HW_ATL_TPS_DATA_TC_ARB_MODE_SHIFT,
+ tx_pkt_shed_data_arb_mode);
+}
+
+void hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
+ u32 curr_time_res)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_RATE_TA_RST_ADR,
+ HW_ATL_TPS_DESC_RATE_TA_RST_MSK,
+ HW_ATL_TPS_DESC_RATE_TA_RST_SHIFT,
+ curr_time_res);
+}
+
+void hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_rate_lim)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_RATE_LIM_ADR,
+ HW_ATL_TPS_DESC_RATE_LIM_MSK,
+ HW_ATL_TPS_DESC_RATE_LIM_SHIFT,
+ tx_pkt_shed_desc_rate_lim);
+}
+
+void hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 arb_mode)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TC_ARB_MODE_ADR,
+ HW_ATL_TPS_DESC_TC_ARB_MODE_MSK,
+ HW_ATL_TPS_DESC_TC_ARB_MODE_SHIFT,
+ arb_mode);
+}
+
+void hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
+ const u32 tc,
+ const u32 max_credit)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TCTCREDIT_MAX_ADR(tc),
+ HW_ATL_TPS_DESC_TCTCREDIT_MAX_MSK,
+ HW_ATL_TPS_DESC_TCTCREDIT_MAX_SHIFT,
+ max_credit);
+}
+
+void hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
+ const u32 tc,
+ const u32 weight)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TCTWEIGHT_ADR(tc),
+ HW_ATL_TPS_DESC_TCTWEIGHT_MSK,
+ HW_ATL_TPS_DESC_TCTWEIGHT_SHIFT,
+ weight);
+}
+
+void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 arb_mode)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_VM_ARB_MODE_ADR,
+ HW_ATL_TPS_DESC_VM_ARB_MODE_MSK,
+ HW_ATL_TPS_DESC_VM_ARB_MODE_SHIFT,
+ arb_mode);
+}
+
+void hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
+ const u32 tc,
+ const u32 max_credit)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TCTCREDIT_MAX_ADR(tc),
+ HW_ATL_TPS_DATA_TCTCREDIT_MAX_MSK,
+ HW_ATL_TPS_DATA_TCTCREDIT_MAX_SHIFT,
+ max_credit);
+}
+
+void hw_atl_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
+ const u32 tc,
+ const u32 weight)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TCTWEIGHT_ADR(tc),
+ HW_ATL_TPS_DATA_TCTWEIGHT_MSK,
+ HW_ATL_TPS_DATA_TCTWEIGHT_SHIFT,
+ weight);
+}
+
+void hw_atl_tps_tx_desc_rate_mode_set(struct aq_hw_s *aq_hw,
+ const u32 rate_mode)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_TX_DESC_RATE_MODE_ADR,
+ HW_ATL_TPS_TX_DESC_RATE_MODE_MSK,
+ HW_ATL_TPS_TX_DESC_RATE_MODE_SHIFT,
+ rate_mode);
+}
+
+void hw_atl_tps_tx_desc_rate_en_set(struct aq_hw_s *aq_hw, const u32 desc,
+ const u32 enable)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_RATE_EN_ADR(desc),
+ HW_ATL_TPS_DESC_RATE_EN_MSK,
+ HW_ATL_TPS_DESC_RATE_EN_SHIFT,
+ enable);
+}
+
+void hw_atl_tps_tx_desc_rate_x_set(struct aq_hw_s *aq_hw, const u32 desc,
+ const u32 rate_int)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_RATE_X_ADR(desc),
+ HW_ATL_TPS_DESC_RATE_X_MSK,
+ HW_ATL_TPS_DESC_RATE_X_SHIFT,
+ rate_int);
+}
+
+void hw_atl_tps_tx_desc_rate_y_set(struct aq_hw_s *aq_hw, const u32 desc,
+ const u32 rate_frac)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_RATE_Y_ADR(desc),
+ HW_ATL_TPS_DESC_RATE_Y_MSK,
+ HW_ATL_TPS_DESC_RATE_Y_SHIFT,
+ rate_frac);
+}
+
+/* tx */
+void hw_atl_tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TX_REG_RES_DSBL_ADR,
+ HW_ATL_TX_REG_RES_DSBL_MSK,
+ HW_ATL_TX_REG_RES_DSBL_SHIFT, tx_reg_res_dis);
+}
+
+/* msm */
+u32 hw_atl_msm_reg_access_status_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_MSM_REG_ACCESS_BUSY_ADR,
+ HW_ATL_MSM_REG_ACCESS_BUSY_MSK,
+ HW_ATL_MSM_REG_ACCESS_BUSY_SHIFT);
+}
+
+void hw_atl_msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
+ u32 reg_addr_for_indirect_addr)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_MSM_REG_ADDR_ADR,
+ HW_ATL_MSM_REG_ADDR_MSK,
+ HW_ATL_MSM_REG_ADDR_SHIFT,
+ reg_addr_for_indirect_addr);
+}
+
+void hw_atl_msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_MSM_REG_RD_STROBE_ADR,
+ HW_ATL_MSM_REG_RD_STROBE_MSK,
+ HW_ATL_MSM_REG_RD_STROBE_SHIFT,
+ reg_rd_strobe);
+}
+
+u32 hw_atl_msm_reg_rd_data_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_MSM_REG_RD_DATA_ADR);
+}
+
+void hw_atl_msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_MSM_REG_WR_DATA_ADR, reg_wr_data);
+}
+
+void hw_atl_msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_MSM_REG_WR_STROBE_ADR,
+ HW_ATL_MSM_REG_WR_STROBE_MSK,
+ HW_ATL_MSM_REG_WR_STROBE_SHIFT,
+ reg_wr_strobe);
+}
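+
+/* Editor's sketch (not part of this patch): the MSM block is reached
+ * indirectly through the accessors above.  One plausible read sequence,
+ * assuming the busy flag gates each transaction:
+ */
+#if 0 /* illustrative only */
+static int hw_atl_msm_indirect_read_example(struct aq_hw_s *aq_hw, u32 addr,
+ u32 *data)
+{
+ int budget = 1000;
+
+ while (hw_atl_msm_reg_access_status_get(aq_hw) && --budget)
+ udelay(10);
+ if (!budget)
+ return -ETIME;
+
+ hw_atl_msm_reg_addr_for_indirect_addr_set(aq_hw, addr);
+ hw_atl_msm_reg_rd_strobe_set(aq_hw, 1U);
+
+ budget = 1000;
+ while (hw_atl_msm_reg_access_status_get(aq_hw) && --budget)
+ udelay(10);
+ if (!budget)
+ return -ETIME;
+
+ *data = hw_atl_msm_reg_rd_data_get(aq_hw);
+
+ return 0;
+}
+#endif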
+
+/* pci */
+void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_PCI_REG_RES_DSBL_ADR,
+ HW_ATL_PCI_REG_RES_DSBL_MSK,
+ HW_ATL_PCI_REG_RES_DSBL_SHIFT,
+ pci_reg_res_dis);
+}
+
+void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
+ u32 glb_cpu_scratch_scp,
+ u32 scratch_scp)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_GLB_CPU_SCRATCH_SCP_ADR(scratch_scp),
+ glb_cpu_scratch_scp);
+}
+
+void hw_atl_pcs_ptp_clock_read_enable(struct aq_hw_s *aq_hw,
+ u32 ptp_clock_read_enable)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_ADR,
+ HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_MSK,
+ HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_SHIFT,
+ ptp_clock_read_enable);
+}
+
+u32 hw_atl_pcs_ptp_clock_get(struct aq_hw_s *aq_hw, u32 index)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_PCS_PTP_TS_VAL_ADDR(index));
+}
+
+void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_MCP_UP_FORCE_INTERRUPT_ADR,
+ HW_ATL_MCP_UP_FORCE_INTERRUPT_MSK,
+ HW_ATL_MCP_UP_FORCE_INTERRUPT_SHIFT,
+ up_force_intr);
+}
+
+void hw_atl_rpfl3l4_ipv4_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_DSTA_ADR(location), 0U);
+}
+
+void hw_atl_rpfl3l4_ipv4_src_addr_clear(struct aq_hw_s *aq_hw, u8 location)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_SRCA_ADR(location), 0U);
+}
+
+void hw_atl_rpfl3l4_cmd_clear(struct aq_hw_s *aq_hw, u8 location)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_REG_CTRL_ADR(location), 0U);
+}
+
+void hw_atl_rpfl3l4_ipv6_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location)
+{
+ int i;
+
+ for (i = 0; i < 4; ++i)
+ aq_hw_write_reg(aq_hw,
+ HW_ATL_RPF_L3_DSTA_ADR(location + i),
+ 0U);
+}
+
+void hw_atl_rpfl3l4_ipv6_src_addr_clear(struct aq_hw_s *aq_hw, u8 location)
+{
+ int i;
+
+ for (i = 0; i < 4; ++i)
+ aq_hw_write_reg(aq_hw,
+ HW_ATL_RPF_L3_SRCA_ADR(location + i),
+ 0U);
+}
+
+void hw_atl_rpfl3l4_ipv4_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 ipv4_dest)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_DSTA_ADR(location),
+ ipv4_dest);
+}
+
+void hw_atl_rpfl3l4_ipv4_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 ipv4_src)
+{
+ aq_hw_write_reg(aq_hw,
+ HW_ATL_RPF_L3_SRCA_ADR(location),
+ ipv4_src);
+}
+
+void hw_atl_rpfl3l4_cmd_set(struct aq_hw_s *aq_hw, u8 location, u32 cmd)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_REG_CTRL_ADR(location), cmd);
+}
+
+void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 *ipv6_src)
+{
+ int i;
+
+ for (i = 0; i < 4; ++i)
+ aq_hw_write_reg(aq_hw,
+ HW_ATL_RPF_L3_SRCA_ADR(location + i),
+ ipv6_src[3 - i]);
+}
+
+void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 *ipv6_dest)
+{
+ int i;
+
+ for (i = 0; i < 4; ++i)
+ aq_hw_write_reg(aq_hw,
+ HW_ATL_RPF_L3_DSTA_ADR(location + i),
+ ipv6_dest[3 - i]);
+}
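+
+/* Editor's note: an IPv6 entry spans four consecutive L3 filter slots
+ * (location .. location + 3) and is written in reverse array order
+ * (ipv6_dest[3] lands at the base location), matching the _clear helpers
+ * above.
+ */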
+
+u32 hw_atl_sem_ram_get(struct aq_hw_s *self)
+{
+ return hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
+}
+
+u32 hw_atl_sem_mdio_get(struct aq_hw_s *self)
+{
+ return hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_MDIO);
+}
+
+u32 hw_atl_sem_reset1_get(struct aq_hw_s *self)
+{
+ return hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RESET1);
+}
+
+u32 hw_atl_sem_reset2_get(struct aq_hw_s *self)
+{
+ return hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RESET2);
+}
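+
+/* Editor's note: these firmware semaphores are read-to-acquire -- a get that
+ * returns 1 means the caller now owns the semaphore (callers typically retry
+ * in a poll loop), and ownership is released by writing 1 back through
+ * hw_atl_reg_glb_cpu_sem_set().
+ */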
+
+u32 hw_atl_scrpad_get(struct aq_hw_s *aq_hw, u32 scratch_scp)
+{
+ return aq_hw_read_reg(aq_hw,
+ HW_ATL_GLB_CPU_SCRATCH_SCP_ADR(scratch_scp));
+}
+
+u32 hw_atl_scrpad12_get(struct aq_hw_s *self)
+{
+ return hw_atl_scrpad_get(self, 0xB);
+}
+
+u32 hw_atl_scrpad25_get(struct aq_hw_s *self)
+{
+ return hw_atl_scrpad_get(self, 0x18);
+}
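+
+/* Editor's note: the firmware numbers its scratchpads from 1, so scratchpad
+ * 12 is register index 0xB and scratchpad 25 is index 0x18 in
+ * hw_atl_scrpad_get().
+ */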
+
+void hw_atl_glb_mdio_iface1_set(struct aq_hw_s *aq_hw, u32 value)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(1), value);
+}
+
+u32 hw_atl_glb_mdio_iface1_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(1));
+}
+
+void hw_atl_glb_mdio_iface2_set(struct aq_hw_s *aq_hw, u32 value)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(2), value);
+}
+
+u32 hw_atl_glb_mdio_iface2_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(2));
+}
+
+void hw_atl_glb_mdio_iface3_set(struct aq_hw_s *aq_hw, u32 value)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(3), value);
+}
+
+u32 hw_atl_glb_mdio_iface3_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(3));
+}
+
+void hw_atl_glb_mdio_iface4_set(struct aq_hw_s *aq_hw, u32 value)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(4), value);
+}
+
+u32 hw_atl_glb_mdio_iface4_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(4));
+}
+
+void hw_atl_glb_mdio_iface5_set(struct aq_hw_s *aq_hw, u32 value)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(5), value);
+}
+
+u32 hw_atl_glb_mdio_iface5_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(5));
+}
+
+u32 hw_atl_mdio_busy_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_MDIO_BUSY_ADR,
+ HW_ATL_MDIO_BUSY_MSK,
+ HW_ATL_MDIO_BUSY_SHIFT);
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
new file mode 100644
index 000000000..58f5ee0a6
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
@@ -0,0 +1,872 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
+ */
+
+/* File hw_atl_llh.h: Declarations of bitfield and register access functions for
+ * Atlantic registers.
+ */
+
+#ifndef HW_ATL_LLH_H
+#define HW_ATL_LLH_H
+
+#include <linux/types.h>
+
+struct aq_hw_s;
+
+/* set temperature sense reset */
+void hw_atl_ts_reset_set(struct aq_hw_s *aq_hw, u32 val);
+
+/* set temperature sense power down */
+void hw_atl_ts_power_down_set(struct aq_hw_s *aq_hw, u32 val);
+
+/* get temperature sense power down */
+u32 hw_atl_ts_power_down_get(struct aq_hw_s *aq_hw);
+
+/* get temperature sense ready */
+u32 hw_atl_ts_ready_get(struct aq_hw_s *aq_hw);
+
+/* get temperature sense ready latch high */
+u32 hw_atl_ts_ready_latch_high_get(struct aq_hw_s *aq_hw);
+
+/* get temperature sense data */
+u32 hw_atl_ts_data_get(struct aq_hw_s *aq_hw);
+
+/* global */
+
+/* set global microprocessor semaphore */
+void hw_atl_reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem,
+ u32 semaphore);
+
+/* get global microprocessor semaphore */
+u32 hw_atl_reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore);
+
+/* set global register reset disable */
+void hw_atl_glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis);
+
+/* set soft reset */
+void hw_atl_glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res);
+
+/* get soft reset */
+u32 hw_atl_glb_soft_res_get(struct aq_hw_s *aq_hw);
+
+/* stats */
+
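+/* get rx dma drop packet counter */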
+u32 hw_atl_rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get rx dma good octet counter */
+u64 hw_atl_stats_rx_dma_good_octet_counter_get(struct aq_hw_s *aq_hw);
+
+/* get rx dma good packet counter */
+u64 hw_atl_stats_rx_dma_good_pkt_counter_get(struct aq_hw_s *aq_hw);
+
+/* get tx dma good octet counter */
+u64 hw_atl_stats_tx_dma_good_octet_counter_get(struct aq_hw_s *aq_hw);
+
+/* get tx dma good packet counter */
+u64 hw_atl_stats_tx_dma_good_pkt_counter_get(struct aq_hw_s *aq_hw);
+
+/* get msm rx errors counter register */
+u32 hw_atl_reg_mac_msm_rx_errs_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get msm rx unicast frames counter register */
+u32 hw_atl_reg_mac_msm_rx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get msm rx multicast frames counter register */
+u32 hw_atl_reg_mac_msm_rx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get msm rx broadcast frames counter register */
+u32 hw_atl_reg_mac_msm_rx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get msm rx broadcast octets counter register 1 */
+u32 hw_atl_reg_mac_msm_rx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
+
+/* get msm rx unicast octets counter register 0 */
+u32 hw_atl_reg_mac_msm_rx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
+
+/* get msm tx errors counter register */
+u32 hw_atl_reg_mac_msm_tx_errs_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get msm tx unicast frames counter register */
+u32 hw_atl_reg_mac_msm_tx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get msm tx multicast frames counter register */
+u32 hw_atl_reg_mac_msm_tx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get msm tx broadcast frames counter register */
+u32 hw_atl_reg_mac_msm_tx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get msm tx multicast octets counter register 1 */
+u32 hw_atl_reg_mac_msm_tx_mcst_octets_counter1get(struct aq_hw_s *aq_hw);
+
+/* get msm tx broadcast octets counter register 1 */
+u32 hw_atl_reg_mac_msm_tx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
+
+/* get msm tx unicast octets counter register 0 */
+u32 hw_atl_reg_mac_msm_tx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
+
+/* get global mif identification */
+u32 hw_atl_reg_glb_mif_id_get(struct aq_hw_s *aq_hw);
+
+/* interrupt */
+
+/* set interrupt auto mask lsw */
+void hw_atl_itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw,
+ u32 irq_auto_masklsw);
+
+/* set interrupt mapping enable rx */
+void hw_atl_itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx,
+ u32 rx);
+
+/* set interrupt mapping enable tx */
+void hw_atl_itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx,
+ u32 tx);
+
+/* set interrupt mapping rx */
+void hw_atl_itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx);
+
+/* set interrupt mapping tx */
+void hw_atl_itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx);
+
+/* set interrupt mask clear lsw */
+void hw_atl_itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw,
+ u32 irq_msk_clearlsw);
+
+/* set interrupt mask set lsw */
+void hw_atl_itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw);
+
+/* set interrupt register reset disable */
+void hw_atl_itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis);
+
+/* set interrupt status clear lsw */
+void hw_atl_itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
+ u32 irq_status_clearlsw);
+
+/* get interrupt status lsw */
+u32 hw_atl_itr_irq_statuslsw_get(struct aq_hw_s *aq_hw);
+
+/* get reset interrupt */
+u32 hw_atl_itr_res_irq_get(struct aq_hw_s *aq_hw);
+
+/* set reset interrupt */
+void hw_atl_itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq);
+
+/* set RSC interrupt enable */
+void hw_atl_itr_rsc_en_set(struct aq_hw_s *aq_hw, u32 enable);
+
+/* set RSC delay */
+void hw_atl_itr_rsc_delay_set(struct aq_hw_s *aq_hw, u32 delay);
+
+/* rdm */
+
+/* set cpu id */
+void hw_atl_rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
+
+/* set rx dca enable */
+void hw_atl_rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en);
+
+/* set rx dca mode */
+void hw_atl_rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode);
+
+/* set rx descriptor data buffer size */
+void hw_atl_rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_data_buff_size,
+ u32 descriptor);
+
+/* set rx descriptor dca enable */
+void hw_atl_rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en,
+ u32 dca);
+
+/* set rx descriptor enable */
+void hw_atl_rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en,
+ u32 descriptor);
+
+/* set rx descriptor header splitting */
+void hw_atl_rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_head_splitting,
+ u32 descriptor);
+
+/* get rx descriptor head pointer */
+u32 hw_atl_rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
+
+/* set rx descriptor length */
+void hw_atl_rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len,
+ u32 descriptor);
+
+/* set rx descriptor write-back interrupt enable */
+void hw_atl_rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_wr_wb_irq_en);
+
+/* set rx header dca enable */
+void hw_atl_rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en,
+ u32 dca);
+
+/* set rx payload dca enable */
+void hw_atl_rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en,
+ u32 dca);
+
+/* set rx descriptor header buffer size */
+void hw_atl_rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_head_buff_size,
+ u32 descriptor);
+
+/* set rx descriptor reset */
+void hw_atl_rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res,
+ u32 descriptor);
+
+/* Set RDM Interrupt Moderation Enable */
+void hw_atl_rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
+ u32 rdm_intr_moder_en);
+
+/* reg */
+
+/* set general interrupt mapping register */
+void hw_atl_reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map,
+ u32 regidx);
+
+/* get general interrupt status register */
+u32 hw_atl_reg_gen_irq_status_get(struct aq_hw_s *aq_hw);
+
+/* set interrupt global control register */
+void hw_atl_reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl);
+
+/* set interrupt throttle register */
+void hw_atl_reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle);
+
+/* set rx dma descriptor base address lsw */
+void hw_atl_reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_base_addrlsw,
+ u32 descriptor);
+
+/* set rx dma descriptor base address msw */
+void hw_atl_reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_base_addrmsw,
+ u32 descriptor);
+
+/* get rx dma descriptor status register */
+u32 hw_atl_reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor);
+
+/* set rx dma descriptor tail pointer register */
+void hw_atl_reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_tail_ptr,
+ u32 descriptor);
+
+/* set rx filter multicast filter mask register */
+void hw_atl_reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw,
+ u32 rx_flr_mcst_flr_msk);
+
+/* set rx filter multicast filter register */
+void hw_atl_reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
+ u32 filter);
+
+/* set rx filter rss control register 1 */
+void hw_atl_reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw,
+ u32 rx_flr_rss_control1);
+
+/* Set RX Filter Control Register 2 */
+void hw_atl_reg_rx_flr_control2_set(struct aq_hw_s *aq_hw,
+ u32 rx_filter_control2);
+
+/* Set RX Interrupt Moderation Control Register */
+void hw_atl_reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+ u32 rx_intr_moderation_ctl,
+ u32 queue);
+
+/* set tx dma debug control */
+void hw_atl_reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_debug_ctl);
+
+/* set tx dma descriptor base address lsw */
+void hw_atl_reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_base_addrlsw,
+ u32 descriptor);
+
+/* set tx dma descriptor base address msw */
+void hw_atl_reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_base_addrmsw,
+ u32 descriptor);
+
+/* set tx dma descriptor tail pointer register */
+void hw_atl_reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_tail_ptr,
+ u32 descriptor);
+
+/* Set TX Interrupt Moderation Control Register */
+void hw_atl_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+ u32 tx_intr_moderation_ctl,
+ u32 queue);
+
+/* set global microprocessor scratch pad */
+void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
+ u32 glb_cpu_scratch_scp,
+ u32 scratch_scp);
+
+/* rpb */
+
+/* set dma system loopback */
+void hw_atl_rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk);
+
+/* set dma network loopback */
+void hw_atl_rpb_dma_net_lbk_set(struct aq_hw_s *aq_hw, u32 dma_net_lbk);
+
+/* set rx traffic class mode */
+void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
+ u32 rx_traf_class_mode);
+
+/* get rx traffic class mode */
+u32 hw_atl_rpb_rpf_rx_traf_class_mode_get(struct aq_hw_s *aq_hw);
+
+/* set rx buffer enable */
+void hw_atl_rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en);
+
+/* set rx buffer high threshold (per tc) */
+void hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_buff_hi_threshold_per_tc,
+ u32 buffer);
+
+/* set rx buffer low threshold (per tc) */
+void hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_buff_lo_threshold_per_tc,
+ u32 buffer);
+
+/* set rx flow control mode */
+void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw,
+ u32 rx_flow_ctl_mode);
+
+/* set rx packet buffer size (per tc) */
+void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_pkt_buff_size_per_tc,
+ u32 buffer);
+
+/* toggle rdm rx dma descriptor cache init */
+void hw_atl_rdm_rx_dma_desc_cache_init_tgl(struct aq_hw_s *aq_hw);
+
+/* get rdm rx dma descriptor cache init done */
+u32 hw_atl_rdm_rx_dma_desc_cache_init_done_get(struct aq_hw_s *aq_hw);
+
+/* set rx xoff enable (per tc) */
+void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_xoff_en_per_tc,
+ u32 buffer);
+
+/* rpf */
+
+/* set l2 broadcast count threshold */
+void hw_atl_rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
+ u32 l2broadcast_count_threshold);
+
+/* set l2 broadcast enable */
+void hw_atl_rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en);
+
+/* set l2 broadcast filter action */
+void hw_atl_rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw,
+ u32 l2broadcast_flr_act);
+
+/* set l2 multicast filter enable */
+void hw_atl_rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw,
+ u32 l2multicast_flr_en,
+ u32 filter);
+
+/* get l2 promiscuous mode enable */
+u32 hw_atl_rpfl2promiscuous_mode_en_get(struct aq_hw_s *aq_hw);
+
+/* set l2 promiscuous mode enable */
+void hw_atl_rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
+ u32 l2promiscuous_mode_en);
+
+/* set l2 unicast filter action */
+void hw_atl_rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_flr_act,
+ u32 filter);
+
+/* set l2 unicast filter enable */
+void hw_atl_rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
+ u32 filter);
+
+/* set l2 unicast destination address lsw */
+void hw_atl_rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_dest_addresslsw,
+ u32 filter);
+
+/* set l2 unicast destination address msw */
+void hw_atl_rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_dest_addressmsw,
+ u32 filter);
+
+/* Set L2 Accept all Multicast packets */
+void hw_atl_rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
+ u32 l2_accept_all_mc_packets);
+
+/* set user-priority tc mapping */
+void hw_atl_rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
+ u32 user_priority, u32 tc);
+
+/* set rss key address */
+void hw_atl_rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr);
+
+/* set rss key write data */
+void hw_atl_rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data);
+
+/* get rss key write enable */
+u32 hw_atl_rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw);
+
+/* set rss key write enable */
+void hw_atl_rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en);
+
+/* set rss redirection table address */
+void hw_atl_rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw,
+ u32 rss_redir_tbl_addr);
+
+/* set rss redirection table write data */
+void hw_atl_rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
+ u32 rss_redir_tbl_wr_data);
+
+/* get rss redirection write enable */
+u32 hw_atl_rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw);
+
+/* set rss redirection write enable */
+void hw_atl_rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en);
+
+/* set tpo to rpf system loopback */
+void hw_atl_rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw,
+ u32 tpo_to_rpf_sys_lbk);
+
+/* set vlan inner ethertype */
+void hw_atl_rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht);
+
+/* set vlan outer ethertype */
+void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht);
+
+/* set vlan promiscuous mode enable */
+void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw,
+ u32 vlan_prom_mode_en);
+
+/* Get VLAN promiscuous mode enable */
+u32 hw_atl_rpf_vlan_prom_mode_en_get(struct aq_hw_s *aq_hw);
+
+/* Set VLAN untagged action */
+void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw,
+ u32 vlan_untagged_act);
+
+/* Set VLAN accept untagged packets */
+void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
+ u32 vlan_acc_untagged_packets);
+
+/* Set VLAN filter enable */
+void hw_atl_rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en,
+ u32 filter);
+
+/* Set VLAN Filter Action */
+void hw_atl_rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_flr_act,
+ u32 filter);
+
+/* Set VLAN ID Filter */
+void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr,
+ u32 filter);
+
+/* Set VLAN RX queue assignment enable */
+void hw_atl_rpf_vlan_rxq_en_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq_en,
+ u32 filter);
+
+/* Set VLAN RX queue */
+void hw_atl_rpf_vlan_rxq_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq,
+ u32 filter);
+
+/* set ethertype filter enable */
+void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en,
+ u32 filter);
+
+/* set ethertype user-priority enable */
+void hw_atl_rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
+ u32 etht_user_priority_en,
+ u32 filter);
+
+/* set ethertype rx queue enable */
+void hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw,
+ u32 etht_rx_queue_en,
+ u32 filter);
+
+/* set ethertype rx queue */
+void hw_atl_rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
+ u32 filter);
+
+/* set ethertype user-priority */
+void hw_atl_rpf_etht_user_priority_set(struct aq_hw_s *aq_hw,
+ u32 etht_user_priority,
+ u32 filter);
+
+/* set ethertype management queue */
+void hw_atl_rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
+ u32 filter);
+
+/* set ethertype filter action */
+void hw_atl_rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
+ u32 filter);
+
+/* set ethertype filter */
+void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter);
+
+/* set L4 source port */
+void hw_atl_rpf_l4_spd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter);
+
+/* set L4 destination port */
+void hw_atl_rpf_l4_dpd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter);
+
+/* rpo */
+
+/* set ipv4 header checksum offload enable */
+void hw_atl_rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 ipv4header_crc_offload_en);
+
+/* set rx descriptor vlan stripping */
+void hw_atl_rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_vlan_stripping,
+ u32 descriptor);
+
+/* set outer vlan tag mode */
+void hw_atl_rpo_outer_vlan_tag_mode_set(void *context,
+ u32 outervlantagmode);
+
+/* get outer vlan tag mode */
+u32 hw_atl_rpo_outer_vlan_tag_mode_get(void *context);
+
+/* set tcp/udp checksum offload enable */
+void hw_atl_rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 tcp_udp_crc_offload_en);
+
+/* Set LRO Patch Optimization Enable. */
+void hw_atl_rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
+ u32 lro_patch_optimization_en);
+
+/* Set Large Receive Offload Enable */
+void hw_atl_rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en);
+
+/* Set LRO Q Sessions Limit */
+void hw_atl_rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw,
+ u32 lro_qsessions_lim);
+
+/* Set LRO Total Descriptor Limit */
+void hw_atl_rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw,
+ u32 lro_total_desc_lim);
+
+/* Set LRO Min Payload of First Packet */
+void hw_atl_rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lro_min_pld_of_first_pkt);
+
+/* Set LRO Packet Limit */
+void hw_atl_rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_packet_lim);
+
+/* Set LRO Max Number of Descriptors */
+void hw_atl_rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
+ u32 lro_max_desc_num, u32 lro);
+
+/* Set LRO Time Base Divider */
+void hw_atl_rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
+ u32 lro_time_base_divider);
+
+/* Set LRO Inactive Interval */
+void hw_atl_rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
+ u32 lro_inactive_interval);
+
+/* Set LRO Max Coalescing Interval */
+void hw_atl_rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
+ u32 lro_max_coal_interval);
+
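+/* Editor's sketch (not part of this patch): LRO is configured
+ * limits-first, with the global enable written last. Every numeric
+ * value below is illustrative only, not a recommended setting.
+ */
+static inline void lro_setup_sketch(struct aq_hw_s *aq_hw)
+{
+        hw_atl_rpo_lro_qsessions_lim_set(aq_hw, 1U);
+        hw_atl_rpo_lro_total_desc_lim_set(aq_hw, 2U);
+        hw_atl_rpo_lro_pkt_lim_set(aq_hw, 1U);
+        hw_atl_rpo_lro_time_base_divider_set(aq_hw, 0x61AU);
+        hw_atl_rpo_lro_inactive_interval_set(aq_hw, 0U);
+        hw_atl_rpo_lro_en_set(aq_hw, 1U);
+}
+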
+/* rx */
+
+/* set rx register reset disable */
+void hw_atl_rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis);
+
+/* tdm */
+
+/* set cpu id */
+void hw_atl_tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
+
+/* set large send offload enable */
+void hw_atl_tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 large_send_offload_en);
+
+/* set tx descriptor enable */
+void hw_atl_tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en,
+ u32 descriptor);
+
+/* set tx dca enable */
+void hw_atl_tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en);
+
+/* set tx dca mode */
+void hw_atl_tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode);
+
+/* set tx descriptor dca enable */
+void hw_atl_tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en,
+ u32 dca);
+
+/* get tx descriptor head pointer */
+u32 hw_atl_tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
+
+/* set tx descriptor length */
+void hw_atl_tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
+ u32 descriptor);
+
+/* set tx descriptor write-back interrupt enable */
+void hw_atl_tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_desc_wr_wb_irq_en);
+
+/* set tx descriptor write-back threshold */
+void hw_atl_tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
+ u32 tx_desc_wr_wb_threshold,
+ u32 descriptor);
+
+/* Set TDM Interrupt Moderation Enable */
+void hw_atl_tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
+ u32 tdm_irq_moderation_en);
+
+/* thm */
+
+/* set lso tcp flag of first packet */
+void hw_atl_thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_first_pkt);
+
+/* set lso tcp flag of last packet */
+void hw_atl_thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_last_pkt);
+
+/* set lso tcp flag of middle packet */
+void hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_middle_pkt);
+
+/* tpb */
+
+/* set TX Traffic Class Mode */
+void hw_atl_tpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
+ u32 tx_traf_class_mode);
+
+/* get TX Traffic Class Mode */
+u32 hw_atl_tpb_tps_tx_tc_mode_get(struct aq_hw_s *aq_hw);
+
+/* set tx buffer enable */
+void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en);
+
+/* set tx buffer high threshold (per tc) */
+void hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_buff_hi_threshold_per_tc,
+ u32 buffer);
+
+/* set tx buffer low threshold (per tc) */
+void hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_buff_lo_threshold_per_tc,
+ u32 buffer);
+
+/* set tx dma system loopback enable */
+void hw_atl_tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_sys_lbk_en);
+
+/* set tx dma network loopback enable */
+void hw_atl_tpb_tx_dma_net_lbk_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_net_lbk_en);
+
+/* set tx clock gating enable */
+void hw_atl_tpb_tx_tx_clk_gate_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_clk_gate_en);
+
+/* set tx packet buffer size (per tc) */
+void hw_atl_tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_buff_size_per_tc,
+ u32 buffer);
+
+/* set tx path pad insert enable */
+void hw_atl_tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_path_scp_ins_en);
+
+/* tpo */
+
+/* set ipv4 header checksum offload enable */
+void hw_atl_tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 ipv4header_crc_offload_en);
+
+/* set tcp/udp checksum offload enable */
+void hw_atl_tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 tcp_udp_crc_offload_en);
+
+/* set tx pkt system loopback enable */
+void hw_atl_tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_sys_lbk_en);
+
+/* tps */
+
+/* set tx packet scheduler data arbitration mode */
+void hw_atl_tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_data_arb_mode);
+
+/* set tx packet scheduler descriptor rate current time reset */
+void hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
+ u32 curr_time_res);
+
+/* set tx packet scheduler descriptor rate limit */
+void hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_rate_lim);
+
+/* set tx packet scheduler descriptor tc arbitration mode */
+void hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 arb_mode);
+
+/* set tx packet scheduler descriptor tc max credit */
+void hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
+ const u32 tc,
+ const u32 max_credit);
+
+/* set tx packet scheduler descriptor tc weight */
+void hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
+ const u32 tc,
+ const u32 weight);
+
+/* set tx packet scheduler descriptor vm arbitration mode */
+void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 arb_mode);
+
+/* set tx packet scheduler tc data max credit */
+void hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
+ const u32 tc,
+ const u32 max_credit);
+
+/* set tx packet scheduler tc data weight */
+void hw_atl_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
+ const u32 tc,
+ const u32 weight);
+
+/* set tx descriptor rate mode */
+void hw_atl_tps_tx_desc_rate_mode_set(struct aq_hw_s *aq_hw,
+ const u32 rate_mode);
+
+/* set tx packet scheduler descriptor rate enable */
+void hw_atl_tps_tx_desc_rate_en_set(struct aq_hw_s *aq_hw, const u32 desc,
+ const u32 enable);
+
+/* set tx packet scheduler descriptor rate integral value */
+void hw_atl_tps_tx_desc_rate_x_set(struct aq_hw_s *aq_hw, const u32 desc,
+ const u32 rate_int);
+
+/* set tx packet scheduler descriptor rate fractional value */
+void hw_atl_tps_tx_desc_rate_y_set(struct aq_hw_s *aq_hw, const u32 desc,
+ const u32 rate_frac);
+
+/* tx */
+
+/* set tx register reset disable */
+void hw_atl_tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis);
+
+/* msm */
+
+/* get register access status */
+u32 hw_atl_msm_reg_access_status_get(struct aq_hw_s *aq_hw);
+
+/* set register address for indirect address */
+void hw_atl_msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
+ u32 reg_addr_for_indirect_addr);
+
+/* set register read strobe */
+void hw_atl_msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe);
+
+/* get register read data */
+u32 hw_atl_msm_reg_rd_data_get(struct aq_hw_s *aq_hw);
+
+/* set register write data */
+void hw_atl_msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data);
+
+/* set register write strobe */
+void hw_atl_msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe);
+
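+/* Editor's sketch (not part of this patch): the MSM block sits
+ * behind an indirect window: write the target address, pulse the
+ * read strobe, wait for the access-status flag, then fetch the data.
+ * The assumption that the status bit reads non-zero while an access
+ * is in flight, and the polling bounds, are illustrative; udelay()
+ * comes from <linux/delay.h>.
+ */
+static inline int msm_indirect_read_sketch(struct aq_hw_s *aq_hw,
+ u32 addr, u32 *val)
+{
+        int busy = 100;
+
+        while (hw_atl_msm_reg_access_status_get(aq_hw) && --busy)
+                udelay(10);
+        if (!busy)
+                return -ETIME;
+
+        hw_atl_msm_reg_addr_for_indirect_addr_set(aq_hw, addr);
+        hw_atl_msm_reg_rd_strobe_set(aq_hw, 1U);
+
+        busy = 100;
+        while (hw_atl_msm_reg_access_status_get(aq_hw) && --busy)
+                udelay(10);
+        if (!busy)
+                return -ETIME;
+
+        *val = hw_atl_msm_reg_rd_data_get(aq_hw);
+        return 0;
+}
+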
+/* pci */
+
+/* set pci register reset disable */
+void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis);
+
+/* pcs */
+void hw_atl_pcs_ptp_clock_read_enable(struct aq_hw_s *aq_hw,
+ u32 ptp_clock_read_enable);
+
+u32 hw_atl_pcs_ptp_clock_get(struct aq_hw_s *aq_hw, u32 index);
+
+/* set uP Force Interrupt */
+void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr);
+
+/* clear ipv4 filter destination address */
+void hw_atl_rpfl3l4_ipv4_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location);
+
+/* clear ipv4 filter source address */
+void hw_atl_rpfl3l4_ipv4_src_addr_clear(struct aq_hw_s *aq_hw, u8 location);
+
+/* clear command for filter l3-l4 */
+void hw_atl_rpfl3l4_cmd_clear(struct aq_hw_s *aq_hw, u8 location);
+
+/* clear ipv6 filter destination address */
+void hw_atl_rpfl3l4_ipv6_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location);
+
+/* clear ipv6 filter source address */
+void hw_atl_rpfl3l4_ipv6_src_addr_clear(struct aq_hw_s *aq_hw, u8 location);
+
+/* set ipv4 filter destination address */
+void hw_atl_rpfl3l4_ipv4_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 ipv4_dest);
+
+/* set ipv4 filter source address */
+void hw_atl_rpfl3l4_ipv4_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 ipv4_src);
+
+/* set command for filter l3-l4 */
+void hw_atl_rpfl3l4_cmd_set(struct aq_hw_s *aq_hw, u8 location, u32 cmd);
+
+/* set ipv6 filter source address */
+void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 *ipv6_src);
+
+/* set ipv6 filter destination address */
+void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 *ipv6_dest);
+
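+/* Editor's sketch (not part of this patch): an L3/L4 filter location
+ * is programmed by clearing it, writing the address field(s), then
+ * writing the command word that arms the entry; the command bit
+ * encoding lives with the callers of these helpers and is passed
+ * through opaquely here.
+ */
+static inline void l3_ipv4_filter_sketch(struct aq_hw_s *aq_hw, u8 loc,
+ u32 dst_ip, u32 cmd)
+{
+        hw_atl_rpfl3l4_cmd_clear(aq_hw, loc);
+        hw_atl_rpfl3l4_ipv4_dest_addr_set(aq_hw, loc, dst_ip);
+        hw_atl_rpfl3l4_cmd_set(aq_hw, loc, cmd);
+}
+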
+/* set Global MDIO Interface 1 */
+void hw_atl_glb_mdio_iface1_set(struct aq_hw_s *hw, u32 value);
+
+/* get Global MDIO Interface 1 */
+u32 hw_atl_glb_mdio_iface1_get(struct aq_hw_s *hw);
+
+/* set Global MDIO Interface 2 */
+void hw_atl_glb_mdio_iface2_set(struct aq_hw_s *hw, u32 value);
+
+/* get Global MDIO Interface 2 */
+u32 hw_atl_glb_mdio_iface2_get(struct aq_hw_s *hw);
+
+/* set Global MDIO Interface 3 */
+void hw_atl_glb_mdio_iface3_set(struct aq_hw_s *hw, u32 value);
+
+/* get Global MDIO Interface 3 */
+u32 hw_atl_glb_mdio_iface3_get(struct aq_hw_s *hw);
+
+/* set Global MDIO Interface 4 */
+void hw_atl_glb_mdio_iface4_set(struct aq_hw_s *hw, u32 value);
+
+/* get Global MDIO Interface 4 */
+u32 hw_atl_glb_mdio_iface4_get(struct aq_hw_s *hw);
+
+/* set Global MDIO Interface 5 */
+void hw_atl_glb_mdio_iface5_set(struct aq_hw_s *hw, u32 value);
+
+/* get Global MDIO Interface 5 */
+u32 hw_atl_glb_mdio_iface5_get(struct aq_hw_s *hw);
+
+u32 hw_atl_mdio_busy_get(struct aq_hw_s *aq_hw);
+
+/* get global microprocessor ram semaphore */
+u32 hw_atl_sem_ram_get(struct aq_hw_s *self);
+
+/* get global microprocessor mdio semaphore */
+u32 hw_atl_sem_mdio_get(struct aq_hw_s *self);
+
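+/* Editor's sketch (not part of this patch): firmware shares the MDIO
+ * block with the driver, so accesses are bracketed by the CPU
+ * semaphore plus a busy poll. The acquire convention assumed here
+ * (the semaphore read returns 1 on successful acquisition) and the
+ * timeouts are illustrative; udelay() is from <linux/delay.h>.
+ */
+static inline int mdio_acquire_sketch(struct aq_hw_s *aq_hw)
+{
+        int retries = 1000;
+
+        while (hw_atl_sem_mdio_get(aq_hw) != 1U && --retries)
+                udelay(10);
+        if (!retries)
+                return -ETIME;
+
+        retries = 1000;
+        while (hw_atl_mdio_busy_get(aq_hw) && --retries)
+                udelay(10);
+
+        return retries ? 0 : -ETIME;
+}
+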
+u32 hw_atl_sem_reset1_get(struct aq_hw_s *self);
+u32 hw_atl_sem_reset2_get(struct aq_hw_s *self);
+
+/* get global microprocessor scratch pad register */
+u32 hw_atl_scrpad_get(struct aq_hw_s *aq_hw, u32 scratch_scp);
+
+/* get global microprocessor scratch pad 12 register */
+u32 hw_atl_scrpad12_get(struct aq_hw_s *self);
+
+/* get global microprocessor scratch pad 25 register */
+u32 hw_atl_scrpad25_get(struct aq_hw_s *self);
+
+#endif /* HW_ATL_LLH_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
new file mode 100644
index 000000000..4a6467031
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -0,0 +1,2878 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
+ */
+
+/* File hw_atl_llh_internal.h: Preprocessor definitions
+ * for Atlantic registers.
+ */
+
+#ifndef HW_ATL_LLH_INTERNAL_H
+#define HW_ATL_LLH_INTERNAL_H
+
+/* COM Temperature Sense Reset Bitfield Definitions */
+#define HW_ATL_TS_RESET_ADR 0x00003100
+#define HW_ATL_TS_RESET_MSK 0x00000004
+#define HW_ATL_TS_RESET_SHIFT 2
+#define HW_ATL_TS_RESET_WIDTH 1
+
+/* COM Temperature Sense Power Down Bitfield Definitions */
+#define HW_ATL_TS_POWER_DOWN_ADR 0x00003100
+#define HW_ATL_TS_POWER_DOWN_MSK 0x00000001
+#define HW_ATL_TS_POWER_DOWN_SHIFT 0
+#define HW_ATL_TS_POWER_DOWN_WIDTH 1
+
+/* COM Temperature Sense Ready Bitfield Definitions */
+#define HW_ATL_TS_READY_ADR 0x00003120
+#define HW_ATL_TS_READY_MSK 0x80000000
+#define HW_ATL_TS_READY_SHIFT 31
+#define HW_ATL_TS_READY_WIDTH 1
+
+/* COM Temperature Sense Ready Latch High Bitfield Definitions */
+#define HW_ATL_TS_READY_LATCH_HIGH_ADR 0x00003120
+#define HW_ATL_TS_READY_LATCH_HIGH_MSK 0x40000000
+#define HW_ATL_TS_READY_LATCH_HIGH_SHIFT 30
+#define HW_ATL_TS_READY_LATCH_HIGH_WIDTH 1
+
+/* COM Temperature Sense Data Out [B:0] Bitfield Definitions */
+#define HW_ATL_TS_DATA_OUT_ADR 0x00003120
+#define HW_ATL_TS_DATA_OUT_MSK 0x00000FFF
+#define HW_ATL_TS_DATA_OUT_SHIFT 0
+#define HW_ATL_TS_DATA_OUT_WIDTH 12
+
+/* global microprocessor semaphore definitions
+ * base address: 0x000003a0
+ * parameter: semaphore {s} | stride size 0x4 | range [0, 15]
+ */
+#define HW_ATL_GLB_CPU_SEM_ADR(semaphore) (0x000003a0u + (semaphore) * 0x4)
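+/* Editor's note: each parameterized macro in this header is plain
+ * base + index * stride arithmetic; for example,
+ * HW_ATL_GLB_CPU_SEM_ADR(3) == 0x000003a0 + 3 * 0x4 == 0x000003ac.
+ */
+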
+/* register address for bitfield rx dma good octet counter lsw [1f:0] */
+#define HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERLSW 0x00006808
+/* register address for bitfield rx dma good packet counter lsw [1f:0] */
+#define HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERLSW 0x00006800
+/* register address for bitfield tx dma good octet counter lsw [1f:0] */
+#define HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERLSW 0x00008808
+/* register address for bitfield tx dma good packet counter lsw [1f:0] */
+#define HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERLSW 0x00008800
+
+/* register address for bitfield rx dma good octet counter msw [3f:20] */
+#define HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERMSW 0x0000680c
+/* register address for bitfield rx dma good packet counter msw [3f:20] */
+#define HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERMSW 0x00006804
+/* register address for bitfield tx dma good octet counter msw [3f:20] */
+#define HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERMSW 0x0000880c
+/* register address for bitfield tx dma good packet counter msw [3f:20] */
+#define HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERMSW 0x00008804
+
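+/* Editor's sketch (not part of this patch): each DMA good-counter is
+ * a 32-bit LSW ([1f:0]) / MSW ([3f:20], i.e. bits 63:32) register
+ * pair; a 64-bit value is recovered by combining the two reads.
+ * aq_hw_read_reg() is the driver's 32-bit MMIO read helper from
+ * aq_hw_utils.h. A rollover-safe reader would re-read the MSW and
+ * retry if it changed between the two accesses.
+ */
+static inline u64 rx_good_octets_sketch(struct aq_hw_s *aq_hw)
+{
+        u64 lsw = aq_hw_read_reg(aq_hw,
+                                 HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERLSW);
+        u64 msw = aq_hw_read_reg(aq_hw,
+                                 HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERMSW);
+
+        return (msw << 32) | lsw;
+}
+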
+/* preprocessor definitions for msm rx errors counter register */
+#define HW_ATL_MAC_MSM_RX_ERRS_CNT_ADR 0x00000120u
+
+/* preprocessor definitions for msm rx unicast frames counter register */
+#define HW_ATL_MAC_MSM_RX_UCST_FRM_CNT_ADR 0x000000e0u
+
+/* preprocessor definitions for msm rx multicast frames counter register */
+#define HW_ATL_MAC_MSM_RX_MCST_FRM_CNT_ADR 0x000000e8u
+
+/* preprocessor definitions for msm rx broadcast frames counter register */
+#define HW_ATL_MAC_MSM_RX_BCST_FRM_CNT_ADR 0x000000f0u
+
+/* preprocessor definitions for msm rx broadcast octets counter register 1 */
+#define HW_ATL_MAC_MSM_RX_BCST_OCTETS_COUNTER1_ADR 0x000001b0u
+
+/* preprocessor definitions for msm rx broadcast octets counter register 2 */
+#define HW_ATL_MAC_MSM_RX_BCST_OCTETS_COUNTER2_ADR 0x000001b4u
+
+/* preprocessor definitions for msm rx unicast octets counter register 0 */
+#define HW_ATL_MAC_MSM_RX_UCST_OCTETS_COUNTER0_ADR 0x000001b8u
+
+/* preprocessor definitions for msm tx unicast frames counter register */
+#define HW_ATL_MAC_MSM_TX_UCST_FRM_CNT_ADR 0x00000108u
+
+/* preprocessor definitions for msm tx multicast frames counter register */
+#define HW_ATL_MAC_MSM_TX_MCST_FRM_CNT_ADR 0x00000110u
+
+/* preprocessor definitions for global mif identification */
+#define HW_ATL_GLB_MIF_ID_ADR 0x0000001cu
+
+/* register address for bitfield iamr_lsw[1f:0] */
+#define HW_ATL_ITR_IAMRLSW_ADR 0x00002090
+/* register address for bitfield rx dma drop packet counter [1f:0] */
+#define HW_ATL_RPB_RX_DMA_DROP_PKT_CNT_ADR 0x00006818
+
+/* register address for bitfield imcr_lsw[1f:0] */
+#define HW_ATL_ITR_IMCRLSW_ADR 0x00002070
+/* register address for bitfield imsr_lsw[1f:0] */
+#define HW_ATL_ITR_IMSRLSW_ADR 0x00002060
+/* register address for bitfield itr_reg_res_dsbl */
+#define HW_ATL_ITR_REG_RES_DSBL_ADR 0x00002300
+/* bitmask for bitfield itr_reg_res_dsbl */
+#define HW_ATL_ITR_REG_RES_DSBL_MSK 0x20000000
+/* lower bit position of bitfield itr_reg_res_dsbl */
+#define HW_ATL_ITR_REG_RES_DSBL_SHIFT 29
+/* register address for bitfield iscr_lsw[1f:0] */
+#define HW_ATL_ITR_ISCRLSW_ADR 0x00002050
+/* register address for bitfield isr_lsw[1f:0] */
+#define HW_ATL_ITR_ISRLSW_ADR 0x00002000
+/* register address for bitfield itr_reset */
+#define HW_ATL_ITR_RES_ADR 0x00002300
+/* bitmask for bitfield itr_reset */
+#define HW_ATL_ITR_RES_MSK 0x80000000
+/* lower bit position of bitfield itr_reset */
+#define HW_ATL_ITR_RES_SHIFT 31
+
+/* register address for bitfield rsc_en */
+#define HW_ATL_ITR_RSC_EN_ADR 0x00002200
+
+/* register address for bitfield rsc_delay */
+#define HW_ATL_ITR_RSC_DELAY_ADR 0x00002204
+/* bitmask for bitfield rsc_delay */
+#define HW_ATL_ITR_RSC_DELAY_MSK 0x0000000f
+/* width of bitfield rsc_delay */
+#define HW_ATL_ITR_RSC_DELAY_WIDTH 4
+/* lower bit position of bitfield rsc_delay */
+#define HW_ATL_ITR_RSC_DELAY_SHIFT 0
+
+/* register address for bitfield dca{d}_cpuid[7:0] */
+#define HW_ATL_RDM_DCADCPUID_ADR(dca) (0x00006100 + (dca) * 0x4)
+/* bitmask for bitfield dca{d}_cpuid[7:0] */
+#define HW_ATL_RDM_DCADCPUID_MSK 0x000000ff
+/* lower bit position of bitfield dca{d}_cpuid[7:0] */
+#define HW_ATL_RDM_DCADCPUID_SHIFT 0
+
+/* rx dca_en bitfield definitions
+ * preprocessor definitions for the bitfield "dca_en".
+ * port="pif_rdm_dca_en_i"
+ */
+
+/* register address for bitfield dca_en */
+#define HW_ATL_RDM_DCA_EN_ADR 0x00006180
+/* bitmask for bitfield dca_en */
+#define HW_ATL_RDM_DCA_EN_MSK 0x80000000
+/* inverted bitmask for bitfield dca_en */
+#define HW_ATL_RDM_DCA_EN_MSKN 0x7fffffff
+/* lower bit position of bitfield dca_en */
+#define HW_ATL_RDM_DCA_EN_SHIFT 31
+/* width of bitfield dca_en */
+#define HW_ATL_RDM_DCA_EN_WIDTH 1
+/* default value of bitfield dca_en */
+#define HW_ATL_RDM_DCA_EN_DEFAULT 0x1
+
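+/* Editor's sketch (not part of this patch): every ADR/MSK/SHIFT
+ * triple in this header feeds the driver's generic read-modify-write
+ * helper aq_hw_write_reg_bit() from aq_hw_utils.h, while the MSKN
+ * entries are pre-inverted masks for open-coded
+ * "(reg & MSKN) | (val << SHIFT)" updates. Enabling RX DCA, for
+ * instance, reduces to a single call:
+ */
+static inline void rdm_dca_en_sketch(struct aq_hw_s *aq_hw, u32 en)
+{
+        aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCA_EN_ADR,
+                            HW_ATL_RDM_DCA_EN_MSK,
+                            HW_ATL_RDM_DCA_EN_SHIFT, en);
+}
+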
+/* rx dca_mode[3:0] bitfield definitions
+ * preprocessor definitions for the bitfield "dca_mode[3:0]".
+ * port="pif_rdm_dca_mode_i[3:0]"
+ */
+
+/* register address for bitfield dca_mode[3:0] */
+#define HW_ATL_RDM_DCA_MODE_ADR 0x00006180
+/* bitmask for bitfield dca_mode[3:0] */
+#define HW_ATL_RDM_DCA_MODE_MSK 0x0000000f
+/* inverted bitmask for bitfield dca_mode[3:0] */
+#define HW_ATL_RDM_DCA_MODE_MSKN 0xfffffff0
+/* lower bit position of bitfield dca_mode[3:0] */
+#define HW_ATL_RDM_DCA_MODE_SHIFT 0
+/* width of bitfield dca_mode[3:0] */
+#define HW_ATL_RDM_DCA_MODE_WIDTH 4
+/* default value of bitfield dca_mode[3:0] */
+#define HW_ATL_RDM_DCA_MODE_DEFAULT 0x0
+
+/* rx desc{d}_data_size[4:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_data_size[4:0]".
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ * port="pif_rdm_desc0_data_size_i[4:0]"
+ */
+
+/* register address for bitfield desc{d}_data_size[4:0] */
+#define HW_ATL_RDM_DESCDDATA_SIZE_ADR(descriptor) \
+ (0x00005b18 + (descriptor) * 0x20)
+/* bitmask for bitfield desc{d}_data_size[4:0] */
+#define HW_ATL_RDM_DESCDDATA_SIZE_MSK 0x0000001f
+/* inverted bitmask for bitfield desc{d}_data_size[4:0] */
+#define HW_ATL_RDM_DESCDDATA_SIZE_MSKN 0xffffffe0
+/* lower bit position of bitfield desc{d}_data_size[4:0] */
+#define HW_ATL_RDM_DESCDDATA_SIZE_SHIFT 0
+/* width of bitfield desc{d}_data_size[4:0] */
+#define HW_ATL_RDM_DESCDDATA_SIZE_WIDTH 5
+/* default value of bitfield desc{d}_data_size[4:0] */
+#define HW_ATL_RDM_DESCDDATA_SIZE_DEFAULT 0x0
+
+/* rx dca{d}_desc_en bitfield definitions
+ * preprocessor definitions for the bitfield "dca{d}_desc_en".
+ * parameter: dca {d} | stride size 0x4 | range [0, 31]
+ * port="pif_rdm_dca_desc_en_i[0]"
+ */
+
+/* register address for bitfield dca{d}_desc_en */
+#define HW_ATL_RDM_DCADDESC_EN_ADR(dca) (0x00006100 + (dca) * 0x4)
+/* bitmask for bitfield dca{d}_desc_en */
+#define HW_ATL_RDM_DCADDESC_EN_MSK 0x80000000
+/* inverted bitmask for bitfield dca{d}_desc_en */
+#define HW_ATL_RDM_DCADDESC_EN_MSKN 0x7fffffff
+/* lower bit position of bitfield dca{d}_desc_en */
+#define HW_ATL_RDM_DCADDESC_EN_SHIFT 31
+/* width of bitfield dca{d}_desc_en */
+#define HW_ATL_RDM_DCADDESC_EN_WIDTH 1
+/* default value of bitfield dca{d}_desc_en */
+#define HW_ATL_RDM_DCADDESC_EN_DEFAULT 0x0
+
+/* rx desc{d}_en bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_en".
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ * port="pif_rdm_desc_en_i[0]"
+ */
+
+/* register address for bitfield desc{d}_en */
+#define HW_ATL_RDM_DESCDEN_ADR(descriptor) (0x00005b08 + (descriptor) * 0x20)
+/* bitmask for bitfield desc{d}_en */
+#define HW_ATL_RDM_DESCDEN_MSK 0x80000000
+/* inverted bitmask for bitfield desc{d}_en */
+#define HW_ATL_RDM_DESCDEN_MSKN 0x7fffffff
+/* lower bit position of bitfield desc{d}_en */
+#define HW_ATL_RDM_DESCDEN_SHIFT 31
+/* width of bitfield desc{d}_en */
+#define HW_ATL_RDM_DESCDEN_WIDTH 1
+/* default value of bitfield desc{d}_en */
+#define HW_ATL_RDM_DESCDEN_DEFAULT 0x0
+
+/* rx desc{d}_hdr_size[4:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_hdr_size[4:0]".
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ * port="pif_rdm_desc0_hdr_size_i[4:0]"
+ */
+
+/* register address for bitfield desc{d}_hdr_size[4:0] */
+#define HW_ATL_RDM_DESCDHDR_SIZE_ADR(descriptor) \
+ (0x00005b18 + (descriptor) * 0x20)
+/* bitmask for bitfield desc{d}_hdr_size[4:0] */
+#define HW_ATL_RDM_DESCDHDR_SIZE_MSK 0x00001f00
+/* inverted bitmask for bitfield desc{d}_hdr_size[4:0] */
+#define HW_ATL_RDM_DESCDHDR_SIZE_MSKN 0xffffe0ff
+/* lower bit position of bitfield desc{d}_hdr_size[4:0] */
+#define HW_ATL_RDM_DESCDHDR_SIZE_SHIFT 8
+/* width of bitfield desc{d}_hdr_size[4:0] */
+#define HW_ATL_RDM_DESCDHDR_SIZE_WIDTH 5
+/* default value of bitfield desc{d}_hdr_size[4:0] */
+#define HW_ATL_RDM_DESCDHDR_SIZE_DEFAULT 0x0
+
+/* rx desc{d}_hdr_split bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_hdr_split".
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ * port="pif_rdm_desc_hdr_split_i[0]"
+ */
+
+/* register address for bitfield desc{d}_hdr_split */
+#define HW_ATL_RDM_DESCDHDR_SPLIT_ADR(descriptor) \
+ (0x00005b08 + (descriptor) * 0x20)
+/* bitmask for bitfield desc{d}_hdr_split */
+#define HW_ATL_RDM_DESCDHDR_SPLIT_MSK 0x10000000
+/* inverted bitmask for bitfield desc{d}_hdr_split */
+#define HW_ATL_RDM_DESCDHDR_SPLIT_MSKN 0xefffffff
+/* lower bit position of bitfield desc{d}_hdr_split */
+#define HW_ATL_RDM_DESCDHDR_SPLIT_SHIFT 28
+/* width of bitfield desc{d}_hdr_split */
+#define HW_ATL_RDM_DESCDHDR_SPLIT_WIDTH 1
+/* default value of bitfield desc{d}_hdr_split */
+#define HW_ATL_RDM_DESCDHDR_SPLIT_DEFAULT 0x0
+
+/* rx desc{d}_hd[c:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_hd[c:0]".
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ * port="rdm_pif_desc0_hd_o[12:0]"
+ */
+
+/* register address for bitfield desc{d}_hd[c:0] */
+#define HW_ATL_RDM_DESCDHD_ADR(descriptor) (0x00005b0c + (descriptor) * 0x20)
+/* bitmask for bitfield desc{d}_hd[c:0] */
+#define HW_ATL_RDM_DESCDHD_MSK 0x00001fff
+/* inverted bitmask for bitfield desc{d}_hd[c:0] */
+#define HW_ATL_RDM_DESCDHD_MSKN 0xffffe000
+/* lower bit position of bitfield desc{d}_hd[c:0] */
+#define HW_ATL_RDM_DESCDHD_SHIFT 0
+/* width of bitfield desc{d}_hd[c:0] */
+#define HW_ATL_RDM_DESCDHD_WIDTH 13
+
+/* rx desc{d}_len[9:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_len[9:0]".
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ * port="pif_rdm_desc0_len_i[9:0]"
+ */
+
+/* register address for bitfield desc{d}_len[9:0] */
+#define HW_ATL_RDM_DESCDLEN_ADR(descriptor) (0x00005b08 + (descriptor) * 0x20)
+/* bitmask for bitfield desc{d}_len[9:0] */
+#define HW_ATL_RDM_DESCDLEN_MSK 0x00001ff8
+/* inverted bitmask for bitfield desc{d}_len[9:0] */
+#define HW_ATL_RDM_DESCDLEN_MSKN 0xffffe007
+/* lower bit position of bitfield desc{d}_len[9:0] */
+#define HW_ATL_RDM_DESCDLEN_SHIFT 3
+/* width of bitfield desc{d}_len[9:0] */
+#define HW_ATL_RDM_DESCDLEN_WIDTH 10
+/* default value of bitfield desc{d}_len[9:0] */
+#define HW_ATL_RDM_DESCDLEN_DEFAULT 0x0
+
+/* rx desc{d}_reset bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_reset".
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ * port="pif_rdm_q_pf_res_i[0]"
+ */
+
+/* register address for bitfield desc{d}_reset */
+#define HW_ATL_RDM_DESCDRESET_ADR(descriptor) (0x00005b08 + (descriptor) * 0x20)
+/* bitmask for bitfield desc{d}_reset */
+#define HW_ATL_RDM_DESCDRESET_MSK 0x02000000
+/* inverted bitmask for bitfield desc{d}_reset */
+#define HW_ATL_RDM_DESCDRESET_MSKN 0xfdffffff
+/* lower bit position of bitfield desc{d}_reset */
+#define HW_ATL_RDM_DESCDRESET_SHIFT 25
+/* width of bitfield desc{d}_reset */
+#define HW_ATL_RDM_DESCDRESET_WIDTH 1
+/* default value of bitfield desc{d}_reset */
+#define HW_ATL_RDM_DESCDRESET_DEFAULT 0x0
+
+/* rdm_desc_init_i bitfield definitions
+ * preprocessor definitions for the bitfield rdm_desc_init_i.
+ * port="pif_rdm_desc_init_i"
+ */
+
+/* register address for bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR 0x00005a00
+/* bitmask for bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK 0xffffffff
+/* inverted bitmask for bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSKN 0x00000000
+/* lower bit position of bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT 0
+/* width of bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_WIDTH 32
+/* default value of bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_DEFAULT 0x0
+
+/* rdm_desc_init_done_i bitfield definitions
+ * preprocessor definitions for the bitfield rdm_desc_init_done_i.
+ * port="pif_rdm_desc_init_done_i"
+ */
+
+/* register address for bitfield rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_ADR 0x00005a10
+/* bitmask for bitfield rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_MSK 0x00000001U
+/* inverted bitmask for bitfield rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_MSKN 0xfffffffe
+/* lower bit position of bitfield rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_SHIFT 0U
+/* width of bitfield rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_WIDTH 1
+/* default value of bitfield rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_DEFAULT 0x0
+
+/* rx int_desc_wrb_en bitfield definitions
+ * preprocessor definitions for the bitfield "int_desc_wrb_en".
+ * port="pif_rdm_int_desc_wrb_en_i"
+ */
+
+/* register address for bitfield int_desc_wrb_en */
+#define HW_ATL_RDM_INT_DESC_WRB_EN_ADR 0x00005a30
+/* bitmask for bitfield int_desc_wrb_en */
+#define HW_ATL_RDM_INT_DESC_WRB_EN_MSK 0x00000004
+/* inverted bitmask for bitfield int_desc_wrb_en */
+#define HW_ATL_RDM_INT_DESC_WRB_EN_MSKN 0xfffffffb
+/* lower bit position of bitfield int_desc_wrb_en */
+#define HW_ATL_RDM_INT_DESC_WRB_EN_SHIFT 2
+/* width of bitfield int_desc_wrb_en */
+#define HW_ATL_RDM_INT_DESC_WRB_EN_WIDTH 1
+/* default value of bitfield int_desc_wrb_en */
+#define HW_ATL_RDM_INT_DESC_WRB_EN_DEFAULT 0x0
+
+/* rx dca{d}_hdr_en bitfield definitions
+ * preprocessor definitions for the bitfield "dca{d}_hdr_en".
+ * parameter: dca {d} | stride size 0x4 | range [0, 31]
+ * port="pif_rdm_dca_hdr_en_i[0]"
+ */
+
+/* register address for bitfield dca{d}_hdr_en */
+#define HW_ATL_RDM_DCADHDR_EN_ADR(dca) (0x00006100 + (dca) * 0x4)
+/* bitmask for bitfield dca{d}_hdr_en */
+#define HW_ATL_RDM_DCADHDR_EN_MSK 0x40000000
+/* inverted bitmask for bitfield dca{d}_hdr_en */
+#define HW_ATL_RDM_DCADHDR_EN_MSKN 0xbfffffff
+/* lower bit position of bitfield dca{d}_hdr_en */
+#define HW_ATL_RDM_DCADHDR_EN_SHIFT 30
+/* width of bitfield dca{d}_hdr_en */
+#define HW_ATL_RDM_DCADHDR_EN_WIDTH 1
+/* default value of bitfield dca{d}_hdr_en */
+#define HW_ATL_RDM_DCADHDR_EN_DEFAULT 0x0
+
+/* rx dca{d}_pay_en bitfield definitions
+ * preprocessor definitions for the bitfield "dca{d}_pay_en".
+ * parameter: dca {d} | stride size 0x4 | range [0, 31]
+ * port="pif_rdm_dca_pay_en_i[0]"
+ */
+
+/* register address for bitfield dca{d}_pay_en */
+#define HW_ATL_RDM_DCADPAY_EN_ADR(dca) (0x00006100 + (dca) * 0x4)
+/* bitmask for bitfield dca{d}_pay_en */
+#define HW_ATL_RDM_DCADPAY_EN_MSK 0x20000000
+/* inverted bitmask for bitfield dca{d}_pay_en */
+#define HW_ATL_RDM_DCADPAY_EN_MSKN 0xdfffffff
+/* lower bit position of bitfield dca{d}_pay_en */
+#define HW_ATL_RDM_DCADPAY_EN_SHIFT 29
+/* width of bitfield dca{d}_pay_en */
+#define HW_ATL_RDM_DCADPAY_EN_WIDTH 1
+/* default value of bitfield dca{d}_pay_en */
+#define HW_ATL_RDM_DCADPAY_EN_DEFAULT 0x0
+
+/* RX rdm_int_rim_en Bitfield Definitions
+ * Preprocessor definitions for the bitfield "rdm_int_rim_en".
+ * PORT="pif_rdm_int_rim_en_i"
+ */
+
+/* Register address for bitfield rdm_int_rim_en */
+#define HW_ATL_RDM_INT_RIM_EN_ADR 0x00005A30
+/* Bitmask for bitfield rdm_int_rim_en */
+#define HW_ATL_RDM_INT_RIM_EN_MSK 0x00000008
+/* Inverted bitmask for bitfield rdm_int_rim_en */
+#define HW_ATL_RDM_INT_RIM_EN_MSKN 0xFFFFFFF7
+/* Lower bit position of bitfield rdm_int_rim_en */
+#define HW_ATL_RDM_INT_RIM_EN_SHIFT 3
+/* Width of bitfield rdm_int_rim_en */
+#define HW_ATL_RDM_INT_RIM_EN_WIDTH 1
+/* Default value of bitfield rdm_int_rim_en */
+#define HW_ATL_RDM_INT_RIM_EN_DEFAULT 0x0
+
+/* general interrupt mapping register definitions
+ * preprocessor definitions for general interrupt mapping register
+ * base address: 0x00002180
+ * parameter: regidx {f} | stride size 0x4 | range [0, 3]
+ */
+#define HW_ATL_GEN_INTR_MAP_ADR(regidx) (0x00002180u + (regidx) * 0x4)
+
+/* general interrupt status register definitions
+ * preprocessor definitions for general interrupt status register
+ * address: 0x000021A4
+ */
+
+#define HW_ATL_GEN_INTR_STAT_ADR 0x000021A4U
+
+/* interrupt global control register definitions
+ * preprocessor definitions for interrupt global control register
+ * address: 0x00002300
+ */
+#define HW_ATL_INTR_GLB_CTL_ADR 0x00002300u
+
+/* interrupt throttle register definitions
+ * preprocessor definitions for interrupt throttle register
+ * base address: 0x00002800
+ * parameter: throttle {t} | stride size 0x4 | range [0, 31]
+ */
+#define HW_ATL_INTR_THR_ADR(throttle) (0x00002800u + (throttle) * 0x4)
+
+/* rx dma descriptor base address lsw definitions
+ * preprocessor definitions for rx dma descriptor base address lsw
+ * base address: 0x00005b00
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ */
+#define HW_ATL_RX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor) \
+ (0x00005b00u + (descriptor) * 0x20)
+
+/* rx dma descriptor base address msw definitions
+ * preprocessor definitions for rx dma descriptor base address msw
+ * base address: 0x00005b04
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ */
+#define HW_ATL_RX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor) \
+ (0x00005b04u + (descriptor) * 0x20)
+
+/* rx dma descriptor status register definitions
+ * preprocessor definitions for rx dma descriptor status register
+ * base address: 0x00005b14
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ */
+#define HW_ATL_RX_DMA_DESC_STAT_ADR(descriptor) \
+ (0x00005b14u + (descriptor) * 0x20)
+
+/* rx dma descriptor tail pointer register definitions
+ * preprocessor definitions for rx dma descriptor tail pointer register
+ * base address: 0x00005b10
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ */
+#define HW_ATL_RX_DMA_DESC_TAIL_PTR_ADR(descriptor) \
+ (0x00005b10u + (descriptor) * 0x20)
+
+/* rx interrupt moderation control register definitions
+ * Preprocessor definitions for RX Interrupt Moderation Control Register
+ * Base Address: 0x00005A40
+ * Parameter: RIM {R} | stride size 0x4 | range [0, 31]
+ */
+#define HW_ATL_RX_INTR_MODERATION_CTL_ADR(rim) (0x00005A40u + (rim) * 0x4)
+
+/* rx filter multicast filter mask register definitions
+ * preprocessor definitions for rx filter multicast filter mask register
+ * address: 0x00005270
+ */
+#define HW_ATL_RX_FLR_MCST_FLR_MSK_ADR 0x00005270u
+
+/* rx filter multicast filter register definitions
+ * preprocessor definitions for rx filter multicast filter register
+ * base address: 0x00005250
+ * parameter: filter {f} | stride size 0x4 | range [0, 7]
+ */
+#define HW_ATL_RX_FLR_MCST_FLR_ADR(filter) (0x00005250u + (filter) * 0x4)
+
+/* RX Filter RSS Control Register 1 Definitions
+ * Preprocessor definitions for RX Filter RSS Control Register 1
+ * Address: 0x000054C0
+ */
+#define HW_ATL_RX_FLR_RSS_CONTROL1_ADR 0x000054C0u
+
+/* RX Filter Control Register 2 Definitions
+ * Preprocessor definitions for RX Filter Control Register 2
+ * Address: 0x00005104
+ */
+#define HW_ATL_RX_FLR_CONTROL2_ADR 0x00005104u
+
+/* tx tx dma debug control [1f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "tx dma debug control [1f:0]".
+ * port="pif_tdm_debug_cntl_i[31:0]"
+ */
+
+/* register address for bitfield tx dma debug control [1f:0] */
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_ADR 0x00008920
+/* bitmask for bitfield tx dma debug control [1f:0] */
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_MSK 0xffffffff
+/* inverted bitmask for bitfield tx dma debug control [1f:0] */
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_MSKN 0x00000000
+/* lower bit position of bitfield tx dma debug control [1f:0] */
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_SHIFT 0
+/* width of bitfield tx dma debug control [1f:0] */
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_WIDTH 32
+/* default value of bitfield tx dma debug control [1f:0] */
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_DEFAULT 0x0
+
+/* tx dma descriptor base address lsw definitions
+ * preprocessor definitions for tx dma descriptor base address lsw
+ * base address: 0x00007c00
+ * parameter: descriptor {d} | stride size 0x40 | range [0, 31]
+ */
+#define HW_ATL_TX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor) \
+ (0x00007c00u + (descriptor) * 0x40)
+
+/* tx dma descriptor tail pointer register definitions
+ * preprocessor definitions for tx dma descriptor tail pointer register
+ * base address: 0x00007c10
+ * parameter: descriptor {d} | stride size 0x40 | range [0, 31]
+ */
+#define HW_ATL_TX_DMA_DESC_TAIL_PTR_ADR(descriptor) \
+ (0x00007c10u + (descriptor) * 0x40)
+
+/* rx dma_sys_loopback bitfield definitions
+ * preprocessor definitions for the bitfield "dma_sys_loopback".
+ * port="pif_rpb_dma_sys_lbk_i"
+ */
+
+/* register address for bitfield dma_sys_loopback */
+#define HW_ATL_RPB_DMA_SYS_LBK_ADR 0x00005000
+/* bitmask for bitfield dma_sys_loopback */
+#define HW_ATL_RPB_DMA_SYS_LBK_MSK 0x00000040
+/* inverted bitmask for bitfield dma_sys_loopback */
+#define HW_ATL_RPB_DMA_SYS_LBK_MSKN 0xffffffbf
+/* lower bit position of bitfield dma_sys_loopback */
+#define HW_ATL_RPB_DMA_SYS_LBK_SHIFT 6
+/* width of bitfield dma_sys_loopback */
+#define HW_ATL_RPB_DMA_SYS_LBK_WIDTH 1
+/* default value of bitfield dma_sys_loopback */
+#define HW_ATL_RPB_DMA_SYS_LBK_DEFAULT 0x0
+
+/* rx dma_net_loopback bitfield definitions
+ * preprocessor definitions for the bitfield "dma_net_loopback".
+ * port="pif_rpb_dma_net_lbk_i"
+ */
+
+/* register address for bitfield dma_net_loopback */
+#define HW_ATL_RPB_DMA_NET_LBK_ADR 0x00005000
+/* bitmask for bitfield dma_net_loopback */
+#define HW_ATL_RPB_DMA_NET_LBK_MSK 0x00000010
+/* inverted bitmask for bitfield dma_net_loopback */
+#define HW_ATL_RPB_DMA_NET_LBK_MSKN 0xffffffef
+/* lower bit position of bitfield dma_net_loopback */
+#define HW_ATL_RPB_DMA_NET_LBK_SHIFT 4
+/* width of bitfield dma_net_loopback */
+#define HW_ATL_RPB_DMA_NET_LBK_WIDTH 1
+/* default value of bitfield dma_net_loopback */
+#define HW_ATL_RPB_DMA_NET_LBK_DEFAULT 0x0
+
+/* rx rx_tc_mode bitfield definitions
+ * preprocessor definitions for the bitfield "rx_tc_mode".
+ * port="pif_rpb_rx_tc_mode_i,pif_rpf_rx_tc_mode_i"
+ */
+
+/* register address for bitfield rx_tc_mode */
+#define HW_ATL_RPB_RPF_RX_TC_MODE_ADR 0x00005700
+/* bitmask for bitfield rx_tc_mode */
+#define HW_ATL_RPB_RPF_RX_TC_MODE_MSK 0x00000100
+/* inverted bitmask for bitfield rx_tc_mode */
+#define HW_ATL_RPB_RPF_RX_TC_MODE_MSKN 0xfffffeff
+/* lower bit position of bitfield rx_tc_mode */
+#define HW_ATL_RPB_RPF_RX_TC_MODE_SHIFT 8
+/* width of bitfield rx_tc_mode */
+#define HW_ATL_RPB_RPF_RX_TC_MODE_WIDTH 1
+/* default value of bitfield rx_tc_mode */
+#define HW_ATL_RPB_RPF_RX_TC_MODE_DEFAULT 0x0
+
+/* rx rx_buf_en bitfield definitions
+ * preprocessor definitions for the bitfield "rx_buf_en".
+ * port="pif_rpb_rx_buf_en_i"
+ */
+
+/* register address for bitfield rx_buf_en */
+#define HW_ATL_RPB_RX_BUF_EN_ADR 0x00005700
+/* bitmask for bitfield rx_buf_en */
+#define HW_ATL_RPB_RX_BUF_EN_MSK 0x00000001
+/* inverted bitmask for bitfield rx_buf_en */
+#define HW_ATL_RPB_RX_BUF_EN_MSKN 0xfffffffe
+/* lower bit position of bitfield rx_buf_en */
+#define HW_ATL_RPB_RX_BUF_EN_SHIFT 0
+/* width of bitfield rx_buf_en */
+#define HW_ATL_RPB_RX_BUF_EN_WIDTH 1
+/* default value of bitfield rx_buf_en */
+#define HW_ATL_RPB_RX_BUF_EN_DEFAULT 0x0
+
+/* rx rx{b}_hi_thresh[d:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rx{b}_hi_thresh[d:0]".
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7]
+ * port="pif_rpb_rx0_hi_thresh_i[13:0]"
+ */
+
+/* register address for bitfield rx{b}_hi_thresh[d:0] */
+#define HW_ATL_RPB_RXBHI_THRESH_ADR(buffer) (0x00005714 + (buffer) * 0x10)
+/* bitmask for bitfield rx{b}_hi_thresh[d:0] */
+#define HW_ATL_RPB_RXBHI_THRESH_MSK 0x3fff0000
+/* inverted bitmask for bitfield rx{b}_hi_thresh[d:0] */
+#define HW_ATL_RPB_RXBHI_THRESH_MSKN 0xc000ffff
+/* lower bit position of bitfield rx{b}_hi_thresh[d:0] */
+#define HW_ATL_RPB_RXBHI_THRESH_SHIFT 16
+/* width of bitfield rx{b}_hi_thresh[d:0] */
+#define HW_ATL_RPB_RXBHI_THRESH_WIDTH 14
+/* default value of bitfield rx{b}_hi_thresh[d:0] */
+#define HW_ATL_RPB_RXBHI_THRESH_DEFAULT 0x0
+
+/* rx rx{b}_lo_thresh[d:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rx{b}_lo_thresh[d:0]".
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7]
+ * port="pif_rpb_rx0_lo_thresh_i[13:0]"
+ */
+
+/* register address for bitfield rx{b}_lo_thresh[d:0] */
+#define HW_ATL_RPB_RXBLO_THRESH_ADR(buffer) (0x00005714 + (buffer) * 0x10)
+/* bitmask for bitfield rx{b}_lo_thresh[d:0] */
+#define HW_ATL_RPB_RXBLO_THRESH_MSK 0x00003fff
+/* inverted bitmask for bitfield rx{b}_lo_thresh[d:0] */
+#define HW_ATL_RPB_RXBLO_THRESH_MSKN 0xffffc000
+/* lower bit position of bitfield rx{b}_lo_thresh[d:0] */
+#define HW_ATL_RPB_RXBLO_THRESH_SHIFT 0
+/* width of bitfield rx{b}_lo_thresh[d:0] */
+#define HW_ATL_RPB_RXBLO_THRESH_WIDTH 14
+/* default value of bitfield rx{b}_lo_thresh[d:0] */
+#define HW_ATL_RPB_RXBLO_THRESH_DEFAULT 0x0
+
+/* rx rx_fc_mode[1:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rx_fc_mode[1:0]".
+ * port="pif_rpb_rx_fc_mode_i[1:0]"
+ */
+
+/* register address for bitfield rx_fc_mode[1:0] */
+#define HW_ATL_RPB_RX_FC_MODE_ADR 0x00005700
+/* bitmask for bitfield rx_fc_mode[1:0] */
+#define HW_ATL_RPB_RX_FC_MODE_MSK 0x00000030
+/* inverted bitmask for bitfield rx_fc_mode[1:0] */
+#define HW_ATL_RPB_RX_FC_MODE_MSKN 0xffffffcf
+/* lower bit position of bitfield rx_fc_mode[1:0] */
+#define HW_ATL_RPB_RX_FC_MODE_SHIFT 4
+/* width of bitfield rx_fc_mode[1:0] */
+#define HW_ATL_RPB_RX_FC_MODE_WIDTH 2
+/* default value of bitfield rx_fc_mode[1:0] */
+#define HW_ATL_RPB_RX_FC_MODE_DEFAULT 0x0
+
+/* rx rx{b}_buf_size[8:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rx{b}_buf_size[8:0]".
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7]
+ * port="pif_rpb_rx0_buf_size_i[8:0]"
+ */
+
+/* register address for bitfield rx{b}_buf_size[8:0] */
+#define HW_ATL_RPB_RXBBUF_SIZE_ADR(buffer) (0x00005710 + (buffer) * 0x10)
+/* bitmask for bitfield rx{b}_buf_size[8:0] */
+#define HW_ATL_RPB_RXBBUF_SIZE_MSK 0x000001ff
+/* inverted bitmask for bitfield rx{b}_buf_size[8:0] */
+#define HW_ATL_RPB_RXBBUF_SIZE_MSKN 0xfffffe00
+/* lower bit position of bitfield rx{b}_buf_size[8:0] */
+#define HW_ATL_RPB_RXBBUF_SIZE_SHIFT 0
+/* width of bitfield rx{b}_buf_size[8:0] */
+#define HW_ATL_RPB_RXBBUF_SIZE_WIDTH 9
+/* default value of bitfield rx{b}_buf_size[8:0] */
+#define HW_ATL_RPB_RXBBUF_SIZE_DEFAULT 0x0
+
+/* rx rx{b}_xoff_en bitfield definitions
+ * preprocessor definitions for the bitfield "rx{b}_xoff_en".
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7]
+ * port="pif_rpb_rx_xoff_en_i[0]"
+ */
+
+/* register address for bitfield rx{b}_xoff_en */
+#define HW_ATL_RPB_RXBXOFF_EN_ADR(buffer) (0x00005714 + (buffer) * 0x10)
+/* bitmask for bitfield rx{b}_xoff_en */
+#define HW_ATL_RPB_RXBXOFF_EN_MSK 0x80000000
+/* inverted bitmask for bitfield rx{b}_xoff_en */
+#define HW_ATL_RPB_RXBXOFF_EN_MSKN 0x7fffffff
+/* lower bit position of bitfield rx{b}_xoff_en */
+#define HW_ATL_RPB_RXBXOFF_EN_SHIFT 31
+/* width of bitfield rx{b}_xoff_en */
+#define HW_ATL_RPB_RXBXOFF_EN_WIDTH 1
+/* default value of bitfield rx{b}_xoff_en */
+#define HW_ATL_RPB_RXBXOFF_EN_DEFAULT 0x0
+
+/* rx l2_bc_thresh[f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "l2_bc_thresh[f:0]".
+ * port="pif_rpf_l2_bc_thresh_i[15:0]"
+ */
+
+/* register address for bitfield l2_bc_thresh[f:0] */
+#define HW_ATL_RPFL2BC_THRESH_ADR 0x00005100
+/* bitmask for bitfield l2_bc_thresh[f:0] */
+#define HW_ATL_RPFL2BC_THRESH_MSK 0xffff0000
+/* inverted bitmask for bitfield l2_bc_thresh[f:0] */
+#define HW_ATL_RPFL2BC_THRESH_MSKN 0x0000ffff
+/* lower bit position of bitfield l2_bc_thresh[f:0] */
+#define HW_ATL_RPFL2BC_THRESH_SHIFT 16
+/* width of bitfield l2_bc_thresh[f:0] */
+#define HW_ATL_RPFL2BC_THRESH_WIDTH 16
+/* default value of bitfield l2_bc_thresh[f:0] */
+#define HW_ATL_RPFL2BC_THRESH_DEFAULT 0x0
+
+/* rx l2_bc_en bitfield definitions
+ * preprocessor definitions for the bitfield "l2_bc_en".
+ * port="pif_rpf_l2_bc_en_i"
+ */
+
+/* register address for bitfield l2_bc_en */
+#define HW_ATL_RPFL2BC_EN_ADR 0x00005100
+/* bitmask for bitfield l2_bc_en */
+#define HW_ATL_RPFL2BC_EN_MSK 0x00000001
+/* inverted bitmask for bitfield l2_bc_en */
+#define HW_ATL_RPFL2BC_EN_MSKN 0xfffffffe
+/* lower bit position of bitfield l2_bc_en */
+#define HW_ATL_RPFL2BC_EN_SHIFT 0
+/* width of bitfield l2_bc_en */
+#define HW_ATL_RPFL2BC_EN_WIDTH 1
+/* default value of bitfield l2_bc_en */
+#define HW_ATL_RPFL2BC_EN_DEFAULT 0x0
+
+/* rx l2_bc_act[2:0] bitfield definitions
+ * preprocessor definitions for the bitfield "l2_bc_act[2:0]".
+ * port="pif_rpf_l2_bc_act_i[2:0]"
+ */
+
+/* register address for bitfield l2_bc_act[2:0] */
+#define HW_ATL_RPFL2BC_ACT_ADR 0x00005100
+/* bitmask for bitfield l2_bc_act[2:0] */
+#define HW_ATL_RPFL2BC_ACT_MSK 0x00007000
+/* inverted bitmask for bitfield l2_bc_act[2:0] */
+#define HW_ATL_RPFL2BC_ACT_MSKN 0xffff8fff
+/* lower bit position of bitfield l2_bc_act[2:0] */
+#define HW_ATL_RPFL2BC_ACT_SHIFT 12
+/* width of bitfield l2_bc_act[2:0] */
+#define HW_ATL_RPFL2BC_ACT_WIDTH 3
+/* default value of bitfield l2_bc_act[2:0] */
+#define HW_ATL_RPFL2BC_ACT_DEFAULT 0x0
+
+/* rx l2_mc_en{f} bitfield definitions
+ * preprocessor definitions for the bitfield "l2_mc_en{f}".
+ * parameter: filter {f} | stride size 0x4 | range [0, 7]
+ * port="pif_rpf_l2_mc_en_i[0]"
+ */
+
+/* register address for bitfield l2_mc_en{f} */
+#define HW_ATL_RPFL2MC_ENF_ADR(filter) (0x00005250 + (filter) * 0x4)
+/* bitmask for bitfield l2_mc_en{f} */
+#define HW_ATL_RPFL2MC_ENF_MSK 0x80000000
+/* inverted bitmask for bitfield l2_mc_en{f} */
+#define HW_ATL_RPFL2MC_ENF_MSKN 0x7fffffff
+/* lower bit position of bitfield l2_mc_en{f} */
+#define HW_ATL_RPFL2MC_ENF_SHIFT 31
+/* width of bitfield l2_mc_en{f} */
+#define HW_ATL_RPFL2MC_ENF_WIDTH 1
+/* default value of bitfield l2_mc_en{f} */
+#define HW_ATL_RPFL2MC_ENF_DEFAULT 0x0
+
+/* rx l2_promis_mode bitfield definitions
+ * preprocessor definitions for the bitfield "l2_promis_mode".
+ * port="pif_rpf_l2_promis_mode_i"
+ */
+
+/* register address for bitfield l2_promis_mode */
+#define HW_ATL_RPFL2PROMIS_MODE_ADR 0x00005100
+/* bitmask for bitfield l2_promis_mode */
+#define HW_ATL_RPFL2PROMIS_MODE_MSK 0x00000008
+/* inverted bitmask for bitfield l2_promis_mode */
+#define HW_ATL_RPFL2PROMIS_MODE_MSKN 0xfffffff7
+/* lower bit position of bitfield l2_promis_mode */
+#define HW_ATL_RPFL2PROMIS_MODE_SHIFT 3
+/* width of bitfield l2_promis_mode */
+#define HW_ATL_RPFL2PROMIS_MODE_WIDTH 1
+/* default value of bitfield l2_promis_mode */
+#define HW_ATL_RPFL2PROMIS_MODE_DEFAULT 0x0
+
+/* rx l2_uc_act{f}[2:0] bitfield definitions
+ * preprocessor definitions for the bitfield "l2_uc_act{f}[2:0]".
+ * parameter: filter {f} | stride size 0x8 | range [0, 37]
+ * port="pif_rpf_l2_uc_act0_i[2:0]"
+ */
+
+/* register address for bitfield l2_uc_act{f}[2:0] */
+#define HW_ATL_RPFL2UC_ACTF_ADR(filter) (0x00005114 + (filter) * 0x8)
+/* bitmask for bitfield l2_uc_act{f}[2:0] */
+#define HW_ATL_RPFL2UC_ACTF_MSK 0x00070000
+/* inverted bitmask for bitfield l2_uc_act{f}[2:0] */
+#define HW_ATL_RPFL2UC_ACTF_MSKN 0xfff8ffff
+/* lower bit position of bitfield l2_uc_act{f}[2:0] */
+#define HW_ATL_RPFL2UC_ACTF_SHIFT 16
+/* width of bitfield l2_uc_act{f}[2:0] */
+#define HW_ATL_RPFL2UC_ACTF_WIDTH 3
+/* default value of bitfield l2_uc_act{f}[2:0] */
+#define HW_ATL_RPFL2UC_ACTF_DEFAULT 0x0
+
+/* rx l2_uc_en{f} bitfield definitions
+ * preprocessor definitions for the bitfield "l2_uc_en{f}".
+ * parameter: filter {f} | stride size 0x8 | range [0, 37]
+ * port="pif_rpf_l2_uc_en_i[0]"
+ */
+
+/* register address for bitfield l2_uc_en{f} */
+#define HW_ATL_RPFL2UC_ENF_ADR(filter) (0x00005114 + (filter) * 0x8)
+/* bitmask for bitfield l2_uc_en{f} */
+#define HW_ATL_RPFL2UC_ENF_MSK 0x80000000
+/* inverted bitmask for bitfield l2_uc_en{f} */
+#define HW_ATL_RPFL2UC_ENF_MSKN 0x7fffffff
+/* lower bit position of bitfield l2_uc_en{f} */
+#define HW_ATL_RPFL2UC_ENF_SHIFT 31
+/* width of bitfield l2_uc_en{f} */
+#define HW_ATL_RPFL2UC_ENF_WIDTH 1
+/* default value of bitfield l2_uc_en{f} */
+#define HW_ATL_RPFL2UC_ENF_DEFAULT 0x0
+
+/* register address for bitfield l2_uc_da{f}_lsw[1f:0] */
+#define HW_ATL_RPFL2UC_DAFLSW_ADR(filter) (0x00005110 + (filter) * 0x8)
+/* register address for bitfield l2_uc_da{f}_msw[f:0] */
+#define HW_ATL_RPFL2UC_DAFMSW_ADR(filter) (0x00005114 + (filter) * 0x8)
+/* bitmask for bitfield l2_uc_da{f}_msw[f:0] */
+#define HW_ATL_RPFL2UC_DAFMSW_MSK 0x0000ffff
+/* lower bit position of bitfield l2_uc_da{f}_msw[f:0] */
+#define HW_ATL_RPFL2UC_DAFMSW_SHIFT 0
+
+/* rx l2_mc_accept_all bitfield definitions
+ * Preprocessor definitions for the bitfield "l2_mc_accept_all".
+ * PORT="pif_rpf_l2_mc_all_accept_i"
+ */
+
+/* Register address for bitfield l2_mc_accept_all */
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_ADR 0x00005270
+/* Bitmask for bitfield l2_mc_accept_all */
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_MSK 0x00004000
+/* Inverted bitmask for bitfield l2_mc_accept_all */
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_MSKN 0xFFFFBFFF
+/* Lower bit position of bitfield l2_mc_accept_all */
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_SHIFT 14
+/* Width of bitfield l2_mc_accept_all */
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_WIDTH 1
+/* Default value of bitfield l2_mc_accept_all */
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_DEFAULT 0x0
+
+/* width of bitfield rx_tc_up{t}[2:0] */
+#define HW_ATL_RPF_RPB_RX_TC_UPT_WIDTH 3
+/* default value of bitfield rx_tc_up{t}[2:0] */
+#define HW_ATL_RPF_RPB_RX_TC_UPT_DEFAULT 0x0
+
+/* rx rss_key_addr[4:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rss_key_addr[4:0]".
+ * port="pif_rpf_rss_key_addr_i[4:0]"
+ */
+
+/* register address for bitfield rss_key_addr[4:0] */
+#define HW_ATL_RPF_RSS_KEY_ADDR_ADR 0x000054d0
+/* bitmask for bitfield rss_key_addr[4:0] */
+#define HW_ATL_RPF_RSS_KEY_ADDR_MSK 0x0000001f
+/* inverted bitmask for bitfield rss_key_addr[4:0] */
+#define HW_ATL_RPF_RSS_KEY_ADDR_MSKN 0xffffffe0
+/* lower bit position of bitfield rss_key_addr[4:0] */
+#define HW_ATL_RPF_RSS_KEY_ADDR_SHIFT 0
+/* width of bitfield rss_key_addr[4:0] */
+#define HW_ATL_RPF_RSS_KEY_ADDR_WIDTH 5
+/* default value of bitfield rss_key_addr[4:0] */
+#define HW_ATL_RPF_RSS_KEY_ADDR_DEFAULT 0x0
+
+/* rx rss_key_wr_data[1f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rss_key_wr_data[1f:0]".
+ * port="pif_rpf_rss_key_wr_data_i[31:0]"
+ */
+
+/* register address for bitfield rss_key_wr_data[1f:0] */
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_ADR 0x000054d4
+/* bitmask for bitfield rss_key_wr_data[1f:0] */
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_MSK 0xffffffff
+/* inverted bitmask for bitfield rss_key_wr_data[1f:0] */
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_MSKN 0x00000000
+/* lower bit position of bitfield rss_key_wr_data[1f:0] */
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_SHIFT 0
+/* width of bitfield rss_key_wr_data[1f:0] */
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_WIDTH 32
+/* default value of bitfield rss_key_wr_data[1f:0] */
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_DEFAULT 0x0
+
+/* rx rss_key_wr_en_i bitfield definitions
+ * preprocessor definitions for the bitfield "rss_key_wr_en_i".
+ * port="pif_rpf_rss_key_wr_en_i"
+ */
+
+/* register address for bitfield rss_key_wr_en_i */
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_ADR 0x000054d0
+/* bitmask for bitfield rss_key_wr_en_i */
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_MSK 0x00000020
+/* inverted bitmask for bitfield rss_key_wr_en_i */
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_MSKN 0xffffffdf
+/* lower bit position of bitfield rss_key_wr_en_i */
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_SHIFT 5
+/* width of bitfield rss_key_wr_en_i */
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_WIDTH 1
+/* default value of bitfield rss_key_wr_en_i */
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_DEFAULT 0x0
+
+/* rx rss_redir_addr[3:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rss_redir_addr[3:0]".
+ * port="pif_rpf_rss_redir_addr_i[3:0]"
+ */
+
+/* register address for bitfield rss_redir_addr[3:0] */
+#define HW_ATL_RPF_RSS_REDIR_ADDR_ADR 0x000054e0
+/* bitmask for bitfield rss_redir_addr[3:0] */
+#define HW_ATL_RPF_RSS_REDIR_ADDR_MSK 0x0000000f
+/* inverted bitmask for bitfield rss_redir_addr[3:0] */
+#define HW_ATL_RPF_RSS_REDIR_ADDR_MSKN 0xfffffff0
+/* lower bit position of bitfield rss_redir_addr[3:0] */
+#define HW_ATL_RPF_RSS_REDIR_ADDR_SHIFT 0
+/* width of bitfield rss_redir_addr[3:0] */
+#define HW_ATL_RPF_RSS_REDIR_ADDR_WIDTH 4
+/* default value of bitfield rss_redir_addr[3:0] */
+#define HW_ATL_RPF_RSS_REDIR_ADDR_DEFAULT 0x0
+
+/* rx rss_redir_wr_data[f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rss_redir_wr_data[f:0]".
+ * port="pif_rpf_rss_redir_wr_data_i[15:0]"
+ */
+
+/* register address for bitfield rss_redir_wr_data[f:0] */
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_ADR 0x000054e4
+/* bitmask for bitfield rss_redir_wr_data[f:0] */
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_MSK 0x0000ffff
+/* inverted bitmask for bitfield rss_redir_wr_data[f:0] */
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_MSKN 0xffff0000
+/* lower bit position of bitfield rss_redir_wr_data[f:0] */
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_SHIFT 0
+/* width of bitfield rss_redir_wr_data[f:0] */
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_WIDTH 16
+/* default value of bitfield rss_redir_wr_data[f:0] */
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_DEFAULT 0x0
+
+/* rx rss_redir_wr_en_i bitfield definitions
+ * preprocessor definitions for the bitfield "rss_redir_wr_en_i".
+ * port="pif_rpf_rss_redir_wr_en_i"
+ */
+
+/* register address for bitfield rss_redir_wr_en_i */
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_ADR 0x000054e0
+/* bitmask for bitfield rss_redir_wr_en_i */
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_MSK 0x00000010
+/* inverted bitmask for bitfield rss_redir_wr_en_i */
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_MSKN 0xffffffef
+/* lower bit position of bitfield rss_redir_wr_en_i */
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_SHIFT 4
+/* width of bitfield rss_redir_wr_en_i */
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_WIDTH 1
+/* default value of bitfield rss_redir_wr_en_i */
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_DEFAULT 0x0
+
+/* rx tpo_rpf_sys_loopback bitfield definitions
+ * preprocessor definitions for the bitfield "tpo_rpf_sys_loopback".
+ * port="pif_rpf_tpo_pkt_sys_lbk_i"
+ */
+
+/* register address for bitfield tpo_rpf_sys_loopback */
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_ADR 0x00005000
+/* bitmask for bitfield tpo_rpf_sys_loopback */
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_MSK 0x00000100
+/* inverted bitmask for bitfield tpo_rpf_sys_loopback */
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_MSKN 0xfffffeff
+/* lower bit position of bitfield tpo_rpf_sys_loopback */
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_SHIFT 8
+/* width of bitfield tpo_rpf_sys_loopback */
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_WIDTH 1
+/* default value of bitfield tpo_rpf_sys_loopback */
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_DEFAULT 0x0
+
+/* rx vl_inner_tpid[f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "vl_inner_tpid[f:0]".
+ * port="pif_rpf_vl_inner_tpid_i[15:0]"
+ */
+
+/* register address for bitfield vl_inner_tpid[f:0] */
+#define HW_ATL_RPF_VL_INNER_TPID_ADR 0x00005284
+/* bitmask for bitfield vl_inner_tpid[f:0] */
+#define HW_ATL_RPF_VL_INNER_TPID_MSK 0x0000ffff
+/* inverted bitmask for bitfield vl_inner_tpid[f:0] */
+#define HW_ATL_RPF_VL_INNER_TPID_MSKN 0xffff0000
+/* lower bit position of bitfield vl_inner_tpid[f:0] */
+#define HW_ATL_RPF_VL_INNER_TPID_SHIFT 0
+/* width of bitfield vl_inner_tpid[f:0] */
+#define HW_ATL_RPF_VL_INNER_TPID_WIDTH 16
+/* default value of bitfield vl_inner_tpid[f:0] */
+#define HW_ATL_RPF_VL_INNER_TPID_DEFAULT 0x8100
+
+/* rx vl_outer_tpid[f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "vl_outer_tpid[f:0]".
+ * port="pif_rpf_vl_outer_tpid_i[15:0]"
+ */
+
+/* register address for bitfield vl_outer_tpid[f:0] */
+#define HW_ATL_RPF_VL_OUTER_TPID_ADR 0x00005284
+/* bitmask for bitfield vl_outer_tpid[f:0] */
+#define HW_ATL_RPF_VL_OUTER_TPID_MSK 0xffff0000
+/* inverted bitmask for bitfield vl_outer_tpid[f:0] */
+#define HW_ATL_RPF_VL_OUTER_TPID_MSKN 0x0000ffff
+/* lower bit position of bitfield vl_outer_tpid[f:0] */
+#define HW_ATL_RPF_VL_OUTER_TPID_SHIFT 16
+/* width of bitfield vl_outer_tpid[f:0] */
+#define HW_ATL_RPF_VL_OUTER_TPID_WIDTH 16
+/* default value of bitfield vl_outer_tpid[f:0] */
+#define HW_ATL_RPF_VL_OUTER_TPID_DEFAULT 0x88a8
+
+/* rx vl_promis_mode bitfield definitions
+ * preprocessor definitions for the bitfield "vl_promis_mode".
+ * port="pif_rpf_vl_promis_mode_i"
+ */
+
+/* register address for bitfield vl_promis_mode */
+#define HW_ATL_RPF_VL_PROMIS_MODE_ADR 0x00005280
+/* bitmask for bitfield vl_promis_mode */
+#define HW_ATL_RPF_VL_PROMIS_MODE_MSK 0x00000002
+/* inverted bitmask for bitfield vl_promis_mode */
+#define HW_ATL_RPF_VL_PROMIS_MODE_MSKN 0xfffffffd
+/* lower bit position of bitfield vl_promis_mode */
+#define HW_ATL_RPF_VL_PROMIS_MODE_SHIFT 1
+/* width of bitfield vl_promis_mode */
+#define HW_ATL_RPF_VL_PROMIS_MODE_WIDTH 1
+/* default value of bitfield vl_promis_mode */
+#define HW_ATL_RPF_VL_PROMIS_MODE_DEFAULT 0x0
+
+/* RX vl_accept_untagged_mode Bitfield Definitions
+ * Preprocessor definitions for the bitfield "vl_accept_untagged_mode".
+ * PORT="pif_rpf_vl_accept_untagged_i"
+ */
+
+/* Register address for bitfield vl_accept_untagged_mode */
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_ADR 0x00005280
+/* Bitmask for bitfield vl_accept_untagged_mode */
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_MSK 0x00000004
+/* Inverted bitmask for bitfield vl_accept_untagged_mode */
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_MSKN 0xFFFFFFFB
+/* Lower bit position of bitfield vl_accept_untagged_mode */
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_SHIFT 2
+/* Width of bitfield vl_accept_untagged_mode */
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_WIDTH 1
+/* Default value of bitfield vl_accept_untagged_mode */
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_DEFAULT 0x0
+
+/* RX vl_untagged_act[2:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "vl_untagged_act[2:0]".
+ * PORT="pif_rpf_vl_untagged_act_i[2:0]"
+ */
+
+/* Register address for bitfield vl_untagged_act[2:0] */
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_ADR 0x00005280
+/* Bitmask for bitfield vl_untagged_act[2:0] */
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_MSK 0x00000038
+/* Inverted bitmask for bitfield vl_untagged_act[2:0] */
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_MSKN 0xFFFFFFC7
+/* Lower bit position of bitfield vl_untagged_act[2:0] */
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_SHIFT 3
+/* Width of bitfield vl_untagged_act[2:0] */
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_WIDTH 3
+/* Default value of bitfield vl_untagged_act[2:0] */
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_DEFAULT 0x0
+
+/* RX vl_en{F} Bitfield Definitions
+ * Preprocessor definitions for the bitfield "vl_en{F}".
+ * Parameter: filter {F} | stride size 0x4 | range [0, 15]
+ * PORT="pif_rpf_vl_en_i[0]"
+ */
+
+/* Register address for bitfield vl_en{F} */
+#define HW_ATL_RPF_VL_EN_F_ADR(filter) (0x00005290 + (filter) * 0x4)
+/* Bitmask for bitfield vl_en{F} */
+#define HW_ATL_RPF_VL_EN_F_MSK 0x80000000
+/* Inverted bitmask for bitfield vl_en{F} */
+#define HW_ATL_RPF_VL_EN_F_MSKN 0x7FFFFFFF
+/* Lower bit position of bitfield vl_en{F} */
+#define HW_ATL_RPF_VL_EN_F_SHIFT 31
+/* Width of bitfield vl_en{F} */
+#define HW_ATL_RPF_VL_EN_F_WIDTH 1
+/* Default value of bitfield vl_en{F} */
+#define HW_ATL_RPF_VL_EN_F_DEFAULT 0x0
+
+/* RX vl_act{F}[2:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "vl_act{F}[2:0]".
+ * Parameter: filter {F} | stride size 0x4 | range [0, 15]
+ * PORT="pif_rpf_vl_act0_i[2:0]"
+ */
+
+/* Register address for bitfield vl_act{F}[2:0] */
+#define HW_ATL_RPF_VL_ACT_F_ADR(filter) (0x00005290 + (filter) * 0x4)
+/* Bitmask for bitfield vl_act{F}[2:0] */
+#define HW_ATL_RPF_VL_ACT_F_MSK 0x00070000
+/* Inverted bitmask for bitfield vl_act{F}[2:0] */
+#define HW_ATL_RPF_VL_ACT_F_MSKN 0xFFF8FFFF
+/* Lower bit position of bitfield vl_act{F}[2:0] */
+#define HW_ATL_RPF_VL_ACT_F_SHIFT 16
+/* Width of bitfield vl_act{F}[2:0] */
+#define HW_ATL_RPF_VL_ACT_F_WIDTH 3
+/* Default value of bitfield vl_act{F}[2:0] */
+#define HW_ATL_RPF_VL_ACT_F_DEFAULT 0x0
+
+/* RX vl_id{F}[B:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "vl_id{F}[B:0]".
+ * Parameter: filter {F} | stride size 0x4 | range [0, 15]
+ * PORT="pif_rpf_vl_id0_i[11:0]"
+ */
+
+/* Register address for bitfield vl_id{F}[B:0] */
+#define HW_ATL_RPF_VL_ID_F_ADR(filter) (0x00005290 + (filter) * 0x4)
+/* Bitmask for bitfield vl_id{F}[B:0] */
+#define HW_ATL_RPF_VL_ID_F_MSK 0x00000FFF
+/* Inverted bitmask for bitfield vl_id{F}[B:0] */
+#define HW_ATL_RPF_VL_ID_F_MSKN 0xFFFFF000
+/* Lower bit position of bitfield vl_id{F}[B:0] */
+#define HW_ATL_RPF_VL_ID_F_SHIFT 0
+/* Width of bitfield vl_id{F}[B:0] */
+#define HW_ATL_RPF_VL_ID_F_WIDTH 12
+/* Default value of bitfield vl_id{F}[B:0] */
+#define HW_ATL_RPF_VL_ID_F_DEFAULT 0x0
+
+/* RX vl_rxq_en{F} Bitfield Definitions
+ * Preprocessor definitions for the bitfield "vl_rxq_en{F}".
+ * Parameter: filter {F} | stride size 0x4 | range [0, 15]
+ * PORT="pif_rpf_vl_rxq_en_i"
+ */
+
+/* Register address for bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_ADR(filter) (0x00005290 + (filter) * 0x4)
+/* Bitmask for bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_MSK 0x10000000
+/* Inverted bitmask for bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_MSKN 0xEFFFFFFF
+/* Lower bit position of bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_SHIFT 28
+/* Width of bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_WIDTH 1
+/* Default value of bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_DEFAULT 0x0
+
+/* RX vl_rxq{F}[4:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "vl_rxq{F}[4:0]".
+ * Parameter: filter {F} | stride size 0x4 | range [0, 15]
+ * PORT="pif_rpf_vl_rxq0_i[4:0]"
+ */
+
+/* Register address for bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_ADR(filter) (0x00005290 + (filter) * 0x4)
+/* Bitmask for bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_MSK 0x01F00000
+/* Inverted bitmask for bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_MSKN 0xFE0FFFFF
+/* Lower bit position of bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_SHIFT 20
+/* Width of bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_WIDTH 5
+/* Default value of bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_DEFAULT 0x0
+
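+/* Usage sketch: one VLAN filter slot {F} is described entirely by the
+ * vl_en/vl_act/vl_id/vl_rxq_en/vl_rxq fields above, all packed into the
+ * same per-filter register at 0x5290 + F * 0x4. Assuming the
+ * aq_hw_write_reg_bit() helper from aq_hw_utils.h, steering VID 100 on
+ * filter 0 to RX queue 4 could look like:
+ *
+ *   aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ID_F_ADR(0),
+ *                       HW_ATL_RPF_VL_ID_F_MSK,
+ *                       HW_ATL_RPF_VL_ID_F_SHIFT, 100U);
+ *   aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_RXQ_F_ADR(0),
+ *                       HW_ATL_RPF_VL_RXQ_F_MSK,
+ *                       HW_ATL_RPF_VL_RXQ_F_SHIFT, 4U);
+ *   aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_RXQ_EN_F_ADR(0),
+ *                       HW_ATL_RPF_VL_RXQ_EN_F_MSK,
+ *                       HW_ATL_RPF_VL_RXQ_EN_F_SHIFT, 1U);
+ *   aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_EN_F_ADR(0),
+ *                       HW_ATL_RPF_VL_EN_F_MSK,
+ *                       HW_ATL_RPF_VL_EN_F_SHIFT, 1U);
+ */
+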
+/* rx et_en{f} bitfield definitions
+ * preprocessor definitions for the bitfield "et_en{f}".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_et_en_i[0]"
+ */
+
+/* register address for bitfield et_en{f} */
+#define HW_ATL_RPF_ET_ENF_ADR(filter) (0x00005300 + (filter) * 0x4)
+/* bitmask for bitfield et_en{f} */
+#define HW_ATL_RPF_ET_ENF_MSK 0x80000000
+/* inverted bitmask for bitfield et_en{f} */
+#define HW_ATL_RPF_ET_ENF_MSKN 0x7fffffff
+/* lower bit position of bitfield et_en{f} */
+#define HW_ATL_RPF_ET_ENF_SHIFT 31
+/* width of bitfield et_en{f} */
+#define HW_ATL_RPF_ET_ENF_WIDTH 1
+/* default value of bitfield et_en{f} */
+#define HW_ATL_RPF_ET_ENF_DEFAULT 0x0
+
+/* rx et_up{f}_en bitfield definitions
+ * preprocessor definitions for the bitfield "et_up{f}_en".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_et_up_en_i[0]"
+ */
+
+/* register address for bitfield et_up{f}_en */
+#define HW_ATL_RPF_ET_UPFEN_ADR(filter) (0x00005300 + (filter) * 0x4)
+/* bitmask for bitfield et_up{f}_en */
+#define HW_ATL_RPF_ET_UPFEN_MSK 0x40000000
+/* inverted bitmask for bitfield et_up{f}_en */
+#define HW_ATL_RPF_ET_UPFEN_MSKN 0xbfffffff
+/* lower bit position of bitfield et_up{f}_en */
+#define HW_ATL_RPF_ET_UPFEN_SHIFT 30
+/* width of bitfield et_up{f}_en */
+#define HW_ATL_RPF_ET_UPFEN_WIDTH 1
+/* default value of bitfield et_up{f}_en */
+#define HW_ATL_RPF_ET_UPFEN_DEFAULT 0x0
+
+/* rx et_rxq{f}_en bitfield definitions
+ * preprocessor definitions for the bitfield "et_rxq{f}_en".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_et_rxq_en_i[0]"
+ */
+
+/* register address for bitfield et_rxq{f}_en */
+#define HW_ATL_RPF_ET_RXQFEN_ADR(filter) (0x00005300 + (filter) * 0x4)
+/* bitmask for bitfield et_rxq{f}_en */
+#define HW_ATL_RPF_ET_RXQFEN_MSK 0x20000000
+/* inverted bitmask for bitfield et_rxq{f}_en */
+#define HW_ATL_RPF_ET_RXQFEN_MSKN 0xdfffffff
+/* lower bit position of bitfield et_rxq{f}_en */
+#define HW_ATL_RPF_ET_RXQFEN_SHIFT 29
+/* width of bitfield et_rxq{f}_en */
+#define HW_ATL_RPF_ET_RXQFEN_WIDTH 1
+/* default value of bitfield et_rxq{f}_en */
+#define HW_ATL_RPF_ET_RXQFEN_DEFAULT 0x0
+
+/* rx et_up{f}[2:0] bitfield definitions
+ * preprocessor definitions for the bitfield "et_up{f}[2:0]".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_et_up0_i[2:0]"
+ */
+
+/* register address for bitfield et_up{f}[2:0] */
+#define HW_ATL_RPF_ET_UPF_ADR(filter) (0x00005300 + (filter) * 0x4)
+/* bitmask for bitfield et_up{f}[2:0] */
+#define HW_ATL_RPF_ET_UPF_MSK 0x1c000000
+/* inverted bitmask for bitfield et_up{f}[2:0] */
+#define HW_ATL_RPF_ET_UPF_MSKN 0xe3ffffff
+/* lower bit position of bitfield et_up{f}[2:0] */
+#define HW_ATL_RPF_ET_UPF_SHIFT 26
+/* width of bitfield et_up{f}[2:0] */
+#define HW_ATL_RPF_ET_UPF_WIDTH 3
+/* default value of bitfield et_up{f}[2:0] */
+#define HW_ATL_RPF_ET_UPF_DEFAULT 0x0
+
+/* rx et_rxq{f}[4:0] bitfield definitions
+ * preprocessor definitions for the bitfield "et_rxq{f}[4:0]".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_et_rxq0_i[4:0]"
+ */
+
+/* register address for bitfield et_rxq{f}[4:0] */
+#define HW_ATL_RPF_ET_RXQF_ADR(filter) (0x00005300 + (filter) * 0x4)
+/* bitmask for bitfield et_rxq{f}[4:0] */
+#define HW_ATL_RPF_ET_RXQF_MSK 0x01f00000
+/* inverted bitmask for bitfield et_rxq{f}[4:0] */
+#define HW_ATL_RPF_ET_RXQF_MSKN 0xfe0fffff
+/* lower bit position of bitfield et_rxq{f}[4:0] */
+#define HW_ATL_RPF_ET_RXQF_SHIFT 20
+/* width of bitfield et_rxq{f}[4:0] */
+#define HW_ATL_RPF_ET_RXQF_WIDTH 5
+/* default value of bitfield et_rxq{f}[4:0] */
+#define HW_ATL_RPF_ET_RXQF_DEFAULT 0x0
+
+/* rx et_mng_rxq{f} bitfield definitions
+ * preprocessor definitions for the bitfield "et_mng_rxq{f}".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_et_mng_rxq_i[0]"
+ */
+
+/* register address for bitfield et_mng_rxq{f} */
+#define HW_ATL_RPF_ET_MNG_RXQF_ADR(filter) (0x00005300 + (filter) * 0x4)
+/* bitmask for bitfield et_mng_rxq{f} */
+#define HW_ATL_RPF_ET_MNG_RXQF_MSK 0x00080000
+/* inverted bitmask for bitfield et_mng_rxq{f} */
+#define HW_ATL_RPF_ET_MNG_RXQF_MSKN 0xfff7ffff
+/* lower bit position of bitfield et_mng_rxq{f} */
+#define HW_ATL_RPF_ET_MNG_RXQF_SHIFT 19
+/* width of bitfield et_mng_rxq{f} */
+#define HW_ATL_RPF_ET_MNG_RXQF_WIDTH 1
+/* default value of bitfield et_mng_rxq{f} */
+#define HW_ATL_RPF_ET_MNG_RXQF_DEFAULT 0x0
+
+/* rx et_act{f}[2:0] bitfield definitions
+ * preprocessor definitions for the bitfield "et_act{f}[2:0]".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_et_act0_i[2:0]"
+ */
+
+/* register address for bitfield et_act{f}[2:0] */
+#define HW_ATL_RPF_ET_ACTF_ADR(filter) (0x00005300 + (filter) * 0x4)
+/* bitmask for bitfield et_act{f}[2:0] */
+#define HW_ATL_RPF_ET_ACTF_MSK 0x00070000
+/* inverted bitmask for bitfield et_act{f}[2:0] */
+#define HW_ATL_RPF_ET_ACTF_MSKN 0xfff8ffff
+/* lower bit position of bitfield et_act{f}[2:0] */
+#define HW_ATL_RPF_ET_ACTF_SHIFT 16
+/* width of bitfield et_act{f}[2:0] */
+#define HW_ATL_RPF_ET_ACTF_WIDTH 3
+/* default value of bitfield et_act{f}[2:0] */
+#define HW_ATL_RPF_ET_ACTF_DEFAULT 0x0
+
+/* rx et_val{f}[f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "et_val{f}[f:0]".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_et_val0_i[15:0]"
+ */
+
+/* register address for bitfield et_val{f}[f:0] */
+#define HW_ATL_RPF_ET_VALF_ADR(filter) (0x00005300 + (filter) * 0x4)
+/* bitmask for bitfield et_val{f}[f:0] */
+#define HW_ATL_RPF_ET_VALF_MSK 0x0000ffff
+/* inverted bitmask for bitfield et_val{f}[f:0] */
+#define HW_ATL_RPF_ET_VALF_MSKN 0xffff0000
+/* lower bit position of bitfield et_val{f}[f:0] */
+#define HW_ATL_RPF_ET_VALF_SHIFT 0
+/* width of bitfield et_val{f}[f:0] */
+#define HW_ATL_RPF_ET_VALF_WIDTH 16
+/* default value of bitfield et_val{f}[f:0] */
+#define HW_ATL_RPF_ET_VALF_DEFAULT 0x0
+
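+/* Usage sketch: all et_* fields of one filter share the register at
+ * 0x5300 + f * 0x4, so a filter can also be composed as one word and
+ * written in a single access (aq_hw_write_reg() assumed from
+ * aq_hw_utils.h; the action encoding 0x1 and EtherType are illustrative):
+ *
+ *   u32 val = HW_ATL_RPF_ET_ENF_MSK
+ *             | (0x1U << HW_ATL_RPF_ET_ACTF_SHIFT)
+ *             | (0x88F7U << HW_ATL_RPF_ET_VALF_SHIFT);
+ *   aq_hw_write_reg(aq_hw, HW_ATL_RPF_ET_ENF_ADR(f), val);
+ */
+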
+/* RX l3_l4_en{F} Bitfield Definitions
+ * Preprocessor definitions for the bitfield "l3_l4_en{F}".
+ * Parameter: filter {F} | stride size 0x4 | range [0, 7]
+ * PORT="pif_rpf_l3_l4_en_i[0]"
+ */
+
+#define HW_ATL_RPF_L3_REG_CTRL_ADR(filter) (0x00005380 + (filter) * 0x4)
+
+/* RX rpf_l3_sa{D}[1F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "l3_sa{D}[1F:0]".
+ * Parameter: location {D} | stride size 0x4 | range [0, 7]
+ * PORT="pif_rpf_l3_sa0_i[31:0]"
+ */
+
+/* Register address for bitfield pif_rpf_l3_sa0_i[31:0] */
+#define HW_ATL_RPF_L3_SRCA_ADR(filter) (0x000053B0 + (filter) * 0x4)
+/* Bitmask for bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_MSK 0xFFFFFFFFu
+/* Inverted bitmask for bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_MSKN 0x00000000u
+/* Lower bit position of bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_SHIFT 0
+/* Width of bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_WIDTH 32
+/* Default value of bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_DEFAULT 0x0
+
+/* RX rpf_l3_da{D}[1F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "l3_da{D}[1F:0]".
+ * Parameter: location {D} | stride size 0x4 | range [0, 7]
+ * PORT="pif_rpf_l3_da0_i[31:0]"
+ */
+
+/* Register address for bitfield pif_rpf_l3_da0_i[31:0] */
+#define HW_ATL_RPF_L3_DSTA_ADR(filter) (0x000053D0 + (filter) * 0x4)
+/* Bitmask for bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_MSK 0xFFFFFFFFu
+/* Inverted bitmask for bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_MSKN 0x00000000u
+/* Lower bit position of bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_SHIFT 0
+/* Width of bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_WIDTH 32
+/* Default value of bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_DEFAULT 0x0
+
+/* RX l4_sp{D}[F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "l4_sp{D}[F:0]".
+ * Parameter: srcport {D} | stride size 0x4 | range [0, 7]
+ * PORT="pif_rpf_l4_sp0_i[15:0]"
+ */
+
+/* Register address for bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_ADR(srcport) (0x00005400u + (srcport) * 0x4)
+/* Bitmask for bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_MSK 0x0000FFFFu
+/* Inverted bitmask for bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_MSKN 0xFFFF0000u
+/* Lower bit position of bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_SHIFT 0
+/* Width of bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_WIDTH 16
+/* Default value of bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_DEFAULT 0x0
+
+/* RX l4_dp{D}[F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "l4_dp{D}[F:0]".
+ * Parameter: destport {D} | stride size 0x4 | range [0, 7]
+ * PORT="pif_rpf_l4_dp0_i[15:0]"
+ */
+
+/* Register address for bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_ADR(destport) (0x00005420u + (destport) * 0x4)
+/* Bitmask for bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_MSK 0x0000FFFFu
+/* Inverted bitmask for bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_MSKN 0xFFFF0000u
+/* Lower bit position of bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_SHIFT 0
+/* Width of bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_WIDTH 16
+/* Default value of bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_DEFAULT 0x0
+
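+/* Usage sketch: the l4_sp/l4_dp fields occupy bits [15:0] of their own
+ * registers, so a slot can be programmed with a plain 32-bit write
+ * (aq_hw_write_reg() assumed from aq_hw_utils.h); e.g. matching
+ * destination port 320 in slot 0:
+ *
+ *   aq_hw_write_reg(aq_hw, HW_ATL_RPF_L4_DPD_ADR(0), 320U);
+ */
+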
+/* rx ipv4_chk_en bitfield definitions
+ * preprocessor definitions for the bitfield "ipv4_chk_en".
+ * port="pif_rpo_ipv4_chk_en_i"
+ */
+
+/* register address for bitfield ipv4_chk_en */
+#define HW_ATL_RPO_IPV4CHK_EN_ADR 0x00005580
+/* bitmask for bitfield ipv4_chk_en */
+#define HW_ATL_RPO_IPV4CHK_EN_MSK 0x00000002
+/* inverted bitmask for bitfield ipv4_chk_en */
+#define HW_ATL_RPO_IPV4CHK_EN_MSKN 0xfffffffd
+/* lower bit position of bitfield ipv4_chk_en */
+#define HW_ATL_RPO_IPV4CHK_EN_SHIFT 1
+/* width of bitfield ipv4_chk_en */
+#define HW_ATL_RPO_IPV4CHK_EN_WIDTH 1
+/* default value of bitfield ipv4_chk_en */
+#define HW_ATL_RPO_IPV4CHK_EN_DEFAULT 0x0
+
+/* rx desc{d}_vl_strip bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_vl_strip".
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ * port="pif_rpo_desc_vl_strip_i[0]"
+ */
+
+/* register address for bitfield desc{d}_vl_strip */
+#define HW_ATL_RPO_DESCDVL_STRIP_ADR(descriptor) \
+ (0x00005b08 + (descriptor) * 0x20)
+/* bitmask for bitfield desc{d}_vl_strip */
+#define HW_ATL_RPO_DESCDVL_STRIP_MSK 0x20000000
+/* inverted bitmask for bitfield desc{d}_vl_strip */
+#define HW_ATL_RPO_DESCDVL_STRIP_MSKN 0xdfffffff
+/* lower bit position of bitfield desc{d}_vl_strip */
+#define HW_ATL_RPO_DESCDVL_STRIP_SHIFT 29
+/* width of bitfield desc{d}_vl_strip */
+#define HW_ATL_RPO_DESCDVL_STRIP_WIDTH 1
+/* default value of bitfield desc{d}_vl_strip */
+#define HW_ATL_RPO_DESCDVL_STRIP_DEFAULT 0x0
+
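+/* Usage sketch: VLAN tag stripping is toggled per RX descriptor ring;
+ * with the aq_hw_write_reg_bit() helper assumed from aq_hw_utils.h:
+ *
+ *   aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_DESCDVL_STRIP_ADR(ring_idx),
+ *                       HW_ATL_RPO_DESCDVL_STRIP_MSK,
+ *                       HW_ATL_RPO_DESCDVL_STRIP_SHIFT, 1U);
+ */
+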
+/* rx l4_chk_en bitfield definitions
+ * preprocessor definitions for the bitfield "l4_chk_en".
+ * port="pif_rpo_l4_chk_en_i"
+ */
+
+/* register address for bitfield l4_chk_en */
+#define HW_ATL_RPOL4CHK_EN_ADR 0x00005580
+/* bitmask for bitfield l4_chk_en */
+#define HW_ATL_RPOL4CHK_EN_MSK 0x00000001
+/* inverted bitmask for bitfield l4_chk_en */
+#define HW_ATL_RPOL4CHK_EN_MSKN 0xfffffffe
+/* lower bit position of bitfield l4_chk_en */
+#define HW_ATL_RPOL4CHK_EN_SHIFT 0
+/* width of bitfield l4_chk_en */
+#define HW_ATL_RPOL4CHK_EN_WIDTH 1
+/* default value of bitfield l4_chk_en */
+#define HW_ATL_RPOL4CHK_EN_DEFAULT 0x0
+
+/* RX outer_vl_ins_mode Bitfield Definitions
+ * Preprocessor definitions for the bitfield "outer_vl_ins_mode".
+ * PORT="pif_rpo_outer_vl_mode_i"
+ */
+
+/* Register address for bitfield outer_vl_ins_mode */
+#define HW_ATL_RPO_OUTER_VL_INS_MODE_ADR 0x00005580
+/* Bitmask for bitfield outer_vl_ins_mode */
+#define HW_ATL_RPO_OUTER_VL_INS_MODE_MSK 0x00000004
+/* Inverted bitmask for bitfield outer_vl_ins_mode */
+#define HW_ATL_RPO_OUTER_VL_INS_MODE_MSKN 0xFFFFFFFB
+/* Lower bit position of bitfield outer_vl_ins_mode */
+#define HW_ATL_RPO_OUTER_VL_INS_MODE_SHIFT 2
+/* Width of bitfield outer_vl_ins_mode */
+#define HW_ATL_RPO_OUTER_VL_INS_MODE_WIDTH 1
+/* Default value of bitfield outer_vl_ins_mode */
+#define HW_ATL_RPO_OUTER_VL_INS_MODE_DEFAULT 0x0
+
+/* rx reg_res_dsbl bitfield definitions
+ * preprocessor definitions for the bitfield "reg_res_dsbl".
+ * port="pif_rx_reg_res_dsbl_i"
+ */
+
+/* register address for bitfield reg_res_dsbl */
+#define HW_ATL_RX_REG_RES_DSBL_ADR 0x00005000
+/* bitmask for bitfield reg_res_dsbl */
+#define HW_ATL_RX_REG_RES_DSBL_MSK 0x20000000
+/* inverted bitmask for bitfield reg_res_dsbl */
+#define HW_ATL_RX_REG_RES_DSBL_MSKN 0xdfffffff
+/* lower bit position of bitfield reg_res_dsbl */
+#define HW_ATL_RX_REG_RES_DSBL_SHIFT 29
+/* width of bitfield reg_res_dsbl */
+#define HW_ATL_RX_REG_RES_DSBL_WIDTH 1
+/* default value of bitfield reg_res_dsbl */
+#define HW_ATL_RX_REG_RES_DSBL_DEFAULT 0x1
+
+/* tx dca{d}_cpuid[7:0] bitfield definitions
+ * preprocessor definitions for the bitfield "dca{d}_cpuid[7:0]".
+ * parameter: dca {d} | stride size 0x4 | range [0, 31]
+ * port="pif_tdm_dca0_cpuid_i[7:0]"
+ */
+
+/* register address for bitfield dca{d}_cpuid[7:0] */
+#define HW_ATL_TDM_DCADCPUID_ADR(dca) (0x00008400 + (dca) * 0x4)
+/* bitmask for bitfield dca{d}_cpuid[7:0] */
+#define HW_ATL_TDM_DCADCPUID_MSK 0x000000ff
+/* inverted bitmask for bitfield dca{d}_cpuid[7:0] */
+#define HW_ATL_TDM_DCADCPUID_MSKN 0xffffff00
+/* lower bit position of bitfield dca{d}_cpuid[7:0] */
+#define HW_ATL_TDM_DCADCPUID_SHIFT 0
+/* width of bitfield dca{d}_cpuid[7:0] */
+#define HW_ATL_TDM_DCADCPUID_WIDTH 8
+/* default value of bitfield dca{d}_cpuid[7:0] */
+#define HW_ATL_TDM_DCADCPUID_DEFAULT 0x0
+
+/* tx lso_en[1f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "lso_en[1f:0]".
+ * port="pif_tdm_lso_en_i[31:0]"
+ */
+
+/* register address for bitfield lso_en[1f:0] */
+#define HW_ATL_TDM_LSO_EN_ADR 0x00007810
+/* bitmask for bitfield lso_en[1f:0] */
+#define HW_ATL_TDM_LSO_EN_MSK 0xffffffff
+/* inverted bitmask for bitfield lso_en[1f:0] */
+#define HW_ATL_TDM_LSO_EN_MSKN 0x00000000
+/* lower bit position of bitfield lso_en[1f:0] */
+#define HW_ATL_TDM_LSO_EN_SHIFT 0
+/* width of bitfield lso_en[1f:0] */
+#define HW_ATL_TDM_LSO_EN_WIDTH 32
+/* default value of bitfield lso_en[1f:0] */
+#define HW_ATL_TDM_LSO_EN_DEFAULT 0x0
+
+/* tx dca_en bitfield definitions
+ * preprocessor definitions for the bitfield "dca_en".
+ * port="pif_tdm_dca_en_i"
+ */
+
+/* register address for bitfield dca_en */
+#define HW_ATL_TDM_DCA_EN_ADR 0x00008480
+/* bitmask for bitfield dca_en */
+#define HW_ATL_TDM_DCA_EN_MSK 0x80000000
+/* inverted bitmask for bitfield dca_en */
+#define HW_ATL_TDM_DCA_EN_MSKN 0x7fffffff
+/* lower bit position of bitfield dca_en */
+#define HW_ATL_TDM_DCA_EN_SHIFT 31
+/* width of bitfield dca_en */
+#define HW_ATL_TDM_DCA_EN_WIDTH 1
+/* default value of bitfield dca_en */
+#define HW_ATL_TDM_DCA_EN_DEFAULT 0x1
+
+/* tx dca_mode[3:0] bitfield definitions
+ * preprocessor definitions for the bitfield "dca_mode[3:0]".
+ * port="pif_tdm_dca_mode_i[3:0]"
+ */
+
+/* register address for bitfield dca_mode[3:0] */
+#define HW_ATL_TDM_DCA_MODE_ADR 0x00008480
+/* bitmask for bitfield dca_mode[3:0] */
+#define HW_ATL_TDM_DCA_MODE_MSK 0x0000000f
+/* inverted bitmask for bitfield dca_mode[3:0] */
+#define HW_ATL_TDM_DCA_MODE_MSKN 0xfffffff0
+/* lower bit position of bitfield dca_mode[3:0] */
+#define HW_ATL_TDM_DCA_MODE_SHIFT 0
+/* width of bitfield dca_mode[3:0] */
+#define HW_ATL_TDM_DCA_MODE_WIDTH 4
+/* default value of bitfield dca_mode[3:0] */
+#define HW_ATL_TDM_DCA_MODE_DEFAULT 0x0
+
+/* tx dca{d}_desc_en bitfield definitions
+ * preprocessor definitions for the bitfield "dca{d}_desc_en".
+ * parameter: dca {d} | stride size 0x4 | range [0, 31]
+ * port="pif_tdm_dca_desc_en_i[0]"
+ */
+
+/* register address for bitfield dca{d}_desc_en */
+#define HW_ATL_TDM_DCADDESC_EN_ADR(dca) (0x00008400 + (dca) * 0x4)
+/* bitmask for bitfield dca{d}_desc_en */
+#define HW_ATL_TDM_DCADDESC_EN_MSK 0x80000000
+/* inverted bitmask for bitfield dca{d}_desc_en */
+#define HW_ATL_TDM_DCADDESC_EN_MSKN 0x7fffffff
+/* lower bit position of bitfield dca{d}_desc_en */
+#define HW_ATL_TDM_DCADDESC_EN_SHIFT 31
+/* width of bitfield dca{d}_desc_en */
+#define HW_ATL_TDM_DCADDESC_EN_WIDTH 1
+/* default value of bitfield dca{d}_desc_en */
+#define HW_ATL_TDM_DCADDESC_EN_DEFAULT 0x0
+
+/* tx desc{d}_en bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_en".
+ * parameter: descriptor {d} | stride size 0x40 | range [0, 31]
+ * port="pif_tdm_desc_en_i[0]"
+ */
+
+/* register address for bitfield desc{d}_en */
+#define HW_ATL_TDM_DESCDEN_ADR(descriptor) (0x00007c08 + (descriptor) * 0x40)
+/* bitmask for bitfield desc{d}_en */
+#define HW_ATL_TDM_DESCDEN_MSK 0x80000000
+/* inverted bitmask for bitfield desc{d}_en */
+#define HW_ATL_TDM_DESCDEN_MSKN 0x7fffffff
+/* lower bit position of bitfield desc{d}_en */
+#define HW_ATL_TDM_DESCDEN_SHIFT 31
+/* width of bitfield desc{d}_en */
+#define HW_ATL_TDM_DESCDEN_WIDTH 1
+/* default value of bitfield desc{d}_en */
+#define HW_ATL_TDM_DESCDEN_DEFAULT 0x0
+
+/* tx desc{d}_hd[c:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_hd[c:0]".
+ * parameter: descriptor {d} | stride size 0x40 | range [0, 31]
+ * port="tdm_pif_desc0_hd_o[12:0]"
+ */
+
+/* register address for bitfield desc{d}_hd[c:0] */
+#define HW_ATL_TDM_DESCDHD_ADR(descriptor) (0x00007c0c + (descriptor) * 0x40)
+/* bitmask for bitfield desc{d}_hd[c:0] */
+#define HW_ATL_TDM_DESCDHD_MSK 0x00001fff
+/* inverted bitmask for bitfield desc{d}_hd[c:0] */
+#define HW_ATL_TDM_DESCDHD_MSKN 0xffffe000
+/* lower bit position of bitfield desc{d}_hd[c:0] */
+#define HW_ATL_TDM_DESCDHD_SHIFT 0
+/* width of bitfield desc{d}_hd[c:0] */
+#define HW_ATL_TDM_DESCDHD_WIDTH 13
+
+/* tx desc{d}_len[9:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_len[9:0]".
+ * parameter: descriptor {d} | stride size 0x40 | range [0, 31]
+ * port="pif_tdm_desc0_len_i[9:0]"
+ */
+
+/* register address for bitfield desc{d}_len[9:0] */
+#define HW_ATL_TDM_DESCDLEN_ADR(descriptor) (0x00007c08 + (descriptor) * 0x40)
+/* bitmask for bitfield desc{d}_len[9:0] */
+#define HW_ATL_TDM_DESCDLEN_MSK 0x00001ff8
+/* inverted bitmask for bitfield desc{d}_len[9:0] */
+#define HW_ATL_TDM_DESCDLEN_MSKN 0xffffe007
+/* lower bit position of bitfield desc{d}_len[9:0] */
+#define HW_ATL_TDM_DESCDLEN_SHIFT 3
+/* width of bitfield desc{d}_len[9:0] */
+#define HW_ATL_TDM_DESCDLEN_WIDTH 10
+/* default value of bitfield desc{d}_len[9:0] */
+#define HW_ATL_TDM_DESCDLEN_DEFAULT 0x0
+
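+/* Usage sketch: bringing up TX descriptor ring {d} writes its length
+ * first and then its enable bit. The desc{d}_len field starts at bit 3
+ * of a 13-bit descriptor count, i.e. it holds the ring size divided by
+ * 8 (size assumed to be a multiple of 8; aq_hw_write_reg_bit() assumed
+ * from aq_hw_utils.h):
+ *
+ *   aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DESCDLEN_ADR(ring),
+ *                       HW_ATL_TDM_DESCDLEN_MSK,
+ *                       HW_ATL_TDM_DESCDLEN_SHIFT, size / 8U);
+ *   aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DESCDEN_ADR(ring),
+ *                       HW_ATL_TDM_DESCDEN_MSK,
+ *                       HW_ATL_TDM_DESCDEN_SHIFT, 1U);
+ */
+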
+/* tx int_desc_wrb_en bitfield definitions
+ * preprocessor definitions for the bitfield "int_desc_wrb_en".
+ * port="pif_tdm_int_desc_wrb_en_i"
+ */
+
+/* register address for bitfield int_desc_wrb_en */
+#define HW_ATL_TDM_INT_DESC_WRB_EN_ADR 0x00007b40
+/* bitmask for bitfield int_desc_wrb_en */
+#define HW_ATL_TDM_INT_DESC_WRB_EN_MSK 0x00000002
+/* inverted bitmask for bitfield int_desc_wrb_en */
+#define HW_ATL_TDM_INT_DESC_WRB_EN_MSKN 0xfffffffd
+/* lower bit position of bitfield int_desc_wrb_en */
+#define HW_ATL_TDM_INT_DESC_WRB_EN_SHIFT 1
+/* width of bitfield int_desc_wrb_en */
+#define HW_ATL_TDM_INT_DESC_WRB_EN_WIDTH 1
+/* default value of bitfield int_desc_wrb_en */
+#define HW_ATL_TDM_INT_DESC_WRB_EN_DEFAULT 0x0
+
+/* tx desc{d}_wrb_thresh[6:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_wrb_thresh[6:0]".
+ * parameter: descriptor {d} | stride size 0x40 | range [0, 31]
+ * port="pif_tdm_desc0_wrb_thresh_i[6:0]"
+ */
+
+/* register address for bitfield desc{d}_wrb_thresh[6:0] */
+#define HW_ATL_TDM_DESCDWRB_THRESH_ADR(descriptor) \
+ (0x00007c18 + (descriptor) * 0x40)
+/* bitmask for bitfield desc{d}_wrb_thresh[6:0] */
+#define HW_ATL_TDM_DESCDWRB_THRESH_MSK 0x00007f00
+/* inverted bitmask for bitfield desc{d}_wrb_thresh[6:0] */
+#define HW_ATL_TDM_DESCDWRB_THRESH_MSKN 0xffff80ff
+/* lower bit position of bitfield desc{d}_wrb_thresh[6:0] */
+#define HW_ATL_TDM_DESCDWRB_THRESH_SHIFT 8
+/* width of bitfield desc{d}_wrb_thresh[6:0] */
+#define HW_ATL_TDM_DESCDWRB_THRESH_WIDTH 7
+/* default value of bitfield desc{d}_wrb_thresh[6:0] */
+#define HW_ATL_TDM_DESCDWRB_THRESH_DEFAULT 0x0
+
+/* tx lso_tcp_flag_first[b:0] bitfield definitions
+ * preprocessor definitions for the bitfield "lso_tcp_flag_first[b:0]".
+ * port="pif_thm_lso_tcp_flag_first_i[11:0]"
+ */
+
+/* register address for bitfield lso_tcp_flag_first[b:0] */
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_ADR 0x00007820
+/* bitmask for bitfield lso_tcp_flag_first[b:0] */
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_MSK 0x00000fff
+/* inverted bitmask for bitfield lso_tcp_flag_first[b:0] */
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_MSKN 0xfffff000
+/* lower bit position of bitfield lso_tcp_flag_first[b:0] */
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_SHIFT 0
+/* width of bitfield lso_tcp_flag_first[b:0] */
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_WIDTH 12
+/* default value of bitfield lso_tcp_flag_first[b:0] */
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_DEFAULT 0x0
+
+/* tx lso_tcp_flag_last[b:0] bitfield definitions
+ * preprocessor definitions for the bitfield "lso_tcp_flag_last[b:0]".
+ * port="pif_thm_lso_tcp_flag_last_i[11:0]"
+ */
+
+/* register address for bitfield lso_tcp_flag_last[b:0] */
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_ADR 0x00007824
+/* bitmask for bitfield lso_tcp_flag_last[b:0] */
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_MSK 0x00000fff
+/* inverted bitmask for bitfield lso_tcp_flag_last[b:0] */
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_MSKN 0xfffff000
+/* lower bit position of bitfield lso_tcp_flag_last[b:0] */
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_SHIFT 0
+/* width of bitfield lso_tcp_flag_last[b:0] */
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_WIDTH 12
+/* default value of bitfield lso_tcp_flag_last[b:0] */
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_DEFAULT 0x0
+
+/* RX lro_rsc_max[1F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_rsc_max[1F:0]".
+ */
+
+/* Register address for bitfield lro_rsc_max[1F:0] */
+#define HW_ATL_RPO_LRO_RSC_MAX_ADR 0x00005598
+/* Bitmask for bitfield lro_rsc_max[1F:0] */
+#define HW_ATL_RPO_LRO_RSC_MAX_MSK 0xFFFFFFFF
+/* Inverted bitmask for bitfield lro_rsc_max[1F:0] */
+#define HW_ATL_RPO_LRO_RSC_MAX_MSKN 0x00000000
+/* Lower bit position of bitfield lro_rsc_max[1F:0] */
+#define HW_ATL_RPO_LRO_RSC_MAX_SHIFT 0
+/* Width of bitfield lro_rsc_max[1F:0] */
+#define HW_ATL_RPO_LRO_RSC_MAX_WIDTH 32
+/* Default value of bitfield lro_rsc_max[1F:0] */
+#define HW_ATL_RPO_LRO_RSC_MAX_DEFAULT 0x0
+
+/* RX lro_en[1F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_en[1F:0]".
+ * PORT="pif_rpo_lro_en_i[31:0]"
+ */
+
+/* Register address for bitfield lro_en[1F:0] */
+#define HW_ATL_RPO_LRO_EN_ADR 0x00005590
+/* Bitmask for bitfield lro_en[1F:0] */
+#define HW_ATL_RPO_LRO_EN_MSK 0xFFFFFFFF
+/* Inverted bitmask for bitfield lro_en[1F:0] */
+#define HW_ATL_RPO_LRO_EN_MSKN 0x00000000
+/* Lower bit position of bitfield lro_en[1F:0] */
+#define HW_ATL_RPO_LRO_EN_SHIFT 0
+/* Width of bitfield lro_en[1F:0] */
+#define HW_ATL_RPO_LRO_EN_WIDTH 32
+/* Default value of bitfield lro_en[1F:0] */
+#define HW_ATL_RPO_LRO_EN_DEFAULT 0x0
+
+/* RX lro_ptopt_en Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_ptopt_en".
+ * PORT="pif_rpo_lro_ptopt_en_i"
+ */
+
+/* Register address for bitfield lro_ptopt_en */
+#define HW_ATL_RPO_LRO_PTOPT_EN_ADR 0x00005594
+/* Bitmask for bitfield lro_ptopt_en */
+#define HW_ATL_RPO_LRO_PTOPT_EN_MSK 0x00008000
+/* Inverted bitmask for bitfield lro_ptopt_en */
+#define HW_ATL_RPO_LRO_PTOPT_EN_MSKN 0xFFFF7FFF
+/* Lower bit position of bitfield lro_ptopt_en */
+#define HW_ATL_RPO_LRO_PTOPT_EN_SHIFT 15
+/* Width of bitfield lro_ptopt_en */
+#define HW_ATL_RPO_LRO_PTOPT_EN_WIDTH 1
+/* Default value of bitfield lro_ptopt_en */
+#define HW_ATL_RPO_LRO_PTOPT_EN_DEFAULT 0x1
+
+/* RX lro_q_ses_lmt Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_q_ses_lmt".
+ * PORT="pif_rpo_lro_q_ses_lmt_i[1:0]"
+ */
+
+/* Register address for bitfield lro_q_ses_lmt */
+#define HW_ATL_RPO_LRO_QSES_LMT_ADR 0x00005594
+/* Bitmask for bitfield lro_q_ses_lmt */
+#define HW_ATL_RPO_LRO_QSES_LMT_MSK 0x00003000
+/* Inverted bitmask for bitfield lro_q_ses_lmt */
+#define HW_ATL_RPO_LRO_QSES_LMT_MSKN 0xFFFFCFFF
+/* Lower bit position of bitfield lro_q_ses_lmt */
+#define HW_ATL_RPO_LRO_QSES_LMT_SHIFT 12
+/* Width of bitfield lro_q_ses_lmt */
+#define HW_ATL_RPO_LRO_QSES_LMT_WIDTH 2
+/* Default value of bitfield lro_q_ses_lmt */
+#define HW_ATL_RPO_LRO_QSES_LMT_DEFAULT 0x1
+
+/* RX lro_tot_dsc_lmt[1:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_tot_dsc_lmt[1:0]".
+ * PORT="pif_rpo_lro_tot_dsc_lmt_i[1:0]"
+ */
+
+/* Register address for bitfield lro_tot_dsc_lmt[1:0] */
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_ADR 0x00005594
+/* Bitmask for bitfield lro_tot_dsc_lmt[1:0] */
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_MSK 0x00000060
+/* Inverted bitmask for bitfield lro_tot_dsc_lmt[1:0] */
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_MSKN 0xFFFFFF9F
+/* Lower bit position of bitfield lro_tot_dsc_lmt[1:0] */
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_SHIFT 5
+/* Width of bitfield lro_tot_dsc_lmt[1:0] */
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_WIDTH 2
+/* Default value of bitfield lro_tot_dsc_lmt[1:0] */
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_DEFAULT 0x1
+
+/* RX lro_pkt_min[4:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_pkt_min[4:0]".
+ * PORT="pif_rpo_lro_pkt_min_i[4:0]"
+ */
+
+/* Register address for bitfield lro_pkt_min[4:0] */
+#define HW_ATL_RPO_LRO_PKT_MIN_ADR 0x00005594
+/* Bitmask for bitfield lro_pkt_min[4:0] */
+#define HW_ATL_RPO_LRO_PKT_MIN_MSK 0x0000001F
+/* Inverted bitmask for bitfield lro_pkt_min[4:0] */
+#define HW_ATL_RPO_LRO_PKT_MIN_MSKN 0xFFFFFFE0
+/* Lower bit position of bitfield lro_pkt_min[4:0] */
+#define HW_ATL_RPO_LRO_PKT_MIN_SHIFT 0
+/* Width of bitfield lro_pkt_min[4:0] */
+#define HW_ATL_RPO_LRO_PKT_MIN_WIDTH 5
+/* Default value of bitfield lro_pkt_min[4:0] */
+#define HW_ATL_RPO_LRO_PKT_MIN_DEFAULT 0x8
+
+/* Width of bitfield lro{L}_des_max[1:0] */
+#define HW_ATL_RPO_LRO_LDES_MAX_WIDTH 2
+/* Default value of bitfield lro{L}_des_max[1:0] */
+#define HW_ATL_RPO_LRO_LDES_MAX_DEFAULT 0x0
+
+/* RX lro_tb_div[11:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_tb_div[11:0]".
+ * PORT="pif_rpo_lro_tb_div_i[11:0]"
+ */
+
+/* Register address for bitfield lro_tb_div[11:0] */
+#define HW_ATL_RPO_LRO_TB_DIV_ADR 0x00005620
+/* Bitmask for bitfield lro_tb_div[11:0] */
+#define HW_ATL_RPO_LRO_TB_DIV_MSK 0xFFF00000
+/* Inverted bitmask for bitfield lro_tb_div[11:0] */
+#define HW_ATL_RPO_LRO_TB_DIV_MSKN 0x000FFFFF
+/* Lower bit position of bitfield lro_tb_div[11:0] */
+#define HW_ATL_RPO_LRO_TB_DIV_SHIFT 20
+/* Width of bitfield lro_tb_div[11:0] */
+#define HW_ATL_RPO_LRO_TB_DIV_WIDTH 12
+/* Default value of bitfield lro_tb_div[11:0] */
+#define HW_ATL_RPO_LRO_TB_DIV_DEFAULT 0xC35
+
+/* RX lro_ina_ival[9:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_ina_ival[9:0]".
+ * PORT="pif_rpo_lro_ina_ival_i[9:0]"
+ */
+
+/* Register address for bitfield lro_ina_ival[9:0] */
+#define HW_ATL_RPO_LRO_INA_IVAL_ADR 0x00005620
+/* Bitmask for bitfield lro_ina_ival[9:0] */
+#define HW_ATL_RPO_LRO_INA_IVAL_MSK 0x000FFC00
+/* Inverted bitmask for bitfield lro_ina_ival[9:0] */
+#define HW_ATL_RPO_LRO_INA_IVAL_MSKN 0xFFF003FF
+/* Lower bit position of bitfield lro_ina_ival[9:0] */
+#define HW_ATL_RPO_LRO_INA_IVAL_SHIFT 10
+/* Width of bitfield lro_ina_ival[9:0] */
+#define HW_ATL_RPO_LRO_INA_IVAL_WIDTH 10
+/* Default value of bitfield lro_ina_ival[9:0] */
+#define HW_ATL_RPO_LRO_INA_IVAL_DEFAULT 0xA
+
+/* RX lro_max_ival[9:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_max_ival[9:0]".
+ * PORT="pif_rpo_lro_max_ival_i[9:0]"
+ */
+
+/* Register address for bitfield lro_max_ival[9:0] */
+#define HW_ATL_RPO_LRO_MAX_IVAL_ADR 0x00005620
+/* Bitmask for bitfield lro_max_ival[9:0] */
+#define HW_ATL_RPO_LRO_MAX_IVAL_MSK 0x000003FF
+/* Inverted bitmask for bitfield lro_max_ival[9:0] */
+#define HW_ATL_RPO_LRO_MAX_IVAL_MSKN 0xFFFFFC00
+/* Lower bit position of bitfield lro_max_ival[9:0] */
+#define HW_ATL_RPO_LRO_MAX_IVAL_SHIFT 0
+/* Width of bitfield lro_max_ival[9:0] */
+#define HW_ATL_RPO_LRO_MAX_IVAL_WIDTH 10
+/* Default value of bitfield lro_max_ival[9:0] */
+#define HW_ATL_RPO_LRO_MAX_IVAL_DEFAULT 0x19
+
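+/* Usage sketch: lro_tb_div, lro_ina_ival and lro_max_ival all live in
+ * the register at 0x5620; the timebase divider scales the two interval
+ * fields. With aq_hw_write_reg_bit() assumed from aq_hw_utils.h
+ * (re-writing the documented defaults, for illustration only):
+ *
+ *   aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_TB_DIV_ADR,
+ *                       HW_ATL_RPO_LRO_TB_DIV_MSK,
+ *                       HW_ATL_RPO_LRO_TB_DIV_SHIFT, 0xC35U);
+ *   aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_INA_IVAL_ADR,
+ *                       HW_ATL_RPO_LRO_INA_IVAL_MSK,
+ *                       HW_ATL_RPO_LRO_INA_IVAL_SHIFT, 0xAU);
+ *   aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_MAX_IVAL_ADR,
+ *                       HW_ATL_RPO_LRO_MAX_IVAL_MSK,
+ *                       HW_ATL_RPO_LRO_MAX_IVAL_SHIFT, 0x19U);
+ */
+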
+/* TX dca{D}_cpuid[7:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "dca{D}_cpuid[7:0]".
+ * Parameter: DCA {D} | stride size 0x4 | range [0, 31]
+ * PORT="pif_tdm_dca0_cpuid_i[7:0]"
+ */
+
+/* Register address for bitfield dca{D}_cpuid[7:0] */
+#define HW_ATL_TDM_DCA_DCPUID_ADR(dca) (0x00008400 + (dca) * 0x4)
+/* Bitmask for bitfield dca{D}_cpuid[7:0] */
+#define HW_ATL_TDM_DCA_DCPUID_MSK 0x000000FF
+/* Inverted bitmask for bitfield dca{D}_cpuid[7:0] */
+#define HW_ATL_TDM_DCA_DCPUID_MSKN 0xFFFFFF00
+/* Lower bit position of bitfield dca{D}_cpuid[7:0] */
+#define HW_ATL_TDM_DCA_DCPUID_SHIFT 0
+/* Width of bitfield dca{D}_cpuid[7:0] */
+#define HW_ATL_TDM_DCA_DCPUID_WIDTH 8
+/* Default value of bitfield dca{D}_cpuid[7:0] */
+#define HW_ATL_TDM_DCA_DCPUID_DEFAULT 0x0
+
+/* TX dca{D}_desc_en Bitfield Definitions
+ * Preprocessor definitions for the bitfield "dca{D}_desc_en".
+ * Parameter: DCA {D} | stride size 0x4 | range [0, 31]
+ * PORT="pif_tdm_dca_desc_en_i[0]"
+ */
+
+/* Register address for bitfield dca{D}_desc_en */
+#define HW_ATL_TDM_DCA_DDESC_EN_ADR(dca) (0x00008400 + (dca) * 0x4)
+/* Bitmask for bitfield dca{D}_desc_en */
+#define HW_ATL_TDM_DCA_DDESC_EN_MSK 0x80000000
+/* Inverted bitmask for bitfield dca{D}_desc_en */
+#define HW_ATL_TDM_DCA_DDESC_EN_MSKN 0x7FFFFFFF
+/* Lower bit position of bitfield dca{D}_desc_en */
+#define HW_ATL_TDM_DCA_DDESC_EN_SHIFT 31
+/* Width of bitfield dca{D}_desc_en */
+#define HW_ATL_TDM_DCA_DDESC_EN_WIDTH 1
+/* Default value of bitfield dca{D}_desc_en */
+#define HW_ATL_TDM_DCA_DDESC_EN_DEFAULT 0x0
+
+/* TX desc{D}_en Bitfield Definitions
+ * Preprocessor definitions for the bitfield "desc{D}_en".
+ * Parameter: descriptor {D} | stride size 0x40 | range [0, 31]
+ * PORT="pif_tdm_desc_en_i[0]"
+ */
+
+/* Register address for bitfield desc{D}_en */
+#define HW_ATL_TDM_DESC_DEN_ADR(descriptor) (0x00007C08 + (descriptor) * 0x40)
+/* Bitmask for bitfield desc{D}_en */
+#define HW_ATL_TDM_DESC_DEN_MSK 0x80000000
+/* Inverted bitmask for bitfield desc{D}_en */
+#define HW_ATL_TDM_DESC_DEN_MSKN 0x7FFFFFFF
+/* Lower bit position of bitfield desc{D}_en */
+#define HW_ATL_TDM_DESC_DEN_SHIFT 31
+/* Width of bitfield desc{D}_en */
+#define HW_ATL_TDM_DESC_DEN_WIDTH 1
+/* Default value of bitfield desc{D}_en */
+#define HW_ATL_TDM_DESC_DEN_DEFAULT 0x0
+
+/* TX desc{D}_hd[C:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "desc{D}_hd[C:0]".
+ * Parameter: descriptor {D} | stride size 0x40 | range [0, 31]
+ * PORT="tdm_pif_desc0_hd_o[12:0]"
+ */
+
+/* Register address for bitfield desc{D}_hd[C:0] */
+#define HW_ATL_TDM_DESC_DHD_ADR(descriptor) (0x00007C0C + (descriptor) * 0x40)
+/* Bitmask for bitfield desc{D}_hd[C:0] */
+#define HW_ATL_TDM_DESC_DHD_MSK 0x00001FFF
+/* Inverted bitmask for bitfield desc{D}_hd[C:0] */
+#define HW_ATL_TDM_DESC_DHD_MSKN 0xFFFFE000
+/* Lower bit position of bitfield desc{D}_hd[C:0] */
+#define HW_ATL_TDM_DESC_DHD_SHIFT 0
+/* Width of bitfield desc{D}_hd[C:0] */
+#define HW_ATL_TDM_DESC_DHD_WIDTH 13
+
+/* TX desc{D}_len[9:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "desc{D}_len[9:0]".
+ * Parameter: descriptor {D} | stride size 0x40 | range [0, 31]
+ * PORT="pif_tdm_desc0_len_i[9:0]"
+ */
+
+/* Register address for bitfield desc{D}_len[9:0] */
+#define HW_ATL_TDM_DESC_DLEN_ADR(descriptor) (0x00007C08 + (descriptor) * 0x40)
+/* Bitmask for bitfield desc{D}_len[9:0] */
+#define HW_ATL_TDM_DESC_DLEN_MSK 0x00001FF8
+/* Inverted bitmask for bitfield desc{D}_len[9:0] */
+#define HW_ATL_TDM_DESC_DLEN_MSKN 0xFFFFE007
+/* Lower bit position of bitfield desc{D}_len[9:0] */
+#define HW_ATL_TDM_DESC_DLEN_SHIFT 3
+/* Width of bitfield desc{D}_len[9:0] */
+#define HW_ATL_TDM_DESC_DLEN_WIDTH 10
+/* Default value of bitfield desc{D}_len[9:0] */
+#define HW_ATL_TDM_DESC_DLEN_DEFAULT 0x0
+
+/* TX desc{D}_wrb_thresh[6:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "desc{D}_wrb_thresh[6:0]".
+ * Parameter: descriptor {D} | stride size 0x40 | range [0, 31]
+ * PORT="pif_tdm_desc0_wrb_thresh_i[6:0]"
+ */
+
+/* Register address for bitfield desc{D}_wrb_thresh[6:0] */
+#define HW_ATL_TDM_DESC_DWRB_THRESH_ADR(descriptor) \
+ (0x00007C18 + (descriptor) * 0x40)
+/* Bitmask for bitfield desc{D}_wrb_thresh[6:0] */
+#define HW_ATL_TDM_DESC_DWRB_THRESH_MSK 0x00007F00
+/* Inverted bitmask for bitfield desc{D}_wrb_thresh[6:0] */
+#define HW_ATL_TDM_DESC_DWRB_THRESH_MSKN 0xFFFF80FF
+/* Lower bit position of bitfield desc{D}_wrb_thresh[6:0] */
+#define HW_ATL_TDM_DESC_DWRB_THRESH_SHIFT 8
+/* Width of bitfield desc{D}_wrb_thresh[6:0] */
+#define HW_ATL_TDM_DESC_DWRB_THRESH_WIDTH 7
+/* Default value of bitfield desc{D}_wrb_thresh[6:0] */
+#define HW_ATL_TDM_DESC_DWRB_THRESH_DEFAULT 0x0
+
+/* TX tdm_int_mod_en Bitfield Definitions
+ * Preprocessor definitions for the bitfield "tdm_int_mod_en".
+ * PORT="pif_tdm_int_mod_en_i"
+ */
+
+/* Register address for bitfield tdm_int_mod_en */
+#define HW_ATL_TDM_INT_MOD_EN_ADR 0x00007B40
+/* Bitmask for bitfield tdm_int_mod_en */
+#define HW_ATL_TDM_INT_MOD_EN_MSK 0x00000010
+/* Inverted bitmask for bitfield tdm_int_mod_en */
+#define HW_ATL_TDM_INT_MOD_EN_MSKN 0xFFFFFFEF
+/* Lower bit position of bitfield tdm_int_mod_en */
+#define HW_ATL_TDM_INT_MOD_EN_SHIFT 4
+/* Width of bitfield tdm_int_mod_en */
+#define HW_ATL_TDM_INT_MOD_EN_WIDTH 1
+/* Default value of bitfield tdm_int_mod_en */
+#define HW_ATL_TDM_INT_MOD_EN_DEFAULT 0x0
+
+/* TX lso_tcp_flag_mid[B:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lso_tcp_flag_mid[B:0]".
+ * PORT="pif_thm_lso_tcp_flag_mid_i[11:0]"
+ */
+/* register address for bitfield lso_tcp_flag_mid[b:0] */
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_ADR 0x00007820
+/* bitmask for bitfield lso_tcp_flag_mid[b:0] */
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_MSK 0x0fff0000
+/* inverted bitmask for bitfield lso_tcp_flag_mid[b:0] */
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_MSKN 0xf000ffff
+/* lower bit position of bitfield lso_tcp_flag_mid[b:0] */
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_SHIFT 16
+/* width of bitfield lso_tcp_flag_mid[b:0] */
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_WIDTH 12
+/* default value of bitfield lso_tcp_flag_mid[b:0] */
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_DEFAULT 0x0
+
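+/* Usage sketch: the lso_tcp_flag_first/mid/last fields select which TCP
+ * flag bits the LSO engine keeps in the first, middle and last segments
+ * of a split packet. With aq_hw_write_reg_bit() assumed from
+ * aq_hw_utils.h (the 0x0FF6 flag mask is illustrative):
+ *
+ *   aq_hw_write_reg_bit(aq_hw, HW_ATL_THM_LSO_TCP_FLAG_MID_ADR,
+ *                       HW_ATL_THM_LSO_TCP_FLAG_MID_MSK,
+ *                       HW_ATL_THM_LSO_TCP_FLAG_MID_SHIFT, 0x0FF6U);
+ */
+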
+/* tx tx_tc_mode bitfield definitions
+ * preprocessor definitions for the bitfield "tx_tc_mode".
+ * port="pif_tpb_tx_tc_mode_i,pif_tps_tx_tc_mode_i"
+ */
+
+/* register address for bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_ADDR 0x00007900
+/* bitmask for bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_MSK 0x00000100
+/* inverted bitmask for bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_MSKN 0xFFFFFEFF
+/* lower bit position of bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_SHIFT 8
+/* width of bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_WIDTH 1
+/* default value of bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_DEFAULT 0x0
+
+/* tx tx_desc_rate_mode bitfield definitions
+ * preprocessor definitions for the bitfield "tx_desc_rate_mode".
+ * port="pif_tps_desc_rate_mode_i"
+ */
+
+/* register address for bitfield tx_desc_rate_mode */
+#define HW_ATL_TPS_TX_DESC_RATE_MODE_ADR 0x00007900
+/* bitmask for bitfield tx_desc_rate_mode */
+#define HW_ATL_TPS_TX_DESC_RATE_MODE_MSK 0x00000080
+/* inverted bitmask for bitfield tx_desc_rate_mode */
+#define HW_ATL_TPS_TX_DESC_RATE_MODE_MSKN 0xFFFFFF7F
+/* lower bit position of bitfield tx_desc_rate_mode */
+#define HW_ATL_TPS_TX_DESC_RATE_MODE_SHIFT 7
+/* width of bitfield tx_desc_rate_mode */
+#define HW_ATL_TPS_TX_DESC_RATE_MODE_WIDTH 1
+/* default value of bitfield tx_desc_rate_mode */
+#define HW_ATL_TPS_TX_DESC_RATE_MODE_DEFAULT 0x0
+
+/* tx tx_buf_en bitfield definitions
+ * preprocessor definitions for the bitfield "tx_buf_en".
+ * port="pif_tpb_tx_buf_en_i"
+ */
+
+/* register address for bitfield tx_buf_en */
+#define HW_ATL_TPB_TX_BUF_EN_ADR 0x00007900
+/* bitmask for bitfield tx_buf_en */
+#define HW_ATL_TPB_TX_BUF_EN_MSK 0x00000001
+/* inverted bitmask for bitfield tx_buf_en */
+#define HW_ATL_TPB_TX_BUF_EN_MSKN 0xfffffffe
+/* lower bit position of bitfield tx_buf_en */
+#define HW_ATL_TPB_TX_BUF_EN_SHIFT 0
+/* width of bitfield tx_buf_en */
+#define HW_ATL_TPB_TX_BUF_EN_WIDTH 1
+/* default value of bitfield tx_buf_en */
+#define HW_ATL_TPB_TX_BUF_EN_DEFAULT 0x0
+
+/* tx tx{b}_hi_thresh[c:0] bitfield definitions
+ * preprocessor definitions for the bitfield "tx{b}_hi_thresh[c:0]".
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7]
+ * port="pif_tpb_tx0_hi_thresh_i[12:0]"
+ */
+
+/* register address for bitfield tx{b}_hi_thresh[c:0] */
+#define HW_ATL_TPB_TXBHI_THRESH_ADR(buffer) (0x00007914 + (buffer) * 0x10)
+/* bitmask for bitfield tx{b}_hi_thresh[c:0] */
+#define HW_ATL_TPB_TXBHI_THRESH_MSK 0x1fff0000
+/* inverted bitmask for bitfield tx{b}_hi_thresh[c:0] */
+#define HW_ATL_TPB_TXBHI_THRESH_MSKN 0xe000ffff
+/* lower bit position of bitfield tx{b}_hi_thresh[c:0] */
+#define HW_ATL_TPB_TXBHI_THRESH_SHIFT 16
+/* width of bitfield tx{b}_hi_thresh[c:0] */
+#define HW_ATL_TPB_TXBHI_THRESH_WIDTH 13
+/* default value of bitfield tx{b}_hi_thresh[c:0] */
+#define HW_ATL_TPB_TXBHI_THRESH_DEFAULT 0x0
+
+/* tx tx{b}_lo_thresh[c:0] bitfield definitions
+ * preprocessor definitions for the bitfield "tx{b}_lo_thresh[c:0]".
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7]
+ * port="pif_tpb_tx0_lo_thresh_i[12:0]"
+ */
+
+/* register address for bitfield tx{b}_lo_thresh[c:0] */
+#define HW_ATL_TPB_TXBLO_THRESH_ADR(buffer) (0x00007914 + (buffer) * 0x10)
+/* bitmask for bitfield tx{b}_lo_thresh[c:0] */
+#define HW_ATL_TPB_TXBLO_THRESH_MSK 0x00001fff
+/* inverted bitmask for bitfield tx{b}_lo_thresh[c:0] */
+#define HW_ATL_TPB_TXBLO_THRESH_MSKN 0xffffe000
+/* lower bit position of bitfield tx{b}_lo_thresh[c:0] */
+#define HW_ATL_TPB_TXBLO_THRESH_SHIFT 0
+/* width of bitfield tx{b}_lo_thresh[c:0] */
+#define HW_ATL_TPB_TXBLO_THRESH_WIDTH 13
+/* default value of bitfield tx{b}_lo_thresh[c:0] */
+#define HW_ATL_TPB_TXBLO_THRESH_DEFAULT 0x0
+
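+/* Usage sketch: the hi/lo watermarks of buffer {b} share the register at
+ * 0x7914 + b * 0x10 and use the same units as the 13-bit buffer fill
+ * level. With aq_hw_write_reg_bit() assumed from aq_hw_utils.h (the
+ * 66%/50% split is illustrative):
+ *
+ *   aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TXBHI_THRESH_ADR(buf),
+ *                       HW_ATL_TPB_TXBHI_THRESH_MSK,
+ *                       HW_ATL_TPB_TXBHI_THRESH_SHIFT,
+ *                       (buf_cells * 66U) / 100U);
+ *   aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TXBLO_THRESH_ADR(buf),
+ *                       HW_ATL_TPB_TXBLO_THRESH_MSK,
+ *                       HW_ATL_TPB_TXBLO_THRESH_SHIFT,
+ *                       (buf_cells * 50U) / 100U);
+ */
+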
+/* tx dma_sys_loopback bitfield definitions
+ * preprocessor definitions for the bitfield "dma_sys_loopback".
+ * port="pif_tpb_dma_sys_lbk_i"
+ */
+
+/* register address for bitfield dma_sys_loopback */
+#define HW_ATL_TPB_DMA_SYS_LBK_ADR 0x00007000
+/* bitmask for bitfield dma_sys_loopback */
+#define HW_ATL_TPB_DMA_SYS_LBK_MSK 0x00000040
+/* inverted bitmask for bitfield dma_sys_loopback */
+#define HW_ATL_TPB_DMA_SYS_LBK_MSKN 0xffffffbf
+/* lower bit position of bitfield dma_sys_loopback */
+#define HW_ATL_TPB_DMA_SYS_LBK_SHIFT 6
+/* width of bitfield dma_sys_loopback */
+#define HW_ATL_TPB_DMA_SYS_LBK_WIDTH 1
+/* default value of bitfield dma_sys_loopback */
+#define HW_ATL_TPB_DMA_SYS_LBK_DEFAULT 0x0
+
+/* tx dma_net_loopback bitfield definitions
+ * preprocessor definitions for the bitfield "dma_net_loopback".
+ * port="pif_tpb_dma_net_lbk_i"
+ */
+
+/* register address for bitfield dma_net_loopback */
+#define HW_ATL_TPB_DMA_NET_LBK_ADR 0x00007000
+/* bitmask for bitfield dma_net_loopback */
+#define HW_ATL_TPB_DMA_NET_LBK_MSK 0x00000010
+/* inverted bitmask for bitfield dma_net_loopback */
+#define HW_ATL_TPB_DMA_NET_LBK_MSKN 0xffffffef
+/* lower bit position of bitfield dma_net_loopback */
+#define HW_ATL_TPB_DMA_NET_LBK_SHIFT 4
+/* width of bitfield dma_net_loopback */
+#define HW_ATL_TPB_DMA_NET_LBK_WIDTH 1
+/* default value of bitfield dma_net_loopback */
+#define HW_ATL_TPB_DMA_NET_LBK_DEFAULT 0x0
+
+/* tx tx{b}_buf_size[7:0] bitfield definitions
+ * preprocessor definitions for the bitfield "tx{b}_buf_size[7:0]".
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7]
+ * port="pif_tpb_tx0_buf_size_i[7:0]"
+ */
+
+/* register address for bitfield tx{b}_buf_size[7:0] */
+#define HW_ATL_TPB_TXBBUF_SIZE_ADR(buffer) (0x00007910 + (buffer) * 0x10)
+/* bitmask for bitfield tx{b}_buf_size[7:0] */
+#define HW_ATL_TPB_TXBBUF_SIZE_MSK 0x000000ff
+/* inverted bitmask for bitfield tx{b}_buf_size[7:0] */
+#define HW_ATL_TPB_TXBBUF_SIZE_MSKN 0xffffff00
+/* lower bit position of bitfield tx{b}_buf_size[7:0] */
+#define HW_ATL_TPB_TXBBUF_SIZE_SHIFT 0
+/* width of bitfield tx{b}_buf_size[7:0] */
+#define HW_ATL_TPB_TXBBUF_SIZE_WIDTH 8
+/* default value of bitfield tx{b}_buf_size[7:0] */
+#define HW_ATL_TPB_TXBBUF_SIZE_DEFAULT 0x0
+
+/* tx tx_scp_ins_en bitfield definitions
+ * preprocessor definitions for the bitfield "tx_scp_ins_en".
+ * port="pif_tpb_scp_ins_en_i"
+ */
+
+/* register address for bitfield tx_scp_ins_en */
+#define HW_ATL_TPB_TX_SCP_INS_EN_ADR 0x00007900
+/* bitmask for bitfield tx_scp_ins_en */
+#define HW_ATL_TPB_TX_SCP_INS_EN_MSK 0x00000004
+/* inverted bitmask for bitfield tx_scp_ins_en */
+#define HW_ATL_TPB_TX_SCP_INS_EN_MSKN 0xfffffffb
+/* lower bit position of bitfield tx_scp_ins_en */
+#define HW_ATL_TPB_TX_SCP_INS_EN_SHIFT 2
+/* width of bitfield tx_scp_ins_en */
+#define HW_ATL_TPB_TX_SCP_INS_EN_WIDTH 1
+/* default value of bitfield tx_scp_ins_en */
+#define HW_ATL_TPB_TX_SCP_INS_EN_DEFAULT 0x0
+
+/* tx tx_clk_gate_en bitfield definitions
+ * preprocessor definitions for the bitfield "tx_clk_gate_en".
+ * port="pif_tpb_clk_gate_en_i"
+ */
+
+/* register address for bitfield tx_clk_gate_en */
+#define HW_ATL_TPB_TX_CLK_GATE_EN_ADR 0x00007900
+/* bitmask for bitfield tx_clk_gate_en */
+#define HW_ATL_TPB_TX_CLK_GATE_EN_MSK 0x00000010
+/* inverted bitmask for bitfield tx_clk_gate_en */
+#define HW_ATL_TPB_TX_CLK_GATE_EN_MSKN 0xffffffef
+/* lower bit position of bitfield tx_clk_gate_en */
+#define HW_ATL_TPB_TX_CLK_GATE_EN_SHIFT 4
+/* width of bitfield tx_clk_gate_en */
+#define HW_ATL_TPB_TX_CLK_GATE_EN_WIDTH 1
+/* default value of bitfield tx_clk_gate_en */
+#define HW_ATL_TPB_TX_CLK_GATE_EN_DEFAULT 0x1
+
+/* tx ipv4_chk_en bitfield definitions
+ * preprocessor definitions for the bitfield "ipv4_chk_en".
+ * port="pif_tpo_ipv4_chk_en_i"
+ */
+
+/* register address for bitfield ipv4_chk_en */
+#define HW_ATL_TPO_IPV4CHK_EN_ADR 0x00007800
+/* bitmask for bitfield ipv4_chk_en */
+#define HW_ATL_TPO_IPV4CHK_EN_MSK 0x00000002
+/* inverted bitmask for bitfield ipv4_chk_en */
+#define HW_ATL_TPO_IPV4CHK_EN_MSKN 0xfffffffd
+/* lower bit position of bitfield ipv4_chk_en */
+#define HW_ATL_TPO_IPV4CHK_EN_SHIFT 1
+/* width of bitfield ipv4_chk_en */
+#define HW_ATL_TPO_IPV4CHK_EN_WIDTH 1
+/* default value of bitfield ipv4_chk_en */
+#define HW_ATL_TPO_IPV4CHK_EN_DEFAULT 0x0
+
+/* tx l4_chk_en bitfield definitions
+ * preprocessor definitions for the bitfield "l4_chk_en".
+ * port="pif_tpo_l4_chk_en_i"
+ */
+
+/* register address for bitfield l4_chk_en */
+#define HW_ATL_TPOL4CHK_EN_ADR 0x00007800
+/* bitmask for bitfield l4_chk_en */
+#define HW_ATL_TPOL4CHK_EN_MSK 0x00000001
+/* inverted bitmask for bitfield l4_chk_en */
+#define HW_ATL_TPOL4CHK_EN_MSKN 0xfffffffe
+/* lower bit position of bitfield l4_chk_en */
+#define HW_ATL_TPOL4CHK_EN_SHIFT 0
+/* width of bitfield l4_chk_en */
+#define HW_ATL_TPOL4CHK_EN_WIDTH 1
+/* default value of bitfield l4_chk_en */
+#define HW_ATL_TPOL4CHK_EN_DEFAULT 0x0
+
+/* tx pkt_sys_loopback bitfield definitions
+ * preprocessor definitions for the bitfield "pkt_sys_loopback".
+ * port="pif_tpo_pkt_sys_lbk_i"
+ */
+
+/* register address for bitfield pkt_sys_loopback */
+#define HW_ATL_TPO_PKT_SYS_LBK_ADR 0x00007000
+/* bitmask for bitfield pkt_sys_loopback */
+#define HW_ATL_TPO_PKT_SYS_LBK_MSK 0x00000080
+/* inverted bitmask for bitfield pkt_sys_loopback */
+#define HW_ATL_TPO_PKT_SYS_LBK_MSKN 0xffffff7f
+/* lower bit position of bitfield pkt_sys_loopback */
+#define HW_ATL_TPO_PKT_SYS_LBK_SHIFT 7
+/* width of bitfield pkt_sys_loopback */
+#define HW_ATL_TPO_PKT_SYS_LBK_WIDTH 1
+/* default value of bitfield pkt_sys_loopback */
+#define HW_ATL_TPO_PKT_SYS_LBK_DEFAULT 0x0
+
+/* tx data_tc_arb_mode bitfield definitions
+ * preprocessor definitions for the bitfield "data_tc_arb_mode".
+ * port="pif_tps_data_tc_arb_mode_i"
+ */
+
+/* register address for bitfield data_tc_arb_mode */
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_ADR 0x00007100
+/* bitmask for bitfield data_tc_arb_mode */
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_MSK 0x00000001
+/* inverted bitmask for bitfield data_tc_arb_mode */
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_MSKN 0xfffffffe
+/* lower bit position of bitfield data_tc_arb_mode */
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_SHIFT 0
+/* width of bitfield data_tc_arb_mode */
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_WIDTH 1
+/* default value of bitfield data_tc_arb_mode */
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_DEFAULT 0x0
+
+/* tx desc{r}_rate_en bitfield definitions
+ * preprocessor definitions for the bitfield "desc{r}_rate_en".
+ * port="pif_tps_desc_rate_en_i[0]"
+ */
+
+/* register address for bitfield desc{r}_rate_en */
+#define HW_ATL_TPS_DESC_RATE_EN_ADR(desc) (0x00007408 + (desc) * 0x10)
+/* bitmask for bitfield desc{r}_rate_en */
+#define HW_ATL_TPS_DESC_RATE_EN_MSK 0x80000000
+/* inverted bitmask for bitfield desc{r}_rate_en */
+#define HW_ATL_TPS_DESC_RATE_EN_MSKN 0x7FFFFFFF
+/* lower bit position of bitfield desc{r}_rate_en */
+#define HW_ATL_TPS_DESC_RATE_EN_SHIFT 31
+/* width of bitfield desc{r}_rate_en */
+#define HW_ATL_TPS_DESC_RATE_EN_WIDTH 1
+/* default value of bitfield desc{r}_rate_en */
+#define HW_ATL_TPS_DESC_RATE_EN_DEFAULT 0x0
+
+/* tx desc{r}_rate_x bitfield definitions
+ * preprocessor definitions for the bitfield "desc{r}_rate_x".
+ * port="pif_tps_desc0_rate_x"
+ */
+/* register address for bitfield desc{r}_rate_x */
+#define HW_ATL_TPS_DESC_RATE_X_ADR(desc) (0x00007408 + (desc) * 0x10)
+/* bitmask for bitfield desc{r}_rate_x */
+#define HW_ATL_TPS_DESC_RATE_X_MSK 0x03FF0000
+/* inverted bitmask for bitfield desc{r}_rate_x */
+#define HW_ATL_TPS_DESC_RATE_X_MSKN 0xFC00FFFF
+/* lower bit position of bitfield desc{r}_rate_x */
+#define HW_ATL_TPS_DESC_RATE_X_SHIFT 16
+/* width of bitfield desc{r}_rate_x */
+#define HW_ATL_TPS_DESC_RATE_X_WIDTH 10
+/* default value of bitfield desc{r}_rate_x */
+#define HW_ATL_TPS_DESC_RATE_X_DEFAULT 0x0
+
+/* tx desc{r}_rate_y bitfield definitions
+ * preprocessor definitions for the bitfield "desc{r}_rate_y".
+ * port="pif_tps_desc0_rate_y"
+ */
+/* register address for bitfield desc{r}_rate_y */
+#define HW_ATL_TPS_DESC_RATE_Y_ADR(desc) (0x00007408 + (desc) * 0x10)
+/* bitmask for bitfield desc{r}_rate_y */
+#define HW_ATL_TPS_DESC_RATE_Y_MSK 0x00003FFF
+/* inverted bitmask for bitfield desc{r}_rate_y */
+#define HW_ATL_TPS_DESC_RATE_Y_MSKN 0xFFFFC000
+/* lower bit position of bitfield desc{r}_rate_y */
+#define HW_ATL_TPS_DESC_RATE_Y_SHIFT 0
+/* width of bitfield desc{r}_rate_y */
+#define HW_ATL_TPS_DESC_RATE_Y_WIDTH 14
+/* default value of bitfield desc{r}_rate_y */
+#define HW_ATL_TPS_DESC_RATE_Y_DEFAULT 0x0
+
+/* tx desc_rate_ta_rst bitfield definitions
+ * preprocessor definitions for the bitfield "desc_rate_ta_rst".
+ * port="pif_tps_desc_rate_ta_rst_i"
+ */
+
+/* register address for bitfield desc_rate_ta_rst */
+#define HW_ATL_TPS_DESC_RATE_TA_RST_ADR 0x00007310
+/* bitmask for bitfield desc_rate_ta_rst */
+#define HW_ATL_TPS_DESC_RATE_TA_RST_MSK 0x80000000
+/* inverted bitmask for bitfield desc_rate_ta_rst */
+#define HW_ATL_TPS_DESC_RATE_TA_RST_MSKN 0x7fffffff
+/* lower bit position of bitfield desc_rate_ta_rst */
+#define HW_ATL_TPS_DESC_RATE_TA_RST_SHIFT 31
+/* width of bitfield desc_rate_ta_rst */
+#define HW_ATL_TPS_DESC_RATE_TA_RST_WIDTH 1
+/* default value of bitfield desc_rate_ta_rst */
+#define HW_ATL_TPS_DESC_RATE_TA_RST_DEFAULT 0x0
+
+/* tx desc_rate_limit[a:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc_rate_limit[a:0]".
+ * port="pif_tps_desc_rate_lim_i[10:0]"
+ */
+
+/* register address for bitfield desc_rate_limit[a:0] */
+#define HW_ATL_TPS_DESC_RATE_LIM_ADR 0x00007310
+/* bitmask for bitfield desc_rate_limit[a:0] */
+#define HW_ATL_TPS_DESC_RATE_LIM_MSK 0x000007ff
+/* inverted bitmask for bitfield desc_rate_limit[a:0] */
+#define HW_ATL_TPS_DESC_RATE_LIM_MSKN 0xfffff800
+/* lower bit position of bitfield desc_rate_limit[a:0] */
+#define HW_ATL_TPS_DESC_RATE_LIM_SHIFT 0
+/* width of bitfield desc_rate_limit[a:0] */
+#define HW_ATL_TPS_DESC_RATE_LIM_WIDTH 11
+/* default value of bitfield desc_rate_limit[a:0] */
+#define HW_ATL_TPS_DESC_RATE_LIM_DEFAULT 0x0
+
+/* tx desc_tc_arb_mode[1:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc_tc_arb_mode[1:0]".
+ * port="pif_tps_desc_tc_arb_mode_i[1:0]"
+ */
+
+/* register address for bitfield desc_tc_arb_mode[1:0] */
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_ADR 0x00007200
+/* bitmask for bitfield desc_tc_arb_mode[1:0] */
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_MSK 0x00000003
+/* inverted bitmask for bitfield desc_tc_arb_mode[1:0] */
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_MSKN 0xfffffffc
+/* lower bit position of bitfield desc_tc_arb_mode[1:0] */
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_SHIFT 0
+/* width of bitfield desc_tc_arb_mode[1:0] */
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_WIDTH 2
+/* default value of bitfield desc_tc_arb_mode[1:0] */
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_DEFAULT 0x0
+
+/* tx desc_tc{t}_credit_max[b:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc_tc{t}_credit_max[b:0]".
+ * parameter: tc {t} | stride size 0x4 | range [0, 7]
+ * port="pif_tps_desc_tc0_credit_max_i[11:0]"
+ */
+
+/* register address for bitfield desc_tc{t}_credit_max[b:0] */
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_ADR(tc) (0x00007210 + (tc) * 0x4)
+/* bitmask for bitfield desc_tc{t}_credit_max[b:0] */
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_MSK 0x0fff0000
+/* inverted bitmask for bitfield desc_tc{t}_credit_max[b:0] */
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_MSKN 0xf000ffff
+/* lower bit position of bitfield desc_tc{t}_credit_max[b:0] */
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_SHIFT 16
+/* width of bitfield desc_tc{t}_credit_max[b:0] */
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_WIDTH 12
+/* default value of bitfield desc_tc{t}_credit_max[b:0] */
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_DEFAULT 0x0
+
+/* tx desc_tc{t}_weight[8:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc_tc{t}_weight[8:0]".
+ * parameter: tc {t} | stride size 0x4 | range [0, 7]
+ * port="pif_tps_desc_tc0_weight_i[8:0]"
+ */
+
+/* register address for bitfield desc_tc{t}_weight[8:0] */
+#define HW_ATL_TPS_DESC_TCTWEIGHT_ADR(tc) (0x00007210 + (tc) * 0x4)
+/* bitmask for bitfield desc_tc{t}_weight[8:0] */
+#define HW_ATL_TPS_DESC_TCTWEIGHT_MSK 0x000001ff
+/* inverted bitmask for bitfield desc_tc{t}_weight[8:0] */
+#define HW_ATL_TPS_DESC_TCTWEIGHT_MSKN 0xfffffe00
+/* lower bit position of bitfield desc_tc{t}_weight[8:0] */
+#define HW_ATL_TPS_DESC_TCTWEIGHT_SHIFT 0
+/* width of bitfield desc_tc{t}_weight[8:0] */
+#define HW_ATL_TPS_DESC_TCTWEIGHT_WIDTH 9
+/* default value of bitfield desc_tc{t}_weight[8:0] */
+#define HW_ATL_TPS_DESC_TCTWEIGHT_DEFAULT 0x0
+
+/* tx desc_vm_arb_mode bitfield definitions
+ * preprocessor definitions for the bitfield "desc_vm_arb_mode".
+ * port="pif_tps_desc_vm_arb_mode_i"
+ */
+
+/* register address for bitfield desc_vm_arb_mode */
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_ADR 0x00007300
+/* bitmask for bitfield desc_vm_arb_mode */
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_MSK 0x00000001
+/* inverted bitmask for bitfield desc_vm_arb_mode */
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_MSKN 0xfffffffe
+/* lower bit position of bitfield desc_vm_arb_mode */
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_SHIFT 0
+/* width of bitfield desc_vm_arb_mode */
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_WIDTH 1
+/* default value of bitfield desc_vm_arb_mode */
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_DEFAULT 0x0
+
+/* tx data_tc{t}_credit_max[b:0] bitfield definitions
+ * preprocessor definitions for the bitfield "data_tc{t}_credit_max[b:0]".
+ * parameter: tc {t} | stride size 0x4 | range [0, 7]
+ * port="pif_tps_data_tc0_credit_max_i[11:0]"
+ */
+
+/* register address for bitfield data_tc{t}_credit_max[b:0] */
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_ADR(tc) (0x00007110 + (tc) * 0x4)
+/* bitmask for bitfield data_tc{t}_credit_max[b:0] */
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_MSK 0x0fff0000
+/* inverted bitmask for bitfield data_tc{t}_credit_max[b:0] */
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_MSKN 0xf000ffff
+/* lower bit position of bitfield data_tc{t}_credit_max[b:0] */
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_SHIFT 16
+/* width of bitfield data_tc{t}_credit_max[b:0] */
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_WIDTH 12
+/* default value of bitfield data_tc{t}_credit_max[b:0] */
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_DEFAULT 0x0
+
+/* tx data_tc{t}_weight[8:0] bitfield definitions
+ * preprocessor definitions for the bitfield "data_tc{t}_weight[8:0]".
+ * parameter: tc {t} | stride size 0x4 | range [0, 7]
+ * port="pif_tps_data_tc0_weight_i[8:0]"
+ */
+
+/* register address for bitfield data_tc{t}_weight[8:0] */
+#define HW_ATL_TPS_DATA_TCTWEIGHT_ADR(tc) (0x00007110 + (tc) * 0x4)
+/* bitmask for bitfield data_tc{t}_weight[8:0] */
+#define HW_ATL_TPS_DATA_TCTWEIGHT_MSK 0x000001ff
+/* inverted bitmask for bitfield data_tc{t}_weight[8:0] */
+#define HW_ATL_TPS_DATA_TCTWEIGHT_MSKN 0xfffffe00
+/* lower bit position of bitfield data_tc{t}_weight[8:0] */
+#define HW_ATL_TPS_DATA_TCTWEIGHT_SHIFT 0
+/* width of bitfield data_tc{t}_weight[8:0] */
+#define HW_ATL_TPS_DATA_TCTWEIGHT_WIDTH 9
+/* default value of bitfield data_tc{t}_weight[8:0] */
+#define HW_ATL_TPS_DATA_TCTWEIGHT_DEFAULT 0x0
+
+/* tx reg_res_dsbl bitfield definitions
+ * preprocessor definitions for the bitfield "reg_res_dsbl".
+ * port="pif_tx_reg_res_dsbl_i"
+ */
+
+/* register address for bitfield reg_res_dsbl */
+#define HW_ATL_TX_REG_RES_DSBL_ADR 0x00007000
+/* bitmask for bitfield reg_res_dsbl */
+#define HW_ATL_TX_REG_RES_DSBL_MSK 0x20000000
+/* inverted bitmask for bitfield reg_res_dsbl */
+#define HW_ATL_TX_REG_RES_DSBL_MSKN 0xdfffffff
+/* lower bit position of bitfield reg_res_dsbl */
+#define HW_ATL_TX_REG_RES_DSBL_SHIFT 29
+/* width of bitfield reg_res_dsbl */
+#define HW_ATL_TX_REG_RES_DSBL_WIDTH 1
+/* default value of bitfield reg_res_dsbl */
+#define HW_ATL_TX_REG_RES_DSBL_DEFAULT 0x1
+
+/* mac_phy register access busy bitfield definitions
+ * preprocessor definitions for the bitfield "register access busy".
+ * port="msm_pif_reg_busy_o"
+ */
+
+/* register address for bitfield register access busy */
+#define HW_ATL_MSM_REG_ACCESS_BUSY_ADR 0x00004400
+/* bitmask for bitfield register access busy */
+#define HW_ATL_MSM_REG_ACCESS_BUSY_MSK 0x00001000
+/* inverted bitmask for bitfield register access busy */
+#define HW_ATL_MSM_REG_ACCESS_BUSY_MSKN 0xffffefff
+/* lower bit position of bitfield register access busy */
+#define HW_ATL_MSM_REG_ACCESS_BUSY_SHIFT 12
+/* width of bitfield register access busy */
+#define HW_ATL_MSM_REG_ACCESS_BUSY_WIDTH 1
+
+/* mac_phy msm register address[7:0] bitfield definitions
+ * preprocessor definitions for the bitfield "msm register address[7:0]".
+ * port="pif_msm_reg_addr_i[7:0]"
+ */
+
+/* register address for bitfield msm register address[7:0] */
+#define HW_ATL_MSM_REG_ADDR_ADR 0x00004400
+/* bitmask for bitfield msm register address[7:0] */
+#define HW_ATL_MSM_REG_ADDR_MSK 0x000000ff
+/* inverted bitmask for bitfield msm register address[7:0] */
+#define HW_ATL_MSM_REG_ADDR_MSKN 0xffffff00
+/* lower bit position of bitfield msm register address[7:0] */
+#define HW_ATL_MSM_REG_ADDR_SHIFT 0
+/* width of bitfield msm register address[7:0] */
+#define HW_ATL_MSM_REG_ADDR_WIDTH 8
+/* default value of bitfield msm register address[7:0] */
+#define HW_ATL_MSM_REG_ADDR_DEFAULT 0x0
+
+/* mac_phy register read strobe bitfield definitions
+ * preprocessor definitions for the bitfield "register read strobe".
+ * port="pif_msm_reg_rden_i"
+ */
+
+/* register address for bitfield register read strobe */
+#define HW_ATL_MSM_REG_RD_STROBE_ADR 0x00004400
+/* bitmask for bitfield register read strobe */
+#define HW_ATL_MSM_REG_RD_STROBE_MSK 0x00000200
+/* inverted bitmask for bitfield register read strobe */
+#define HW_ATL_MSM_REG_RD_STROBE_MSKN 0xfffffdff
+/* lower bit position of bitfield register read strobe */
+#define HW_ATL_MSM_REG_RD_STROBE_SHIFT 9
+/* width of bitfield register read strobe */
+#define HW_ATL_MSM_REG_RD_STROBE_WIDTH 1
+/* default value of bitfield register read strobe */
+#define HW_ATL_MSM_REG_RD_STROBE_DEFAULT 0x0
+
+/* mac_phy msm register read data[31:0] bitfield definitions
+ * preprocessor definitions for the bitfield "msm register read data[31:0]".
+ * port="msm_pif_reg_rd_data_o[31:0]"
+ */
+
+/* register address for bitfield msm register read data[31:0] */
+#define HW_ATL_MSM_REG_RD_DATA_ADR 0x00004408
+/* bitmask for bitfield msm register read data[31:0] */
+#define HW_ATL_MSM_REG_RD_DATA_MSK 0xffffffff
+/* inverted bitmask for bitfield msm register read data[31:0] */
+#define HW_ATL_MSM_REG_RD_DATA_MSKN 0x00000000
+/* lower bit position of bitfield msm register read data[31:0] */
+#define HW_ATL_MSM_REG_RD_DATA_SHIFT 0
+/* width of bitfield msm register read data[31:0] */
+#define HW_ATL_MSM_REG_RD_DATA_WIDTH 32
+
+/* mac_phy msm register write data[31:0] bitfield definitions
+ * preprocessor definitions for the bitfield "msm register write data[31:0]".
+ * port="pif_msm_reg_wr_data_i[31:0]"
+ */
+
+/* register address for bitfield msm register write data[31:0] */
+#define HW_ATL_MSM_REG_WR_DATA_ADR 0x00004404
+/* bitmask for bitfield msm register write data[31:0] */
+#define HW_ATL_MSM_REG_WR_DATA_MSK 0xffffffff
+/* inverted bitmask for bitfield msm register write data[31:0] */
+#define HW_ATL_MSM_REG_WR_DATA_MSKN 0x00000000
+/* lower bit position of bitfield msm register write data[31:0] */
+#define HW_ATL_MSM_REG_WR_DATA_SHIFT 0
+/* width of bitfield msm register write data[31:0] */
+#define HW_ATL_MSM_REG_WR_DATA_WIDTH 32
+/* default value of bitfield msm register write data[31:0] */
+#define HW_ATL_MSM_REG_WR_DATA_DEFAULT 0x0
+
+/* mac_phy register write strobe bitfield definitions
+ * preprocessor definitions for the bitfield "register write strobe".
+ * port="pif_msm_reg_wren_i"
+ */
+
+/* register address for bitfield register write strobe */
+#define HW_ATL_MSM_REG_WR_STROBE_ADR 0x00004400
+/* bitmask for bitfield register write strobe */
+#define HW_ATL_MSM_REG_WR_STROBE_MSK 0x00000100
+/* inverted bitmask for bitfield register write strobe */
+#define HW_ATL_MSM_REG_WR_STROBE_MSKN 0xfffffeff
+/* lower bit position of bitfield register write strobe */
+#define HW_ATL_MSM_REG_WR_STROBE_SHIFT 8
+/* width of bitfield register write strobe */
+#define HW_ATL_MSM_REG_WR_STROBE_WIDTH 1
+/* default value of bitfield register write strobe */
+#define HW_ATL_MSM_REG_WR_STROBE_DEFAULT 0x0
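+/* Access sketch (illustrative only): the MSM fields above form an indirect
+ * access window at 0x4400. A read is assumed to proceed as follows: program
+ * the 8-bit MSM register address, pulse the read strobe, poll "register
+ * access busy" until it clears, then fetch the result from the read-data
+ * register at 0x4408. Writes mirror this using the write-data register at
+ * 0x4404 and the write strobe.
+ */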
+
+/* register address for bitfield PTP Digital Clock Read Enable */
+#define HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_ADR 0x00004628
+/* bitmask for bitfield PTP Digital Clock Read Enable */
+#define HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_MSK 0x00000010
+/* inverted bitmask for bitfield PTP Digital Clock Read Enable */
+#define HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_MSKN 0xFFFFFFEF
+/* lower bit position of bitfield PTP Digital Clock Read Enable */
+#define HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_SHIFT 4
+/* width of bitfield PTP Digital Clock Read Enable */
+#define HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_WIDTH 1
+/* default value of bitfield PTP Digital Clock Read Enable */
+#define HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_DEFAULT 0x0
+
+/* register address for ptp counter reading */
+#define HW_ATL_PCS_PTP_TS_VAL_ADDR(index) (0x00004900 + (index) * 0x4)
+
+/* mif soft reset bitfield definitions
+ * preprocessor definitions for the bitfield "soft reset".
+ * port="pif_glb_res_i"
+ */
+
+/* register address for bitfield soft reset */
+#define HW_ATL_GLB_SOFT_RES_ADR 0x00000000
+/* bitmask for bitfield soft reset */
+#define HW_ATL_GLB_SOFT_RES_MSK 0x00008000
+/* inverted bitmask for bitfield soft reset */
+#define HW_ATL_GLB_SOFT_RES_MSKN 0xffff7fff
+/* lower bit position of bitfield soft reset */
+#define HW_ATL_GLB_SOFT_RES_SHIFT 15
+/* width of bitfield soft reset */
+#define HW_ATL_GLB_SOFT_RES_WIDTH 1
+/* default value of bitfield soft reset */
+#define HW_ATL_GLB_SOFT_RES_DEFAULT 0x0
+
+/* mif register reset disable bitfield definitions
+ * preprocessor definitions for the bitfield "register reset disable".
+ * port="pif_glb_reg_res_dsbl_i"
+ */
+
+/* register address for bitfield register reset disable */
+#define HW_ATL_GLB_REG_RES_DIS_ADR 0x00000000
+/* bitmask for bitfield register reset disable */
+#define HW_ATL_GLB_REG_RES_DIS_MSK 0x00004000
+/* inverted bitmask for bitfield register reset disable */
+#define HW_ATL_GLB_REG_RES_DIS_MSKN 0xffffbfff
+/* lower bit position of bitfield register reset disable */
+#define HW_ATL_GLB_REG_RES_DIS_SHIFT 14
+/* width of bitfield register reset disable */
+#define HW_ATL_GLB_REG_RES_DIS_WIDTH 1
+/* default value of bitfield register reset disable */
+#define HW_ATL_GLB_REG_RES_DIS_DEFAULT 0x1
+
+/* tx dma debug control definitions */
+#define HW_ATL_TX_DMA_DEBUG_CTL_ADR 0x00008920u
+
+/* tx dma descriptor base address msw definitions */
+#define HW_ATL_TX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor) \
+ (0x00007c04u + (descriptor) * 0x40)
+
+/* tx dma total request limit */
+#define HW_ATL_TX_DMA_TOTAL_REQ_LIMIT_ADR 0x00007b20u
+
+/* tx interrupt moderation control register definitions
+ * Preprocessor definitions for TX Interrupt Moderation Control Register
+ * Base Address: 0x00008980
+ * Parameter: queue {Q} | stride size 0x4 | range [0, 31]
+ */
+
+#define HW_ATL_TX_INTR_MODERATION_CTL_ADR(queue) (0x00008980u + (queue) * 0x4)
+
+/* pcie reg_res_dsbl bitfield definitions
+ * preprocessor definitions for the bitfield "reg_res_dsbl".
+ * port="pif_pci_reg_res_dsbl_i"
+ */
+
+/* register address for bitfield reg_res_dsbl */
+#define HW_ATL_PCI_REG_RES_DSBL_ADR 0x00001000
+/* bitmask for bitfield reg_res_dsbl */
+#define HW_ATL_PCI_REG_RES_DSBL_MSK 0x20000000
+/* inverted bitmask for bitfield reg_res_dsbl */
+#define HW_ATL_PCI_REG_RES_DSBL_MSKN 0xdfffffff
+/* lower bit position of bitfield reg_res_dsbl */
+#define HW_ATL_PCI_REG_RES_DSBL_SHIFT 29
+/* width of bitfield reg_res_dsbl */
+#define HW_ATL_PCI_REG_RES_DSBL_WIDTH 1
+/* default value of bitfield reg_res_dsbl */
+#define HW_ATL_PCI_REG_RES_DSBL_DEFAULT 0x1
+
+/* PCI core control register */
+#define HW_ATL_PCI_REG_CONTROL6_ADR 0x1014u
+
+/* global microprocessor scratch pad definitions */
+#define HW_ATL_GLB_CPU_SCRATCH_SCP_ADR(scratch_scp) \
+ (0x00000300u + (scratch_scp) * 0x4)
+
+/* register address for bitfield uP Force Interrupt */
+#define HW_ATL_MCP_UP_FORCE_INTERRUPT_ADR 0x00000404
+/* bitmask for bitfield uP Force Interrupt */
+#define HW_ATL_MCP_UP_FORCE_INTERRUPT_MSK 0x00000002
+/* inverted bitmask for bitfield uP Force Interrupt */
+#define HW_ATL_MCP_UP_FORCE_INTERRUPT_MSKN 0xFFFFFFFD
+/* lower bit position of bitfield uP Force Interrupt */
+#define HW_ATL_MCP_UP_FORCE_INTERRUPT_SHIFT 1
+/* width of bitfield uP Force Interrupt */
+#define HW_ATL_MCP_UP_FORCE_INTERRUPT_WIDTH 1
+/* default value of bitfield uP Force Interrupt */
+#define HW_ATL_MCP_UP_FORCE_INTERRUPT_DEFAULT 0x0
+
+/* Preprocessor definitions for Global MDIO Interfaces
+ * Address: 0x00000280 + 0x4 * Number of interface
+ */
+#define HW_ATL_GLB_MDIO_IFACE_ADDR_BEGIN 0x00000280u
+
+#define HW_ATL_GLB_MDIO_IFACE_N_ADR(number) \
+ (HW_ATL_GLB_MDIO_IFACE_ADDR_BEGIN + (((number) - 1) * 0x4))
+
+/* MIF MDIO Busy Bitfield Definitions
+ * Preprocessor definitions for the bitfield "MDIO Busy".
+ * PORT="mdio_pif_busy_o"
+ */
+
+/* Register address for bitfield MDIO Busy */
+#define HW_ATL_MDIO_BUSY_ADR 0x00000284
+/* Bitmask for bitfield MDIO Busy */
+#define HW_ATL_MDIO_BUSY_MSK 0x80000000
+/* Inverted bitmask for bitfield MDIO Busy */
+#define HW_ATL_MDIO_BUSY_MSKN 0x7FFFFFFF
+/* Lower bit position of bitfield MDIO Busy */
+#define HW_ATL_MDIO_BUSY_SHIFT 31
+/* Width of bitfield MDIO Busy */
+#define HW_ATL_MDIO_BUSY_WIDTH 1
+
+/* MIF MDIO Execute Operation Bitfield Definitions
+ * Preprocessor definitions for the bitfield "MDIO Execute Operation".
+ * PORT="pif_mdio_op_start_i"
+ */
+
+/* Register address for bitfield MDIO Execute Operation */
+#define HW_ATL_MDIO_EXECUTE_OPERATION_ADR 0x00000284
+/* Bitmask for bitfield MDIO Execute Operation */
+#define HW_ATL_MDIO_EXECUTE_OPERATION_MSK 0x00008000
+/* Inverted bitmask for bitfield MDIO Execute Operation */
+#define HW_ATL_MDIO_EXECUTE_OPERATION_MSKN 0xFFFF7FFF
+/* Lower bit position of bitfield MDIO Execute Operation */
+#define HW_ATL_MDIO_EXECUTE_OPERATION_SHIFT 15
+/* Width of bitfield MDIO Execute Operation */
+#define HW_ATL_MDIO_EXECUTE_OPERATION_WIDTH 1
+/* Default value of bitfield MDIO Execute Operation */
+#define HW_ATL_MDIO_EXECUTE_OPERATION_DEFAULT 0x0
+
+/* MIF Op Mode [1:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "Op Mode [1:0]".
+ * PORT="pif_mdio_mode_i[1:0]"
+ */
+
+/* Register address for bitfield Op Mode [1:0] */
+#define HW_ATL_MDIO_OP_MODE_ADR 0x00000284
+/* Bitmask for bitfield Op Mode [1:0] */
+#define HW_ATL_MDIO_OP_MODE_MSK 0x00003000
+/* Inverted bitmask for bitfield Op Mode [1:0] */
+#define HW_ATL_MDIO_OP_MODE_MSKN 0xFFFFCFFF
+/* Lower bit position of bitfield Op Mode [1:0] */
+#define HW_ATL_MDIO_OP_MODE_SHIFT 12
+/* Width of bitfield Op Mode [1:0] */
+#define HW_ATL_MDIO_OP_MODE_WIDTH 2
+/* Default value of bitfield Op Mode [1:0] */
+#define HW_ATL_MDIO_OP_MODE_DEFAULT 0x0
+
+/* MIF PHY address Bitfield Definitions
+ * Preprocessor definitions for the bitfield "PHY address".
+ * PORT="pif_mdio_phy_addr_i[9:0]"
+ */
+
+/* Register address for bitfield PHY address */
+#define HW_ATL_MDIO_PHY_ADDRESS_ADR 0x00000284
+/* Bitmask for bitfield PHY address */
+#define HW_ATL_MDIO_PHY_ADDRESS_MSK 0x000003FF
+/* Inverted bitmask for bitfield PHY address */
+#define HW_ATL_MDIO_PHY_ADDRESS_MSKN 0xFFFFFC00
+/* Lower bit position of bitfield PHY address */
+#define HW_ATL_MDIO_PHY_ADDRESS_SHIFT 0
+/* Width of bitfield PHY address */
+#define HW_ATL_MDIO_PHY_ADDRESS_WIDTH 10
+/* Default value of bitfield PHY address */
+#define HW_ATL_MDIO_PHY_ADDRESS_DEFAULT 0x0
+
+/* MIF MDIO WriteData [F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "MDIO WriteData [F:0]".
+ * PORT="pif_mdio_wdata_i[15:0]"
+ */
+
+/* Register address for bitfield MDIO WriteData [F:0] */
+#define HW_ATL_MDIO_WRITE_DATA_ADR 0x00000288
+/* Bitmask for bitfield MDIO WriteData [F:0] */
+#define HW_ATL_MDIO_WRITE_DATA_MSK 0x0000FFFF
+/* Inverted bitmask for bitfield MDIO WriteData [F:0] */
+#define HW_ATL_MDIO_WRITE_DATA_MSKN 0xFFFF0000
+/* Lower bit position of bitfield MDIO WriteData [F:0] */
+#define HW_ATL_MDIO_WRITE_DATA_SHIFT 0
+/* Width of bitfield MDIO WriteData [F:0] */
+#define HW_ATL_MDIO_WRITE_DATA_WIDTH 16
+/* Default value of bitfield MDIO WriteData [F:0] */
+#define HW_ATL_MDIO_WRITE_DATA_DEFAULT 0x0
+
+/* MIF MDIO Address [F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "MDIO Address [F:0]".
+ * PORT="pif_mdio_addr_i[15:0]"
+ */
+
+/* Register address for bitfield MDIO Address [F:0] */
+#define HW_ATL_MDIO_ADDRESS_ADR 0x0000028C
+/* Bitmask for bitfield MDIO Address [F:0] */
+#define HW_ATL_MDIO_ADDRESS_MSK 0x0000FFFF
+/* Inverted bitmask for bitfield MDIO Address [F:0] */
+#define HW_ATL_MDIO_ADDRESS_MSKN 0xFFFF0000
+/* Lower bit position of bitfield MDIO Address [F:0] */
+#define HW_ATL_MDIO_ADDRESS_SHIFT 0
+/* Width of bitfield MDIO Address [F:0] */
+#define HW_ATL_MDIO_ADDRESS_WIDTH 16
+/* Default value of bitfield MDIO Address [F:0] */
+#define HW_ATL_MDIO_ADDRESS_DEFAULT 0x0
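+/* Transaction sketch (illustrative only): an MDIO read is assumed to program
+ * the PHY address and MDIO address fields, select the operation via
+ * Op Mode [1:0], pulse "MDIO Execute Operation", and poll "MDIO Busy" until
+ * it clears before fetching the result.
+ */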
+
+#define HW_ATL_MIF_RESET_TIMEOUT_ADR 0x00000348
+
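+/* global CPU semaphore indices used for MDIO, RAM and reset arbitration */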
+#define HW_ATL_FW_SM_MDIO 0x0U
+#define HW_ATL_FW_SM_RAM 0x2U
+#define HW_ATL_FW_SM_RESET1 0x3U
+#define HW_ATL_FW_SM_RESET2 0x4U
+
+#endif /* HW_ATL_LLH_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
new file mode 100644
index 000000000..7e88d7234
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -0,0 +1,1099 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
+ */
+
+/* File hw_atl_utils.c: Definition of common functions for Atlantic hardware
+ * abstraction layer.
+ */
+
+#include "../aq_nic.h"
+#include "../aq_hw_utils.h"
+#include "hw_atl_utils.h"
+#include "hw_atl_llh.h"
+#include "hw_atl_llh_internal.h"
+
+#include <linux/random.h>
+
+#define HW_ATL_UCP_0X370_REG 0x0370U
+
+#define HW_ATL_MIF_CMD 0x0200U
+#define HW_ATL_MIF_ADDR 0x0208U
+#define HW_ATL_MIF_VAL 0x020CU
+
+#define HW_ATL_MPI_RPC_ADDR 0x0334U
+#define HW_ATL_RPC_CONTROL_ADR 0x0338U
+#define HW_ATL_RPC_STATE_ADR 0x033CU
+
+#define HW_ATL_MPI_FW_VERSION 0x18
+#define HW_ATL_MPI_CONTROL_ADR 0x0368U
+#define HW_ATL_MPI_STATE_ADR 0x036CU
+
+#define HW_ATL_MPI_STATE_MSK 0x00FFU
+#define HW_ATL_MPI_STATE_SHIFT 0U
+#define HW_ATL_MPI_SPEED_MSK 0x00FF0000U
+#define HW_ATL_MPI_SPEED_SHIFT 16U
+#define HW_ATL_MPI_DIRTY_WAKE_MSK 0x02000000U
+
+#define HW_ATL_MPI_DAISY_CHAIN_STATUS 0x704
+#define HW_ATL_MPI_BOOT_EXIT_CODE 0x388
+
+#define HW_ATL_MAC_PHY_CONTROL 0x4000
+#define HW_ATL_MAC_PHY_MPI_RESET_BIT 0x1D
+
+#define HW_ATL_FW_VER_1X 0x01050006U
+#define HW_ATL_FW_VER_2X 0x02000000U
+#define HW_ATL_FW_VER_3X 0x03000000U
+#define HW_ATL_FW_VER_4X 0x04000000U
+
+#define FORCE_FLASHLESS 0
+
+enum mcp_area {
+ MCP_AREA_CONFIG = 0x80000000,
+ MCP_AREA_SETTINGS = 0x20000000,
+};
+
+static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
+ enum hal_atl_utils_fw_state_e state);
+static u32 hw_atl_utils_get_mpi_mbox_tid(struct aq_hw_s *self);
+static u32 hw_atl_utils_mpi_get_state(struct aq_hw_s *self);
+static u32 hw_atl_utils_mif_cmd_get(struct aq_hw_s *self);
+static u32 hw_atl_utils_mif_addr_get(struct aq_hw_s *self);
+static u32 hw_atl_utils_rpc_state_get(struct aq_hw_s *self);
+static u32 aq_fw1x_rpc_get(struct aq_hw_s *self);
+
+int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
+{
+ int err = 0;
+
+ hw_atl_utils_hw_chip_features_init(self,
+ &self->chip_features);
+
+ self->fw_ver_actual = hw_atl_utils_get_fw_version(self);
+
+ if (hw_atl_utils_ver_match(HW_ATL_FW_VER_1X, self->fw_ver_actual)) {
+ *fw_ops = &aq_fw_1x_ops;
+ } else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_2X, self->fw_ver_actual)) {
+ *fw_ops = &aq_fw_2x_ops;
+ } else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_3X, self->fw_ver_actual)) {
+ *fw_ops = &aq_fw_2x_ops;
+ } else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_4X, self->fw_ver_actual)) {
+ *fw_ops = &aq_fw_2x_ops;
+ } else {
+ aq_pr_err("Bad FW version detected: %x\n",
+ self->fw_ver_actual);
+ return -EOPNOTSUPP;
+ }
+ self->aq_fw_ops = *fw_ops;
+ err = self->aq_fw_ops->init(self);
+
+ return err;
+}
+
+static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self)
+{
+ u32 gsr, val;
+ int k = 0;
+
+ aq_hw_write_reg(self, 0x404, 0x40e1);
+ AQ_HW_SLEEP(50);
+
+ /* Cleanup SPI */
+ val = aq_hw_read_reg(self, 0x53C);
+ aq_hw_write_reg(self, 0x53C, val | 0x10);
+
+ gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR);
+ aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR, (gsr & 0xBFFF) | 0x8000);
+
+ /* Kickstart MAC */
+ aq_hw_write_reg(self, 0x404, 0x80e0);
+ aq_hw_write_reg(self, 0x32a8, 0x0);
+ aq_hw_write_reg(self, 0x520, 0x1);
+
+ /* Reset SPI again because of possible interrupted SPI burst */
+ val = aq_hw_read_reg(self, 0x53C);
+ aq_hw_write_reg(self, 0x53C, val | 0x10);
+ AQ_HW_SLEEP(10);
+ /* Clear SPI reset state */
+ aq_hw_write_reg(self, 0x53C, val & ~0x10);
+
+ aq_hw_write_reg(self, 0x404, 0x180e0);
+
+ for (k = 0; k < 1000; k++) {
+ u32 flb_status = aq_hw_read_reg(self,
+ HW_ATL_MPI_DAISY_CHAIN_STATUS);
+
+ flb_status = flb_status & 0x10;
+ if (flb_status)
+ break;
+ AQ_HW_SLEEP(10);
+ }
+ if (k == 1000) {
+ aq_pr_err("MAC kickstart failed\n");
+ return -EIO;
+ }
+
+ /* FW reset */
+ aq_hw_write_reg(self, 0x404, 0x80e0);
+ AQ_HW_SLEEP(50);
+ aq_hw_write_reg(self, 0x3a0, 0x1);
+
+ /* Kickstart PHY - skipped */
+
+ /* Global software reset */
+ hw_atl_rx_rx_reg_res_dis_set(self, 0U);
+ hw_atl_tx_tx_reg_res_dis_set(self, 0U);
+ aq_hw_write_reg_bit(self, HW_ATL_MAC_PHY_CONTROL,
+ BIT(HW_ATL_MAC_PHY_MPI_RESET_BIT),
+ HW_ATL_MAC_PHY_MPI_RESET_BIT, 0x0);
+ gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR);
+ aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR, (gsr & 0xBFFF) | 0x8000);
+
+ for (k = 0; k < 1000; k++) {
+ u32 fw_state = aq_hw_read_reg(self, HW_ATL_MPI_FW_VERSION);
+
+ if (fw_state)
+ break;
+ AQ_HW_SLEEP(10);
+ }
+ if (k == 1000) {
+ aq_pr_err("FW kickstart failed\n");
+ return -EIO;
+ }
+ /* Old FW requires fixed delay after init */
+ AQ_HW_SLEEP(15);
+
+ return 0;
+}
+
+static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self)
+{
+ u32 gsr, val, rbl_status;
+ int k;
+
+ aq_hw_write_reg(self, 0x404, 0x40e1);
+ aq_hw_write_reg(self, 0x3a0, 0x1);
+ aq_hw_write_reg(self, 0x32a8, 0x0);
+
+ /* Alter RBL status */
+ aq_hw_write_reg(self, 0x388, 0xDEAD);
+
+ /* Cleanup SPI */
+ val = aq_hw_read_reg(self, 0x53C);
+ aq_hw_write_reg(self, 0x53C, val | 0x10);
+
+ /* Global software reset */
+ hw_atl_rx_rx_reg_res_dis_set(self, 0U);
+ hw_atl_tx_tx_reg_res_dis_set(self, 0U);
+ aq_hw_write_reg_bit(self, HW_ATL_MAC_PHY_CONTROL,
+ BIT(HW_ATL_MAC_PHY_MPI_RESET_BIT),
+ HW_ATL_MAC_PHY_MPI_RESET_BIT, 0x0);
+ gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR);
+ aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR,
+ (gsr & 0xFFFFBFFF) | 0x8000);
+
+ if (FORCE_FLASHLESS)
+ aq_hw_write_reg(self, 0x534, 0x0);
+
+ aq_hw_write_reg(self, 0x404, 0x40e0);
+
+ /* Wait for RBL boot */
+ for (k = 0; k < 1000; k++) {
+ rbl_status = aq_hw_read_reg(self, 0x388) & 0xFFFF;
+ if (rbl_status && rbl_status != 0xDEAD)
+ break;
+ AQ_HW_SLEEP(10);
+ }
+ if (!rbl_status || rbl_status == 0xDEAD) {
+ aq_pr_err("RBL Restart failed");
+ return -EIO;
+ }
+
+ /* Restore NVR */
+ if (FORCE_FLASHLESS)
+ aq_hw_write_reg(self, 0x534, 0xA0);
+
+ if (rbl_status == 0xF1A7) {
+ aq_pr_err("No FW detected. Dynamic FW load not implemented\n");
+ return -EOPNOTSUPP;
+ }
+
+ for (k = 0; k < 1000; k++) {
+ u32 fw_state = aq_hw_read_reg(self, HW_ATL_MPI_FW_VERSION);
+
+ if (fw_state)
+ break;
+ AQ_HW_SLEEP(10);
+ }
+ if (k == 1000) {
+ aq_pr_err("FW kickstart failed\n");
+ return -EIO;
+ }
+ /* Old FW requires fixed delay after init */
+ AQ_HW_SLEEP(15);
+
+ return 0;
+}
+
+int hw_atl_utils_soft_reset(struct aq_hw_s *self)
+{
+ int ver = hw_atl_utils_get_fw_version(self);
+ u32 boot_exit_code = 0;
+ u32 val;
+ int k;
+
+ for (k = 0; k < 1000; ++k) {
+ u32 flb_status = aq_hw_read_reg(self,
+ HW_ATL_MPI_DAISY_CHAIN_STATUS);
+ boot_exit_code = aq_hw_read_reg(self,
+ HW_ATL_MPI_BOOT_EXIT_CODE);
+ if (flb_status != 0x06000000 || boot_exit_code != 0)
+ break;
+ }
+
+ if (k == 1000) {
+ aq_pr_err("Neither RBL nor FLB firmware started\n");
+ return -EOPNOTSUPP;
+ }
+
+ self->rbl_enabled = (boot_exit_code != 0);
+
+ if (hw_atl_utils_ver_match(HW_ATL_FW_VER_1X, ver)) {
+ int err = 0;
+
+ /* FW 1.x may boot up in an invalid POWER state (WOL feature).
+ * We work around this by forcing its state back to DEINIT.
+ */
+ hw_atl_utils_mpi_set_state(self, MPI_DEINIT);
+ err = readx_poll_timeout_atomic(hw_atl_utils_mpi_get_state,
+ self, val,
+ (val & HW_ATL_MPI_STATE_MSK) ==
+ MPI_DEINIT,
+ 10, 10000U);
+ if (err)
+ return err;
+ } else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_4X, ver)) {
+ u64 sem_timeout = aq_hw_read_reg(self, HW_ATL_MIF_RESET_TIMEOUT_ADR);
+
+ /* Acquire 2 semaphores before issuing reset for FW 4.x */
+ if (sem_timeout > 3000)
+ sem_timeout = 3000;
+ sem_timeout = sem_timeout * 1000;
+
+ if (sem_timeout != 0) {
+ int err;
+
+ err = readx_poll_timeout_atomic(hw_atl_sem_reset1_get, self, val,
+ val == 1U, 1U, sem_timeout);
+ if (err)
+ aq_pr_err("reset sema1 timeout");
+
+ err = readx_poll_timeout_atomic(hw_atl_sem_reset2_get, self, val,
+ val == 1U, 1U, sem_timeout);
+ if (err)
+ aq_pr_err("reset sema2 timeout");
+ }
+ }
+
+ if (self->rbl_enabled)
+ return hw_atl_utils_soft_reset_rbl(self);
+ else
+ return hw_atl_utils_soft_reset_flb(self);
+}
+
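+/* Read @cnt dwords from firmware/MCP memory at address @a into @p. The RAM
+ * semaphore serialises host and firmware access to the MIF window; each
+ * dword is fetched by issuing a MIF read command and, on B1 silicon, waiting
+ * for the MIF address to auto-advance (earlier revisions poll the command
+ * busy bit instead).
+ */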
+int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
+ u32 *p, u32 cnt)
+{
+ int err = 0;
+ u32 val;
+
+ err = readx_poll_timeout_atomic(hw_atl_sem_ram_get,
+ self, val, val == 1U,
+ 1U, 10000U);
+
+ if (err < 0) {
+ bool is_locked;
+
+ hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
+ is_locked = hw_atl_sem_ram_get(self);
+ if (!is_locked) {
+ err = -ETIME;
+ goto err_exit;
+ }
+ }
+
+ aq_hw_write_reg(self, HW_ATL_MIF_ADDR, a);
+
+ for (; cnt && !err; --cnt) {
+ aq_hw_write_reg(self, HW_ATL_MIF_CMD, 0x00008000U);
+
+ if (ATL_HW_IS_CHIP_FEATURE(self, REVISION_B1))
+ err = readx_poll_timeout_atomic(hw_atl_utils_mif_addr_get,
+ self, val, val != a,
+ 1U, 1000U);
+ else
+ err = readx_poll_timeout_atomic(hw_atl_utils_mif_cmd_get,
+ self, val,
+ !(val & 0x100),
+ 1U, 1000U);
+
+ *(p++) = aq_hw_read_reg(self, HW_ATL_MIF_VAL);
+ a += 4;
+ }
+
+ hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
+
+err_exit:
+ return err;
+}
+
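+/* B1 mailbox write: each dword is handed to firmware through the 0x328
+ * (data) and 0x32C (area | byte offset) scratch registers followed by a
+ * forced MCP interrupt; scratchpad 12 is then polled until the area code in
+ * its top nibble changes, which is assumed to be the firmware's ack.
+ */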
+static int hw_atl_utils_write_b1_mbox(struct aq_hw_s *self, u32 addr,
+ u32 *p, u32 cnt, enum mcp_area area)
+{
+ u32 data_offset = 0;
+ u32 offset = addr;
+ int err = 0;
+ u32 val;
+
+ switch (area) {
+ case MCP_AREA_CONFIG:
+ offset -= self->rpc_addr;
+ break;
+
+ case MCP_AREA_SETTINGS:
+ offset -= self->settings_addr;
+ break;
+ }
+
+ offset = offset / sizeof(u32);
+
+ for (; data_offset < cnt; ++data_offset, ++offset) {
+ aq_hw_write_reg(self, 0x328, p[data_offset]);
+ aq_hw_write_reg(self, 0x32C,
+ (area | (0xFFFF & (offset * 4))));
+ hw_atl_mcp_up_force_intr_set(self, 1);
+ /* 1000 times by 10us = 10ms */
+ err = readx_poll_timeout_atomic(hw_atl_scrpad12_get,
+ self, val,
+ (val & 0xF0000000) !=
+ area,
+ 10U, 10000U);
+
+ if (err < 0)
+ break;
+ }
+
+ return err;
+}
+
+static int hw_atl_utils_write_b0_mbox(struct aq_hw_s *self, u32 addr,
+ u32 *p, u32 cnt)
+{
+ u32 offset = 0;
+ int err = 0;
+ u32 val;
+
+ aq_hw_write_reg(self, 0x208, addr);
+
+ for (; offset < cnt; ++offset) {
+ aq_hw_write_reg(self, 0x20C, p[offset]);
+ aq_hw_write_reg(self, 0x200, 0xC000);
+
+ err = readx_poll_timeout_atomic(hw_atl_utils_mif_cmd_get,
+ self, val,
+ (val & 0x100) == 0U,
+ 10U, 10000U);
+
+ if (err < 0)
+ break;
+ }
+
+ return err;
+}
+
+static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 addr, u32 *p,
+ u32 cnt, enum mcp_area area)
+{
+ int err = 0;
+ u32 val;
+
+ err = readx_poll_timeout_atomic(hw_atl_sem_ram_get, self,
+ val, val == 1U,
+ 10U, 100000U);
+ if (err < 0)
+ goto err_exit;
+
+ if (ATL_HW_IS_CHIP_FEATURE(self, REVISION_B1))
+ err = hw_atl_utils_write_b1_mbox(self, addr, p, cnt, area);
+ else
+ err = hw_atl_utils_write_b0_mbox(self, addr, p, cnt);
+
+ hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
+
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+int hw_atl_write_fwcfg_dwords(struct aq_hw_s *self, u32 *p, u32 cnt)
+{
+ return hw_atl_utils_fw_upload_dwords(self, self->rpc_addr, p,
+ cnt, MCP_AREA_CONFIG);
+}
+
+int hw_atl_write_fwsettings_dwords(struct aq_hw_s *self, u32 offset, u32 *p,
+ u32 cnt)
+{
+ return hw_atl_utils_fw_upload_dwords(self, self->settings_addr + offset,
+ p, cnt, MCP_AREA_SETTINGS);
+}
+
+bool hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual)
+{
+ const u32 dw_major_mask = 0xff000000U;
+ const u32 dw_minor_mask = 0x00ffffffU;
+ bool ver_match;
+
+ ver_match = !(dw_major_mask & (ver_expected ^ ver_actual));
+ if (!ver_match)
+ goto err_exit;
+ ver_match = (dw_minor_mask & ver_actual) >= (dw_minor_mask & ver_expected);
+
+err_exit:
+ return ver_match;
+}
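+
+/* Example: hw_atl_utils_ver_match(0x03000000, 0x0301005a) is true: the major
+ * byte matches and the actual minor/revision part is not older than the
+ * expected one.
+ */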
+
+static int hw_atl_utils_init_ucp(struct aq_hw_s *self,
+ const struct aq_hw_caps_s *aq_hw_caps)
+{
+ int err = 0;
+
+ if (!aq_hw_read_reg(self, 0x370U)) {
+ unsigned int rnd = 0U;
+ unsigned int ucp_0x370 = 0U;
+
+ get_random_bytes(&rnd, sizeof(unsigned int));
+
+ ucp_0x370 = 0x02020202U | (0xFEFEFEFEU & rnd);
+ aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370);
+ }
+
+ hw_atl_reg_glb_cpu_scratch_scp_set(self, 0x00000000U, 25U);
+
+ /* check 10 times by 1ms */
+ err = readx_poll_timeout_atomic(hw_atl_scrpad25_get,
+ self, self->mbox_addr,
+ self->mbox_addr != 0U,
+ 1000U, 10000U);
+ if (err < 0)
+ return err;
+
+ /* check 100 times by 1ms */
+ err = readx_poll_timeout_atomic(aq_fw1x_rpc_get, self,
+ self->rpc_addr,
+ self->rpc_addr != 0U,
+ 1000U, 100000U);
+
+ return err;
+}
+
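+/* Overlay for the RPC control/state registers: the low 16 bits carry the
+ * transaction id, the high 16 bits the RPC message length in bytes
+ * (assuming the little-endian layout this driver runs on).
+ */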
+struct aq_hw_atl_utils_fw_rpc_tid_s {
+ union {
+ u32 val;
+ struct {
+ u16 tid;
+ u16 len;
+ };
+ };
+};
+
+#define hw_atl_utils_fw_rpc_init(_H_) hw_atl_utils_fw_rpc_wait(_H_, NULL)
+
+int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size)
+{
+ struct aq_hw_atl_utils_fw_rpc_tid_s sw;
+ int err = 0;
+
+ if (!ATL_HW_IS_CHIP_FEATURE(self, MIPS)) {
+ err = -EOPNOTSUPP;
+ goto err_exit;
+ }
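+ /* round the RPC size in bytes up to a whole number of dwords */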
+ err = hw_atl_write_fwcfg_dwords(self, (u32 *)(void *)&self->rpc,
+ (rpc_size + sizeof(u32) -
+ sizeof(u8)) / sizeof(u32));
+ if (err < 0)
+ goto err_exit;
+
+ sw.tid = 0xFFFFU & (++self->rpc_tid);
+ sw.len = (u16)rpc_size;
+ aq_hw_write_reg(self, HW_ATL_RPC_CONTROL_ADR, sw.val);
+
+err_exit:
+ return err;
+}
+
+int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
+ struct hw_atl_utils_fw_rpc **rpc)
+{
+ struct aq_hw_atl_utils_fw_rpc_tid_s sw;
+ struct aq_hw_atl_utils_fw_rpc_tid_s fw;
+ int err = 0;
+
+ do {
+ sw.val = aq_hw_read_reg(self, HW_ATL_RPC_CONTROL_ADR);
+
+ self->rpc_tid = sw.tid;
+
+ err = readx_poll_timeout_atomic(hw_atl_utils_rpc_state_get,
+ self, fw.val,
+ sw.tid == fw.tid,
+ 1000U, 100000U);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_hw_err_from_flags(self);
+ if (err < 0)
+ goto err_exit;
+
+ if (fw.len == 0xFFFFU) {
+ if (sw.len > sizeof(self->rpc)) {
+ printk(KERN_INFO "Invalid sw len: %x\n", sw.len);
+ err = -EINVAL;
+ goto err_exit;
+ }
+ err = hw_atl_utils_fw_rpc_call(self, sw.len);
+ if (err < 0)
+ goto err_exit;
+ }
+ } while (sw.tid != fw.tid || fw.len == 0xFFFFU);
+
+ if (rpc) {
+ if (fw.len) {
+ if (fw.len > sizeof(self->rpc)) {
+ printk(KERN_INFO "Invalid fw len: %x\n", fw.len);
+ err = -EINVAL;
+ goto err_exit;
+ }
+ err = hw_atl_utils_fw_downld_dwords(self, self->rpc_addr,
+ (u32 *)(void *)&self->rpc,
+ (fw.len + sizeof(u32) -
+ sizeof(u8)) / sizeof(u32));
+ if (err < 0)
+ goto err_exit;
+ }
+
+ *rpc = &self->rpc;
+ }
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_utils_mpi_create(struct aq_hw_s *self)
+{
+ int err = 0;
+
+ err = hw_atl_utils_init_ucp(self, self->aq_nic_cfg->aq_hw_caps);
+ if (err < 0)
+ goto err_exit;
+
+ err = hw_atl_utils_fw_rpc_init(self);
+ if (err < 0)
+ goto err_exit;
+
+err_exit:
+ return err;
+}
+
+int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
+ struct hw_atl_utils_mbox_header *pmbox)
+{
+ return hw_atl_utils_fw_downld_dwords(self,
+ self->mbox_addr,
+ (u32 *)(void *)pmbox,
+ sizeof(*pmbox) / sizeof(u32));
+}
+
+void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
+ struct hw_atl_utils_mbox *pmbox)
+{
+ int err = 0;
+
+ err = hw_atl_utils_fw_downld_dwords(self,
+ self->mbox_addr,
+ (u32 *)(void *)pmbox,
+ sizeof(*pmbox) / sizeof(u32));
+ if (err < 0)
+ goto err_exit;
+
+ if (ATL_HW_IS_CHIP_FEATURE(self, REVISION_A0)) {
+ unsigned int mtu = self->aq_nic_cfg ?
+ self->aq_nic_cfg->mtu : 1514U;
+ pmbox->stats.ubrc = pmbox->stats.uprc * mtu;
+ pmbox->stats.ubtc = pmbox->stats.uptc * mtu;
+ pmbox->stats.dpc = atomic_read(&self->dpc);
+ } else {
+ pmbox->stats.dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self);
+ }
+
+err_exit:;
+}
+
+static int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed)
+{
+ u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR);
+
+ val = val & ~HW_ATL_MPI_SPEED_MSK;
+ val |= speed << HW_ATL_MPI_SPEED_SHIFT;
+ aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val);
+
+ return 0;
+}
+
+static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
+ enum hal_atl_utils_fw_state_e state)
+{
+ u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR);
+ struct hw_atl_utils_mbox_header mbox;
+ u32 transaction_id = 0;
+ int err = 0;
+
+ if (state == MPI_RESET) {
+ hw_atl_utils_mpi_read_mbox(self, &mbox);
+
+ transaction_id = mbox.transaction_id;
+
+ err = readx_poll_timeout_atomic(hw_atl_utils_get_mpi_mbox_tid,
+ self, mbox.transaction_id,
+ transaction_id !=
+ mbox.transaction_id,
+ 1000U, 100000U);
+ if (err < 0)
+ goto err_exit;
+ }
+ /* On interface DEINIT or POWER we disable dirty wake (raise the bit);
+ * otherwise we enable it (clear the bit).
+ */
+ if (state == MPI_DEINIT || state == MPI_POWER)
+ val |= HW_ATL_MPI_DIRTY_WAKE_MSK;
+ else
+ val &= ~HW_ATL_MPI_DIRTY_WAKE_MSK;
+
+ /* Set new state bits */
+ val = val & ~HW_ATL_MPI_STATE_MSK;
+ val |= state & HW_ATL_MPI_STATE_MSK;
+
+ aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val);
+
+err_exit:
+ return err;
+}
+
+int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
+{
+ struct aq_hw_link_status_s *link_status = &self->aq_link_status;
+ u32 mpi_state;
+ u32 speed;
+
+ mpi_state = hw_atl_utils_mpi_get_state(self);
+ speed = mpi_state >> HW_ATL_MPI_SPEED_SHIFT;
+
+ if (!speed) {
+ link_status->mbps = 0U;
+ } else {
+ switch (speed) {
+ case HAL_ATLANTIC_RATE_10G:
+ link_status->mbps = 10000U;
+ break;
+
+ case HAL_ATLANTIC_RATE_5G:
+ case HAL_ATLANTIC_RATE_5GSR:
+ link_status->mbps = 5000U;
+ break;
+
+ case HAL_ATLANTIC_RATE_2G5:
+ link_status->mbps = 2500U;
+ break;
+
+ case HAL_ATLANTIC_RATE_1G:
+ link_status->mbps = 1000U;
+ break;
+
+ case HAL_ATLANTIC_RATE_100M:
+ link_status->mbps = 100U;
+ break;
+
+ default:
+ return -EBUSY;
+ }
+ }
+ link_status->full_duplex = true;
+
+ return 0;
+}
+
+int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
+ u8 *mac)
+{
+ u32 mac_addr[2];
+ u32 efuse_addr;
+ int err = 0;
+ u32 h = 0U;
+ u32 l = 0U;
+
+ if (!aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) {
+ unsigned int ucp_0x370 = 0;
+ unsigned int rnd = 0;
+
+ get_random_bytes(&rnd, sizeof(unsigned int));
+
+ ucp_0x370 = 0x02020202 | (0xFEFEFEFE & rnd);
+ aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370);
+ }
+
+ efuse_addr = aq_hw_read_reg(self, 0x00000374U);
+
+ err = hw_atl_utils_fw_downld_dwords(self, efuse_addr + (40U * 4U),
+ mac_addr, ARRAY_SIZE(mac_addr));
+ if (err < 0) {
+ mac_addr[0] = 0U;
+ mac_addr[1] = 0U;
+ err = 0;
+ } else {
+ mac_addr[0] = __swab32(mac_addr[0]);
+ mac_addr[1] = __swab32(mac_addr[1]);
+ }
+
+ ether_addr_copy(mac, (u8 *)mac_addr);
+
+ if ((mac[0] & 0x01U) || ((mac[0] | mac[1] | mac[2]) == 0x00U)) {
+ /* chip revision */
+ l = 0xE3000000U |
+ (0xFFFFU & aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) |
+ (0x00 << 16);
+ h = 0x8001300EU;
+
+ mac[5] = (u8)(0xFFU & l);
+ l >>= 8;
+ mac[4] = (u8)(0xFFU & l);
+ l >>= 8;
+ mac[3] = (u8)(0xFFU & l);
+ l >>= 8;
+ mac[2] = (u8)(0xFFU & l);
+ mac[1] = (u8)(0xFFU & h);
+ h >>= 8;
+ mac[0] = (u8)(0xFFU & h);
+ }
+
+ return err;
+}
+
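+/* Map a link speed in Mbps to its rate bit index; the returned values line
+ * up with the HAL_ATLANTIC_RATE_* bit positions (10G = 0, ..., 100M = 5).
+ */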
+unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps)
+{
+ unsigned int ret = 0U;
+
+ switch (mbps) {
+ case 100U:
+ ret = 5U;
+ break;
+
+ case 1000U:
+ ret = 4U;
+ break;
+
+ case 2500U:
+ ret = 3U;
+ break;
+
+ case 5000U:
+ ret = 1U;
+ break;
+
+ case 10000U:
+ ret = 0U;
+ break;
+
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p)
+{
+ u32 val = hw_atl_reg_glb_mif_id_get(self);
+ u32 mif_rev = val & 0xFFU;
+ u32 chip_features = 0U;
+
+ chip_features |= ATL_HW_CHIP_ATLANTIC;
+
+ if ((0xFU & mif_rev) == 1U) {
+ chip_features |= ATL_HW_CHIP_REVISION_A0 |
+ ATL_HW_CHIP_MPI_AQ |
+ ATL_HW_CHIP_MIPS;
+ } else if ((0xFU & mif_rev) == 2U) {
+ chip_features |= ATL_HW_CHIP_REVISION_B0 |
+ ATL_HW_CHIP_MPI_AQ |
+ ATL_HW_CHIP_MIPS |
+ ATL_HW_CHIP_TPO2 |
+ ATL_HW_CHIP_RPF2;
+ } else if ((0xFU & mif_rev) == 0xAU) {
+ chip_features |= ATL_HW_CHIP_REVISION_B1 |
+ ATL_HW_CHIP_MPI_AQ |
+ ATL_HW_CHIP_MIPS |
+ ATL_HW_CHIP_TPO2 |
+ ATL_HW_CHIP_RPF2;
+ }
+
+ *p = chip_features;
+}
+
+static int hw_atl_fw1x_deinit(struct aq_hw_s *self)
+{
+ hw_atl_utils_mpi_set_speed(self, 0);
+ hw_atl_utils_mpi_set_state(self, MPI_DEINIT);
+
+ return 0;
+}
+
+int hw_atl_utils_update_stats(struct aq_hw_s *self)
+{
+ struct aq_stats_s *cs = &self->curr_stats;
+ struct aq_stats_s curr_stats = *cs;
+ struct hw_atl_utils_mbox mbox;
+ bool corrupted_stats = false;
+
+ hw_atl_utils_mpi_read_stats(self, &mbox);
+
+#define AQ_SDELTA(_N_) \
+do { \
+ if (!corrupted_stats && \
+ ((s64)(mbox.stats._N_ - self->last_stats._N_)) >= 0) \
+ curr_stats._N_ += mbox.stats._N_ - self->last_stats._N_; \
+ else \
+ corrupted_stats = true; \
+} while (0)
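+ /* AQ_SDELTA accumulates the delta between two firmware mailbox snapshots;
+  * a negative delta means the firmware counters were reset (or the snapshot
+  * is corrupted), in which case the whole update is discarded this round.
+  */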
+
+ if (self->aq_link_status.mbps) {
+ AQ_SDELTA(uprc);
+ AQ_SDELTA(mprc);
+ AQ_SDELTA(bprc);
+ AQ_SDELTA(erpt);
+
+ AQ_SDELTA(uptc);
+ AQ_SDELTA(mptc);
+ AQ_SDELTA(bptc);
+ AQ_SDELTA(erpr);
+
+ AQ_SDELTA(ubrc);
+ AQ_SDELTA(ubtc);
+ AQ_SDELTA(mbrc);
+ AQ_SDELTA(mbtc);
+ AQ_SDELTA(bbrc);
+ AQ_SDELTA(bbtc);
+ AQ_SDELTA(dpc);
+
+ if (!corrupted_stats)
+ *cs = curr_stats;
+ }
+#undef AQ_SDELTA
+
+ cs->dma_pkt_rc = hw_atl_stats_rx_dma_good_pkt_counter_get(self);
+ cs->dma_pkt_tc = hw_atl_stats_tx_dma_good_pkt_counter_get(self);
+ cs->dma_oct_rc = hw_atl_stats_rx_dma_good_octet_counter_get(self);
+ cs->dma_oct_tc = hw_atl_stats_tx_dma_good_octet_counter_get(self);
+
+ memcpy(&self->last_stats, &mbox.stats, sizeof(mbox.stats));
+
+ return 0;
+}
+
+struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self)
+{
+ return &self->curr_stats;
+}
+
+static const u32 hw_atl_utils_hw_mac_regs[] = {
+ 0x00005580U, 0x00005590U, 0x000055B0U, 0x000055B4U,
+ 0x000055C0U, 0x00005B00U, 0x00005B04U, 0x00005B08U,
+ 0x00005B0CU, 0x00005B10U, 0x00005B14U, 0x00005B18U,
+ 0x00005B1CU, 0x00005B20U, 0x00005B24U, 0x00005B28U,
+ 0x00005B2CU, 0x00005B30U, 0x00005B34U, 0x00005B38U,
+ 0x00005B3CU, 0x00005B40U, 0x00005B44U, 0x00005B48U,
+ 0x00005B4CU, 0x00005B50U, 0x00005B54U, 0x00005B58U,
+ 0x00005B5CU, 0x00005B60U, 0x00005B64U, 0x00005B68U,
+ 0x00005B6CU, 0x00005B70U, 0x00005B74U, 0x00005B78U,
+ 0x00005B7CU, 0x00007C00U, 0x00007C04U, 0x00007C08U,
+ 0x00007C0CU, 0x00007C10U, 0x00007C14U, 0x00007C18U,
+ 0x00007C1CU, 0x00007C20U, 0x00007C40U, 0x00007C44U,
+ 0x00007C48U, 0x00007C4CU, 0x00007C50U, 0x00007C54U,
+ 0x00007C58U, 0x00007C5CU, 0x00007C60U, 0x00007C80U,
+ 0x00007C84U, 0x00007C88U, 0x00007C8CU, 0x00007C90U,
+ 0x00007C94U, 0x00007C98U, 0x00007C9CU, 0x00007CA0U,
+ 0x00007CC0U, 0x00007CC4U, 0x00007CC8U, 0x00007CCCU,
+ 0x00007CD0U, 0x00007CD4U, 0x00007CD8U, 0x00007CDCU,
+ 0x00007CE0U, 0x00000300U, 0x00000304U, 0x00000308U,
+ 0x0000030cU, 0x00000310U, 0x00000314U, 0x00000318U,
+ 0x0000031cU, 0x00000360U, 0x00000364U, 0x00000368U,
+ 0x0000036cU, 0x00000370U, 0x00000374U, 0x00006900U,
+};
+
+int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
+ const struct aq_hw_caps_s *aq_hw_caps,
+ u32 *regs_buff)
+{
+ unsigned int i = 0U;
+
+ for (i = 0; i < aq_hw_caps->mac_regs_count; i++)
+ regs_buff[i] = aq_hw_read_reg(self,
+ hw_atl_utils_hw_mac_regs[i]);
+
+ return 0;
+}
+
+u32 hw_atl_utils_get_fw_version(struct aq_hw_s *self)
+{
+ return aq_hw_read_reg(self, HW_ATL_MPI_FW_VERSION);
+}
+
+static int aq_fw1x_set_wake_magic(struct aq_hw_s *self, bool wol_enabled,
+ const u8 *mac)
+{
+ struct hw_atl_utils_fw_rpc *prpc = NULL;
+ unsigned int rpc_size = 0U;
+ int err = 0;
+
+ err = hw_atl_utils_fw_rpc_wait(self, &prpc);
+ if (err < 0)
+ goto err_exit;
+
+ memset(prpc, 0, sizeof(*prpc));
+
+ if (wol_enabled) {
+ rpc_size = offsetof(struct hw_atl_utils_fw_rpc, msg_wol_add) +
+ sizeof(prpc->msg_wol_add);
+
+
+ prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_WOL_ADD;
+ prpc->msg_wol_add.priority =
+ HAL_ATLANTIC_UTILS_FW_MSG_WOL_PRIOR;
+ prpc->msg_wol_add.pattern_id =
+ HAL_ATLANTIC_UTILS_FW_MSG_WOL_PATTERN;
+ prpc->msg_wol_add.packet_type =
+ HAL_ATLANTIC_UTILS_FW_MSG_WOL_MAG_PKT;
+
+ ether_addr_copy((u8 *)&prpc->msg_wol_add.magic_packet_pattern,
+ mac);
+ } else {
+ rpc_size = sizeof(prpc->msg_wol_remove) +
+ offsetof(struct hw_atl_utils_fw_rpc, msg_wol_remove);
+
+ prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_WOL_DEL;
+ prpc->msg_wol_add.pattern_id =
+ HAL_ATLANTIC_UTILS_FW_MSG_WOL_PATTERN;
+ }
+
+ err = hw_atl_utils_fw_rpc_call(self, rpc_size);
+
+err_exit:
+ return err;
+}
+
+static int aq_fw1x_set_power(struct aq_hw_s *self, unsigned int power_state,
+ const u8 *mac)
+{
+ struct hw_atl_utils_fw_rpc *prpc = NULL;
+ unsigned int rpc_size = 0U;
+ int err = 0;
+
+ if (self->aq_nic_cfg->wol & WAKE_MAGIC) {
+ err = aq_fw1x_set_wake_magic(self, 1, mac);
+
+ if (err < 0)
+ goto err_exit;
+
+ rpc_size = sizeof(prpc->msg_id) +
+ sizeof(prpc->msg_enable_wakeup);
+
+ err = hw_atl_utils_fw_rpc_wait(self, &prpc);
+
+ if (err < 0)
+ goto err_exit;
+
+ memset(prpc, 0, rpc_size);
+
+ prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_ENABLE_WAKEUP;
+ prpc->msg_enable_wakeup.pattern_mask = 0x00000002;
+
+ err = hw_atl_utils_fw_rpc_call(self, rpc_size);
+ if (err < 0)
+ goto err_exit;
+ }
+ hw_atl_utils_mpi_set_speed(self, 0);
+ hw_atl_utils_mpi_set_state(self, MPI_POWER);
+
+err_exit:
+ return err;
+}
+
+static u32 hw_atl_utils_get_mpi_mbox_tid(struct aq_hw_s *self)
+{
+ struct hw_atl_utils_mbox_header mbox;
+
+ hw_atl_utils_mpi_read_mbox(self, &mbox);
+
+ return mbox.transaction_id;
+}
+
+static u32 hw_atl_utils_mpi_get_state(struct aq_hw_s *self)
+{
+ return aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR);
+}
+
+static u32 hw_atl_utils_mif_cmd_get(struct aq_hw_s *self)
+{
+ return aq_hw_read_reg(self, HW_ATL_MIF_CMD);
+}
+
+static u32 hw_atl_utils_mif_addr_get(struct aq_hw_s *self)
+{
+ return aq_hw_read_reg(self, HW_ATL_MIF_ADDR);
+}
+
+static u32 hw_atl_utils_rpc_state_get(struct aq_hw_s *self)
+{
+ return aq_hw_read_reg(self, HW_ATL_RPC_STATE_ADR);
+}
+
+static u32 aq_fw1x_rpc_get(struct aq_hw_s *self)
+{
+ return aq_hw_read_reg(self, HW_ATL_MPI_RPC_ADDR);
+}
+
+const struct aq_fw_ops aq_fw_1x_ops = {
+ .init = hw_atl_utils_mpi_create,
+ .deinit = hw_atl_fw1x_deinit,
+ .reset = NULL,
+ .get_mac_permanent = hw_atl_utils_get_mac_permanent,
+ .set_link_speed = hw_atl_utils_mpi_set_speed,
+ .set_state = hw_atl_utils_mpi_set_state,
+ .update_link_status = hw_atl_utils_mpi_get_link_status,
+ .update_stats = hw_atl_utils_update_stats,
+ .get_mac_temp = NULL,
+ .get_phy_temp = NULL,
+ .set_power = aq_fw1x_set_power,
+ .set_eee_rate = NULL,
+ .get_eee_rate = NULL,
+ .set_flow_control = NULL,
+ .send_fw_request = NULL,
+ .enable_ptp = NULL,
+ .led_control = NULL,
+};
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
new file mode 100644
index 000000000..f5901f8e3
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -0,0 +1,643 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
+ */
+
+/* File hw_atl_utils.h: Declaration of common functions for Atlantic hardware
+ * abstraction layer.
+ */
+
+#ifndef HW_ATL_UTILS_H
+#define HW_ATL_UTILS_H
+
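+/* Flush posted writes by reading back a chip register; relies on a local
+ * "self" (struct aq_hw_s *) being in scope at the call site.
+ */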
+#define HW_ATL_FLUSH() { (void)aq_hw_read_reg(self, 0x10); }
+
+/* Hardware tx descriptor */
+struct __packed hw_atl_txd_s {
+ u64 buf_addr;
+ u32 ctl;
+ u32 ctl2; /* 63..46 - payload length, 45 - ctx enable, 44 - ctx index */
+};
+
+/* Hardware tx context descriptor */
+struct __packed hw_atl_txc_s {
+ u32 rsvd;
+ u32 len;
+ u32 ctl;
+ u32 len2;
+};
+
+/* Hardware rx descriptor */
+struct __packed hw_atl_rxd_s {
+ u64 buf_addr;
+ u64 hdr_addr;
+};
+
+/* Hardware rx descriptor writeback */
+struct __packed hw_atl_rxd_wb_s {
+ u32 type;
+ u32 rss_hash;
+ u16 status;
+ u16 pkt_len;
+ u16 next_desc_ptr;
+ __le16 vlan;
+};
+
+/* Hardware rx HW TIMESTAMP writeback */
+struct __packed hw_atl_rxd_hwts_wb_s {
+ u32 sec_hw;
+ u32 ns;
+ u32 sec_lw0;
+ u32 sec_lw1;
+};
+
+struct __packed hw_atl_stats_s {
+ u32 uprc;
+ u32 mprc;
+ u32 bprc;
+ u32 erpt;
+ u32 uptc;
+ u32 mptc;
+ u32 bptc;
+ u32 erpr;
+ u32 mbtc;
+ u32 bbtc;
+ u32 mbrc;
+ u32 bbrc;
+ u32 ubrc;
+ u32 ubtc;
+ u32 dpc;
+};
+
+struct __packed drv_msg_enable_wakeup {
+ union {
+ u32 pattern_mask;
+
+ struct {
+ u32 reason_arp_v4_pkt : 1;
+ u32 reason_ipv4_ping_pkt : 1;
+ u32 reason_ipv6_ns_pkt : 1;
+ u32 reason_ipv6_ping_pkt : 1;
+ u32 reason_link_up : 1;
+ u32 reason_link_down : 1;
+ u32 reason_maximum : 1;
+ };
+ };
+
+ union {
+ u32 offload_mask;
+ };
+};
+
+struct __packed magic_packet_pattern_s {
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct __packed drv_msg_wol_add {
+ u32 priority;
+ u32 packet_type;
+ u32 pattern_id;
+ u32 next_pattern_offset;
+
+ struct magic_packet_pattern_s magic_packet_pattern;
+};
+
+struct __packed drv_msg_wol_remove {
+ u32 id;
+};
+
+struct __packed hw_atl_utils_mbox_header {
+ u32 version;
+ u32 transaction_id;
+ u32 error;
+};
+
+struct __packed hw_atl_ptp_offset {
+ u16 ingress_100;
+ u16 egress_100;
+ u16 ingress_1000;
+ u16 egress_1000;
+ u16 ingress_2500;
+ u16 egress_2500;
+ u16 ingress_5000;
+ u16 egress_5000;
+ u16 ingress_10000;
+ u16 egress_10000;
+};
+
+struct __packed hw_atl_cable_diag {
+ u8 fault;
+ u8 distance;
+ u8 far_distance;
+ u8 reserved;
+};
+
+enum gpio_pin_function {
+ GPIO_PIN_FUNCTION_NC,
+ GPIO_PIN_FUNCTION_VAUX_ENABLE,
+ GPIO_PIN_FUNCTION_EFUSE_BURN_ENABLE,
+ GPIO_PIN_FUNCTION_SFP_PLUS_DETECT,
+ GPIO_PIN_FUNCTION_TX_DISABLE,
+ GPIO_PIN_FUNCTION_RATE_SEL_0,
+ GPIO_PIN_FUNCTION_RATE_SEL_1,
+ GPIO_PIN_FUNCTION_TX_FAULT,
+ GPIO_PIN_FUNCTION_PTP0,
+ GPIO_PIN_FUNCTION_PTP1,
+ GPIO_PIN_FUNCTION_PTP2,
+ GPIO_PIN_FUNCTION_SIZE
+};
+
+struct __packed hw_atl_info {
+ u8 reserved[6];
+ u16 phy_fault_code;
+ u16 phy_temperature;
+ u8 cable_len;
+ u8 reserved1;
+ struct hw_atl_cable_diag cable_diag_data[4];
+ struct hw_atl_ptp_offset ptp_offset;
+ u8 reserved2[12];
+ u32 caps_lo;
+ u32 caps_hi;
+ u32 reserved_datapath;
+ u32 reserved3[7];
+ u32 reserved_simpleresp[3];
+ u32 reserved_linkstat[7];
+ u32 reserved_wakes_count;
+ u32 reserved_eee_stat[12];
+ u32 tx_stuck_cnt;
+ u32 setting_address;
+ u32 setting_length;
+ u32 caps_ex;
+ enum gpio_pin_function gpio_pin[3];
+ u32 pcie_aer_dump[18];
+ u16 snr_margin[4];
+};
+
+struct __packed hw_atl_utils_mbox {
+ struct hw_atl_utils_mbox_header header;
+ struct hw_atl_stats_s stats;
+ struct hw_atl_info info;
+};
+
+struct __packed offload_ip_info {
+ u8 v4_local_addr_count;
+ u8 v4_addr_count;
+ u8 v6_local_addr_count;
+ u8 v6_addr_count;
+ u32 v4_addr;
+ u32 v4_prefix;
+ u32 v6_addr;
+ u32 v6_prefix;
+};
+
+struct __packed offload_port_info {
+ u16 udp_port_count;
+ u16 tcp_port_count;
+ u32 udp_port;
+ u32 tcp_port;
+};
+
+struct __packed offload_ka_info {
+ u16 v4_ka_count;
+ u16 v6_ka_count;
+ u32 retry_count;
+ u32 retry_interval;
+ u32 v4_ka;
+ u32 v6_ka;
+};
+
+struct __packed offload_rr_info {
+ u32 rr_count;
+ u32 rr_buf_len;
+ u32 rr_id_x;
+ u32 rr_buf;
+};
+
+struct __packed offload_info {
+ u32 version;
+ u32 len;
+ u8 mac_addr[ETH_ALEN];
+
+ u8 reserved[2];
+
+ struct offload_ip_info ips;
+ struct offload_port_info ports;
+ struct offload_ka_info kas;
+ struct offload_rr_info rrs;
+ u8 buf[];
+};
+
+struct __packed hw_atl_utils_fw_rpc {
+ u32 msg_id;
+
+ union {
+ /* fw1x structures */
+ struct drv_msg_wol_add msg_wol_add;
+ struct drv_msg_wol_remove msg_wol_remove;
+ struct drv_msg_enable_wakeup msg_enable_wakeup;
+ /* fw2x structures */
+ struct offload_info fw2x_offloads;
+ };
+};
+
+/* Mailbox FW Request interface */
+struct __packed hw_fw_request_ptp_gpio_ctrl {
+ u32 index;
+ u32 period;
+ u64 start;
+};
+
+struct __packed hw_fw_request_ptp_adj_freq {
+ u32 ns_mac;
+ u32 fns_mac;
+ u32 ns_phy;
+ u32 fns_phy;
+ u32 mac_ns_adj;
+ u32 mac_fns_adj;
+};
+
+struct __packed hw_fw_request_ptp_adj_clock {
+ u32 ns;
+ u32 sec;
+ int sign;
+};
+
+#define HW_AQ_FW_REQUEST_PTP_GPIO_CTRL 0x11
+#define HW_AQ_FW_REQUEST_PTP_ADJ_FREQ 0x12
+#define HW_AQ_FW_REQUEST_PTP_ADJ_CLOCK 0x13
+
+struct __packed hw_fw_request_iface {
+ u32 msg_id;
+ union {
+ /* PTP FW Request */
+ struct hw_fw_request_ptp_gpio_ctrl ptp_gpio_ctrl;
+ struct hw_fw_request_ptp_adj_freq ptp_adj_freq;
+ struct hw_fw_request_ptp_adj_clock ptp_adj_clock;
+ };
+};
+
+struct __packed hw_atl_utils_settings {
+ u32 mtu;
+ u32 downshift_retry_count;
+ u32 link_pause_frame_quanta_100m;
+ u32 link_pause_frame_threshold_100m;
+ u32 link_pause_frame_quanta_1g;
+ u32 link_pause_frame_threshold_1g;
+ u32 link_pause_frame_quanta_2p5g;
+ u32 link_pause_frame_threshold_2p5g;
+ u32 link_pause_frame_quanta_5g;
+ u32 link_pause_frame_threshold_5g;
+ u32 link_pause_frame_quanta_10g;
+ u32 link_pause_frame_threshold_10g;
+ u32 pfc_quanta_class_0;
+ u32 pfc_threshold_class_0;
+ u32 pfc_quanta_class_1;
+ u32 pfc_threshold_class_1;
+ u32 pfc_quanta_class_2;
+ u32 pfc_threshold_class_2;
+ u32 pfc_quanta_class_3;
+ u32 pfc_threshold_class_3;
+ u32 pfc_quanta_class_4;
+ u32 pfc_threshold_class_4;
+ u32 pfc_quanta_class_5;
+ u32 pfc_threshold_class_5;
+ u32 pfc_quanta_class_6;
+ u32 pfc_threshold_class_6;
+ u32 pfc_quanta_class_7;
+ u32 pfc_threshold_class_7;
+ u32 eee_link_down_timeout;
+ u32 eee_link_up_timeout;
+ u32 eee_max_link_drops;
+ u32 eee_rates_mask;
+ u32 wake_timer;
+ u32 thermal_shutdown_off_temp;
+ u32 thermal_shutdown_warning_temp;
+ u32 thermal_shutdown_cold_temp;
+ u32 msm_options;
+ u32 dac_cable_serdes_modes;
+ u32 media_detect;
+};
+
+enum macsec_msg_type {
+ macsec_cfg_msg = 0,
+ macsec_add_rx_sc_msg,
+ macsec_add_tx_sc_msg,
+ macsec_add_rx_sa_msg,
+ macsec_add_tx_sa_msg,
+ macsec_get_stats_msg,
+};
+
+struct __packed macsec_cfg_request {
+ u32 enabled;
+ u32 egress_threshold;
+ u32 ingress_threshold;
+ u32 interrupts_enabled;
+};
+
+struct __packed macsec_msg_fw_request {
+ u32 msg_id; /* not used */
+ u32 msg_type;
+ struct macsec_cfg_request cfg;
+};
+
+struct __packed macsec_msg_fw_response {
+ u32 result;
+};
+
+enum hw_atl_rx_action_with_traffic {
+ HW_ATL_RX_DISCARD,
+ HW_ATL_RX_HOST,
+ HW_ATL_RX_MNGMNT,
+ HW_ATL_RX_HOST_AND_MNGMNT,
+ HW_ATL_RX_WOL
+};
+
+struct aq_rx_filter_vlan {
+ u8 enable;
+ u8 location;
+ u16 vlan_id;
+ u8 queue;
+};
+
+#define HW_ATL_VLAN_MAX_FILTERS 16U
+
+struct aq_rx_filter_l2 {
+ s8 queue;
+ u8 location;
+ u8 user_priority_en;
+ u8 user_priority;
+ u16 ethertype;
+};
+
+struct aq_rx_filter_l3l4 {
+ u32 cmd;
+ u8 location;
+ u32 ip_dst[4];
+ u32 ip_src[4];
+ u16 p_dst;
+ u16 p_src;
+ u8 is_ipv6;
+};
+
+enum hw_atl_rx_protocol_value_l3l4 {
+ HW_ATL_RX_TCP,
+ HW_ATL_RX_UDP,
+ HW_ATL_RX_SCTP,
+ HW_ATL_RX_ICMP
+};
+
+enum hw_atl_rx_ctrl_registers_l3l4 {
+ HW_ATL_RX_ENABLE_MNGMNT_QUEUE_L3L4 = BIT(22),
+ HW_ATL_RX_ENABLE_QUEUE_L3L4 = BIT(23),
+ HW_ATL_RX_ENABLE_ARP_FLTR_L3 = BIT(24),
+ HW_ATL_RX_ENABLE_CMP_PROT_L4 = BIT(25),
+ HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4 = BIT(26),
+ HW_ATL_RX_ENABLE_CMP_SRC_PORT_L4 = BIT(27),
+ HW_ATL_RX_ENABLE_CMP_DEST_ADDR_L3 = BIT(28),
+ HW_ATL_RX_ENABLE_CMP_SRC_ADDR_L3 = BIT(29),
+ HW_ATL_RX_ENABLE_L3_IPV6 = BIT(30),
+ HW_ATL_RX_ENABLE_FLTR_L3L4 = BIT(31)
+};
+
+#define HW_ATL_RX_QUEUE_FL3L4_SHIFT 8U
+#define HW_ATL_RX_ACTION_FL3F4_SHIFT 16U
+
+#define HW_ATL_RX_CNT_REG_ADDR_IPV6 4U
+
+#define HW_ATL_GET_REG_LOCATION_FL3L4(location) \
+ ((location) - AQ_RX_FIRST_LOC_FL3L4)
+
+enum hal_atl_utils_fw_state_e {
+ MPI_DEINIT = 0,
+ MPI_RESET = 1,
+ MPI_INIT = 2,
+ MPI_POWER = 4,
+};
+
+#define HAL_ATLANTIC_RATE_10G BIT(0)
+#define HAL_ATLANTIC_RATE_5G BIT(1)
+#define HAL_ATLANTIC_RATE_5GSR BIT(2)
+#define HAL_ATLANTIC_RATE_2G5 BIT(3)
+#define HAL_ATLANTIC_RATE_1G BIT(4)
+#define HAL_ATLANTIC_RATE_100M BIT(5)
+#define HAL_ATLANTIC_RATE_INVALID BIT(6)
+
+#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_ADD 0x4U
+#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_PRIOR 0x10000000U
+#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_PATTERN 0x1U
+#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_MAG_PKT 0x2U
+#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_DEL 0x5U
+#define HAL_ATLANTIC_UTILS_FW_MSG_ENABLE_WAKEUP 0x6U
+
+enum hw_atl_fw2x_rate {
+ FW2X_RATE_100M = 0x20,
+ FW2X_RATE_1G = 0x100,
+ FW2X_RATE_2G5 = 0x200,
+ FW2X_RATE_5G = 0x400,
+ FW2X_RATE_10G = 0x800,
+};
+
+/* 0x370
+ * Link capabilities resolution register
+ */
+enum hw_atl_fw2x_caps_lo {
+ CAPS_LO_10BASET_HD = 0,
+ CAPS_LO_10BASET_FD,
+ CAPS_LO_100BASETX_HD,
+ CAPS_LO_100BASET4_HD,
+ CAPS_LO_100BASET2_HD,
+ CAPS_LO_100BASETX_FD = 5,
+ CAPS_LO_100BASET2_FD,
+ CAPS_LO_1000BASET_HD,
+ CAPS_LO_1000BASET_FD,
+ CAPS_LO_2P5GBASET_FD,
+ CAPS_LO_5GBASET_FD = 10,
+ CAPS_LO_10GBASET_FD,
+ CAPS_LO_AUTONEG,
+ CAPS_LO_SMBUS_READ,
+ CAPS_LO_SMBUS_WRITE,
+ CAPS_LO_MACSEC = 15,
+ CAPS_LO_RESERVED1,
+ CAPS_LO_WAKE_ON_LINK_FORCED,
+ CAPS_LO_HIGH_TEMP_WARNING = 29,
+ CAPS_LO_DRIVER_SCRATCHPAD = 30,
+ CAPS_LO_GLOBAL_FAULT = 31
+};
+
+/* 0x374
+ * Status register
+ */
+enum hw_atl_fw2x_caps_hi {
+ CAPS_HI_TPO2EN = 0,
+ CAPS_HI_10BASET_EEE,
+ CAPS_HI_RESERVED2,
+ CAPS_HI_PAUSE,
+ CAPS_HI_ASYMMETRIC_PAUSE,
+ CAPS_HI_100BASETX_EEE = 5,
+ CAPS_HI_PHY_BUF_SEND,
+ CAPS_HI_PHY_BUF_RECV,
+ CAPS_HI_1000BASET_FD_EEE,
+ CAPS_HI_2P5GBASET_FD_EEE,
+ CAPS_HI_5GBASET_FD_EEE = 10,
+ CAPS_HI_10GBASET_FD_EEE,
+ CAPS_HI_FW_REQUEST,
+ CAPS_HI_PHY_LOG,
+ CAPS_HI_EEE_AUTO_DISABLE_SETTINGS,
+ CAPS_HI_PFC = 15,
+ CAPS_HI_WAKE_ON_LINK,
+ CAPS_HI_CABLE_DIAG,
+ CAPS_HI_TEMPERATURE,
+ CAPS_HI_DOWNSHIFT,
+ CAPS_HI_PTP_AVB_EN_FW2X = 20,
+ CAPS_HI_THERMAL_SHUTDOWN,
+ CAPS_HI_LINK_DROP,
+ CAPS_HI_SLEEP_PROXY,
+ CAPS_HI_WOL,
+ CAPS_HI_MAC_STOP = 25,
+ CAPS_HI_EXT_LOOPBACK,
+ CAPS_HI_INT_LOOPBACK,
+ CAPS_HI_EFUSE_AGENT,
+ CAPS_HI_WOL_TIMER,
+ CAPS_HI_STATISTICS = 30,
+ CAPS_HI_TRANSACTION_ID,
+};
+
+/* 0x36C
+ * Control register
+ */
+enum hw_atl_fw2x_ctrl {
+ CTRL_RESERVED1 = 0,
+ CTRL_RESERVED2,
+ CTRL_RESERVED3,
+ CTRL_PAUSE,
+ CTRL_ASYMMETRIC_PAUSE,
+ CTRL_RESERVED4 = 5,
+ CTRL_RESERVED5,
+ CTRL_RESERVED6,
+ CTRL_1GBASET_FD_EEE,
+ CTRL_2P5GBASET_FD_EEE,
+ CTRL_5GBASET_FD_EEE = 10,
+ CTRL_10GBASET_FD_EEE,
+ CTRL_THERMAL_SHUTDOWN,
+ CTRL_PHY_LOGS,
+ CTRL_EEE_AUTO_DISABLE,
+ CTRL_PFC = 15,
+ CTRL_WAKE_ON_LINK,
+ CTRL_CABLE_DIAG,
+ CTRL_TEMPERATURE,
+ CTRL_DOWNSHIFT,
+ CTRL_PTP_AVB = 20,
+ CTRL_RESERVED7,
+ CTRL_LINK_DROP,
+ CTRL_SLEEP_PROXY,
+ CTRL_WOL,
+ CTRL_MAC_STOP = 25,
+ CTRL_EXT_LOOPBACK,
+ CTRL_INT_LOOPBACK,
+ CTRL_RESERVED8,
+ CTRL_WOL_TIMER,
+ CTRL_STATISTICS = 30,
+ CTRL_FORCE_RECONNECT,
+};
+
+enum hw_atl_caps_ex {
+ CAPS_EX_LED_CONTROL = 0,
+ CAPS_EX_LED0_MODE_LO,
+ CAPS_EX_LED0_MODE_HI,
+ CAPS_EX_LED1_MODE_LO,
+ CAPS_EX_LED1_MODE_HI,
+ CAPS_EX_LED2_MODE_LO = 5,
+ CAPS_EX_LED2_MODE_HI,
+ CAPS_EX_RESERVED07,
+ CAPS_EX_RESERVED08,
+ CAPS_EX_RESERVED09,
+ CAPS_EX_RESERVED10 = 10,
+ CAPS_EX_RESERVED11,
+ CAPS_EX_RESERVED12,
+ CAPS_EX_RESERVED13,
+ CAPS_EX_RESERVED14,
+ CAPS_EX_RESERVED15 = 15,
+ CAPS_EX_PHY_PTP_EN,
+ CAPS_EX_MAC_PTP_EN,
+ CAPS_EX_EXT_CLK_EN,
+ CAPS_EX_SCHED_DMA_EN,
+ CAPS_EX_PTP_GPIO_EN = 20,
+ CAPS_EX_UPDATE_SETTINGS,
+ CAPS_EX_PHY_CTRL_TS_PIN,
+ CAPS_EX_SNR_OPERATING_MARGIN,
+ CAPS_EX_RESERVED24,
+ CAPS_EX_RESERVED25 = 25,
+ CAPS_EX_RESERVED26,
+ CAPS_EX_RESERVED27,
+ CAPS_EX_RESERVED28,
+ CAPS_EX_RESERVED29,
+ CAPS_EX_RESERVED30 = 30,
+ CAPS_EX_RESERVED31
+};
+
+struct aq_hw_s;
+struct aq_fw_ops;
+struct aq_hw_caps_s;
+struct aq_hw_link_status_s;
+
+int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops);
+
+int hw_atl_utils_soft_reset(struct aq_hw_s *self);
+
+void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p);
+
+int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
+ struct hw_atl_utils_mbox_header *pmbox);
+
+void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
+ struct hw_atl_utils_mbox *pmbox);
+
+void hw_atl_utils_mpi_set(struct aq_hw_s *self,
+ enum hal_atl_utils_fw_state_e state,
+ u32 speed);
+
+int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self);
+
+int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
+ u8 *mac);
+
+unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps);
+
+int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
+ const struct aq_hw_caps_s *aq_hw_caps,
+ u32 *regs_buff);
+
+int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
+ unsigned int power_state);
+
+int hw_atl_utils_hw_deinit(struct aq_hw_s *self);
+
+u32 hw_atl_utils_get_fw_version(struct aq_hw_s *self);
+
+int hw_atl_utils_update_stats(struct aq_hw_s *self);
+
+struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self);
+
+int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
+ u32 *p, u32 cnt);
+
+int hw_atl_write_fwcfg_dwords(struct aq_hw_s *self, u32 *p, u32 cnt);
+
+int hw_atl_write_fwsettings_dwords(struct aq_hw_s *self, u32 offset, u32 *p,
+ u32 cnt);
+
+int hw_atl_utils_fw_set_wol(struct aq_hw_s *self, bool wol_enabled, u8 *mac);
+
+int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size);
+
+int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
+ struct hw_atl_utils_fw_rpc **rpc);
+
+bool hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual);
+
+extern const struct aq_fw_ops aq_fw_1x_ops;
+extern const struct aq_fw_ops aq_fw_2x_ops;
+
+#endif /* HW_ATL_UTILS_H */
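The CAPS_HI and CTRL enumerations above are bit positions within 32-bit firmware interface words (the control word at 0x36C, the state word at 0x374), not masks in themselves. A minimal sketch of how a caller is expected to consume them, assuming a caps_hi word already read from the mailbox; the helper name is hypothetical:

#include <linux/bits.h>

/* Illustrative only: enum values are bit indices, so convert them to
 * masks via BIT() before testing a mailbox word.
 */
static bool fw2x_caps_has_temp(u32 caps_hi_word)
{
	return !!(caps_hi_word & BIT(CAPS_HI_TEMPERATURE));
}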
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
new file mode 100644
index 000000000..4d4cfbc91
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -0,0 +1,732 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
+ */
+
+/* File hw_atl_utils_fw2x.c: Definition of firmware 2.x functions for
+ * Atlantic hardware abstraction layer.
+ */
+
+#include "../aq_hw.h"
+#include "../aq_hw_utils.h"
+#include "../aq_pci_func.h"
+#include "../aq_ring.h"
+#include "../aq_vec.h"
+#include "../aq_nic.h"
+#include "hw_atl_utils.h"
+#include "hw_atl_llh.h"
+
+#define HW_ATL_FW2X_MPI_LED_ADDR 0x31c
+#define HW_ATL_FW2X_MPI_RPC_ADDR 0x334
+
+#define HW_ATL_FW2X_MPI_MBOX_ADDR 0x360
+#define HW_ATL_FW2X_MPI_EFUSE_ADDR 0x364
+#define HW_ATL_FW2X_MPI_CONTROL_ADDR 0x368
+#define HW_ATL_FW2X_MPI_CONTROL2_ADDR 0x36C
+#define HW_ATL_FW2X_MPI_STATE_ADDR 0x370
+#define HW_ATL_FW2X_MPI_STATE2_ADDR 0x374
+
+#define HW_ATL_FW3X_EXT_CONTROL_ADDR 0x378
+#define HW_ATL_FW3X_EXT_STATE_ADDR 0x37c
+
+#define HW_ATL_FW3X_PTP_ADJ_LSW_ADDR 0x50a0
+#define HW_ATL_FW3X_PTP_ADJ_MSW_ADDR 0x50a4
+
+#define HW_ATL_FW2X_CAP_PAUSE BIT(CAPS_HI_PAUSE)
+#define HW_ATL_FW2X_CAP_ASYM_PAUSE BIT(CAPS_HI_ASYMMETRIC_PAUSE)
+#define HW_ATL_FW2X_CAP_SLEEP_PROXY BIT(CAPS_HI_SLEEP_PROXY)
+#define HW_ATL_FW2X_CAP_WOL BIT(CAPS_HI_WOL)
+
+#define HW_ATL_FW2X_CTRL_WAKE_ON_LINK BIT(CTRL_WAKE_ON_LINK)
+#define HW_ATL_FW2X_CTRL_SLEEP_PROXY BIT(CTRL_SLEEP_PROXY)
+#define HW_ATL_FW2X_CTRL_WOL BIT(CTRL_WOL)
+#define HW_ATL_FW2X_CTRL_LINK_DROP BIT(CTRL_LINK_DROP)
+#define HW_ATL_FW2X_CTRL_PAUSE BIT(CTRL_PAUSE)
+#define HW_ATL_FW2X_CTRL_TEMPERATURE BIT(CTRL_TEMPERATURE)
+#define HW_ATL_FW2X_CTRL_ASYMMETRIC_PAUSE BIT(CTRL_ASYMMETRIC_PAUSE)
+#define HW_ATL_FW2X_CTRL_INT_LOOPBACK BIT(CTRL_INT_LOOPBACK)
+#define HW_ATL_FW2X_CTRL_EXT_LOOPBACK BIT(CTRL_EXT_LOOPBACK)
+#define HW_ATL_FW2X_CTRL_DOWNSHIFT BIT(CTRL_DOWNSHIFT)
+#define HW_ATL_FW2X_CTRL_FORCE_RECONNECT BIT(CTRL_FORCE_RECONNECT)
+
+#define HW_ATL_FW2X_CAP_EEE_1G_MASK BIT(CAPS_HI_1000BASET_FD_EEE)
+#define HW_ATL_FW2X_CAP_EEE_2G5_MASK BIT(CAPS_HI_2P5GBASET_FD_EEE)
+#define HW_ATL_FW2X_CAP_EEE_5G_MASK BIT(CAPS_HI_5GBASET_FD_EEE)
+#define HW_ATL_FW2X_CAP_EEE_10G_MASK BIT(CAPS_HI_10GBASET_FD_EEE)
+
+#define HW_ATL_FW2X_CAP_MACSEC BIT(CAPS_LO_MACSEC)
+
+#define HAL_ATLANTIC_WOL_FILTERS_COUNT 8
+#define HAL_ATLANTIC_UTILS_FW2X_MSG_WOL 0x0E
+
+#define HW_ATL_FW_VER_LED 0x03010026U
+#define HW_ATL_FW_VER_MEDIA_CONTROL 0x0301005aU
+
+struct __packed fw2x_msg_wol_pattern {
+ u8 mask[16];
+ u32 crc;
+};
+
+struct __packed fw2x_msg_wol {
+ u32 msg_id;
+ u8 hw_addr[ETH_ALEN];
+ u8 magic_packet_enabled;
+ u8 filter_count;
+ struct fw2x_msg_wol_pattern filter[HAL_ATLANTIC_WOL_FILTERS_COUNT];
+ u8 link_up_enabled;
+ u8 link_down_enabled;
+ u16 reserved;
+ u32 link_up_timeout;
+ u32 link_down_timeout;
+};
+
+static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed);
+static int aq_fw2x_set_state(struct aq_hw_s *self,
+ enum hal_atl_utils_fw_state_e state);
+
+static u32 aq_fw2x_mbox_get(struct aq_hw_s *self);
+static u32 aq_fw2x_rpc_get(struct aq_hw_s *self);
+static int aq_fw2x_settings_get(struct aq_hw_s *self, u32 *addr);
+static u32 aq_fw2x_state_get(struct aq_hw_s *self);
+static u32 aq_fw2x_state2_get(struct aq_hw_s *self);
+
+static int aq_fw2x_init(struct aq_hw_s *self)
+{
+ int err = 0;
+
+ /* check 10 times at 1 ms intervals */
+ err = readx_poll_timeout_atomic(aq_fw2x_mbox_get,
+ self, self->mbox_addr,
+ self->mbox_addr != 0U,
+ 1000U, 10000U);
+
+ err = readx_poll_timeout_atomic(aq_fw2x_rpc_get,
+ self, self->rpc_addr,
+ self->rpc_addr != 0U,
+ 1000U, 100000U);
+
+ err = aq_fw2x_settings_get(self, &self->settings_addr);
+
+ return err;
+}
+
+static int aq_fw2x_deinit(struct aq_hw_s *self)
+{
+ int err = aq_fw2x_set_link_speed(self, 0);
+
+ if (!err)
+ err = aq_fw2x_set_state(self, MPI_DEINIT);
+
+ return err;
+}
+
+static enum hw_atl_fw2x_rate link_speed_mask_2fw2x_ratemask(u32 speed)
+{
+ enum hw_atl_fw2x_rate rate = 0;
+
+ if (speed & AQ_NIC_RATE_10G)
+ rate |= FW2X_RATE_10G;
+
+ if (speed & AQ_NIC_RATE_5G)
+ rate |= FW2X_RATE_5G;
+
+ if (speed & AQ_NIC_RATE_2G5)
+ rate |= FW2X_RATE_2G5;
+
+ if (speed & AQ_NIC_RATE_1G)
+ rate |= FW2X_RATE_1G;
+
+ if (speed & AQ_NIC_RATE_100M)
+ rate |= FW2X_RATE_100M;
+
+ return rate;
+}
+
+static u32 fw2x_to_eee_mask(u32 speed)
+{
+ u32 rate = 0;
+
+ if (speed & HW_ATL_FW2X_CAP_EEE_10G_MASK)
+ rate |= AQ_NIC_RATE_EEE_10G;
+ if (speed & HW_ATL_FW2X_CAP_EEE_5G_MASK)
+ rate |= AQ_NIC_RATE_EEE_5G;
+ if (speed & HW_ATL_FW2X_CAP_EEE_2G5_MASK)
+ rate |= AQ_NIC_RATE_EEE_2G5;
+ if (speed & HW_ATL_FW2X_CAP_EEE_1G_MASK)
+ rate |= AQ_NIC_RATE_EEE_1G;
+
+ return rate;
+}
+
+static u32 eee_mask_to_fw2x(u32 speed)
+{
+ u32 rate = 0;
+
+ if (speed & AQ_NIC_RATE_EEE_10G)
+ rate |= HW_ATL_FW2X_CAP_EEE_10G_MASK;
+ if (speed & AQ_NIC_RATE_EEE_5G)
+ rate |= HW_ATL_FW2X_CAP_EEE_5G_MASK;
+ if (speed & AQ_NIC_RATE_EEE_2G5)
+ rate |= HW_ATL_FW2X_CAP_EEE_2G5_MASK;
+ if (speed & AQ_NIC_RATE_EEE_1G)
+ rate |= HW_ATL_FW2X_CAP_EEE_1G_MASK;
+
+ return rate;
+}
+
+static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed)
+{
+ u32 val = link_speed_mask_2fw2x_ratemask(speed);
+
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL_ADDR, val);
+
+ return 0;
+}
+
+static void aq_fw2x_upd_flow_control_bits(struct aq_hw_s *self,
+ u32 *mpi_state, u32 fc)
+{
+ *mpi_state &= ~(HW_ATL_FW2X_CTRL_PAUSE |
+ HW_ATL_FW2X_CTRL_ASYMMETRIC_PAUSE);
+
+ switch (fc) {
+ /* There is no explicit mode for Rx-only pause frames;
+ * thus, we join this mode with FC full.
+ * FC full means either Rx, or Tx, or both.
+ */
+ case AQ_NIC_FC_FULL:
+ case AQ_NIC_FC_RX:
+ *mpi_state |= HW_ATL_FW2X_CTRL_PAUSE |
+ HW_ATL_FW2X_CTRL_ASYMMETRIC_PAUSE;
+ break;
+ case AQ_NIC_FC_TX:
+ *mpi_state |= HW_ATL_FW2X_CTRL_ASYMMETRIC_PAUSE;
+ break;
+ }
+}
+
+static void aq_fw2x_upd_eee_rate_bits(struct aq_hw_s *self, u32 *mpi_opts,
+ u32 eee_speeds)
+{
+ *mpi_opts &= ~(HW_ATL_FW2X_CAP_EEE_1G_MASK |
+ HW_ATL_FW2X_CAP_EEE_2G5_MASK |
+ HW_ATL_FW2X_CAP_EEE_5G_MASK |
+ HW_ATL_FW2X_CAP_EEE_10G_MASK);
+
+ *mpi_opts |= eee_mask_to_fw2x(eee_speeds);
+}
+
+static int aq_fw2x_set_state(struct aq_hw_s *self,
+ enum hal_atl_utils_fw_state_e state)
+{
+ u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
+
+ switch (state) {
+ case MPI_INIT:
+ mpi_state &= ~BIT(CAPS_HI_LINK_DROP);
+ aq_fw2x_upd_eee_rate_bits(self, &mpi_state, cfg->eee_speeds);
+ aq_fw2x_upd_flow_control_bits(self, &mpi_state,
+ self->aq_nic_cfg->fc.req);
+ break;
+ case MPI_DEINIT:
+ mpi_state |= BIT(CAPS_HI_LINK_DROP);
+ break;
+ case MPI_RESET:
+ case MPI_POWER:
+ /* No actions */
+ break;
+ }
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_state);
+
+ return 0;
+}
+
+static int aq_fw2x_update_link_status(struct aq_hw_s *self)
+{
+ struct aq_hw_link_status_s *link_status = &self->aq_link_status;
+ u32 mpi_state;
+ u32 speed;
+
+ mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE_ADDR);
+ speed = mpi_state & (FW2X_RATE_100M | FW2X_RATE_1G |
+ FW2X_RATE_2G5 | FW2X_RATE_5G |
+ FW2X_RATE_10G);
+
+ if (speed) {
+ if (speed & FW2X_RATE_10G)
+ link_status->mbps = 10000;
+ else if (speed & FW2X_RATE_5G)
+ link_status->mbps = 5000;
+ else if (speed & FW2X_RATE_2G5)
+ link_status->mbps = 2500;
+ else if (speed & FW2X_RATE_1G)
+ link_status->mbps = 1000;
+ else if (speed & FW2X_RATE_100M)
+ link_status->mbps = 100;
+ else
+ link_status->mbps = 10000;
+ } else {
+ link_status->mbps = 0;
+ }
+ link_status->full_duplex = true;
+
+ return 0;
+}
+
+static int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
+{
+ u32 efuse_addr = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_EFUSE_ADDR);
+ u32 mac_addr[2] = { 0 };
+ int err = 0;
+
+ if (efuse_addr != 0) {
+ err = hw_atl_utils_fw_downld_dwords(self,
+ efuse_addr + (40U * 4U),
+ mac_addr,
+ ARRAY_SIZE(mac_addr));
+ if (err)
+ return err;
+ mac_addr[0] = __swab32(mac_addr[0]);
+ mac_addr[1] = __swab32(mac_addr[1]);
+ }
+
+ ether_addr_copy(mac, (u8 *)mac_addr);
+
+ return err;
+}
+
+static int aq_fw2x_update_stats(struct aq_hw_s *self)
+{
+ u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+ u32 orig_stats_val = mpi_opts & BIT(CAPS_HI_STATISTICS);
+ u32 stats_val;
+ int err = 0;
+
+ /* Toggle statistics bit for FW to update */
+ mpi_opts = mpi_opts ^ BIT(CAPS_HI_STATISTICS);
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+
+ /* Wait for FW to report back */
+ err = readx_poll_timeout_atomic(aq_fw2x_state2_get,
+ self, stats_val,
+ orig_stats_val != (stats_val &
+ BIT(CAPS_HI_STATISTICS)),
+ 1U, 10000U);
+ if (err)
+ return err;
+
+ return hw_atl_utils_update_stats(self);
+}
+
+static int aq_fw2x_get_phy_temp(struct aq_hw_s *self, int *temp)
+{
+ u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+ u32 temp_val = mpi_opts & HW_ATL_FW2X_CTRL_TEMPERATURE;
+ u32 phy_temp_offset;
+ u32 temp_res;
+ int err = 0;
+ u32 val;
+
+ phy_temp_offset = self->mbox_addr + offsetof(struct hw_atl_utils_mbox,
+ info.phy_temperature);
+
+ /* Toggle temperature bit 0x36C.18 (CTRL_TEMPERATURE) for FW to update */
+ mpi_opts = mpi_opts ^ HW_ATL_FW2X_CTRL_TEMPERATURE;
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+ /* Wait for FW to report back */
+ err = readx_poll_timeout_atomic(aq_fw2x_state2_get, self, val,
+ temp_val !=
+ (val & HW_ATL_FW2X_CTRL_TEMPERATURE),
+ 1U, 10000U);
+ err = hw_atl_utils_fw_downld_dwords(self, phy_temp_offset,
+ &temp_res, 1);
+
+ if (err)
+ return err;
+
+ /* Convert PHY temperature from 1/256 degree Celsius
+ * to 1/1000 degree Celsius.
+ */
+ *temp = (int16_t)(temp_res & 0xFFFF) * 1000 / 256;
+
+ return 0;
+}
+
+static int aq_fw2x_set_wol(struct aq_hw_s *self, const u8 *mac)
+{
+ struct hw_atl_utils_fw_rpc *rpc = NULL;
+ struct offload_info *info = NULL;
+ u32 wol_bits = 0;
+ u32 rpc_size;
+ int err = 0;
+ u32 val;
+
+ if (self->aq_nic_cfg->wol & WAKE_PHY) {
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR,
+ HW_ATL_FW2X_CTRL_LINK_DROP);
+ readx_poll_timeout_atomic(aq_fw2x_state2_get, self, val,
+ (val &
+ HW_ATL_FW2X_CTRL_LINK_DROP) != 0,
+ 1000, 100000);
+ wol_bits |= HW_ATL_FW2X_CTRL_WAKE_ON_LINK;
+ }
+
+ if (self->aq_nic_cfg->wol & WAKE_MAGIC) {
+ wol_bits |= HW_ATL_FW2X_CTRL_SLEEP_PROXY |
+ HW_ATL_FW2X_CTRL_WOL;
+
+ err = hw_atl_utils_fw_rpc_wait(self, &rpc);
+ if (err < 0)
+ goto err_exit;
+
+ rpc_size = sizeof(*info) +
+ offsetof(struct hw_atl_utils_fw_rpc, fw2x_offloads);
+ memset(rpc, 0, rpc_size);
+ info = &rpc->fw2x_offloads;
+ memcpy(info->mac_addr, mac, ETH_ALEN);
+ info->len = sizeof(*info);
+
+ err = hw_atl_utils_fw_rpc_call(self, rpc_size);
+ if (err < 0)
+ goto err_exit;
+ }
+
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, wol_bits);
+
+err_exit:
+ return err;
+}
+
+static int aq_fw2x_set_power(struct aq_hw_s *self, unsigned int power_state,
+ const u8 *mac)
+{
+ int err = 0;
+
+ if (self->aq_nic_cfg->wol)
+ err = aq_fw2x_set_wol(self, mac);
+
+ return err;
+}
+
+static int aq_fw2x_send_fw_request(struct aq_hw_s *self,
+ const struct hw_fw_request_iface *fw_req,
+ size_t size)
+{
+ u32 ctrl2, orig_ctrl2;
+ u32 dword_cnt;
+ int err = 0;
+ u32 val;
+
+ /* Write data to drvIface Mailbox */
+ dword_cnt = size / sizeof(u32);
+ if (size % sizeof(u32))
+ dword_cnt++;
+ err = hw_atl_write_fwcfg_dwords(self, (void *)fw_req, dword_cnt);
+ if (err < 0)
+ goto err_exit;
+
+ /* Toggle the FW request bit for FW to process the request */
+ ctrl2 = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+ orig_ctrl2 = ctrl2 & BIT(CAPS_HI_FW_REQUEST);
+ ctrl2 = ctrl2 ^ BIT(CAPS_HI_FW_REQUEST);
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, ctrl2);
+
+ /* Wait for FW to report back */
+ err = readx_poll_timeout_atomic(aq_fw2x_state2_get, self, val,
+ orig_ctrl2 != (val &
+ BIT(CAPS_HI_FW_REQUEST)),
+ 1U, 10000U);
+
+err_exit:
+ return err;
+}
+
+static void aq_fw3x_enable_ptp(struct aq_hw_s *self, int enable)
+{
+ u32 ptp_opts = aq_hw_read_reg(self, HW_ATL_FW3X_EXT_STATE_ADDR);
+ u32 all_ptp_features = BIT(CAPS_EX_PHY_PTP_EN) |
+ BIT(CAPS_EX_PTP_GPIO_EN);
+
+ if (enable)
+ ptp_opts |= all_ptp_features;
+ else
+ ptp_opts &= ~all_ptp_features;
+
+ aq_hw_write_reg(self, HW_ATL_FW3X_EXT_CONTROL_ADDR, ptp_opts);
+}
+
+static void aq_fw3x_adjust_ptp(struct aq_hw_s *self, uint64_t adj)
+{
+ aq_hw_write_reg(self, HW_ATL_FW3X_PTP_ADJ_LSW_ADDR,
+ (adj >> 0) & 0xffffffff);
+ aq_hw_write_reg(self, HW_ATL_FW3X_PTP_ADJ_MSW_ADDR,
+ (adj >> 32) & 0xffffffff);
+}
+
+static int aq_fw2x_led_control(struct aq_hw_s *self, u32 mode)
+{
+ if (self->fw_ver_actual < HW_ATL_FW_VER_LED)
+ return -EOPNOTSUPP;
+
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_LED_ADDR, mode);
+
+ return 0;
+}
+
+static int aq_fw2x_set_eee_rate(struct aq_hw_s *self, u32 speed)
+{
+ u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+
+ aq_fw2x_upd_eee_rate_bits(self, &mpi_opts, speed);
+
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+
+ return 0;
+}
+
+static int aq_fw2x_get_eee_rate(struct aq_hw_s *self, u32 *rate,
+ u32 *supported_rates)
+{
+ u32 mpi_state;
+ u32 caps_hi;
+ int err = 0;
+ u32 offset;
+
+ offset = self->mbox_addr + offsetof(struct hw_atl_utils_mbox,
+ info.caps_hi);
+
+ err = hw_atl_utils_fw_downld_dwords(self, offset, &caps_hi, 1);
+
+ if (err)
+ return err;
+
+ *supported_rates = fw2x_to_eee_mask(caps_hi);
+
+ mpi_state = aq_fw2x_state2_get(self);
+ *rate = fw2x_to_eee_mask(mpi_state);
+
+ return err;
+}
+
+static int aq_fw2x_renegotiate(struct aq_hw_s *self)
+{
+ u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+
+ mpi_opts |= BIT(CTRL_FORCE_RECONNECT);
+
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+
+ return 0;
+}
+
+static int aq_fw2x_set_flow_control(struct aq_hw_s *self)
+{
+ u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+
+ aq_fw2x_upd_flow_control_bits(self, &mpi_state,
+ self->aq_nic_cfg->fc.req);
+
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_state);
+
+ return 0;
+}
+
+static u32 aq_fw2x_get_flow_control(struct aq_hw_s *self, u32 *fcmode)
+{
+ u32 mpi_state = aq_fw2x_state2_get(self);
+ *fcmode = 0;
+
+ if (mpi_state & HW_ATL_FW2X_CAP_PAUSE)
+ *fcmode |= AQ_NIC_FC_RX;
+
+ if (mpi_state & HW_ATL_FW2X_CAP_ASYM_PAUSE)
+ *fcmode |= AQ_NIC_FC_TX;
+
+ return 0;
+}
+
+static int aq_fw2x_set_phyloopback(struct aq_hw_s *self, u32 mode, bool enable)
+{
+ u32 mpi_opts;
+
+ switch (mode) {
+ case AQ_HW_LOOPBACK_PHYINT_SYS:
+ mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+ if (enable)
+ mpi_opts |= HW_ATL_FW2X_CTRL_INT_LOOPBACK;
+ else
+ mpi_opts &= ~HW_ATL_FW2X_CTRL_INT_LOOPBACK;
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+ break;
+ case AQ_HW_LOOPBACK_PHYEXT_SYS:
+ mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+ if (enable)
+ mpi_opts |= HW_ATL_FW2X_CTRL_EXT_LOOPBACK;
+ else
+ mpi_opts &= ~HW_ATL_FW2X_CTRL_EXT_LOOPBACK;
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static u32 aq_fw2x_mbox_get(struct aq_hw_s *self)
+{
+ return aq_hw_read_reg(self, HW_ATL_FW2X_MPI_MBOX_ADDR);
+}
+
+static u32 aq_fw2x_rpc_get(struct aq_hw_s *self)
+{
+ return aq_hw_read_reg(self, HW_ATL_FW2X_MPI_RPC_ADDR);
+}
+
+static int aq_fw2x_settings_get(struct aq_hw_s *self, u32 *addr)
+{
+ int err = 0;
+ u32 offset;
+
+ offset = self->mbox_addr + offsetof(struct hw_atl_utils_mbox,
+ info.setting_address);
+
+ err = hw_atl_utils_fw_downld_dwords(self, offset, addr, 1);
+
+ return err;
+}
+
+static u32 aq_fw2x_state_get(struct aq_hw_s *self)
+{
+ return aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE_ADDR);
+}
+
+static u32 aq_fw2x_state2_get(struct aq_hw_s *self)
+{
+ return aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR);
+}
+
+static int aq_fw2x_set_downshift(struct aq_hw_s *self, u32 counter)
+{
+ int err = 0;
+ u32 mpi_opts;
+ u32 offset;
+
+ offset = offsetof(struct hw_atl_utils_settings, downshift_retry_count);
+ err = hw_atl_write_fwsettings_dwords(self, offset, &counter, 1);
+ if (err)
+ return err;
+
+ mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+ if (counter)
+ mpi_opts |= HW_ATL_FW2X_CTRL_DOWNSHIFT;
+ else
+ mpi_opts &= ~HW_ATL_FW2X_CTRL_DOWNSHIFT;
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+
+ return err;
+}
+
+static int aq_fw2x_set_media_detect(struct aq_hw_s *self, bool on)
+{
+ u32 enable;
+ u32 offset;
+
+ if (self->fw_ver_actual < HW_ATL_FW_VER_MEDIA_CONTROL)
+ return -EOPNOTSUPP;
+
+ offset = offsetof(struct hw_atl_utils_settings, media_detect);
+ enable = on;
+
+ return hw_atl_write_fwsettings_dwords(self, offset, &enable, 1);
+}
+
+static u32 aq_fw2x_get_link_capabilities(struct aq_hw_s *self)
+{
+ int err = 0;
+ u32 offset;
+ u32 val;
+
+ offset = self->mbox_addr +
+ offsetof(struct hw_atl_utils_mbox, info.caps_lo);
+
+ err = hw_atl_utils_fw_downld_dwords(self, offset, &val, 1);
+
+ if (err)
+ return 0;
+
+ return val;
+}
+
+static int aq_fw2x_send_macsec_req(struct aq_hw_s *hw,
+ struct macsec_msg_fw_request *req,
+ struct macsec_msg_fw_response *response)
+{
+ u32 low_status, low_req = 0;
+ u32 dword_cnt;
+ u32 caps_lo;
+ u32 offset;
+ int err;
+
+ if (!req || !response)
+ return -EINVAL;
+
+ caps_lo = aq_fw2x_get_link_capabilities(hw);
+ if (!(caps_lo & BIT(CAPS_LO_MACSEC)))
+ return -EOPNOTSUPP;
+
+ /* Write macsec request to cfg memory */
+ dword_cnt = (sizeof(*req) + sizeof(u32) - 1) / sizeof(u32);
+ err = hw_atl_write_fwcfg_dwords(hw, (void *)req, dword_cnt);
+ if (err < 0)
+ return err;
+
+ /* Toggle 0x368.CAPS_LO_MACSEC bit */
+ low_req = aq_hw_read_reg(hw, HW_ATL_FW2X_MPI_CONTROL_ADDR);
+ low_req ^= HW_ATL_FW2X_CAP_MACSEC;
+ aq_hw_write_reg(hw, HW_ATL_FW2X_MPI_CONTROL_ADDR, low_req);
+
+ /* Wait for FW to report back */
+ err = readx_poll_timeout_atomic(aq_fw2x_state_get, hw, low_status,
+ low_req != (low_status & BIT(CAPS_LO_MACSEC)), 1U, 10000U);
+ if (err)
+ return -EIO;
+
+ /* Read status of write operation */
+ offset = hw->rpc_addr + sizeof(u32);
+ err = hw_atl_utils_fw_downld_dwords(hw, offset, (u32 *)(void *)response,
+ sizeof(*response) / sizeof(u32));
+
+ return err;
+}
+
+const struct aq_fw_ops aq_fw_2x_ops = {
+ .init = aq_fw2x_init,
+ .deinit = aq_fw2x_deinit,
+ .reset = NULL,
+ .renegotiate = aq_fw2x_renegotiate,
+ .get_mac_permanent = aq_fw2x_get_mac_permanent,
+ .set_link_speed = aq_fw2x_set_link_speed,
+ .set_state = aq_fw2x_set_state,
+ .update_link_status = aq_fw2x_update_link_status,
+ .update_stats = aq_fw2x_update_stats,
+ .get_mac_temp = NULL,
+ .get_phy_temp = aq_fw2x_get_phy_temp,
+ .set_power = aq_fw2x_set_power,
+ .set_eee_rate = aq_fw2x_set_eee_rate,
+ .get_eee_rate = aq_fw2x_get_eee_rate,
+ .set_flow_control = aq_fw2x_set_flow_control,
+ .get_flow_control = aq_fw2x_get_flow_control,
+ .send_fw_request = aq_fw2x_send_fw_request,
+ .enable_ptp = aq_fw3x_enable_ptp,
+ .led_control = aq_fw2x_led_control,
+ .set_phyloopback = aq_fw2x_set_phyloopback,
+ .set_downshift = aq_fw2x_set_downshift,
+ .set_media_detect = aq_fw2x_set_media_detect,
+ .adjust_ptp = aq_fw3x_adjust_ptp,
+ .get_link_capabilities = aq_fw2x_get_link_capabilities,
+ .send_macsec_req = aq_fw2x_send_macsec_req,
+};
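Several of the operations above (update_stats, get_phy_temp, send_fw_request, send_macsec_req) share one handshake: flip a request bit in a control word, then poll the matching state word until firmware mirrors the change. A minimal sketch of that pattern, factored into a helper purely for illustration; the driver itself open-codes the sequence per operation:

/* Hypothetical helper capturing the FW2X toggle-and-poll handshake.
 * aq_fw2x_state2_get() and the register names are from the file above;
 * the factoring into one function is this sketch's own assumption.
 */
static int fw2x_toggle_and_wait(struct aq_hw_s *self, u32 bit_mask)
{
	u32 ctrl2 = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
	u32 orig = ctrl2 & bit_mask;
	u32 val;

	/* Flip the request bit; firmware acknowledges by mirroring it
	 * into the STATE2 word at 0x374.
	 */
	aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR,
			ctrl2 ^ bit_mask);

	return readx_poll_timeout_atomic(aq_fw2x_state2_get, self, val,
					 orig != (val & bit_mask),
					 1U, 10000U);
}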
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
new file mode 100644
index 000000000..5dfc75157
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
@@ -0,0 +1,863 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#include "aq_hw.h"
+#include "aq_hw_utils.h"
+#include "aq_ring.h"
+#include "aq_nic.h"
+#include "hw_atl/hw_atl_b0.h"
+#include "hw_atl/hw_atl_utils.h"
+#include "hw_atl/hw_atl_llh.h"
+#include "hw_atl/hw_atl_llh_internal.h"
+#include "hw_atl2_utils.h"
+#include "hw_atl2_llh.h"
+#include "hw_atl2_internal.h"
+#include "hw_atl2_llh_internal.h"
+
+static int hw_atl2_act_rslvr_table_set(struct aq_hw_s *self, u8 location,
+ u32 tag, u32 mask, u32 action);
+
+#define DEFAULT_BOARD_BASIC_CAPABILITIES \
+ .is_64_dma = true, \
+ .op64bit = true, \
+ .msix_irqs = 8U, \
+ .irq_mask = ~0U, \
+ .vecs = HW_ATL2_RSS_MAX, \
+ .tcs_max = HW_ATL2_TC_MAX, \
+ .rxd_alignment = 1U, \
+ .rxd_size = HW_ATL2_RXD_SIZE, \
+ .rxds_max = HW_ATL2_MAX_RXD, \
+ .rxds_min = HW_ATL2_MIN_RXD, \
+ .txd_alignment = 1U, \
+ .txd_size = HW_ATL2_TXD_SIZE, \
+ .txds_max = HW_ATL2_MAX_TXD, \
+ .txds_min = HW_ATL2_MIN_TXD, \
+ .txhwb_alignment = 4096U, \
+ .tx_rings = HW_ATL2_TX_RINGS, \
+ .rx_rings = HW_ATL2_RX_RINGS, \
+ .hw_features = NETIF_F_HW_CSUM | \
+ NETIF_F_RXCSUM | \
+ NETIF_F_RXHASH | \
+ NETIF_F_SG | \
+ NETIF_F_TSO | \
+ NETIF_F_TSO6 | \
+ NETIF_F_LRO | \
+ NETIF_F_NTUPLE | \
+ NETIF_F_HW_VLAN_CTAG_FILTER | \
+ NETIF_F_HW_VLAN_CTAG_RX | \
+ NETIF_F_HW_VLAN_CTAG_TX | \
+ NETIF_F_GSO_UDP_L4 | \
+ NETIF_F_GSO_PARTIAL | \
+ NETIF_F_HW_TC, \
+ .hw_priv_flags = IFF_UNICAST_FLT, \
+ .flow_control = true, \
+ .mtu = HW_ATL2_MTU_JUMBO, \
+ .mac_regs_count = 72, \
+ .hw_alive_check_addr = 0x10U, \
+ .priv_data_len = sizeof(struct hw_atl2_priv)
+
+const struct aq_hw_caps_s hw_atl2_caps_aqc113 = {
+ DEFAULT_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = AQ_NIC_RATE_10G |
+ AQ_NIC_RATE_5G |
+ AQ_NIC_RATE_2G5 |
+ AQ_NIC_RATE_1G |
+ AQ_NIC_RATE_100M |
+ AQ_NIC_RATE_10M,
+};
+
+const struct aq_hw_caps_s hw_atl2_caps_aqc115c = {
+ DEFAULT_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = AQ_NIC_RATE_2G5 |
+ AQ_NIC_RATE_1G |
+ AQ_NIC_RATE_100M |
+ AQ_NIC_RATE_10M,
+};
+
+const struct aq_hw_caps_s hw_atl2_caps_aqc116c = {
+ DEFAULT_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = AQ_NIC_RATE_1G |
+ AQ_NIC_RATE_100M |
+ AQ_NIC_RATE_10M,
+};
+
+static u32 hw_atl2_sem_act_rslvr_get(struct aq_hw_s *self)
+{
+ return hw_atl_reg_glb_cpu_sem_get(self, HW_ATL2_FW_SM_ACT_RSLVR);
+}
+
+static int hw_atl2_hw_reset(struct aq_hw_s *self)
+{
+ struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ int err;
+
+ err = hw_atl2_utils_soft_reset(self);
+ if (err)
+ return err;
+
+ memset(priv, 0, sizeof(*priv));
+
+ self->aq_fw_ops->set_state(self, MPI_RESET);
+
+ err = aq_hw_err_from_flags(self);
+
+ return err;
+}
+
+static int hw_atl2_hw_queue_to_tc_map_set(struct aq_hw_s *self)
+{
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
+ unsigned int tcs, q_per_tc;
+ unsigned int tc, q;
+ u32 rx_map = 0;
+ u32 tx_map = 0;
+
+ hw_atl2_tpb_tx_tc_q_rand_map_en_set(self, 1U);
+
+ switch (cfg->tc_mode) {
+ case AQ_TC_MODE_8TCS:
+ tcs = 8;
+ q_per_tc = 4;
+ break;
+ case AQ_TC_MODE_4TCS:
+ tcs = 4;
+ q_per_tc = 8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (tc = 0; tc != tcs; tc++) {
+ unsigned int tc_q_offset = tc * q_per_tc;
+
+ for (q = tc_q_offset; q != tc_q_offset + q_per_tc; q++) {
+ rx_map |= tc << HW_ATL2_RX_Q_TC_MAP_SHIFT(q);
+ if (HW_ATL2_RX_Q_TC_MAP_ADR(q) !=
+ HW_ATL2_RX_Q_TC_MAP_ADR(q + 1)) {
+ aq_hw_write_reg(self,
+ HW_ATL2_RX_Q_TC_MAP_ADR(q),
+ rx_map);
+ rx_map = 0;
+ }
+
+ tx_map |= tc << HW_ATL2_TX_Q_TC_MAP_SHIFT(q);
+ if (HW_ATL2_TX_Q_TC_MAP_ADR(q) !=
+ HW_ATL2_TX_Q_TC_MAP_ADR(q + 1)) {
+ aq_hw_write_reg(self,
+ HW_ATL2_TX_Q_TC_MAP_ADR(q),
+ tx_map);
+ tx_map = 0;
+ }
+ }
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl2_hw_qos_set(struct aq_hw_s *self)
+{
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
+ u32 tx_buff_size = HW_ATL2_TXBUF_MAX;
+ u32 rx_buff_size = HW_ATL2_RXBUF_MAX;
+ unsigned int prio = 0U;
+ u32 tc = 0U;
+
+ /* TPS Descriptor rate init */
+ hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
+ hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
+
+ /* TPS VM init */
+ hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
+
+ tx_buff_size /= cfg->tcs;
+ rx_buff_size /= cfg->tcs;
+ for (tc = 0; tc < cfg->tcs; tc++) {
+ u32 threshold = 0U;
+
+ /* Tx buf size TC0 */
+ hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, tx_buff_size, tc);
+
+ threshold = (tx_buff_size * (1024 / 32U) * 66U) / 100U;
+ hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self, threshold, tc);
+
+ threshold = (tx_buff_size * (1024 / 32U) * 50U) / 100U;
+ hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self, threshold, tc);
+
+ /* QoS Rx buf size per TC */
+ hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, rx_buff_size, tc);
+
+ threshold = (rx_buff_size * (1024U / 32U) * 66U) / 100U;
+ hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self, threshold, tc);
+
+ threshold = (rx_buff_size * (1024U / 32U) * 50U) / 100U;
+ hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self, threshold, tc);
+
+ hw_atl_b0_set_fc(self, self->aq_nic_cfg->fc.req, tc);
+ }
+
+ /* QoS 802.1p priority -> TC mapping */
+ for (prio = 0; prio < 8; ++prio)
+ hw_atl_rpf_rpb_user_priority_tc_map_set(self, prio,
+ cfg->prio_tc_map[prio]);
+
+ /* ATL2 Apply ring to TC mapping */
+ hw_atl2_hw_queue_to_tc_map_set(self);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl2_hw_rss_set(struct aq_hw_s *self,
+ struct aq_rss_parameters *rss_params)
+{
+ u8 *indirection_table = rss_params->indirection_table;
+ const u32 num_tcs = aq_hw_num_tcs(self);
+ u32 rpf_redir2_enable;
+ int tc;
+ int i;
+
+ rpf_redir2_enable = num_tcs > 4 ? 1 : 0;
+
+ hw_atl2_rpf_redirection_table2_select_set(self, rpf_redir2_enable);
+
+ for (i = HW_ATL2_RSS_REDIRECTION_MAX; i--;) {
+ for (tc = 0; tc != num_tcs; tc++) {
+ hw_atl2_new_rpf_rss_redir_set(self, tc, i,
+ tc *
+ aq_hw_q_per_tc(self) +
+ indirection_table[i]);
+ }
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl2_hw_init_tx_tc_rate_limit(struct aq_hw_s *self)
+{
+ static const u32 max_weight = BIT(HW_ATL2_TPS_DATA_TCTWEIGHT_WIDTH) - 1;
+ /* Scale factor is based on the number of bits in fractional portion */
+ static const u32 scale = BIT(HW_ATL_TPS_DESC_RATE_Y_WIDTH);
+ static const u32 frac_msk = HW_ATL_TPS_DESC_RATE_Y_MSK >>
+ HW_ATL_TPS_DESC_RATE_Y_SHIFT;
+ const u32 link_speed = self->aq_link_status.mbps;
+ struct aq_nic_cfg_s *nic_cfg = self->aq_nic_cfg;
+ unsigned long num_min_rated_tcs = 0;
+ u32 tc_weight[AQ_CFG_TCS_MAX];
+ u32 fixed_max_credit_4b;
+ u32 fixed_max_credit;
+ u8 min_rate_msk = 0;
+ u32 sum_weight = 0;
+ int tc;
+
+ /* By default max_credit is based upon MTU (in units of 64 bytes) */
+ fixed_max_credit = nic_cfg->aq_hw_caps->mtu / 64;
+ /* in units of 4 bytes */
+ fixed_max_credit_4b = nic_cfg->aq_hw_caps->mtu / 4;
+
+ if (link_speed) {
+ min_rate_msk = nic_cfg->tc_min_rate_msk &
+ (BIT(nic_cfg->tcs) - 1);
+ num_min_rated_tcs = hweight8(min_rate_msk);
+ }
+
+ /* First, calculate weights where min_rate is specified */
+ if (num_min_rated_tcs) {
+ for (tc = 0; tc != nic_cfg->tcs; tc++) {
+ if (!nic_cfg->tc_min_rate[tc]) {
+ tc_weight[tc] = 0;
+ continue;
+ }
+
+ tc_weight[tc] = (-1L + link_speed +
+ nic_cfg->tc_min_rate[tc] *
+ max_weight) /
+ link_speed;
+ tc_weight[tc] = min(tc_weight[tc], max_weight);
+ sum_weight += tc_weight[tc];
+ }
+ }
+
+ /* WSP, if min_rate is set for at least one TC.
+ * RR otherwise.
+ */
+ hw_atl2_tps_tx_pkt_shed_data_arb_mode_set(self, min_rate_msk ? 1U : 0U);
+ /* Data TC Arbiter takes precedence over Descriptor TC Arbiter,
+ * leave Descriptor TC Arbiter as RR.
+ */
+ hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
+
+ hw_atl_tps_tx_desc_rate_mode_set(self, nic_cfg->is_qos ? 1U : 0U);
+
+ for (tc = 0; tc != nic_cfg->tcs; tc++) {
+ const u32 en = (nic_cfg->tc_max_rate[tc] != 0) ? 1U : 0U;
+ const u32 desc = AQ_NIC_CFG_TCVEC2RING(nic_cfg, tc, 0);
+ u32 weight, max_credit;
+
+ hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, tc,
+ fixed_max_credit);
+ hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, tc, 0x1E);
+
+ if (num_min_rated_tcs) {
+ weight = tc_weight[tc];
+
+ if (!weight && sum_weight < max_weight)
+ weight = (max_weight - sum_weight) /
+ (nic_cfg->tcs - num_min_rated_tcs);
+ else if (!weight)
+ weight = 0x640;
+
+ max_credit = max(2 * weight, fixed_max_credit_4b);
+ } else {
+ weight = 0x640;
+ max_credit = 0xFFF0;
+ }
+
+ hw_atl2_tps_tx_pkt_shed_tc_data_weight_set(self, tc, weight);
+ hw_atl2_tps_tx_pkt_shed_tc_data_max_credit_set(self, tc,
+ max_credit);
+
+ hw_atl_tps_tx_desc_rate_en_set(self, desc, en);
+
+ if (en) {
+ /* Nominal rate is always 10G */
+ const u32 rate = 10000U * scale /
+ nic_cfg->tc_max_rate[tc];
+ const u32 rate_int = rate >>
+ HW_ATL_TPS_DESC_RATE_Y_WIDTH;
+ const u32 rate_frac = rate & frac_msk;
+
+ hw_atl_tps_tx_desc_rate_x_set(self, desc, rate_int);
+ hw_atl_tps_tx_desc_rate_y_set(self, desc, rate_frac);
+ } else {
+ /* A value of 1 indicates the queue is not
+ * rate controlled.
+ */
+ hw_atl_tps_tx_desc_rate_x_set(self, desc, 1U);
+ hw_atl_tps_tx_desc_rate_y_set(self, desc, 0U);
+ }
+ }
+ for (tc = nic_cfg->tcs; tc != AQ_CFG_TCS_MAX; tc++) {
+ const u32 desc = AQ_NIC_CFG_TCVEC2RING(nic_cfg, tc, 0);
+
+ hw_atl_tps_tx_desc_rate_en_set(self, desc, 0U);
+ hw_atl_tps_tx_desc_rate_x_set(self, desc, 1U);
+ hw_atl_tps_tx_desc_rate_y_set(self, desc, 0U);
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl2_hw_init_tx_path(struct aq_hw_s *self)
+{
+ struct aq_nic_cfg_s *nic_cfg = self->aq_nic_cfg;
+
+ /* Tx TC/RSS number config */
+ hw_atl_tpb_tps_tx_tc_mode_set(self, nic_cfg->tc_mode);
+
+ hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
+ hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
+ hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
+
+ /* Tx interrupts */
+ hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
+
+ /* misc */
+ hw_atl_tdm_tx_dca_en_set(self, 0U);
+ hw_atl_tdm_tx_dca_mode_set(self, 0U);
+
+ hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);
+
+ hw_atl2_tpb_tx_buf_clk_gate_en_set(self, 0U);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static void hw_atl2_hw_init_new_rx_filters(struct aq_hw_s *self)
+{
+ struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ u8 *prio_tc_map = self->aq_nic_cfg->prio_tc_map;
+ u16 action;
+ u8 index;
+ int i;
+
+ /* Action Resolver Table (ART) is used by RPF to decide which action
+ * to take with a packet based upon input tag and tag mask, where:
+ * - input tag is a combination of the 3-bit VLAN Prio (PCP) and
+ * 29-bit concatenation of all tags from filter block;
+ * - tag mask is a mask used for matching against input tag.
+ * The input_tag is compared with all the Requested_tags in the
+ * Record table to find a match. The Action field of the matching
+ * REC entry is used for further processing. If multiple entries
+ * match, the Action field of the lowest-numbered REC entry is used.
+ */
+ hw_atl2_rpf_act_rslvr_section_en_set(self, 0xFFFF);
+ hw_atl2_rpfl2_uc_flr_tag_set(self, HW_ATL2_RPF_TAG_BASE_UC,
+ HW_ATL2_MAC_UC);
+ hw_atl2_rpfl2_bc_flr_tag_set(self, HW_ATL2_RPF_TAG_BASE_UC);
+
+ /* FW reserves the beginning of ART, thus all driver entries must
+ * start from the offset specified in FW caps.
+ */
+ index = priv->art_base_index + HW_ATL2_RPF_L2_PROMISC_OFF_INDEX;
+ hw_atl2_act_rslvr_table_set(self, index, 0,
+ HW_ATL2_RPF_TAG_UC_MASK |
+ HW_ATL2_RPF_TAG_ALLMC_MASK,
+ HW_ATL2_ACTION_DROP);
+
+ index = priv->art_base_index + HW_ATL2_RPF_VLAN_PROMISC_OFF_INDEX;
+ hw_atl2_act_rslvr_table_set(self, index, 0,
+ HW_ATL2_RPF_TAG_VLAN_MASK |
+ HW_ATL2_RPF_TAG_UNTAG_MASK,
+ HW_ATL2_ACTION_DROP);
+
+ /* Configure ART to map a given VLAN Prio (PCP) to the TC index for
+ * RSS redirection table.
+ */
+ for (i = 0; i < 8; i++) {
+ action = HW_ATL2_ACTION_ASSIGN_TC(prio_tc_map[i]);
+
+ index = priv->art_base_index + HW_ATL2_RPF_PCP_TO_TC_INDEX + i;
+ hw_atl2_act_rslvr_table_set(self, index,
+ i << HW_ATL2_RPF_TAG_PCP_OFFSET,
+ HW_ATL2_RPF_TAG_PCP_MASK, action);
+ }
+}
+
+static void hw_atl2_hw_new_rx_filter_vlan_promisc(struct aq_hw_s *self,
+ bool promisc)
+{
+ u16 off_action = (!promisc &&
+ !hw_atl_rpfl2promiscuous_mode_en_get(self)) ?
+ HW_ATL2_ACTION_DROP : HW_ATL2_ACTION_DISABLE;
+ struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ u8 index;
+
+ index = priv->art_base_index + HW_ATL2_RPF_VLAN_PROMISC_OFF_INDEX;
+ hw_atl2_act_rslvr_table_set(self, index, 0,
+ HW_ATL2_RPF_TAG_VLAN_MASK |
+ HW_ATL2_RPF_TAG_UNTAG_MASK, off_action);
+}
+
+static void hw_atl2_hw_new_rx_filter_promisc(struct aq_hw_s *self, bool promisc)
+{
+ u16 off_action = promisc ? HW_ATL2_ACTION_DISABLE : HW_ATL2_ACTION_DROP;
+ struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ bool vlan_promisc_enable;
+ u8 index;
+
+ index = priv->art_base_index + HW_ATL2_RPF_L2_PROMISC_OFF_INDEX;
+ hw_atl2_act_rslvr_table_set(self, index, 0,
+ HW_ATL2_RPF_TAG_UC_MASK |
+ HW_ATL2_RPF_TAG_ALLMC_MASK,
+ off_action);
+
+ /* also update VLAN promisc mode accordingly */
+ vlan_promisc_enable = hw_atl_rpf_vlan_prom_mode_en_get(self);
+ hw_atl2_hw_new_rx_filter_vlan_promisc(self, promisc |
+ vlan_promisc_enable);
+}
+
+static int hw_atl2_act_rslvr_table_set(struct aq_hw_s *self, u8 location,
+ u32 tag, u32 mask, u32 action)
+{
+ u32 val;
+ int err;
+
+ err = readx_poll_timeout_atomic(hw_atl2_sem_act_rslvr_get,
+ self, val, val == 1,
+ 1, 10000U);
+ if (err)
+ return err;
+
+ hw_atl2_rpf_act_rslvr_record_set(self, location, tag, mask,
+ action);
+
+ hw_atl_reg_glb_cpu_sem_set(self, 1, HW_ATL2_FW_SM_ACT_RSLVR);
+
+ return err;
+}
+
+static int hw_atl2_hw_init_rx_path(struct aq_hw_s *self)
+{
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
+ int i;
+
+ /* Rx TC/RSS number config */
+ hw_atl_rpb_rpf_rx_traf_class_mode_set(self, cfg->tc_mode);
+
+ /* Rx flow control */
+ hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);
+
+ hw_atl2_rpf_rss_hash_type_set(self, HW_ATL2_RPF_RSS_HASH_TYPE_ALL);
+
+ /* RSS Ring selection */
+ hw_atl_b0_hw_init_rx_rss_ctrl1(self);
+
+ /* Multicast filters */
+ for (i = HW_ATL2_MAC_MAX; i--;) {
+ hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
+ hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);
+ }
+
+ hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
+ hw_atl_reg_rx_flr_mcst_flr_set(self, HW_ATL_MCAST_FLT_ANY_TO_HOST, 0U);
+
+ /* Vlan filters */
+ hw_atl_rpf_vlan_outer_etht_set(self, ETH_P_8021AD);
+ hw_atl_rpf_vlan_inner_etht_set(self, ETH_P_8021Q);
+
+ hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
+
+ /* Always accept untagged packets */
+ hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
+ hw_atl_rpf_vlan_untagged_act_set(self, 1U);
+
+ hw_atl2_hw_init_new_rx_filters(self);
+
+ /* Rx Interrupts */
+ hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
+
+ hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
+ hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
+
+ hw_atl_rdm_rx_dca_en_set(self, 0U);
+ hw_atl_rdm_rx_dca_mode_set(self, 0U);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl2_hw_init(struct aq_hw_s *self, const u8 *mac_addr)
+{
+ static u32 aq_hw_atl2_igcr_table_[4][2] = {
+ [AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
+ [AQ_HW_IRQ_LEGACY] = { 0x20000080U, 0x20000080U },
+ [AQ_HW_IRQ_MSI] = { 0x20000021U, 0x20000025U },
+ [AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U },
+ };
+
+ struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
+ u8 base_index, count;
+ int err;
+
+ err = hw_atl2_utils_get_action_resolve_table_caps(self, &base_index,
+ &count);
+ if (err)
+ return err;
+
+ priv->art_base_index = 8 * base_index;
+
+ hw_atl2_init_launchtime(self);
+
+ hw_atl2_hw_init_tx_path(self);
+ hw_atl2_hw_init_rx_path(self);
+
+ hw_atl_b0_hw_mac_addr_set(self, mac_addr);
+
+ self->aq_fw_ops->set_link_speed(self, aq_nic_cfg->link_speed_msk);
+ self->aq_fw_ops->set_state(self, MPI_INIT);
+
+ hw_atl2_hw_qos_set(self);
+ hw_atl2_hw_rss_set(self, &aq_nic_cfg->aq_rss);
+ hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);
+
+ hw_atl2_rpf_new_enable_set(self, 1);
+
+ /* Reset link status and read out initial hardware counters */
+ self->aq_link_status.mbps = 0;
+ self->aq_fw_ops->update_stats(self);
+
+ err = aq_hw_err_from_flags(self);
+ if (err < 0)
+ goto err_exit;
+
+ /* Interrupts */
+ hw_atl_reg_irq_glb_ctl_set(self,
+ aq_hw_atl2_igcr_table_[aq_nic_cfg->irq_type]
+ [(aq_nic_cfg->vecs > 1U) ?
+ 1 : 0]);
+
+ hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
+
+ /* Interrupts */
+ hw_atl_reg_gen_irq_map_set(self,
+ ((HW_ATL2_ERR_INT << 0x18) |
+ (1U << 0x1F)) |
+ ((HW_ATL2_ERR_INT << 0x10) |
+ (1U << 0x17)), 0U);
+
+ hw_atl_b0_hw_offload_set(self, aq_nic_cfg);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl2_hw_ring_rx_init(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring,
+ struct aq_ring_param_s *aq_ring_param)
+{
+ return hw_atl_b0_hw_ring_rx_init(self, aq_ring, aq_ring_param);
+}
+
+static int hw_atl2_hw_ring_tx_init(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring,
+ struct aq_ring_param_s *aq_ring_param)
+{
+ return hw_atl_b0_hw_ring_tx_init(self, aq_ring, aq_ring_param);
+}
+
+#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)
+
+static int hw_atl2_hw_packet_filter_set(struct aq_hw_s *self,
+ unsigned int packet_filter)
+{
+ hw_atl2_hw_new_rx_filter_promisc(self, IS_FILTER_ENABLED(IFF_PROMISC));
+
+ return hw_atl_b0_hw_packet_filter_set(self, packet_filter);
+}
+
+#undef IS_FILTER_ENABLED
+
+static int hw_atl2_hw_multicast_list_set(struct aq_hw_s *self,
+ u8 ar_mac
+ [AQ_HW_MULTICAST_ADDRESS_MAX]
+ [ETH_ALEN],
+ u32 count)
+{
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
+ int err = 0;
+
+ if (count > (HW_ATL2_MAC_MAX - HW_ATL2_MAC_MIN)) {
+ err = -EBADRQC;
+ goto err_exit;
+ }
+ for (cfg->mc_list_count = 0U;
+ cfg->mc_list_count < count;
+ ++cfg->mc_list_count) {
+ u32 i = cfg->mc_list_count;
+ u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]);
+ u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
+ (ar_mac[i][4] << 8) | ar_mac[i][5];
+
+ hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL2_MAC_MIN + i);
+
+ hw_atl_rpfl2unicast_dest_addresslsw_set(self, l,
+ HW_ATL2_MAC_MIN + i);
+
+ hw_atl_rpfl2unicast_dest_addressmsw_set(self, h,
+ HW_ATL2_MAC_MIN + i);
+
+ hw_atl2_rpfl2_uc_flr_tag_set(self, 1, HW_ATL2_MAC_MIN + i);
+
+ hw_atl_rpfl2_uc_flr_en_set(self, (cfg->is_mc_list_enabled),
+ HW_ATL2_MAC_MIN + i);
+ }
+
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl2_hw_interrupt_moderation_set(struct aq_hw_s *self)
+{
+ unsigned int i = 0U;
+ u32 itr_tx = 2U;
+ u32 itr_rx = 2U;
+
+ switch (self->aq_nic_cfg->itr) {
+ case AQ_CFG_INTERRUPT_MODERATION_ON:
+ case AQ_CFG_INTERRUPT_MODERATION_AUTO:
+ hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
+ hw_atl_tdm_tdm_intr_moder_en_set(self, 1U);
+ hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
+ hw_atl_rdm_rdm_intr_moder_en_set(self, 1U);
+
+ if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) {
+ /* HW timers are in 2us units */
+ int tx_max_timer = self->aq_nic_cfg->tx_itr / 2;
+ int tx_min_timer = tx_max_timer / 2;
+
+ int rx_max_timer = self->aq_nic_cfg->rx_itr / 2;
+ int rx_min_timer = rx_max_timer / 2;
+
+ tx_max_timer = min(HW_ATL2_INTR_MODER_MAX,
+ tx_max_timer);
+ tx_min_timer = min(HW_ATL2_INTR_MODER_MIN,
+ tx_min_timer);
+ rx_max_timer = min(HW_ATL2_INTR_MODER_MAX,
+ rx_max_timer);
+ rx_min_timer = min(HW_ATL2_INTR_MODER_MIN,
+ rx_min_timer);
+
+ itr_tx |= tx_min_timer << 0x8U;
+ itr_tx |= tx_max_timer << 0x10U;
+ itr_rx |= rx_min_timer << 0x8U;
+ itr_rx |= rx_max_timer << 0x10U;
+ } else {
+ static unsigned int hw_atl2_timers_table_tx_[][2] = {
+ {0xfU, 0xffU}, /* 10Gbit */
+ {0xfU, 0x1ffU}, /* 5Gbit */
+ {0xfU, 0x1ffU}, /* 5Gbit 5GS */
+ {0xfU, 0x1ffU}, /* 2.5Gbit */
+ {0xfU, 0x1ffU}, /* 1Gbit */
+ {0xfU, 0x1ffU}, /* 100Mbit */
+ };
+ static unsigned int hw_atl2_timers_table_rx_[][2] = {
+ {0x6U, 0x38U},/* 10Gbit */
+ {0xCU, 0x70U},/* 5Gbit */
+ {0xCU, 0x70U},/* 5Gbit 5GS */
+ {0x18U, 0xE0U},/* 2.5Gbit */
+ {0x30U, 0x80U},/* 1Gbit */
+ {0x4U, 0x50U},/* 100Mbit */
+ };
+ unsigned int mbps = self->aq_link_status.mbps;
+ unsigned int speed_index;
+
+ speed_index = hw_atl_utils_mbps_2_speed_index(mbps);
+
+ /* Update user visible ITR settings */
+ self->aq_nic_cfg->tx_itr = hw_atl2_timers_table_tx_
+ [speed_index][1] * 2;
+ self->aq_nic_cfg->rx_itr = hw_atl2_timers_table_rx_
+ [speed_index][1] * 2;
+
+ itr_tx |= hw_atl2_timers_table_tx_
+ [speed_index][0] << 0x8U;
+ itr_tx |= hw_atl2_timers_table_tx_
+ [speed_index][1] << 0x10U;
+
+ itr_rx |= hw_atl2_timers_table_rx_
+ [speed_index][0] << 0x8U;
+ itr_rx |= hw_atl2_timers_table_rx_
+ [speed_index][1] << 0x10U;
+ }
+ break;
+ case AQ_CFG_INTERRUPT_MODERATION_OFF:
+ hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
+ hw_atl_tdm_tdm_intr_moder_en_set(self, 0U);
+ hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
+ hw_atl_rdm_rdm_intr_moder_en_set(self, 0U);
+ itr_tx = 0U;
+ itr_rx = 0U;
+ break;
+ }
+
+ for (i = HW_ATL2_RINGS_MAX; i--;) {
+ hw_atl2_reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
+ hw_atl_reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl2_hw_stop(struct aq_hw_s *self)
+{
+ hw_atl_b0_hw_irq_disable(self, HW_ATL2_INT_MASK);
+
+ return 0;
+}
+
+static struct aq_stats_s *hw_atl2_utils_get_hw_stats(struct aq_hw_s *self)
+{
+ return &self->curr_stats;
+}
+
+static int hw_atl2_hw_vlan_set(struct aq_hw_s *self,
+ struct aq_rx_filter_vlan *aq_vlans)
+{
+ struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ u32 queue;
+ u8 index;
+ int i;
+
+ hw_atl_rpf_vlan_prom_mode_en_set(self, 1U);
+
+ for (i = 0; i < HW_ATL_VLAN_MAX_FILTERS; i++) {
+ queue = HW_ATL2_ACTION_ASSIGN_QUEUE(aq_vlans[i].queue);
+
+ hw_atl_rpf_vlan_flr_en_set(self, 0U, i);
+ hw_atl_rpf_vlan_rxq_en_flr_set(self, 0U, i);
+ index = priv->art_base_index + HW_ATL2_RPF_VLAN_USER_INDEX + i;
+ hw_atl2_act_rslvr_table_set(self, index, 0, 0,
+ HW_ATL2_ACTION_DISABLE);
+ if (aq_vlans[i].enable) {
+ hw_atl_rpf_vlan_id_flr_set(self,
+ aq_vlans[i].vlan_id, i);
+ hw_atl_rpf_vlan_flr_act_set(self, 1U, i);
+ hw_atl_rpf_vlan_flr_en_set(self, 1U, i);
+
+ if (aq_vlans[i].queue != 0xFF) {
+ hw_atl_rpf_vlan_rxq_flr_set(self,
+ aq_vlans[i].queue,
+ i);
+ hw_atl_rpf_vlan_rxq_en_flr_set(self, 1U, i);
+
+ hw_atl2_rpf_vlan_flr_tag_set(self, i + 2, i);
+
+ index = priv->art_base_index +
+ HW_ATL2_RPF_VLAN_USER_INDEX + i;
+ hw_atl2_act_rslvr_table_set(self, index,
+ (i + 2) << HW_ATL2_RPF_TAG_VLAN_OFFSET,
+ HW_ATL2_RPF_TAG_VLAN_MASK, queue);
+ } else {
+ hw_atl2_rpf_vlan_flr_tag_set(self, 1, i);
+ }
+ }
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl2_hw_vlan_ctrl(struct aq_hw_s *self, bool enable)
+{
+ /* set promisc in case of disabling the vlan filter */
+ hw_atl_rpf_vlan_prom_mode_en_set(self, !enable);
+ hw_atl2_hw_new_rx_filter_vlan_promisc(self, !enable);
+
+ return aq_hw_err_from_flags(self);
+}
+
+const struct aq_hw_ops hw_atl2_ops = {
+ .hw_soft_reset = hw_atl2_utils_soft_reset,
+ .hw_prepare = hw_atl2_utils_initfw,
+ .hw_set_mac_address = hw_atl_b0_hw_mac_addr_set,
+ .hw_init = hw_atl2_hw_init,
+ .hw_reset = hw_atl2_hw_reset,
+ .hw_start = hw_atl_b0_hw_start,
+ .hw_ring_tx_start = hw_atl_b0_hw_ring_tx_start,
+ .hw_ring_tx_stop = hw_atl_b0_hw_ring_tx_stop,
+ .hw_ring_rx_start = hw_atl_b0_hw_ring_rx_start,
+ .hw_ring_rx_stop = hw_atl_b0_hw_ring_rx_stop,
+ .hw_stop = hw_atl2_hw_stop,
+
+ .hw_ring_tx_xmit = hw_atl_b0_hw_ring_tx_xmit,
+ .hw_ring_tx_head_update = hw_atl_b0_hw_ring_tx_head_update,
+
+ .hw_ring_rx_receive = hw_atl_b0_hw_ring_rx_receive,
+ .hw_ring_rx_fill = hw_atl_b0_hw_ring_rx_fill,
+
+ .hw_irq_enable = hw_atl_b0_hw_irq_enable,
+ .hw_irq_disable = hw_atl_b0_hw_irq_disable,
+ .hw_irq_read = hw_atl_b0_hw_irq_read,
+
+ .hw_ring_rx_init = hw_atl2_hw_ring_rx_init,
+ .hw_ring_tx_init = hw_atl2_hw_ring_tx_init,
+ .hw_packet_filter_set = hw_atl2_hw_packet_filter_set,
+ .hw_filter_vlan_set = hw_atl2_hw_vlan_set,
+ .hw_filter_vlan_ctrl = hw_atl2_hw_vlan_ctrl,
+ .hw_multicast_list_set = hw_atl2_hw_multicast_list_set,
+ .hw_interrupt_moderation_set = hw_atl2_hw_interrupt_moderation_set,
+ .hw_rss_set = hw_atl2_hw_rss_set,
+ .hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set,
+ .hw_tc_rate_limit_set = hw_atl2_hw_init_tx_tc_rate_limit,
+ .hw_get_hw_stats = hw_atl2_utils_get_hw_stats,
+ .hw_get_fw_version = hw_atl2_utils_get_fw_version,
+ .hw_set_offload = hw_atl_b0_hw_offload_set,
+ .hw_set_loopback = hw_atl_b0_set_loopback,
+ .hw_set_fc = hw_atl_b0_set_fc,
+};
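The per-TC rate limiter in hw_atl2_hw_init_tx_tc_rate_limit() above encodes each cap as a fixed-point divider against the fixed 10G nominal rate, split into an integer (X) and a fractional (Y) register pair. A worked sketch of that arithmetic, assuming a hypothetical 2500 Mbps cap, which yields a divider of exactly 4.0; the helper name is illustrative:

/* Illustrative only: mirrors the rate_int/rate_frac computation above.
 * For tc_max_rate_mbps == 2500: rate == 4 * scale, so X == 4, Y == 0.
 */
static void example_rate_divider(u32 tc_max_rate_mbps,
				 u32 *rate_int, u32 *rate_frac)
{
	const u32 scale = BIT(HW_ATL_TPS_DESC_RATE_Y_WIDTH);
	const u32 frac_msk = HW_ATL_TPS_DESC_RATE_Y_MSK >>
			     HW_ATL_TPS_DESC_RATE_Y_SHIFT;
	u32 rate = 10000U * scale / tc_max_rate_mbps;

	*rate_int = rate >> HW_ATL_TPS_DESC_RATE_Y_WIDTH;
	*rate_frac = rate & frac_msk;
}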
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h
new file mode 100644
index 000000000..346f0dc99
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef HW_ATL2_H
+#define HW_ATL2_H
+
+#include "aq_common.h"
+
+extern const struct aq_hw_caps_s hw_atl2_caps_aqc113;
+extern const struct aq_hw_caps_s hw_atl2_caps_aqc115c;
+extern const struct aq_hw_caps_s hw_atl2_caps_aqc116c;
+extern const struct aq_hw_ops hw_atl2_ops;
+
+#endif /* HW_ATL2_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_internal.h
new file mode 100644
index 000000000..5a89bb872
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_internal.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef HW_ATL2_INTERNAL_H
+#define HW_ATL2_INTERNAL_H
+
+#include "hw_atl2_utils.h"
+
+#define HW_ATL2_MTU_JUMBO 16352U
+#define HW_ATL2_MTU 1514U
+
+#define HW_ATL2_TX_RINGS 4U
+#define HW_ATL2_RX_RINGS 4U
+
+#define HW_ATL2_RINGS_MAX 32U
+#define HW_ATL2_TXD_SIZE (16U)
+#define HW_ATL2_RXD_SIZE (16U)
+
+#define HW_ATL2_MAC_UC 0U
+#define HW_ATL2_MAC_MIN 1U
+#define HW_ATL2_MAC_MAX 38U
+
+/* interrupts */
+#define HW_ATL2_ERR_INT 8U
+#define HW_ATL2_INT_MASK (0xFFFFFFFFU)
+
+#define HW_ATL2_TXBUF_MAX 128U
+#define HW_ATL2_RXBUF_MAX 192U
+
+#define HW_ATL2_RSS_REDIRECTION_MAX 64U
+
+#define HW_ATL2_TC_MAX 8U
+#define HW_ATL2_RSS_MAX 8U
+
+#define HW_ATL2_INTR_MODER_MAX 0x1FF
+#define HW_ATL2_INTR_MODER_MIN 0xFF
+
+#define HW_ATL2_MIN_RXD \
+ (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_RXD_MULTIPLE))
+#define HW_ATL2_MIN_TXD \
+ (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_TXD_MULTIPLE))
+
+#define HW_ATL2_MAX_RXD 8184U
+#define HW_ATL2_MAX_TXD 8184U
+
+#define HW_ATL2_FW_SM_ACT_RSLVR 0x3U
+
+#define HW_ATL2_RPF_TAG_UC_OFFSET 0x0
+#define HW_ATL2_RPF_TAG_ALLMC_OFFSET 0x6
+#define HW_ATL2_RPF_TAG_ET_OFFSET 0x7
+#define HW_ATL2_RPF_TAG_VLAN_OFFSET 0xA
+#define HW_ATL2_RPF_TAG_UNTAG_OFFSET 0xE
+#define HW_ATL2_RPF_TAG_L3_V4_OFFSET 0xF
+#define HW_ATL2_RPF_TAG_L3_V6_OFFSET 0x12
+#define HW_ATL2_RPF_TAG_L4_OFFSET 0x15
+#define HW_ATL2_RPF_TAG_L4_FLEX_OFFSET 0x18
+#define HW_ATL2_RPF_TAG_FLEX_OFFSET 0x1B
+#define HW_ATL2_RPF_TAG_PCP_OFFSET 0x1D
+
+#define HW_ATL2_RPF_TAG_UC_MASK (0x0000003F << HW_ATL2_RPF_TAG_UC_OFFSET)
+#define HW_ATL2_RPF_TAG_ALLMC_MASK (0x00000001 << HW_ATL2_RPF_TAG_ALLMC_OFFSET)
+#define HW_ATL2_RPF_TAG_UNTAG_MASK (0x00000001 << HW_ATL2_RPF_TAG_UNTAG_OFFSET)
+#define HW_ATL2_RPF_TAG_VLAN_MASK (0x0000000F << HW_ATL2_RPF_TAG_VLAN_OFFSET)
+#define HW_ATL2_RPF_TAG_ET_MASK (0x00000007 << HW_ATL2_RPF_TAG_ET_OFFSET)
+#define HW_ATL2_RPF_TAG_L3_V4_MASK (0x00000007 << HW_ATL2_RPF_TAG_L3_V4_OFFSET)
+#define HW_ATL2_RPF_TAG_L3_V6_MASK (0x00000007 << HW_ATL2_RPF_TAG_L3_V6_OFFSET)
+#define HW_ATL2_RPF_TAG_L4_MASK (0x00000007 << HW_ATL2_RPF_TAG_L4_OFFSET)
+#define HW_ATL2_RPF_TAG_PCP_MASK (0x00000007 << HW_ATL2_RPF_TAG_PCP_OFFSET)
+
+#define HW_ATL2_RPF_TAG_BASE_UC BIT(HW_ATL2_RPF_TAG_UC_OFFSET)
+#define HW_ATL2_RPF_TAG_BASE_ALLMC BIT(HW_ATL2_RPF_TAG_ALLMC_OFFSET)
+#define HW_ATL2_RPF_TAG_BASE_UNTAG BIT(HW_ATL2_RPF_TAG_UNTAG_OFFSET)
+#define HW_ATL2_RPF_TAG_BASE_VLAN BIT(HW_ATL2_RPF_TAG_VLAN_OFFSET)
+
+enum HW_ATL2_RPF_ART_INDEX {
+ HW_ATL2_RPF_L2_PROMISC_OFF_INDEX,
+ HW_ATL2_RPF_VLAN_PROMISC_OFF_INDEX,
+ HW_ATL2_RPF_L3L4_USER_INDEX = 8,
+ HW_ATL2_RPF_ET_PCP_USER_INDEX = HW_ATL2_RPF_L3L4_USER_INDEX + 16,
+ HW_ATL2_RPF_VLAN_USER_INDEX = HW_ATL2_RPF_ET_PCP_USER_INDEX + 16,
+ HW_ATL2_RPF_PCP_TO_TC_INDEX = HW_ATL2_RPF_VLAN_USER_INDEX +
+ HW_ATL_VLAN_MAX_FILTERS,
+};
+
+#define HW_ATL2_ACTION(ACTION, RSS, INDEX, VALID) \
+ ((((ACTION) & 0x3U) << 8) | \
+ (((RSS) & 0x1U) << 7) | \
+ (((INDEX) & 0x3FU) << 2) | \
+ (((VALID) & 0x1U) << 0))
+
+#define HW_ATL2_ACTION_DROP HW_ATL2_ACTION(0, 0, 0, 1)
+#define HW_ATL2_ACTION_DISABLE HW_ATL2_ACTION(0, 0, 0, 0)
+#define HW_ATL2_ACTION_ASSIGN_QUEUE(QUEUE) HW_ATL2_ACTION(1, 0, (QUEUE), 1)
+#define HW_ATL2_ACTION_ASSIGN_TC(TC) HW_ATL2_ACTION(1, 1, (TC), 1)
+
+enum HW_ATL2_RPF_RSS_HASH_TYPE {
+ HW_ATL2_RPF_RSS_HASH_TYPE_NONE = 0,
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV4 = BIT(0),
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV4_TCP = BIT(1),
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV4_UDP = BIT(2),
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6 = BIT(3),
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_TCP = BIT(4),
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_UDP = BIT(5),
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_EX = BIT(6),
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_EX_TCP = BIT(7),
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_EX_UDP = BIT(8),
+ HW_ATL2_RPF_RSS_HASH_TYPE_ALL = HW_ATL2_RPF_RSS_HASH_TYPE_IPV4 |
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV4_TCP |
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV4_UDP |
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6 |
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_TCP |
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_UDP |
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_EX |
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_EX_TCP |
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_EX_UDP,
+};
+
+#define HW_ATL_MCAST_FLT_ANY_TO_HOST 0x00010FFFU
+
+struct hw_atl2_priv {
+ struct statistics_s last_stats;
+ unsigned int art_base_index;
+};
+
+#endif /* HW_ATL2_INTERNAL_H */
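The HW_ATL2_ACTION() macro above packs a 2-bit action code, an RSS flag, a 6-bit index and a valid bit into a single ART action word. A worked expansion, following directly from the macro arithmetic:

/* Illustrative check of the HW_ATL2_ACTION() bit packing */
static_assert(HW_ATL2_ACTION_ASSIGN_QUEUE(5) ==
	      ((1U << 8) | (5U << 2) | 1U));		/* == 0x115 */
static_assert(HW_ATL2_ACTION_ASSIGN_TC(3) ==
	      ((1U << 8) | (1U << 7) | (3U << 2) | 1U));	/* == 0x18d */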
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.c
new file mode 100644
index 000000000..cd954b11d
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.c
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#include "hw_atl2_llh.h"
+#include "hw_atl2_llh_internal.h"
+#include "aq_hw_utils.h"
+
+void hw_atl2_rpf_redirection_table2_select_set(struct aq_hw_s *aq_hw,
+ u32 select)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_ADR,
+ HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_MSK,
+ HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_SHIFT, select);
+}
+
+void hw_atl2_rpf_rss_hash_type_set(struct aq_hw_s *aq_hw, u32 rss_hash_type)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_ADR,
+ HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_MSK,
+ HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_SHIFT,
+ rss_hash_type);
+}
+
+/* rpf */
+
+void hw_atl2_rpf_new_enable_set(struct aq_hw_s *aq_hw, u32 enable)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_NEW_EN_ADR,
+ HW_ATL2_RPF_NEW_EN_MSK,
+ HW_ATL2_RPF_NEW_EN_SHIFT,
+ enable);
+}
+
+void hw_atl2_rpfl2_uc_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag, u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPFL2UC_TAG_ADR(filter),
+ HW_ATL2_RPFL2UC_TAG_MSK,
+ HW_ATL2_RPFL2UC_TAG_SHIFT,
+ tag);
+}
+
+void hw_atl2_rpfl2_bc_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_L2_BC_TAG_ADR,
+ HW_ATL2_RPF_L2_BC_TAG_MSK,
+ HW_ATL2_RPF_L2_BC_TAG_SHIFT,
+ tag);
+}
+
+void hw_atl2_new_rpf_rss_redir_set(struct aq_hw_s *aq_hw, u32 tc, u32 index,
+ u32 queue)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_RSS_REDIR_ADR(tc, index),
+ HW_ATL2_RPF_RSS_REDIR_MSK(tc),
+ HW_ATL2_RPF_RSS_REDIR_SHIFT(tc),
+ queue);
+}
+
+void hw_atl2_rpf_vlan_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag, u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_VL_TAG_ADR(filter),
+ HW_ATL2_RPF_VL_TAG_MSK,
+ HW_ATL2_RPF_VL_TAG_SHIFT,
+ tag);
+}
+
+/* TX */
+
+void hw_atl2_tpb_tx_tc_q_rand_map_en_set(struct aq_hw_s *aq_hw,
+ const u32 tc_q_rand_map_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_ADR,
+ HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_MSK,
+ HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_SHIFT,
+ tc_q_rand_map_en);
+}
+
+void hw_atl2_tpb_tx_buf_clk_gate_en_set(struct aq_hw_s *aq_hw, u32 clk_gate_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_ADR,
+ HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_MSK,
+ HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_SHIFT,
+ clk_gate_en);
+}
+
+void hw_atl2_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+ u32 tx_intr_moderation_ctl,
+ u32 queue)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL2_TX_INTR_MODERATION_CTL_ADR(queue),
+ tx_intr_moderation_ctl);
+}
+
+void hw_atl2_tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
+ const u32 data_arb_mode)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_TPS_DATA_TC_ARB_MODE_ADR,
+ HW_ATL2_TPS_DATA_TC_ARB_MODE_MSK,
+ HW_ATL2_TPS_DATA_TC_ARB_MODE_SHIFT,
+ data_arb_mode);
+}
+
+void hw_atl2_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
+ const u32 tc,
+ const u32 max_credit)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_TPS_DATA_TCTCREDIT_MAX_ADR(tc),
+ HW_ATL2_TPS_DATA_TCTCREDIT_MAX_MSK,
+ HW_ATL2_TPS_DATA_TCTCREDIT_MAX_SHIFT,
+ max_credit);
+}
+
+void hw_atl2_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
+ const u32 tc,
+ const u32 weight)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_TPS_DATA_TCTWEIGHT_ADR(tc),
+ HW_ATL2_TPS_DATA_TCTWEIGHT_MSK,
+ HW_ATL2_TPS_DATA_TCTWEIGHT_SHIFT,
+ weight);
+}
+
+u32 hw_atl2_get_hw_version(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL2_FPGA_VER_ADR);
+}
+
+void hw_atl2_init_launchtime(struct aq_hw_s *aq_hw)
+{
+ u32 hw_ver = hw_atl2_get_hw_version(aq_hw);
+
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_LT_CTRL_ADR,
+ HW_ATL2_LT_CTRL_CLK_RATIO_MSK,
+ HW_ATL2_LT_CTRL_CLK_RATIO_SHIFT,
+ hw_ver < HW_ATL2_FPGA_VER_U32(1, 0, 0, 0) ?
+ HW_ATL2_LT_CTRL_CLK_RATIO_FULL_SPEED :
+ hw_ver >= HW_ATL2_FPGA_VER_U32(1, 0, 85, 2) ?
+ HW_ATL2_LT_CTRL_CLK_RATIO_HALF_SPEED :
+ HW_ATL2_LT_CTRL_CLK_RATIO_QUATER_SPEED);
+}
+
+/* set action resolver record */
+void hw_atl2_rpf_act_rslvr_record_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 tag, u32 mask, u32 action)
+{
+ aq_hw_write_reg(aq_hw,
+ HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_ADR(location),
+ tag);
+ aq_hw_write_reg(aq_hw,
+ HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_ADR(location),
+ mask);
+ aq_hw_write_reg(aq_hw,
+ HW_ATL2_RPF_ACT_RSLVR_ACTN_ADR(location),
+ action);
+}
+
+void hw_atl2_rpf_act_rslvr_section_en_set(struct aq_hw_s *aq_hw, u32 sections)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_REC_TAB_EN_ADR,
+ HW_ATL2_RPF_REC_TAB_EN_MSK,
+ HW_ATL2_RPF_REC_TAB_EN_SHIFT,
+ sections);
+}
+
+void hw_atl2_mif_shared_buf_get(struct aq_hw_s *aq_hw, int offset, u32 *data,
+ int len)
+{
+ int j = 0;
+ int i;
+
+ for (i = offset; i < offset + len; i++, j++)
+ data[j] = aq_hw_read_reg(aq_hw,
+ HW_ATL2_MIF_SHARED_BUFFER_IN_ADR(i));
+}
+
+void hw_atl2_mif_shared_buf_write(struct aq_hw_s *aq_hw, int offset, u32 *data,
+ int len)
+{
+ int j = 0;
+ int i;
+
+ for (i = offset; i < offset + len; i++, j++)
+ aq_hw_write_reg(aq_hw, HW_ATL2_MIF_SHARED_BUFFER_IN_ADR(i),
+ data[j]);
+}
+
+void hw_atl2_mif_shared_buf_read(struct aq_hw_s *aq_hw, int offset, u32 *data,
+ int len)
+{
+ int j = 0;
+ int i;
+
+ for (i = offset; i < offset + len; i++, j++)
+ data[j] = aq_hw_read_reg(aq_hw,
+ HW_ATL2_MIF_SHARED_BUFFER_OUT_ADR(i));
+}
+
+void hw_atl2_mif_host_finished_write_set(struct aq_hw_s *aq_hw, u32 finish)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_MIF_HOST_FINISHED_WRITE_ADR,
+ HW_ATL2_MIF_HOST_FINISHED_WRITE_MSK,
+ HW_ATL2_MIF_HOST_FINISHED_WRITE_SHIFT,
+ finish);
+}
+
+u32 hw_atl2_mif_mcp_finished_read_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL2_MIF_MCP_FINISHED_READ_ADR,
+ HW_ATL2_MIF_MCP_FINISHED_READ_MSK,
+ HW_ATL2_MIF_MCP_FINISHED_READ_SHIFT);
+}
+
+u32 hw_atl2_mif_mcp_boot_reg_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL2_MIF_BOOT_REG_ADR);
+}
+
+void hw_atl2_mif_mcp_boot_reg_set(struct aq_hw_s *aq_hw, u32 val)
+{
+ return aq_hw_write_reg(aq_hw, HW_ATL2_MIF_BOOT_REG_ADR, val);
+}
+
+u32 hw_atl2_mif_host_req_int_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL2_MCP_HOST_REQ_INT_ADR);
+}
+
+void hw_atl2_mif_host_req_int_clr(struct aq_hw_s *aq_hw, u32 val)
+{
+ return aq_hw_write_reg(aq_hw, HW_ATL2_MCP_HOST_REQ_INT_CLR_ADR,
+ val);
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.h
new file mode 100644
index 000000000..98c7a4621
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef HW_ATL2_LLH_H
+#define HW_ATL2_LLH_H
+
+#include <linux/types.h>
+
+struct aq_hw_s;
+
+/* Set TX Interrupt Moderation Control Register */
+void hw_atl2_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+ u32 tx_intr_moderation_ctl,
+ u32 queue);
+
+/* Set Redirection Table 2 Select */
+void hw_atl2_rpf_redirection_table2_select_set(struct aq_hw_s *aq_hw,
+ u32 select);
+
+/* Set RSS hash type */
+void hw_atl2_rpf_rss_hash_type_set(struct aq_hw_s *aq_hw, u32 rss_hash_type);
+
+/* set new RPF enable */
+void hw_atl2_rpf_new_enable_set(struct aq_hw_s *aq_hw, u32 enable);
+
+/* set l2 unicast filter tag */
+void hw_atl2_rpfl2_uc_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag, u32 filter);
+
+/* set l2 broadcast filter tag */
+void hw_atl2_rpfl2_bc_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag);
+
+/* set new rss redirection table */
+void hw_atl2_new_rpf_rss_redir_set(struct aq_hw_s *aq_hw, u32 tc, u32 index,
+ u32 queue);
+
+/* Set VLAN filter tag */
+void hw_atl2_rpf_vlan_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag, u32 filter);
+
+/* set tx random TC-queue mapping enable bit */
+void hw_atl2_tpb_tx_tc_q_rand_map_en_set(struct aq_hw_s *aq_hw,
+ const u32 tc_q_rand_map_en);
+
+/* set tx buffer clock gate enable */
+void hw_atl2_tpb_tx_buf_clk_gate_en_set(struct aq_hw_s *aq_hw, u32 clk_gate_en);
+
+void hw_atl2_tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
+ const u32 data_arb_mode);
+
+/* set tx packet scheduler tc data max credit */
+void hw_atl2_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
+ const u32 tc,
+ const u32 max_credit);
+
+/* set tx packet scheduler tc data weight */
+void hw_atl2_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
+ const u32 tc,
+ const u32 weight);
+
+u32 hw_atl2_get_hw_version(struct aq_hw_s *aq_hw);
+
+void hw_atl2_init_launchtime(struct aq_hw_s *aq_hw);
+
+/* set action resolver record */
+void hw_atl2_rpf_act_rslvr_record_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 tag, u32 mask, u32 action);
+
+/* set enable action resolver section */
+void hw_atl2_rpf_act_rslvr_section_en_set(struct aq_hw_s *aq_hw, u32 sections);
+
+/* get data from firmware shared input buffer */
+void hw_atl2_mif_shared_buf_get(struct aq_hw_s *aq_hw, int offset, u32 *data,
+ int len);
+
+/* set data into firmware shared input buffer */
+void hw_atl2_mif_shared_buf_write(struct aq_hw_s *aq_hw, int offset, u32 *data,
+ int len);
+
+/* get data from firmware shared output buffer */
+void hw_atl2_mif_shared_buf_read(struct aq_hw_s *aq_hw, int offset, u32 *data,
+ int len);
+
+/* set host finished write shared buffer indication */
+void hw_atl2_mif_host_finished_write_set(struct aq_hw_s *aq_hw, u32 finish);
+
+/* get mcp finished read shared buffer indication */
+u32 hw_atl2_mif_mcp_finished_read_get(struct aq_hw_s *aq_hw);
+
+/* get mcp boot register */
+u32 hw_atl2_mif_mcp_boot_reg_get(struct aq_hw_s *aq_hw);
+
+/* set mcp boot register */
+void hw_atl2_mif_mcp_boot_reg_set(struct aq_hw_s *aq_hw, u32 val);
+
+/* get host interrupt request */
+u32 hw_atl2_mif_host_req_int_get(struct aq_hw_s *aq_hw);
+
+/* clear host interrupt request */
+void hw_atl2_mif_host_req_int_clr(struct aq_hw_s *aq_hw, u32 val);
+
+#endif /* HW_ATL2_LLH_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh_internal.h
new file mode 100644
index 000000000..e34c5cda0
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh_internal.h
@@ -0,0 +1,391 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef HW_ATL2_LLH_INTERNAL_H
+#define HW_ATL2_LLH_INTERNAL_H
+
+/* RX pif_rpf_redir_2_en_i Bitfield Definitions
+ * PORT="pif_rpf_redir_2_en_i"
+ */
+#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_ADR 0x000054C8
+#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_MSK 0x00001000
+#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_MSKN 0xFFFFEFFF
+#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_SHIFT 12
+#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_WIDTH 1
+#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_DEFAULT 0x0
+
+/* RX pif_rpf_rss_hash_type_i Bitfield Definitions
+ */
+#define HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_ADR 0x000054C8
+#define HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_MSK 0x000001FF
+#define HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_MSKN 0xFFFFFE00
+#define HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_SHIFT 0
+#define HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_WIDTH 9
+
+/* rx rpf_new_rpf_en bitfield definitions
+ * preprocessor definitions for the bitfield "rpf_new_rpf_en_i".
+ * port="pif_rpf_new_rpf_en_i
+ */
+
+/* register address for bitfield rpf_new_rpf_en */
+#define HW_ATL2_RPF_NEW_EN_ADR 0x00005104
+/* bitmask for bitfield rpf_new_rpf_en */
+#define HW_ATL2_RPF_NEW_EN_MSK 0x00000800
+/* inverted bitmask for bitfield rpf_new_rpf_en */
+#define HW_ATL2_RPF_NEW_EN_MSKN 0xfffff7ff
+/* lower bit position of bitfield rpf_new_rpf_en */
+#define HW_ATL2_RPF_NEW_EN_SHIFT 11
+/* width of bitfield rpf_new_rpf_en */
+#define HW_ATL2_RPF_NEW_EN_WIDTH 1
+/* default value of bitfield rpf_new_rpf_en */
+#define HW_ATL2_RPF_NEW_EN_DEFAULT 0x0
+
+/* rx l2_uc_req_tag0{f}[5:0] bitfield definitions
+ * preprocessor definitions for the bitfield "l2_uc_req_tag0{f}[5:0]".
+ * parameter: filter {f} | stride size 0x8 | range [0, 37]
+ * port="pif_rpf_l2_uc_req_tag0[5:0]"
+ */
+
+/* register address for bitfield l2_uc_req_tag0{f}[5:0] */
+#define HW_ATL2_RPFL2UC_TAG_ADR(filter) (0x00005114 + (filter) * 0x8)
+/* bitmask for bitfield l2_uc_req_tag0{f}[5:0] */
+#define HW_ATL2_RPFL2UC_TAG_MSK 0x0FC00000
+/* inverted bitmask for bitfield l2_uc_req_tag0{f}[5:0] */
+#define HW_ATL2_RPFL2UC_TAG_MSKN 0xF03FFFFF
+/* lower bit position of bitfield l2_uc_req_tag0{f}[5:0] */
+#define HW_ATL2_RPFL2UC_TAG_SHIFT 22
+/* width of bitfield l2_uc_req_tag0{f}[5:0] */
+#define HW_ATL2_RPFL2UC_TAG_WIDTH 6
+/* default value of bitfield l2_uc_req_tag0{f}[5:0] */
+#define HW_ATL2_RPFL2UC_TAG_DEFAULT 0x0
+
+/* rpf_l2_bc_req_tag[5:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rpf_l2_bc_req_tag[5:0]".
+ * port="pifrpf_l2_bc_req_tag_i[5:0]"
+ */
+
+/* register address for bitfield rpf_l2_bc_req_tag */
+#define HW_ATL2_RPF_L2_BC_TAG_ADR 0x000050F0
+/* bitmask for bitfield rpf_l2_bc_req_tag */
+#define HW_ATL2_RPF_L2_BC_TAG_MSK 0x0000003F
+/* inverted bitmask for bitfield rpf_l2_bc_req_tag */
+#define HW_ATL2_RPF_L2_BC_TAG_MSKN 0xffffffc0
+/* lower bit position of bitfield rpf_l2_bc_req_tag */
+#define HW_ATL2_RPF_L2_BC_TAG_SHIFT 0
+/* width of bitfield rpf_l2_bc_req_tag */
+#define HW_ATL2_RPF_L2_BC_TAG_WIDTH 6
+/* default value of bitfield rpf_l2_bc_req_tag */
+#define HW_ATL2_RPF_L2_BC_TAG_DEFAULT 0x0
+
+/* rx rpf_rss_red1_data_[4:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rpf_rss_red1_data[4:0]".
+ * port="pif_rpf_rss_red1_data_i[4:0]"
+ */
+
+/* register address for bitfield rpf_rss_red1_data[4:0] */
+#define HW_ATL2_RPF_RSS_REDIR_ADR(TC, INDEX) (0x00006200 + \
+ (0x100 * !!((TC) > 3)) + (INDEX) * 4)
+/* bitmask for bitfield rpf_rss_red1_data[4:0] */
+#define HW_ATL2_RPF_RSS_REDIR_MSK(TC) (0x0000001F << (5 * ((TC) % 4)))
+/* lower bit position of bitfield rpf_rss_red1_data[4:0] */
+#define HW_ATL2_RPF_RSS_REDIR_SHIFT(TC) (5 * ((TC) % 4))
+/* width of bitfield rpf_rss_red1_data[4:0] */
+#define HW_ATL2_RPF_RSS_REDIR_WIDTH 5
+/* default value of bitfield rpf_rss_red1_data[4:0] */
+#define HW_ATL2_RPF_RSS_REDIR_DEFAULT 0x0
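
Each 32-bit redirection register packs four 5-bit queue indices, one per TC, and TCs 4-7 live in a second bank 0x100 bytes above the first. As a worked example (illustrative values), TC 5 at table index 10 resolves to:

    HW_ATL2_RPF_RSS_REDIR_ADR(5, 10) == 0x6200 + 0x100 + 10 * 4 == 0x6328
    HW_ATL2_RPF_RSS_REDIR_SHIFT(5)   == 5 * (5 % 4)             == 5
    HW_ATL2_RPF_RSS_REDIR_MSK(5)     == 0x1F << 5               == 0x3E0
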
+
+/* rx vlan_req_tag0{f}[3:0] bitfield definitions
+ * preprocessor definitions for the bitfield "vlan_req_tag0{f}[3:0]".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_vlan_req_tag0[3:0]"
+ */
+
+/* register address for bitfield vlan_req_tag0{f}[3:0] */
+#define HW_ATL2_RPF_VL_TAG_ADR(filter) (0x00005290 + (filter) * 0x4)
+/* bitmask for bitfield vlan_req_tag0{f}[3:0] */
+#define HW_ATL2_RPF_VL_TAG_MSK 0x0000F000
+/* inverted bitmask for bitfield vlan_req_tag0{f}[3:0] */
+#define HW_ATL2_RPF_VL_TAG_MSKN 0xFFFF0FFF
+/* lower bit position of bitfield vlan_req_tag0{f}[3:0] */
+#define HW_ATL2_RPF_VL_TAG_SHIFT 12
+/* width of bitfield vlan_req_tag0{f}[3:0] */
+#define HW_ATL2_RPF_VL_TAG_WIDTH 4
+/* default value of bitfield vlan_req_tag0{f}[3:0] */
+#define HW_ATL2_RPF_VL_TAG_DEFAULT 0x0
+
+/* RX rx_q{Q}_tc_map[2:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "rx_q{Q}_tc_map[2:0]".
+ * Parameter: Queue {Q} | bit-level stride | range [0, 31]
+ * PORT="pif_rx_q0_tc_map_i[2:0]"
+ */
+
+/* Register address for bitfield rx_q{Q}_tc_map[2:0] */
+#define HW_ATL2_RX_Q_TC_MAP_ADR(queue) \
+ (((queue) < 32) ? 0x00005900 + ((queue) / 8) * 4 : 0)
+/* Lower bit position of bitfield rx_q{Q}_tc_map[2:0] */
+#define HW_ATL2_RX_Q_TC_MAP_SHIFT(queue) \
+ (((queue) < 32) ? ((queue) * 4) % 32 : 0)
+/* Width of bitfield rx_q{Q}_tc_map[2:0] */
+#define HW_ATL2_RX_Q_TC_MAP_WIDTH 3
+/* Default value of bitfield rx_q{Q}_tc_map[2:0] */
+#define HW_ATL2_RX_Q_TC_MAP_DEFAULT 0x0
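
These definitions pack eight queues per 32-bit register on 4-bit boundaries (3 TC bits plus one unused bit), so queue 13, for instance, maps to register 0x5904 with shift 20. No MSK macro is provided, so a caller has to build the mask from the width; a minimal sketch (hypothetical helper, not part of the driver):

    static void rx_q_tc_map_set(struct aq_hw_s *aq_hw, u32 queue, u32 tc)
    {
            u32 shift = HW_ATL2_RX_Q_TC_MAP_SHIFT(queue);

            /* 3-bit field: mask built from the WIDTH define */
            aq_hw_write_reg_bit(aq_hw, HW_ATL2_RX_Q_TC_MAP_ADR(queue),
                                GENMASK(shift + HW_ATL2_RX_Q_TC_MAP_WIDTH - 1,
                                        shift),
                                shift, tc);
    }
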
+
+/* tx tx_tc_q_rand_map_en bitfield definitions
+ * preprocessor definitions for the bitfield "tx_tc_q_rand_map_en".
+ * port="pif_tpb_tx_tc_q_rand_map_en_i"
+ */
+
+/* register address for bitfield tx_tc_q_rand_map_en */
+#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_ADR 0x00007900
+/* bitmask for bitfield tx_tc_q_rand_map_en */
+#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_MSK 0x00000200
+/* inverted bitmask for bitfield tx_tc_q_rand_map_en */
+#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_MSKN 0xFFFFFDFF
+/* lower bit position of bitfield tx_tc_q_rand_map_en */
+#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_SHIFT 9
+/* width of bitfield tx_tc_q_rand_map_en */
+#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_WIDTH 1
+/* default value of bitfield tx_tc_q_rand_map_en */
+#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_DEFAULT 0x0
+
+/* tx tx_buffer_clk_gate_en bitfield definitions
+ * preprocessor definitions for the bitfield "tx_buffer_clk_gate_en".
+ * port="pif_tpb_tx_buffer_clk_gate_en_i"
+ */
+
+/* register address for bitfield tx_buffer_clk_gate_en */
+#define HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_ADR 0x00007900
+/* bitmask for bitfield tx_buffer_clk_gate_en */
+#define HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_MSK 0x00000020
+/* inverted bitmask for bitfield tx_buffer_clk_gate_en */
+#define HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_MSKN 0xffffffdf
+/* lower bit position of bitfield tx_buffer_clk_gate_en */
+#define HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_SHIFT 5
+/* width of bitfield tx_buffer_clk_gate_en */
+#define HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_WIDTH 1
+/* default value of bitfield tx_buffer_clk_gate_en */
+#define HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_DEFAULT 0x0
+
+/* tx tx_q_tc_map{q} bitfield definitions
+ * preprocessor definitions for the bitfield "tx_q_tc_map{q}".
+ * parameter: queue {q} | bit-level stride | range [0, 31]
+ * port="pif_tpb_tx_q_tc_map0_i[2:0]"
+ */
+
+/* register address for bitfield tx_q_tc_map{q} */
+#define HW_ATL2_TX_Q_TC_MAP_ADR(queue) \
+ (((queue) < 32) ? 0x0000799C + ((queue) / 4) * 4 : 0)
+/* lower bit position of bitfield tx_q_tc_map{q} */
+#define HW_ATL2_TX_Q_TC_MAP_SHIFT(queue) \
+ (((queue) < 32) ? ((queue) * 8) % 32 : 0)
+/* width of bitfield tx_q_tc_map{q} */
+#define HW_ATL2_TX_Q_TC_MAP_WIDTH 3
+/* default value of bitfield tx_q_tc_map{q} */
+#define HW_ATL2_TX_Q_TC_MAP_DEFAULT 0x0
+
+/* tx data_tc_arb_mode bitfield definitions
+ * preprocessor definitions for the bitfield "data_tc_arb_mode".
+ * port="pif_tps_data_tc_arb_mode_i"
+ */
+
+/* register address for bitfield data_tc_arb_mode */
+#define HW_ATL2_TPS_DATA_TC_ARB_MODE_ADR 0x00007100
+/* bitmask for bitfield data_tc_arb_mode */
+#define HW_ATL2_TPS_DATA_TC_ARB_MODE_MSK 0x00000003
+/* inverted bitmask for bitfield data_tc_arb_mode */
+#define HW_ATL2_TPS_DATA_TC_ARB_MODE_MSKN 0xfffffffc
+/* lower bit position of bitfield data_tc_arb_mode */
+#define HW_ATL2_TPS_DATA_TC_ARB_MODE_SHIFT 0
+/* width of bitfield data_tc_arb_mode */
+#define HW_ATL2_TPS_DATA_TC_ARB_MODE_WIDTH 2
+/* default value of bitfield data_tc_arb_mode */
+#define HW_ATL2_TPS_DATA_TC_ARB_MODE_DEFAULT 0x0
+
+/* tx data_tc{t}_credit_max[f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "data_tc{t}_credit_max[f:0]".
+ * parameter: tc {t} | stride size 0x4 | range [0, 7]
+ * port="pif_tps_data_tc0_credit_max_i[15:0]"
+ */
+
+/* register address for bitfield data_tc{t}_credit_max[f:0] */
+#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_ADR(tc) (0x00007110 + (tc) * 0x4)
+/* bitmask for bitfield data_tc{t}_credit_max[f:0] */
+#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_MSK 0xffff0000
+/* inverted bitmask for bitfield data_tc{t}_credit_max[f:0] */
+#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_MSKN 0x0000ffff
+/* lower bit position of bitfield data_tc{t}_credit_max[f:0] */
+#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_SHIFT 16
+/* width of bitfield data_tc{t}_credit_max[f:0] */
+#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_WIDTH 16
+/* default value of bitfield data_tc{t}_credit_max[f:0] */
+#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_DEFAULT 0x0
+
+/* tx data_tc{t}_weight[e:0] bitfield definitions
+ * preprocessor definitions for the bitfield "data_tc{t}_weight[e:0]".
+ * parameter: tc {t} | stride size 0x4 | range [0, 7]
+ * port="pif_tps_data_tc0_weight_i[14:0]"
+ */
+
+/* register address for bitfield data_tc{t}_weight[e:0] */
+#define HW_ATL2_TPS_DATA_TCTWEIGHT_ADR(tc) (0x00007110 + (tc) * 0x4)
+/* bitmask for bitfield data_tc{t}_weight[e:0] */
+#define HW_ATL2_TPS_DATA_TCTWEIGHT_MSK 0x00007fff
+/* inverted bitmask for bitfield data_tc{t}_weight[e:0] */
+#define HW_ATL2_TPS_DATA_TCTWEIGHT_MSKN 0xffff8000
+/* lower bit position of bitfield data_tc{t}_weight[e:0] */
+#define HW_ATL2_TPS_DATA_TCTWEIGHT_SHIFT 0
+/* width of bitfield data_tc{t}_weight[e:0] */
+#define HW_ATL2_TPS_DATA_TCTWEIGHT_WIDTH 15
+/* default value of bitfield data_tc{t}_weight[e:0] */
+#define HW_ATL2_TPS_DATA_TCTWEIGHT_DEFAULT 0x0
+
+/* tx interrupt moderation control register definitions
+ * Preprocessor definitions for TX Interrupt Moderation Control Register
+ * Base Address: 0x00007c28
+ * Parameter: queue {Q} | stride size 0x4 | range [0, 31]
+ */
+
+#define HW_ATL2_TX_INTR_MODERATION_CTL_ADR(queue) (0x00007c28u + (queue) * 0x40)
+
+/* Launch time control register */
+#define HW_ATL2_LT_CTRL_ADR 0x00007a1c
+
+#define HW_ATL2_LT_CTRL_AVB_LEN_CMP_TRSHLD_MSK 0xFFFF0000
+#define HW_ATL2_LT_CTRL_AVB_LEN_CMP_TRSHLD_SHIFT 16
+
+#define HW_ATL2_LT_CTRL_CLK_RATIO_MSK 0x0000FF00
+#define HW_ATL2_LT_CTRL_CLK_RATIO_SHIFT 8
+#define HW_ATL2_LT_CTRL_CLK_RATIO_QUATER_SPEED 4
+#define HW_ATL2_LT_CTRL_CLK_RATIO_HALF_SPEED 2
+#define HW_ATL2_LT_CTRL_CLK_RATIO_FULL_SPEED 1
+
+#define HW_ATL2_LT_CTRL_25G_MODE_SUPPORT_MSK 0x00000008
+#define HW_ATL2_LT_CTRL_25G_MODE_SUPPORT_SHIFT 3
+
+#define HW_ATL2_LT_CTRL_LINK_SPEED_MSK 0x00000007
+#define HW_ATL2_LT_CTRL_LINK_SPEED_SHIFT 0
+
+/* FPGA VER register */
+#define HW_ATL2_FPGA_VER_ADR 0x000000f4
+#define HW_ATL2_FPGA_VER_U32(mj, mi, bl, rv) \
+ ((((mj) & 0xff) << 24) | \
+ (((mi) & 0xff) << 16) | \
+ (((bl) & 0xff) << 8) | \
+ (((rv) & 0xff) << 0))
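
HW_ATL2_FPGA_VER_U32() packs major/minor/build/revision most significant byte first, so ordinary unsigned comparisons order versions correctly; for instance HW_ATL2_FPGA_VER_U32(1, 0, 85, 2) evaluates to 0x01005502 (85 == 0x55), which is exactly how hw_atl2_init_launchtime() ranks FPGA revisions above.
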
+
+/* ahb_mem_addr{f}[31:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "ahb_mem_addr{f}[31:0]".
+ * Parameter: filter {f} | stride size 0x10 | range [0, 127]
+ * PORT="ahb_mem_addr{f}[31:0]"
+ */
+
+/* Register address for bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_ADR(filter) \
+ (0x00014000u + (filter) * 0x10)
+/* Bitmask for bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_MSK 0xFFFFFFFFu
+/* Inverted bitmask for bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_MSKN 0x00000000u
+/* Lower bit position of bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_SHIFT 0
+/* Width of bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_WIDTH 31
+/* Default value of bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_DEFAULT 0x0
+
+/* Register address for bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_ADR(filter) \
+ (0x00014004u + (filter) * 0x10)
+/* Bitmask for bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_MSK 0xFFFFFFFFu
+/* Inverted bitmask for bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_MSKN 0x00000000u
+/* Lower bit position of bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_SHIFT 0
+/* Width of bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_WIDTH 31
+/* Default value of bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_DEFAULT 0x0
+
+/* Register address for bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_ACTN_ADR(filter) \
+ (0x00014008u + (filter) * 0x10)
+/* Bitmask for bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_ACTN_MSK 0x000007FFu
+/* Inverted bitmask for bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_ACTN_MSKN 0xFFFFF800u
+/* Lower bit position of bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_ACTN_SHIFT 0
+/* Width of bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_ACTN_WIDTH 10
+/* Default value of bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_ACTN_DEFAULT 0x0
+
+/* rpf_rec_tab_en[15:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "rpf_rec_tab_en[15:0]".
+ * PORT="pif_rpf_rec_tab_en[15:0]"
+ */
+/* Register address for bitfield rpf_rec_tab_en[15:0] */
+#define HW_ATL2_RPF_REC_TAB_EN_ADR 0x00006ff0u
+/* Bitmask for bitfield rpf_rec_tab_en[15:0] */
+#define HW_ATL2_RPF_REC_TAB_EN_MSK 0x0000FFFFu
+/* Inverted bitmask for bitfield rpf_rec_tab_en[15:0] */
+#define HW_ATL2_RPF_REC_TAB_EN_MSKN 0xFFFF0000u
+/* Lower bit position of bitfield rpf_rec_tab_en[15:0] */
+#define HW_ATL2_RPF_REC_TAB_EN_SHIFT 0
+/* Width of bitfield rpf_rec_tab_en[15:0] */
+#define HW_ATL2_RPF_REC_TAB_EN_WIDTH 16
+/* Default value of bitfield rpf_rec_tab_en[15:0] */
+#define HW_ATL2_RPF_REC_TAB_EN_DEFAULT 0x0
+
+/* Register address for firmware shared input buffer */
+#define HW_ATL2_MIF_SHARED_BUFFER_IN_ADR(dword) (0x00012000U + (dword) * 0x4U)
+/* Register address for firmware shared output buffer */
+#define HW_ATL2_MIF_SHARED_BUFFER_OUT_ADR(dword) (0x00013000U + (dword) * 0x4U)
+
+/* pif_host_finished_buf_wr_i Bitfield Definitions
+ * Preprocessor definitions for the bitfield "pif_host_finished_buf_wr_i".
+ * PORT="pif_host_finished_buf_wr_i"
+ */
+/* Register address for bitfield pif_host_finished_buf_wr_i */
+#define HW_ATL2_MIF_HOST_FINISHED_WRITE_ADR 0x00000e00u
+/* Bitmask for bitfield pif_host_finished_buf_wr_i */
+#define HW_ATL2_MIF_HOST_FINISHED_WRITE_MSK 0x00000001u
+/* Inverted bitmask for bitfield pif_host_finished_buf_wr_i */
+#define HW_ATL2_MIF_HOST_FINISHED_WRITE_MSKN 0xFFFFFFFEu
+/* Lower bit position of bitfield pif_host_finished_buf_wr_i */
+#define HW_ATL2_MIF_HOST_FINISHED_WRITE_SHIFT 0
+/* Width of bitfield pif_host_finished_buf_wr_i */
+#define HW_ATL2_MIF_HOST_FINISHED_WRITE_WIDTH 1
+/* Default value of bitfield pif_host_finished_buf_wr_i */
+#define HW_ATL2_MIF_HOST_FINISHED_WRITE_DEFAULT 0x0
+
+/* pif_mcp_finished_buf_rd_i Bitfield Definitions
+ * Preprocessor definitions for the bitfield "pif_mcp_finished_buf_rd_i".
+ * PORT="pif_mcp_finished_buf_rd_i"
+ */
+/* Register address for bitfield pif_mcp_finished_buf_rd_i */
+#define HW_ATL2_MIF_MCP_FINISHED_READ_ADR 0x00000e04u
+/* Bitmask for bitfield pif_mcp_finished_buf_rd_i */
+#define HW_ATL2_MIF_MCP_FINISHED_READ_MSK 0x00000001u
+/* Inverted bitmask for bitfield pif_mcp_finished_buf_rd_i */
+#define HW_ATL2_MIF_MCP_FINISHED_READ_MSKN 0xFFFFFFFEu
+/* Lower bit position of bitfield pif_mcp_finished_buf_rd_i */
+#define HW_ATL2_MIF_MCP_FINISHED_READ_SHIFT 0
+/* Width of bitfield pif_mcp_finished_buf_rd_i */
+#define HW_ATL2_MIF_MCP_FINISHED_READ_WIDTH 1
+/* Default value of bitfield pif_mcp_finished_buf_rd_i */
+#define HW_ATL2_MIF_MCP_FINISHED_READ_DEFAULT 0x0
+
+/* Register address for bitfield pif_mcp_boot_reg */
+#define HW_ATL2_MIF_BOOT_REG_ADR 0x00003040u
+
+#define HW_ATL2_MCP_HOST_REQ_INT_READY BIT(0)
+
+#define HW_ATL2_MCP_HOST_REQ_INT_ADR 0x00000F00u
+#define HW_ATL2_MCP_HOST_REQ_INT_SET_ADR 0x00000F04u
+#define HW_ATL2_MCP_HOST_REQ_INT_CLR_ADR 0x00000F08u
+
+#endif /* HW_ATL2_LLH_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.c
new file mode 100644
index 000000000..0fe6257d9
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#include <linux/iopoll.h>
+
+#include "aq_hw_utils.h"
+#include "hw_atl/hw_atl_utils.h"
+#include "hw_atl2_utils.h"
+#include "hw_atl2_llh.h"
+#include "hw_atl2_llh_internal.h"
+
+#define HW_ATL2_FW_VER_1X 0x01000000U
+
+#define AQ_A2_BOOT_STARTED BIT(0x18)
+#define AQ_A2_CRASH_INIT BIT(0x1B)
+#define AQ_A2_BOOT_CODE_FAILED BIT(0x1C)
+#define AQ_A2_FW_INIT_FAILED BIT(0x1D)
+#define AQ_A2_FW_INIT_COMP_SUCCESS BIT(0x1F)
+
+#define AQ_A2_FW_BOOT_FAILED_MASK (AQ_A2_CRASH_INIT | \
+ AQ_A2_BOOT_CODE_FAILED | \
+ AQ_A2_FW_INIT_FAILED)
+#define AQ_A2_FW_BOOT_COMPLETE_MASK (AQ_A2_FW_BOOT_FAILED_MASK | \
+ AQ_A2_FW_INIT_COMP_SUCCESS)
+
+#define AQ_A2_FW_BOOT_REQ_REBOOT BIT(0x0)
+#define AQ_A2_FW_BOOT_REQ_HOST_BOOT BIT(0x8)
+#define AQ_A2_FW_BOOT_REQ_MAC_FAST_BOOT BIT(0xA)
+#define AQ_A2_FW_BOOT_REQ_PHY_FAST_BOOT BIT(0xB)
+
+int hw_atl2_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
+{
+ int err;
+
+ self->fw_ver_actual = hw_atl2_utils_get_fw_version(self);
+
+ if (hw_atl_utils_ver_match(HW_ATL2_FW_VER_1X, self->fw_ver_actual)) {
+ *fw_ops = &aq_a2_fw_ops;
+ } else {
+ aq_pr_err("Bad FW version detected: %x, but continue\n",
+ self->fw_ver_actual);
+ *fw_ops = &aq_a2_fw_ops;
+ }
+ aq_pr_trace("Detect ATL2FW %x\n", self->fw_ver_actual);
+ self->aq_fw_ops = *fw_ops;
+ err = self->aq_fw_ops->init(self);
+
+ self->chip_features |= ATL_HW_CHIP_ANTIGUA;
+
+ return err;
+}
+
+static bool hw_atl2_mcp_boot_complete(struct aq_hw_s *self)
+{
+ u32 rbl_status;
+
+ rbl_status = hw_atl2_mif_mcp_boot_reg_get(self);
+ if (rbl_status & AQ_A2_FW_BOOT_COMPLETE_MASK)
+ return true;
+
+ /* Host boot requested */
+ if (hw_atl2_mif_host_req_int_get(self) & HW_ATL2_MCP_HOST_REQ_INT_READY)
+ return true;
+
+ return false;
+}
+
+int hw_atl2_utils_soft_reset(struct aq_hw_s *self)
+{
+ bool rbl_complete = false;
+ u32 rbl_status = 0;
+ u32 rbl_request;
+ int err;
+
+ hw_atl2_mif_host_req_int_clr(self, 0x01);
+ rbl_request = AQ_A2_FW_BOOT_REQ_REBOOT;
+#ifdef AQ_CFG_FAST_START
+ rbl_request |= AQ_A2_FW_BOOT_REQ_MAC_FAST_BOOT;
+#endif
+ hw_atl2_mif_mcp_boot_reg_set(self, rbl_request);
+
+ /* Wait for RBL boot */
+ err = readx_poll_timeout_atomic(hw_atl2_mif_mcp_boot_reg_get, self,
+ rbl_status,
+ ((rbl_status & AQ_A2_BOOT_STARTED) &&
+ (rbl_status != 0xFFFFFFFFu)),
+ 10, 200000);
+ if (err) {
+ aq_pr_err("Boot code hanged");
+ goto err_exit;
+ }
+
+ err = readx_poll_timeout_atomic(hw_atl2_mcp_boot_complete, self,
+ rbl_complete,
+ rbl_complete,
+ 10, 2000000);
+
+ if (err) {
+ aq_pr_err("FW Restart timed out");
+ goto err_exit;
+ }
+
+ rbl_status = hw_atl2_mif_mcp_boot_reg_get(self);
+
+ if (rbl_status & AQ_A2_FW_BOOT_FAILED_MASK) {
+ err = -EIO;
+ aq_pr_err("FW Restart failed");
+ goto err_exit;
+ }
+
+ if (hw_atl2_mif_host_req_int_get(self) &
+ HW_ATL2_MCP_HOST_REQ_INT_READY) {
+ err = -EIO;
+ aq_pr_err("No FW detected. Dynamic FW load not implemented");
+ goto err_exit;
+ }
+
+ if (self->aq_fw_ops) {
+ err = self->aq_fw_ops->init(self);
+ if (err) {
+ aq_pr_err("FW Init failed");
+ goto err_exit;
+ }
+ }
+
+err_exit:
+ return err;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h
new file mode 100644
index 000000000..6bad64c77
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h
@@ -0,0 +1,636 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef HW_ATL2_UTILS_H
+#define HW_ATL2_UTILS_H
+
+#include "aq_hw.h"
+
+/* F W A P I */
+
+struct link_options_s {
+ u8 link_up:1;
+ u8 link_renegotiate:1;
+ u8 minimal_link_speed:1;
+ u8 internal_loopback:1;
+ u8 external_loopback:1;
+ u8 rate_10M_hd:1;
+ u8 rate_100M_hd:1;
+ u8 rate_1G_hd:1;
+
+ u8 rate_10M:1;
+ u8 rate_100M:1;
+ u8 rate_1G:1;
+ u8 rate_2P5G:1;
+ u8 rate_N2P5G:1;
+ u8 rate_5G:1;
+ u8 rate_N5G:1;
+ u8 rate_10G:1;
+
+ u8 eee_100M:1;
+ u8 eee_1G:1;
+ u8 eee_2P5G:1;
+ u8 eee_5G:1;
+ u8 eee_10G:1;
+ u8 rsvd3:3;
+
+ u8 pause_rx:1;
+ u8 pause_tx:1;
+ u8 rsvd4:1;
+ u8 downshift:1;
+ u8 downshift_retry:4;
+};
+
+struct link_control_s {
+ u8 mode:4;
+ u8 disable_crc_corruption:1;
+ u8 discard_short_frames:1;
+ u8 flow_control_mode:1;
+ u8 disable_length_check:1;
+
+ u8 discard_errored_frames:1;
+ u8 control_frame_enable:1;
+ u8 enable_tx_padding:1;
+ u8 enable_crc_forwarding:1;
+ u8 enable_frame_padding_removal_rx: 1;
+ u8 promiscuous_mode: 1;
+ u8 rsvd:2;
+
+ u16 rsvd2;
+};
+
+struct thermal_shutdown_s {
+ u8 enable:1;
+ u8 warning_enable:1;
+ u8 rsvd:6;
+
+ u8 shutdown_temperature;
+ u8 cold_temperature;
+ u8 warning_temperature;
+};
+
+struct mac_address_s {
+ u8 mac_address[6];
+};
+
+struct mac_address_aligned_s {
+ struct mac_address_s aligned;
+ u16 rsvd;
+};
+
+struct sleep_proxy_s {
+ struct wake_on_lan_s {
+ u8 wake_on_magic_packet:1;
+ u8 wake_on_pattern:1;
+ u8 wake_on_link_up:1;
+ u8 wake_on_link_down:1;
+ u8 wake_on_ping:1;
+ u8 wake_on_timer:1;
+ u8 rsvd:2;
+
+ u8 rsvd2;
+ u16 rsvd3;
+
+ u32 link_up_timeout;
+ u32 link_down_timeout;
+ u32 timer;
+ } wake_on_lan;
+
+ struct {
+ u32 mask[4];
+ u32 crc32;
+ } wake_up_pattern[8];
+
+ struct __packed {
+ u8 arp_responder:1;
+ u8 echo_responder:1;
+ u8 igmp_client:1;
+ u8 echo_truncate:1;
+ u8 address_guard:1;
+ u8 ignore_fragmented:1;
+ u8 rsvd:2;
+
+ u16 echo_max_len;
+ u8 rsvd2;
+ } ipv4_offload;
+
+ u32 ipv4_offload_addr[8];
+ u32 reserved[8];
+
+ struct __packed {
+ u8 ns_responder:1;
+ u8 echo_responder:1;
+ u8 mld_client:1;
+ u8 echo_truncate:1;
+ u8 address_guard:1;
+ u8 rsvd:3;
+
+ u16 echo_max_len;
+ u8 rsvd2;
+ } ipv6_offload;
+
+ u32 ipv6_offload_addr[16][4];
+
+ struct {
+ u16 port[16];
+ } tcp_port_offload;
+
+ struct {
+ u16 port[16];
+ } udp_port_offload;
+
+ struct {
+ u32 retry_count;
+ u32 retry_interval;
+ } ka4_offload;
+
+ struct {
+ u32 timeout;
+ u16 local_port;
+ u16 remote_port;
+ u8 remote_mac_addr[6];
+ u16 rsvd;
+ u32 rsvd2;
+ u32 rsvd3;
+ u16 rsvd4;
+ u16 win_size;
+ u32 seq_num;
+ u32 ack_num;
+ u32 local_ip;
+ u32 remote_ip;
+ } ka4_connection[16];
+
+ struct {
+ u32 retry_count;
+ u32 retry_interval;
+ } ka6_offload;
+
+ struct {
+ u32 timeout;
+ u16 local_port;
+ u16 remote_port;
+ u8 remote_mac_addr[6];
+ u16 rsvd;
+ u32 rsvd2;
+ u32 rsvd3;
+ u16 rsvd4;
+ u16 win_size;
+ u32 seq_num;
+ u32 ack_num;
+ u32 local_ip[4];
+ u32 remote_ip[4];
+ } ka6_connection[16];
+
+ struct {
+ u32 rr_count;
+ u32 rr_buf_len;
+ u32 idx_offset;
+ u32 rr__offset;
+ } mdns_offload;
+};
+
+struct pause_quanta_s {
+ u16 quanta_10M;
+ u16 threshold_10M;
+ u16 quanta_100M;
+ u16 threshold_100M;
+ u16 quanta_1G;
+ u16 threshold_1G;
+ u16 quanta_2P5G;
+ u16 threshold_2P5G;
+ u16 quanta_5G;
+ u16 threshold_5G;
+ u16 quanta_10G;
+ u16 threshold_10G;
+};
+
+struct data_buffer_status_s {
+ u32 data_offset;
+ u32 data_length;
+};
+
+struct device_caps_s {
+ u8 finite_flashless:1;
+ u8 cable_diag:1;
+ u8 ncsi:1;
+ u8 avb:1;
+ u8 rsvd:4;
+
+ u8 rsvd2;
+ u16 rsvd3;
+ u32 rsvd4;
+};
+
+struct version_s {
+ struct bundle_version_t {
+ u8 major;
+ u8 minor;
+ u16 build;
+ } bundle;
+ struct mac_version_t {
+ u8 major;
+ u8 minor;
+ u16 build;
+ } mac;
+ struct phy_version_t {
+ u8 major;
+ u8 minor;
+ u16 build;
+ } phy;
+ u32 drv_iface_ver:4;
+ u32 rsvd:28;
+};
+
+struct link_status_s {
+ u8 link_state:4;
+ u8 link_rate:4;
+
+ u8 pause_tx:1;
+ u8 pause_rx:1;
+ u8 eee:1;
+ u8 duplex:1;
+ u8 rsvd:4;
+
+ u16 rsvd2;
+};
+
+struct wol_status_s {
+ u8 wake_count;
+ u8 wake_reason;
+
+ u16 wake_up_packet_length :12;
+ u16 wake_up_pattern_number :3;
+ u16 rsvd:1;
+
+ u32 wake_up_packet[379];
+};
+
+struct mac_health_monitor_s {
+ u8 mac_ready:1;
+ u8 mac_fault:1;
+ u8 mac_flashless_finished:1;
+ u8 rsvd:5;
+
+ u8 mac_temperature;
+ u16 mac_heart_beat;
+ u16 mac_fault_code;
+ u16 rsvd2;
+};
+
+struct phy_health_monitor_s {
+ u8 phy_ready:1;
+ u8 phy_fault:1;
+ u8 phy_hot_warning:1;
+ u8 rsvd:5;
+
+ u8 phy_temperature;
+ u16 phy_heart_beat;
+ u16 phy_fault_code;
+ u16 rsvd2;
+};
+
+struct device_link_caps_s {
+ u8 rsvd:3;
+ u8 internal_loopback:1;
+ u8 external_loopback:1;
+ u8 rate_10M_hd:1;
+ u8 rate_100M_hd:1;
+ u8 rate_1G_hd:1;
+
+ u8 rate_10M:1;
+ u8 rate_100M:1;
+ u8 rate_1G:1;
+ u8 rate_2P5G:1;
+ u8 rate_N2P5G:1;
+ u8 rate_5G:1;
+ u8 rate_N5G:1;
+ u8 rate_10G:1;
+
+ u8 rsvd3:1;
+ u8 eee_100M:1;
+ u8 eee_1G:1;
+ u8 eee_2P5G:1;
+ u8 rsvd4:1;
+ u8 eee_5G:1;
+ u8 rsvd5:1;
+ u8 eee_10G:1;
+
+ u8 pause_rx:1;
+ u8 pause_tx:1;
+ u8 pfc:1;
+ u8 downshift:1;
+ u8 downshift_retry:4;
+};
+
+struct sleep_proxy_caps_s {
+ u8 ipv4_offload:1;
+ u8 ipv6_offload:1;
+ u8 tcp_port_offload:1;
+ u8 udp_port_offload:1;
+ u8 ka4_offload:1;
+ u8 ka6_offload:1;
+ u8 mdns_offload:1;
+ u8 wake_on_ping:1;
+
+ u8 wake_on_magic_packet:1;
+ u8 wake_on_pattern:1;
+ u8 wake_on_timer:1;
+ u8 wake_on_link:1;
+ u8 wake_patterns_count:4;
+
+ u8 ipv4_count;
+ u8 ipv6_count;
+
+ u8 tcp_port_offload_count;
+ u8 udp_port_offload_count;
+
+ u8 tcp4_ka_count;
+ u8 tcp6_ka_count;
+
+ u8 igmp_offload:1;
+ u8 mld_offload:1;
+ u8 rsvd:6;
+
+ u8 rsvd2;
+ u16 rsvd3;
+};
+
+struct lkp_link_caps_s {
+ u8 rsvd:5;
+ u8 rate_10M_hd:1;
+ u8 rate_100M_hd:1;
+ u8 rate_1G_hd:1;
+
+ u8 rate_10M:1;
+ u8 rate_100M:1;
+ u8 rate_1G:1;
+ u8 rate_2P5G:1;
+ u8 rate_N2P5G:1;
+ u8 rate_5G:1;
+ u8 rate_N5G:1;
+ u8 rate_10G:1;
+
+ u8 rsvd2:1;
+ u8 eee_100M:1;
+ u8 eee_1G:1;
+ u8 eee_2P5G:1;
+ u8 rsvd3:1;
+ u8 eee_5G:1;
+ u8 rsvd4:1;
+ u8 eee_10G:1;
+
+ u8 pause_rx:1;
+ u8 pause_tx:1;
+ u8 rsvd5:6;
+};
+
+struct core_dump_s {
+ u32 reg0;
+ u32 reg1;
+ u32 reg2;
+
+ u32 hi;
+ u32 lo;
+
+ u32 regs[32];
+};
+
+struct trace_s {
+ u32 sync_counter;
+ u32 mem_buffer[0x1ff];
+};
+
+struct cable_diag_control_s {
+ u8 toggle :1;
+ u8 rsvd:7;
+
+ u8 wait_timeout_sec;
+ u16 rsvd2;
+};
+
+struct cable_diag_lane_data_s {
+ u8 result_code;
+ u8 dist;
+ u8 far_dist;
+ u8 rsvd;
+};
+
+struct cable_diag_status_s {
+ struct cable_diag_lane_data_s lane_data[4];
+ u8 transact_id;
+ u8 status:4;
+ u8 rsvd:4;
+ u16 rsvd2;
+};
+
+struct statistics_a0_s {
+ struct {
+ u32 link_up;
+ u32 link_down;
+ } link;
+
+ struct {
+ u64 tx_unicast_octets;
+ u64 tx_multicast_octets;
+ u64 tx_broadcast_octets;
+ u64 rx_unicast_octets;
+ u64 rx_multicast_octets;
+ u64 rx_broadcast_octets;
+
+ u32 tx_unicast_frames;
+ u32 tx_multicast_frames;
+ u32 tx_broadcast_frames;
+ u32 tx_errors;
+
+ u32 rx_unicast_frames;
+ u32 rx_multicast_frames;
+ u32 rx_broadcast_frames;
+ u32 rx_dropped_frames;
+ u32 rx_error_frames;
+
+ u32 tx_good_frames;
+ u32 rx_good_frames;
+ u32 reserve_fw_gap;
+ } msm;
+ u32 main_loop_cycles;
+ u32 reserve_fw_gap;
+};
+
+struct __packed statistics_b0_s {
+ u64 rx_good_octets;
+ u64 rx_pause_frames;
+ u64 rx_good_frames;
+ u64 rx_errors;
+ u64 rx_unicast_frames;
+ u64 rx_multicast_frames;
+ u64 rx_broadcast_frames;
+
+ u64 tx_good_octets;
+ u64 tx_pause_frames;
+ u64 tx_good_frames;
+ u64 tx_errors;
+ u64 tx_unicast_frames;
+ u64 tx_multicast_frames;
+ u64 tx_broadcast_frames;
+
+ u32 main_loop_cycles;
+};
+
+struct __packed statistics_s {
+ union __packed {
+ struct statistics_a0_s a0;
+ struct statistics_b0_s b0;
+ };
+};
+
+struct filter_caps_s {
+ u8 l2_filters_base_index:6;
+ u8 flexible_filter_mask:2;
+ u8 l2_filter_count;
+ u8 ethertype_filter_base_index;
+ u8 ethertype_filter_count;
+
+ u8 vlan_filter_base_index;
+ u8 vlan_filter_count;
+ u8 l3_ip4_filter_base_index:4;
+ u8 l3_ip4_filter_count:4;
+ u8 l3_ip6_filter_base_index:4;
+ u8 l3_ip6_filter_count:4;
+
+ u8 l4_filter_base_index:4;
+ u8 l4_filter_count:4;
+ u8 l4_flex_filter_base_index:4;
+ u8 l4_flex_filter_count:4;
+ u8 rslv_tbl_base_index;
+ u8 rslv_tbl_count;
+};
+
+struct request_policy_s {
+ struct {
+ u8 all:1;
+ u8 mcast:1;
+ u8 rx_queue_tc_index:5;
+ u8 queue_or_tc:1;
+ } promisc;
+
+ struct {
+ u8 accept:1;
+ u8 rsvd:1;
+ u8 rx_queue_tc_index:5;
+ u8 queue_or_tc:1;
+ } bcast;
+
+ struct {
+ u8 accept:1;
+ u8 rsvd:1;
+ u8 rx_queue_tc_index:5;
+ u8 queue_or_tc:1;
+ } mcast;
+
+ u8 rsvd:8;
+};
+
+struct fw_interface_in {
+ u32 mtu;
+ u32 rsvd1;
+ struct mac_address_aligned_s mac_address;
+ struct link_control_s link_control;
+ u32 rsvd2;
+ struct link_options_s link_options;
+ u32 rsvd3;
+ struct thermal_shutdown_s thermal_shutdown;
+ u32 rsvd4;
+ struct sleep_proxy_s sleep_proxy;
+ u32 rsvd5;
+ struct pause_quanta_s pause_quanta[8];
+ struct cable_diag_control_s cable_diag_control;
+ u32 rsvd6;
+ struct data_buffer_status_s data_buffer_status;
+ u32 rsvd7;
+ struct request_policy_s request_policy;
+};
+
+struct transaction_counter_s {
+ u16 transaction_cnt_a;
+ u16 transaction_cnt_b;
+};
+
+struct management_status_s {
+ struct mac_address_s mac_address;
+ u16 vlan;
+
+ struct {
+ u32 enable : 1;
+ u32 rsvd:31;
+ } flags;
+
+ u32 rsvd1;
+ u32 rsvd2;
+ u32 rsvd3;
+ u32 rsvd4;
+ u32 rsvd5;
+};
+
+struct __packed fw_interface_out {
+ struct transaction_counter_s transaction_id;
+ struct version_s version;
+ struct link_status_s link_status;
+ struct wol_status_s wol_status;
+ u32 rsvd;
+ u32 rsvd2;
+ struct mac_health_monitor_s mac_health_monitor;
+ u32 rsvd3;
+ u32 rsvd4;
+ struct phy_health_monitor_s phy_health_monitor;
+ u32 rsvd5;
+ u32 rsvd6;
+ struct cable_diag_status_s cable_diag_status;
+ u32 rsvd7;
+ struct device_link_caps_s device_link_caps;
+ u32 rsvd8;
+ struct sleep_proxy_caps_s sleep_proxy_caps;
+ u32 rsvd9;
+ struct lkp_link_caps_s lkp_link_caps;
+ u32 rsvd10;
+ struct core_dump_s core_dump;
+ u32 rsvd11;
+ struct statistics_s stats;
+ struct filter_caps_s filter_caps;
+ struct device_caps_s device_caps;
+ u32 rsvd13;
+ struct management_status_s management_status;
+ u32 reserve[21];
+ struct trace_s trace;
+};
+
+#define AQ_A2_FW_LINK_RATE_INVALID 0
+#define AQ_A2_FW_LINK_RATE_10M 1
+#define AQ_A2_FW_LINK_RATE_100M 2
+#define AQ_A2_FW_LINK_RATE_1G 3
+#define AQ_A2_FW_LINK_RATE_2G5 4
+#define AQ_A2_FW_LINK_RATE_5G 5
+#define AQ_A2_FW_LINK_RATE_10G 6
+
+#define AQ_HOST_MODE_INVALID 0U
+#define AQ_HOST_MODE_ACTIVE 1U
+#define AQ_HOST_MODE_SLEEP_PROXY 2U
+#define AQ_HOST_MODE_LOW_POWER 3U
+#define AQ_HOST_MODE_SHUTDOWN 4U
+
+#define AQ_A2_FW_INTERFACE_A0 0
+#define AQ_A2_FW_INTERFACE_B0 1
+
+int hw_atl2_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops);
+
+int hw_atl2_utils_soft_reset(struct aq_hw_s *self);
+
+u32 hw_atl2_utils_get_fw_version(struct aq_hw_s *self);
+
+int hw_atl2_utils_get_action_resolve_table_caps(struct aq_hw_s *self,
+ u8 *base_index, u8 *count);
+
+extern const struct aq_fw_ops aq_a2_fw_ops;
+
+#endif /* HW_ATL2_UTILS_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
new file mode 100644
index 000000000..58d426dda
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
@@ -0,0 +1,616 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#include <linux/iopoll.h>
+
+#include "aq_hw.h"
+#include "aq_hw_utils.h"
+#include "aq_nic.h"
+#include "hw_atl/hw_atl_llh.h"
+#include "hw_atl2_utils.h"
+#include "hw_atl2_llh.h"
+#include "hw_atl2_internal.h"
+
+#define AQ_A2_FW_READ_TRY_MAX 1000
+
+#define hw_atl2_shared_buffer_write(HW, ITEM, VARIABLE) \
+{\
+ BUILD_BUG_ON_MSG((offsetof(struct fw_interface_in, ITEM) % \
+ sizeof(u32)) != 0,\
+ "Unaligned write " # ITEM);\
+ BUILD_BUG_ON_MSG((sizeof(VARIABLE) % sizeof(u32)) != 0,\
+ "Unaligned write length " # ITEM);\
+ hw_atl2_mif_shared_buf_write(HW,\
+ (offsetof(struct fw_interface_in, ITEM) / sizeof(u32)),\
+ (u32 *)&(VARIABLE), sizeof(VARIABLE) / sizeof(u32));\
+}
+
+#define hw_atl2_shared_buffer_get(HW, ITEM, VARIABLE) \
+{\
+ BUILD_BUG_ON_MSG((offsetof(struct fw_interface_in, ITEM) % \
+ sizeof(u32)) != 0,\
+ "Unaligned get " # ITEM);\
+ BUILD_BUG_ON_MSG((sizeof(VARIABLE) % sizeof(u32)) != 0,\
+ "Unaligned get length " # ITEM);\
+ hw_atl2_mif_shared_buf_get(HW, \
+ (offsetof(struct fw_interface_in, ITEM) / sizeof(u32)),\
+ (u32 *)&(VARIABLE), \
+ sizeof(VARIABLE) / sizeof(u32));\
+}
+
+/* This should never be used on non-atomic fields;
+ * treat any read wider than a u32 as non-atomic.
+ */
+#define hw_atl2_shared_buffer_read(HW, ITEM, VARIABLE) \
+{\
+ BUILD_BUG_ON_MSG((offsetof(struct fw_interface_out, ITEM) % \
+ sizeof(u32)) != 0,\
+ "Unaligned read " # ITEM);\
+ BUILD_BUG_ON_MSG((sizeof(VARIABLE) % sizeof(u32)) != 0,\
+ "Unaligned read length " # ITEM);\
+ BUILD_BUG_ON_MSG(sizeof(VARIABLE) > sizeof(u32),\
+ "Non atomic read " # ITEM);\
+ hw_atl2_mif_shared_buf_read(HW, \
+ (offsetof(struct fw_interface_out, ITEM) / sizeof(u32)),\
+ (u32 *)&(VARIABLE), sizeof(VARIABLE) / sizeof(u32));\
+}
+
+#define hw_atl2_shared_buffer_read_safe(HW, ITEM, DATA) \
+({\
+ BUILD_BUG_ON_MSG((offsetof(struct fw_interface_out, ITEM) % \
+ sizeof(u32)) != 0,\
+ "Unaligned read_safe " # ITEM);\
+ BUILD_BUG_ON_MSG((sizeof(((struct fw_interface_out *)0)->ITEM) % \
+ sizeof(u32)) != 0,\
+ "Unaligned read_safe length " # ITEM);\
+ hw_atl2_shared_buffer_read_block((HW), \
+ (offsetof(struct fw_interface_out, ITEM) / sizeof(u32)),\
+ sizeof(((struct fw_interface_out *)0)->ITEM) / sizeof(u32),\
+ (DATA));\
+})
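
All four accessors derive the dword offset from the shared struct layout via offsetof(), and the BUILD_BUG_ONs reject any field that is not 32-bit aligned and a multiple of 32 bits in size. For illustration (not driver code), given the fw_interface_in layout above (mtu at 0, rsvd1 at 4, mac_address at 8 occupying 8 bytes, link_control at 16), hw_atl2_shared_buffer_write(self, link_control, lc) boils down to:

    hw_atl2_mif_shared_buf_write(self,
            offsetof(struct fw_interface_in, link_control) / sizeof(u32),
            /* == 16 / 4 == dword 4 */
            (u32 *)&lc, sizeof(lc) / sizeof(u32) /* == 1 */);
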
+
+static int hw_atl2_shared_buffer_read_block(struct aq_hw_s *self,
+ u32 offset, u32 dwords, void *data)
+{
+ struct transaction_counter_s tid1, tid2;
+ int cnt = 0;
+
+ do {
+ do {
+ hw_atl2_shared_buffer_read(self, transaction_id, tid1);
+ cnt++;
+ if (cnt > AQ_A2_FW_READ_TRY_MAX)
+ return -ETIME;
+ if (tid1.transaction_cnt_a != tid1.transaction_cnt_b)
+ mdelay(1);
+ } while (tid1.transaction_cnt_a != tid1.transaction_cnt_b);
+
+ hw_atl2_mif_shared_buf_read(self, offset, (u32 *)data, dwords);
+
+ hw_atl2_shared_buffer_read(self, transaction_id, tid2);
+
+ cnt++;
+ if (cnt > AQ_A2_FW_READ_TRY_MAX)
+ return -ETIME;
+ } while (tid2.transaction_cnt_a != tid2.transaction_cnt_b ||
+ tid1.transaction_cnt_a != tid2.transaction_cnt_a);
+
+ return 0;
+}
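
hw_atl2_shared_buffer_read_block() uses transaction_id as a seqlock-style consistency check. The assumed firmware-side convention (a sketch, not code from this driver) is:

    /* FW: announce an update, change the buffer, then mark it stable */
    tid.transaction_cnt_a++;
    /* ... rewrite fw_interface_out contents ... */
    tid.transaction_cnt_b = tid.transaction_cnt_a;

The host therefore only trusts a bulk read when the counter pair is equal both before and after the copy and the two snapshots match (tid1.transaction_cnt_a == tid2.transaction_cnt_a); anything else means the firmware rewrote the buffer mid-read, so the loop retries, giving up with -ETIME after AQ_A2_FW_READ_TRY_MAX attempts.
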
+
+static inline int hw_atl2_shared_buffer_finish_ack(struct aq_hw_s *self)
+{
+ u32 val;
+ int err;
+
+ hw_atl2_mif_host_finished_write_set(self, 1U);
+ err = readx_poll_timeout_atomic(hw_atl2_mif_mcp_finished_read_get,
+ self, val, val == 0U,
+ 100, 100000U);
+ WARN(err, "hw_atl2_shared_buffer_finish_ack");
+
+ return err;
+}
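
This ack is the doorbell of the whole FW API: the host raises the "finished write" bit and polls until the MCP drops the "finished read" indication, confirming the input buffer was consumed. Every configuration setter below follows the same get/modify/write/ack shape; a generic sketch (some_field and value are placeholders, not real fields):

    hw_atl2_shared_buffer_get(self, link_options, link_options);
    link_options.some_field = value;                /* illustrative */
    hw_atl2_shared_buffer_write(self, link_options, link_options);
    err = hw_atl2_shared_buffer_finish_ack(self);
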
+
+static int aq_a2_fw_init(struct aq_hw_s *self)
+{
+ struct link_control_s link_control;
+ u32 mtu;
+ u32 val;
+ int err;
+
+ hw_atl2_shared_buffer_get(self, link_control, link_control);
+ link_control.mode = AQ_HOST_MODE_ACTIVE;
+ hw_atl2_shared_buffer_write(self, link_control, link_control);
+
+ hw_atl2_shared_buffer_get(self, mtu, mtu);
+ mtu = HW_ATL2_MTU_JUMBO;
+ hw_atl2_shared_buffer_write(self, mtu, mtu);
+
+ hw_atl2_mif_host_finished_write_set(self, 1U);
+ err = readx_poll_timeout_atomic(hw_atl2_mif_mcp_finished_read_get,
+ self, val, val == 0U,
+ 100, 5000000U);
+ WARN(err, "hw_atl2_shared_buffer_finish_ack");
+
+ return err;
+}
+
+static int aq_a2_fw_deinit(struct aq_hw_s *self)
+{
+ struct link_control_s link_control;
+
+ hw_atl2_shared_buffer_get(self, link_control, link_control);
+ link_control.mode = AQ_HOST_MODE_SHUTDOWN;
+ hw_atl2_shared_buffer_write(self, link_control, link_control);
+
+ return hw_atl2_shared_buffer_finish_ack(self);
+}
+
+static void a2_link_speed_mask2fw(u32 speed,
+ struct link_options_s *link_options)
+{
+ link_options->rate_10G = !!(speed & AQ_NIC_RATE_10G);
+ link_options->rate_5G = !!(speed & AQ_NIC_RATE_5G);
+ link_options->rate_N5G = link_options->rate_5G;
+ link_options->rate_2P5G = !!(speed & AQ_NIC_RATE_2G5);
+ link_options->rate_N2P5G = link_options->rate_2P5G;
+ link_options->rate_1G = !!(speed & AQ_NIC_RATE_1G);
+ link_options->rate_100M = !!(speed & AQ_NIC_RATE_100M);
+ link_options->rate_10M = !!(speed & AQ_NIC_RATE_10M);
+
+ link_options->rate_1G_hd = !!(speed & AQ_NIC_RATE_1G_HALF);
+ link_options->rate_100M_hd = !!(speed & AQ_NIC_RATE_100M_HALF);
+ link_options->rate_10M_hd = !!(speed & AQ_NIC_RATE_10M_HALF);
+}
+
+static u32 a2_fw_dev_to_eee_mask(struct device_link_caps_s *device_link_caps)
+{
+ u32 rate = 0;
+
+ if (device_link_caps->eee_10G)
+ rate |= AQ_NIC_RATE_EEE_10G;
+ if (device_link_caps->eee_5G)
+ rate |= AQ_NIC_RATE_EEE_5G;
+ if (device_link_caps->eee_2P5G)
+ rate |= AQ_NIC_RATE_EEE_2G5;
+ if (device_link_caps->eee_1G)
+ rate |= AQ_NIC_RATE_EEE_1G;
+ if (device_link_caps->eee_100M)
+ rate |= AQ_NIC_RATE_EEE_100M;
+
+ return rate;
+}
+
+static u32 a2_fw_lkp_to_mask(struct lkp_link_caps_s *lkp_link_caps)
+{
+ u32 rate = 0;
+
+ if (lkp_link_caps->rate_10G)
+ rate |= AQ_NIC_RATE_10G;
+ if (lkp_link_caps->rate_5G)
+ rate |= AQ_NIC_RATE_5G;
+ if (lkp_link_caps->rate_2P5G)
+ rate |= AQ_NIC_RATE_2G5;
+ if (lkp_link_caps->rate_1G)
+ rate |= AQ_NIC_RATE_1G;
+ if (lkp_link_caps->rate_1G_hd)
+ rate |= AQ_NIC_RATE_1G_HALF;
+ if (lkp_link_caps->rate_100M)
+ rate |= AQ_NIC_RATE_100M;
+ if (lkp_link_caps->rate_100M_hd)
+ rate |= AQ_NIC_RATE_100M_HALF;
+ if (lkp_link_caps->rate_10M)
+ rate |= AQ_NIC_RATE_10M;
+ if (lkp_link_caps->rate_10M_hd)
+ rate |= AQ_NIC_RATE_10M_HALF;
+
+ if (lkp_link_caps->eee_10G)
+ rate |= AQ_NIC_RATE_EEE_10G;
+ if (lkp_link_caps->eee_5G)
+ rate |= AQ_NIC_RATE_EEE_5G;
+ if (lkp_link_caps->eee_2P5G)
+ rate |= AQ_NIC_RATE_EEE_2G5;
+ if (lkp_link_caps->eee_1G)
+ rate |= AQ_NIC_RATE_EEE_1G;
+ if (lkp_link_caps->eee_100M)
+ rate |= AQ_NIC_RATE_EEE_100M;
+
+ return rate;
+}
+
+static int aq_a2_fw_set_link_speed(struct aq_hw_s *self, u32 speed)
+{
+ struct link_options_s link_options;
+
+ hw_atl2_shared_buffer_get(self, link_options, link_options);
+ link_options.link_up = 1U;
+ a2_link_speed_mask2fw(speed, &link_options);
+ hw_atl2_shared_buffer_write(self, link_options, link_options);
+
+ return hw_atl2_shared_buffer_finish_ack(self);
+}
+
+static void aq_a2_fw_set_mpi_flow_control(struct aq_hw_s *self,
+ struct link_options_s *link_options)
+{
+ u32 flow_control = self->aq_nic_cfg->fc.req;
+
+ link_options->pause_rx = !!(flow_control & AQ_NIC_FC_RX);
+ link_options->pause_tx = !!(flow_control & AQ_NIC_FC_TX);
+}
+
+static void aq_a2_fw_upd_eee_rate_bits(struct aq_hw_s *self,
+ struct link_options_s *link_options,
+ u32 eee_speeds)
+{
+ link_options->eee_10G = !!(eee_speeds & AQ_NIC_RATE_EEE_10G);
+ link_options->eee_5G = !!(eee_speeds & AQ_NIC_RATE_EEE_5G);
+ link_options->eee_2P5G = !!(eee_speeds & AQ_NIC_RATE_EEE_2G5);
+ link_options->eee_1G = !!(eee_speeds & AQ_NIC_RATE_EEE_1G);
+ link_options->eee_100M = !!(eee_speeds & AQ_NIC_RATE_EEE_100M);
+}
+
+static int aq_a2_fw_set_state(struct aq_hw_s *self,
+ enum hal_atl_utils_fw_state_e state)
+{
+ struct link_options_s link_options;
+
+ hw_atl2_shared_buffer_get(self, link_options, link_options);
+
+ switch (state) {
+ case MPI_INIT:
+ link_options.link_up = 1U;
+ aq_a2_fw_upd_eee_rate_bits(self, &link_options,
+ self->aq_nic_cfg->eee_speeds);
+ aq_a2_fw_set_mpi_flow_control(self, &link_options);
+ break;
+ case MPI_DEINIT:
+ link_options.link_up = 0U;
+ break;
+ case MPI_RESET:
+ case MPI_POWER:
+ /* No actions */
+ break;
+ }
+
+ hw_atl2_shared_buffer_write(self, link_options, link_options);
+
+ return hw_atl2_shared_buffer_finish_ack(self);
+}
+
+static int aq_a2_fw_update_link_status(struct aq_hw_s *self)
+{
+ struct lkp_link_caps_s lkp_link_caps;
+ struct link_status_s link_status;
+
+ hw_atl2_shared_buffer_read(self, link_status, link_status);
+
+ switch (link_status.link_rate) {
+ case AQ_A2_FW_LINK_RATE_10G:
+ self->aq_link_status.mbps = 10000;
+ break;
+ case AQ_A2_FW_LINK_RATE_5G:
+ self->aq_link_status.mbps = 5000;
+ break;
+ case AQ_A2_FW_LINK_RATE_2G5:
+ self->aq_link_status.mbps = 2500;
+ break;
+ case AQ_A2_FW_LINK_RATE_1G:
+ self->aq_link_status.mbps = 1000;
+ break;
+ case AQ_A2_FW_LINK_RATE_100M:
+ self->aq_link_status.mbps = 100;
+ break;
+ case AQ_A2_FW_LINK_RATE_10M:
+ self->aq_link_status.mbps = 10;
+ break;
+ default:
+ self->aq_link_status.mbps = 0;
+ }
+ self->aq_link_status.full_duplex = link_status.duplex;
+
+ hw_atl2_shared_buffer_read(self, lkp_link_caps, lkp_link_caps);
+
+ self->aq_link_status.lp_link_speed_msk =
+ a2_fw_lkp_to_mask(&lkp_link_caps);
+ self->aq_link_status.lp_flow_control =
+ ((lkp_link_caps.pause_rx) ? AQ_NIC_FC_RX : 0) |
+ ((lkp_link_caps.pause_tx) ? AQ_NIC_FC_TX : 0);
+
+ return 0;
+}
+
+static int aq_a2_fw_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
+{
+ struct mac_address_aligned_s mac_address;
+
+ hw_atl2_shared_buffer_get(self, mac_address, mac_address);
+ ether_addr_copy(mac, (u8 *)mac_address.aligned.mac_address);
+
+ return 0;
+}
+
+static void aq_a2_fill_a0_stats(struct aq_hw_s *self,
+ struct statistics_s *stats)
+{
+ struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ struct aq_stats_s *cs = &self->curr_stats;
+ struct aq_stats_s curr_stats = *cs;
+ bool corrupted_stats = false;
+
+#define AQ_SDELTA(_N, _F) \
+do { \
+ if (!corrupted_stats && \
+ ((s64)(stats->a0.msm._F - priv->last_stats.a0.msm._F)) >= 0) \
+ curr_stats._N += stats->a0.msm._F - priv->last_stats.a0.msm._F;\
+ else \
+ corrupted_stats = true; \
+} while (0)
+
+ if (self->aq_link_status.mbps) {
+ AQ_SDELTA(uprc, rx_unicast_frames);
+ AQ_SDELTA(mprc, rx_multicast_frames);
+ AQ_SDELTA(bprc, rx_broadcast_frames);
+ AQ_SDELTA(erpr, rx_error_frames);
+
+ AQ_SDELTA(uptc, tx_unicast_frames);
+ AQ_SDELTA(mptc, tx_multicast_frames);
+ AQ_SDELTA(bptc, tx_broadcast_frames);
+ AQ_SDELTA(erpt, tx_errors);
+
+ AQ_SDELTA(ubrc, rx_unicast_octets);
+ AQ_SDELTA(ubtc, tx_unicast_octets);
+ AQ_SDELTA(mbrc, rx_multicast_octets);
+ AQ_SDELTA(mbtc, tx_multicast_octets);
+ AQ_SDELTA(bbrc, rx_broadcast_octets);
+ AQ_SDELTA(bbtc, tx_broadcast_octets);
+
+ if (!corrupted_stats)
+ *cs = curr_stats;
+ }
+#undef AQ_SDELTA
+
+}
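
Both fill helpers (this one and the B0 variant below) accumulate deltas against the previously cached firmware sample rather than trusting absolute counters. If any delta is negative, for example last_stats recorded 1000 rx_unicast_frames but the new reading is 900 because the firmware restarted and reset its counters, the entire sample is treated as corrupted and *cs is left untouched for this cycle; the caller still refreshes priv->last_stats, so deltas resume from the new baseline on the next poll.
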
+
+static void aq_a2_fill_b0_stats(struct aq_hw_s *self,
+ struct statistics_s *stats)
+{
+ struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ struct aq_stats_s *cs = &self->curr_stats;
+ struct aq_stats_s curr_stats = *cs;
+ bool corrupted_stats = false;
+
+#define AQ_SDELTA(_N, _F) \
+do { \
+ if (!corrupted_stats && \
+ ((s64)(stats->b0._F - priv->last_stats.b0._F)) >= 0) \
+ curr_stats._N += stats->b0._F - priv->last_stats.b0._F; \
+ else \
+ corrupted_stats = true; \
+} while (0)
+
+ if (self->aq_link_status.mbps) {
+ AQ_SDELTA(uprc, rx_unicast_frames);
+ AQ_SDELTA(mprc, rx_multicast_frames);
+ AQ_SDELTA(bprc, rx_broadcast_frames);
+ AQ_SDELTA(erpr, rx_errors);
+ AQ_SDELTA(brc, rx_good_octets);
+
+ AQ_SDELTA(uptc, tx_unicast_frames);
+ AQ_SDELTA(mptc, tx_multicast_frames);
+ AQ_SDELTA(bptc, tx_broadcast_frames);
+ AQ_SDELTA(erpt, tx_errors);
+ AQ_SDELTA(btc, tx_good_octets);
+
+ if (!corrupted_stats)
+ *cs = curr_stats;
+ }
+#undef AQ_SDELTA
+}
+
+static int aq_a2_fw_update_stats(struct aq_hw_s *self)
+{
+ struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ struct aq_stats_s *cs = &self->curr_stats;
+ struct statistics_s stats;
+ struct version_s version;
+ int err;
+
+ err = hw_atl2_shared_buffer_read_safe(self, version, &version);
+ if (err)
+ return err;
+
+ err = hw_atl2_shared_buffer_read_safe(self, stats, &stats);
+ if (err)
+ return err;
+
+ if (version.drv_iface_ver == AQ_A2_FW_INTERFACE_A0)
+ aq_a2_fill_a0_stats(self, &stats);
+ else
+ aq_a2_fill_b0_stats(self, &stats);
+
+ cs->dma_pkt_rc = hw_atl_stats_rx_dma_good_pkt_counter_get(self);
+ cs->dma_pkt_tc = hw_atl_stats_tx_dma_good_pkt_counter_get(self);
+ cs->dma_oct_rc = hw_atl_stats_rx_dma_good_octet_counter_get(self);
+ cs->dma_oct_tc = hw_atl_stats_tx_dma_good_octet_counter_get(self);
+ cs->dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self);
+
+ memcpy(&priv->last_stats, &stats, sizeof(stats));
+
+ return 0;
+}
+
+static int aq_a2_fw_get_phy_temp(struct aq_hw_s *self, int *temp)
+{
+ struct phy_health_monitor_s phy_health_monitor;
+
+ hw_atl2_shared_buffer_read_safe(self, phy_health_monitor,
+ &phy_health_monitor);
+
+ *temp = (int8_t)phy_health_monitor.phy_temperature * 1000;
+ return 0;
+}
+
+static int aq_a2_fw_get_mac_temp(struct aq_hw_s *self, int *temp)
+{
+ /* There's only one temperature sensor on A2, use it for
+ * both MAC and PHY.
+ */
+ return aq_a2_fw_get_phy_temp(self, temp);
+}
+
+static int aq_a2_fw_set_eee_rate(struct aq_hw_s *self, u32 speed)
+{
+ struct link_options_s link_options;
+
+ hw_atl2_shared_buffer_get(self, link_options, link_options);
+
+ aq_a2_fw_upd_eee_rate_bits(self, &link_options, speed);
+
+ hw_atl2_shared_buffer_write(self, link_options, link_options);
+
+ return hw_atl2_shared_buffer_finish_ack(self);
+}
+
+static int aq_a2_fw_get_eee_rate(struct aq_hw_s *self, u32 *rate,
+ u32 *supported_rates)
+{
+ struct device_link_caps_s device_link_caps;
+ struct lkp_link_caps_s lkp_link_caps;
+
+ hw_atl2_shared_buffer_read(self, device_link_caps, device_link_caps);
+ hw_atl2_shared_buffer_read(self, lkp_link_caps, lkp_link_caps);
+
+ *supported_rates = a2_fw_dev_to_eee_mask(&device_link_caps);
+ *rate = a2_fw_lkp_to_mask(&lkp_link_caps);
+
+ return 0;
+}
+
+static int aq_a2_fw_renegotiate(struct aq_hw_s *self)
+{
+ struct link_options_s link_options;
+ int err;
+
+ hw_atl2_shared_buffer_get(self, link_options, link_options);
+ link_options.link_renegotiate = 1U;
+ hw_atl2_shared_buffer_write(self, link_options, link_options);
+
+ err = hw_atl2_shared_buffer_finish_ack(self);
+
+ /* Put the renegotiate flag back to zero
+ * after the command completes
+ */
+ link_options.link_renegotiate = 0U;
+ hw_atl2_shared_buffer_write(self, link_options, link_options);
+
+ return err;
+}
+
+static int aq_a2_fw_set_flow_control(struct aq_hw_s *self)
+{
+ struct link_options_s link_options;
+
+ hw_atl2_shared_buffer_get(self, link_options, link_options);
+
+ aq_a2_fw_set_mpi_flow_control(self, &link_options);
+
+ hw_atl2_shared_buffer_write(self, link_options, link_options);
+
+ return hw_atl2_shared_buffer_finish_ack(self);
+}
+
+static u32 aq_a2_fw_get_flow_control(struct aq_hw_s *self, u32 *fcmode)
+{
+ struct link_status_s link_status;
+
+ hw_atl2_shared_buffer_read(self, link_status, link_status);
+
+ *fcmode = ((link_status.pause_rx) ? AQ_NIC_FC_RX : 0) |
+ ((link_status.pause_tx) ? AQ_NIC_FC_TX : 0);
+ return 0;
+}
+
+static int aq_a2_fw_set_phyloopback(struct aq_hw_s *self, u32 mode, bool enable)
+{
+ struct link_options_s link_options;
+
+ hw_atl2_shared_buffer_get(self, link_options, link_options);
+
+ switch (mode) {
+ case AQ_HW_LOOPBACK_PHYINT_SYS:
+ link_options.internal_loopback = enable;
+ break;
+ case AQ_HW_LOOPBACK_PHYEXT_SYS:
+ link_options.external_loopback = enable;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ hw_atl2_shared_buffer_write(self, link_options, link_options);
+
+ return hw_atl2_shared_buffer_finish_ack(self);
+}
+
+u32 hw_atl2_utils_get_fw_version(struct aq_hw_s *self)
+{
+ struct version_s version;
+
+ hw_atl2_shared_buffer_read_safe(self, version, &version);
+
+ /* A2 FW version is stored in reverse order */
+ return version.bundle.major << 24 |
+ version.bundle.minor << 16 |
+ version.bundle.build;
+}
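
As a worked example (illustrative version), a bundle version 1.3.22 packs to 0x01030016: major in the top byte, minor next, and the 16-bit build in the low half. That is the form hw_atl2_utils_initfw() compares against HW_ATL2_FW_VER_1X (0x01000000) via hw_atl_utils_ver_match().
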
+
+int hw_atl2_utils_get_action_resolve_table_caps(struct aq_hw_s *self,
+ u8 *base_index, u8 *count)
+{
+ struct filter_caps_s filter_caps;
+ int err;
+
+ err = hw_atl2_shared_buffer_read_safe(self, filter_caps, &filter_caps);
+ if (err)
+ return err;
+
+ *base_index = filter_caps.rslv_tbl_base_index;
+ *count = filter_caps.rslv_tbl_count;
+ return 0;
+}
+
+static int aq_a2_fw_set_downshift(struct aq_hw_s *self, u32 counter)
+{
+ struct link_options_s link_options;
+
+ hw_atl2_shared_buffer_get(self, link_options, link_options);
+ link_options.downshift = !!counter;
+ link_options.downshift_retry = counter;
+ hw_atl2_shared_buffer_write(self, link_options, link_options);
+
+ return hw_atl2_shared_buffer_finish_ack(self);
+}
+
+const struct aq_fw_ops aq_a2_fw_ops = {
+ .init = aq_a2_fw_init,
+ .deinit = aq_a2_fw_deinit,
+ .reset = NULL,
+ .renegotiate = aq_a2_fw_renegotiate,
+ .get_mac_permanent = aq_a2_fw_get_mac_permanent,
+ .set_link_speed = aq_a2_fw_set_link_speed,
+ .set_state = aq_a2_fw_set_state,
+ .update_link_status = aq_a2_fw_update_link_status,
+ .update_stats = aq_a2_fw_update_stats,
+ .get_mac_temp = aq_a2_fw_get_mac_temp,
+ .get_phy_temp = aq_a2_fw_get_phy_temp,
+ .set_eee_rate = aq_a2_fw_set_eee_rate,
+ .get_eee_rate = aq_a2_fw_get_eee_rate,
+ .set_flow_control = aq_a2_fw_set_flow_control,
+ .get_flow_control = aq_a2_fw_get_flow_control,
+ .set_phyloopback = aq_a2_fw_set_phyloopback,
+ .set_downshift = aq_a2_fw_set_downshift,
+};
diff --git a/drivers/net/ethernet/aquantia/atlantic/macsec/MSS_Egress_registers.h b/drivers/net/ethernet/aquantia/atlantic/macsec/MSS_Egress_registers.h
new file mode 100644
index 000000000..71d08ea80
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/macsec/MSS_Egress_registers.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef MSS_EGRESS_REGS_HEADER
+#define MSS_EGRESS_REGS_HEADER
+
+#define MSS_EGRESS_CTL_REGISTER_ADDR 0x00005002
+#define MSS_EGRESS_SA_EXPIRED_STATUS_REGISTER_ADDR 0x00005060
+#define MSS_EGRESS_SA_THRESHOLD_EXPIRED_STATUS_REGISTER_ADDR 0x00005062
+#define MSS_EGRESS_LUT_ADDR_CTL_REGISTER_ADDR 0x00005080
+#define MSS_EGRESS_LUT_CTL_REGISTER_ADDR 0x00005081
+#define MSS_EGRESS_LUT_DATA_CTL_REGISTER_ADDR 0x000050A0
+
+struct mss_egress_ctl_register {
+ union {
+ struct {
+ unsigned int soft_reset : 1;
+ unsigned int drop_kay_packet : 1;
+ unsigned int drop_egprc_lut_miss : 1;
+ unsigned int gcm_start : 1;
+ unsigned int gcm_test_mode : 1;
+ unsigned int unmatched_use_sc_0 : 1;
+ unsigned int drop_invalid_sa_sc_packets : 1;
+ unsigned int reserved0 : 1;
+ /* Should always be set to 0. */
+ unsigned int external_classification_enable : 1;
+ unsigned int icv_lsb_8bytes_enable : 1;
+ unsigned int high_prio : 1;
+ unsigned int clear_counter : 1;
+ unsigned int clear_global_time : 1;
+ unsigned int ethertype_explicit_sectag_lsb : 3;
+ } bits_0;
+ unsigned short word_0;
+ };
+ union {
+ struct {
+ unsigned int ethertype_explicit_sectag_msb : 13;
+ unsigned int reserved0 : 3;
+ } bits_1;
+ unsigned short word_1;
+ };
+};
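
The union/bitfield layout lets a caller edit fields symbolically and then push the raw 16-bit words over MDIO. A hedged sketch (uses the aq_mss_mdio_read/aq_mss_mdio_write helpers from macsec_api.c below; error handling omitted, and the choice of high_prio is purely illustrative):

    struct mss_egress_ctl_register ctl = { 0 };

    /* MSS registers must be accessed as adjacent pairs, so both
     * 16-bit words are read and written back together.
     */
    aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
                     MSS_EGRESS_CTL_REGISTER_ADDR, &ctl.word_0);
    aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
                     MSS_EGRESS_CTL_REGISTER_ADDR + 1, &ctl.word_1);
    ctl.bits_0.high_prio = 1;
    aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
                      MSS_EGRESS_CTL_REGISTER_ADDR, ctl.word_0);
    aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
                      MSS_EGRESS_CTL_REGISTER_ADDR + 1, ctl.word_1);
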
+
+struct mss_egress_lut_addr_ctl_register {
+ union {
+ struct {
+ unsigned int lut_addr : 9;
+ unsigned int reserved0 : 3;
+ /* 0x0 : Egress MAC Control Filter (CTLF) LUT
+ * 0x1 : Egress Classification LUT
+ * 0x2 : Egress SC/SA LUT
+ * 0x3 : Egress SMIB
+ */
+ unsigned int lut_select : 4;
+ } bits_0;
+ unsigned short word_0;
+ };
+};
+
+struct mss_egress_lut_ctl_register {
+ union {
+ struct {
+ unsigned int reserved0 : 14;
+ unsigned int lut_read : 1;
+ unsigned int lut_write : 1;
+ } bits_0;
+ unsigned short word_0;
+ };
+};
+
+#endif /* MSS_EGRESS_REGS_HEADER */
diff --git a/drivers/net/ethernet/aquantia/atlantic/macsec/MSS_Ingress_registers.h b/drivers/net/ethernet/aquantia/atlantic/macsec/MSS_Ingress_registers.h
new file mode 100644
index 000000000..d4c00d9a0
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/macsec/MSS_Ingress_registers.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef MSS_INGRESS_REGS_HEADER
+#define MSS_INGRESS_REGS_HEADER
+
+#define MSS_INGRESS_CTL_REGISTER_ADDR 0x0000800E
+#define MSS_INGRESS_LUT_ADDR_CTL_REGISTER_ADDR 0x00008080
+#define MSS_INGRESS_LUT_CTL_REGISTER_ADDR 0x00008081
+#define MSS_INGRESS_LUT_DATA_CTL_REGISTER_ADDR 0x000080A0
+
+struct mss_ingress_ctl_register {
+ union {
+ struct {
+ unsigned int soft_reset : 1;
+ unsigned int operation_point_to_point : 1;
+ unsigned int create_sci : 1;
+ /* Unused */
+ unsigned int mask_short_length_error : 1;
+ unsigned int drop_kay_packet : 1;
+ unsigned int drop_igprc_miss : 1;
+ /* Unused */
+ unsigned int check_icv : 1;
+ unsigned int clear_global_time : 1;
+ unsigned int clear_count : 1;
+ unsigned int high_prio : 1;
+ unsigned int remove_sectag : 1;
+ unsigned int global_validate_frames : 2;
+ unsigned int icv_lsb_8bytes_enabled : 1;
+ unsigned int reserved0 : 2;
+ } bits_0;
+ unsigned short word_0;
+ };
+ union {
+ struct {
+ unsigned int reserved0 : 16;
+ } bits_1;
+ unsigned short word_1;
+ };
+};
+
+struct mss_ingress_lut_addr_ctl_register {
+ union {
+ struct {
+ unsigned int lut_addr : 9;
+ unsigned int reserved0 : 3;
+			/* 0x0 : Ingress Pre-Security MAC Control Filter
+ * (IGPRCTLF) LUT
+ * 0x1 : Ingress Pre-Security Classification LUT (IGPRC)
+ * 0x2 : Ingress Packet Format (IGPFMT) SAKey LUT
+ * 0x3 : Ingress Packet Format (IGPFMT) SC/SA LUT
+ * 0x4 : Ingress Post-Security Classification LUT
+ * (IGPOC)
+ * 0x5 : Ingress Post-Security MAC Control Filter
+ * (IGPOCTLF) LUT
+ * 0x6 : Ingress MIB (IGMIB)
+ */
+ unsigned int lut_select : 4;
+ } bits_0;
+ unsigned short word_0;
+ };
+};
+
+struct mss_ingress_lut_ctl_register {
+ union {
+ struct {
+ unsigned int reserved0 : 14;
+ unsigned int lut_read : 1;
+ unsigned int lut_write : 1;
+ } bits_0;
+ unsigned short word_0;
+ };
+};
+
+#endif /* MSS_INGRESS_REGS_HEADER */
diff --git a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
new file mode 100644
index 000000000..431924959
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
@@ -0,0 +1,2477 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#include "macsec_api.h"
+#include <linux/mdio.h>
+#include "MSS_Ingress_registers.h"
+#include "MSS_Egress_registers.h"
+#include "aq_phy.h"
+
+#define AQ_API_CALL_SAFE(func, ...) \
+({ \
+ int ret; \
+ do { \
+ ret = aq_mss_mdio_sem_get(hw); \
+ if (unlikely(ret)) \
+ break; \
+ \
+ ret = func(__VA_ARGS__); \
+ \
+ aq_mss_mdio_sem_put(hw); \
+ } while (0); \
+ ret; \
+})
+
+/*******************************************************************************
+ * MDIO wrappers
+ ******************************************************************************/
+static int aq_mss_mdio_sem_get(struct aq_hw_s *hw)
+{
+ u32 val;
+
+ return readx_poll_timeout_atomic(hw_atl_sem_mdio_get, hw, val,
+ val == 1U, 10U, 100000U);
+}
+
+static void aq_mss_mdio_sem_put(struct aq_hw_s *hw)
+{
+ hw_atl_reg_glb_cpu_sem_set(hw, 1U, HW_ATL_FW_SM_MDIO);
+}
+
+static int aq_mss_mdio_read(struct aq_hw_s *hw, u16 mmd, u16 addr, u16 *data)
+{
+ *data = aq_mdio_read_word(hw, mmd, addr);
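+	/* aq_mdio_read_word() yields all-ones when the MDIO transaction
+	 * times out, so all-ones data is treated as a failed read.
+	 */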
+ return (*data != 0xffff) ? 0 : -ETIME;
+}
+
+static int aq_mss_mdio_write(struct aq_hw_s *hw, u16 mmd, u16 addr, u16 data)
+{
+ aq_mdio_write_word(hw, mmd, addr, data);
+ return 0;
+}
+
+/*******************************************************************************
+ * MACSEC config and status
+ ******************************************************************************/
+
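+/*! Write packed_record to the specified Ingress LUT table row. */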
+static int set_raw_ingress_record(struct aq_hw_s *hw, u16 *packed_record,
+ u8 num_words, u8 table_id,
+ u16 table_index)
+{
+ struct mss_ingress_lut_addr_ctl_register lut_sel_reg;
+ struct mss_ingress_lut_ctl_register lut_op_reg;
+
+ unsigned int i;
+
+ /* NOTE: MSS registers must always be read/written as adjacent pairs.
+ * For instance, to write either or both 1E.80A0 and 80A1, we have to:
+ * 1. Write 1E.80A0 first
+ * 2. Then write 1E.80A1
+ *
+ * For HHD devices: These writes need to be performed consecutively, and
+ * to ensure this we use the PIF mailbox to delegate the reads/writes to
+ * the FW.
+ *
+	 * For EUR devices: No need to use the PIF mailbox; it is safe to
+ * write to the registers directly.
+ */
+
+ /* Write the packed record words to the data buffer registers. */
+ for (i = 0; i < num_words; i += 2) {
+ aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_LUT_DATA_CTL_REGISTER_ADDR + i,
+ packed_record[i]);
+ aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_LUT_DATA_CTL_REGISTER_ADDR + i +
+ 1,
+ packed_record[i + 1]);
+ }
+
+ /* Clear out the unused data buffer registers. */
+ for (i = num_words; i < 24; i += 2) {
+ aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_LUT_DATA_CTL_REGISTER_ADDR + i,
+ 0);
+		aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+				  MSS_INGRESS_LUT_DATA_CTL_REGISTER_ADDR + i + 1,
+				  0);
+ }
+
+ /* Select the table and row index to write to */
+ lut_sel_reg.bits_0.lut_select = table_id;
+ lut_sel_reg.bits_0.lut_addr = table_index;
+
+ lut_op_reg.bits_0.lut_read = 0;
+ lut_op_reg.bits_0.lut_write = 1;
+
+ aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_LUT_ADDR_CTL_REGISTER_ADDR,
+ lut_sel_reg.word_0);
+ aq_mss_mdio_write(hw, MDIO_MMD_VEND1, MSS_INGRESS_LUT_CTL_REGISTER_ADDR,
+ lut_op_reg.word_0);
+
+ return 0;
+}
+
+/*! Read the specified Ingress LUT table row.
+ * packed_record - [OUT] The table row data (raw).
+ */
+static int get_raw_ingress_record(struct aq_hw_s *hw, u16 *packed_record,
+ u8 num_words, u8 table_id,
+ u16 table_index)
+{
+ struct mss_ingress_lut_addr_ctl_register lut_sel_reg;
+ struct mss_ingress_lut_ctl_register lut_op_reg;
+ int ret;
+
+ unsigned int i;
+
+ /* Select the table and row index to read */
+ lut_sel_reg.bits_0.lut_select = table_id;
+ lut_sel_reg.bits_0.lut_addr = table_index;
+
+ lut_op_reg.bits_0.lut_read = 1;
+ lut_op_reg.bits_0.lut_write = 0;
+
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_LUT_ADDR_CTL_REGISTER_ADDR,
+ lut_sel_reg.word_0);
+ if (unlikely(ret))
+ return ret;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_LUT_CTL_REGISTER_ADDR,
+ lut_op_reg.word_0);
+ if (unlikely(ret))
+ return ret;
+
+ memset(packed_record, 0, sizeof(u16) * num_words);
+
+ for (i = 0; i < num_words; i += 2) {
+ ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_LUT_DATA_CTL_REGISTER_ADDR +
+ i,
+ &packed_record[i]);
+ if (unlikely(ret))
+ return ret;
+ ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_LUT_DATA_CTL_REGISTER_ADDR +
+ i + 1,
+ &packed_record[i + 1]);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ return 0;
+}
+
+/*! Write packed_record to the specified Egress LUT table row. */
+static int set_raw_egress_record(struct aq_hw_s *hw, u16 *packed_record,
+ u8 num_words, u8 table_id,
+ u16 table_index)
+{
+ struct mss_egress_lut_addr_ctl_register lut_sel_reg;
+ struct mss_egress_lut_ctl_register lut_op_reg;
+
+ unsigned int i;
+
+ /* Write the packed record words to the data buffer registers. */
+ for (i = 0; i < num_words; i += 2) {
+ aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_LUT_DATA_CTL_REGISTER_ADDR + i,
+ packed_record[i]);
+ aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_LUT_DATA_CTL_REGISTER_ADDR + i + 1,
+ packed_record[i + 1]);
+ }
+
+ /* Clear out the unused data buffer registers. */
+ for (i = num_words; i < 28; i += 2) {
+ aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_LUT_DATA_CTL_REGISTER_ADDR + i, 0);
+ aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_LUT_DATA_CTL_REGISTER_ADDR + i + 1,
+ 0);
+ }
+
+ /* Select the table and row index to write to */
+ lut_sel_reg.bits_0.lut_select = table_id;
+ lut_sel_reg.bits_0.lut_addr = table_index;
+
+ lut_op_reg.bits_0.lut_read = 0;
+ lut_op_reg.bits_0.lut_write = 1;
+
+ aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_LUT_ADDR_CTL_REGISTER_ADDR,
+ lut_sel_reg.word_0);
+ aq_mss_mdio_write(hw, MDIO_MMD_VEND1, MSS_EGRESS_LUT_CTL_REGISTER_ADDR,
+ lut_op_reg.word_0);
+
+ return 0;
+}
+
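+/*! Read the specified Egress LUT table row.
+ * packed_record - [OUT] The table row data (raw).
+ */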
+static int get_raw_egress_record(struct aq_hw_s *hw, u16 *packed_record,
+ u8 num_words, u8 table_id,
+ u16 table_index)
+{
+ struct mss_egress_lut_addr_ctl_register lut_sel_reg;
+ struct mss_egress_lut_ctl_register lut_op_reg;
+ int ret;
+
+ unsigned int i;
+
+ /* Select the table and row index to read */
+ lut_sel_reg.bits_0.lut_select = table_id;
+ lut_sel_reg.bits_0.lut_addr = table_index;
+
+ lut_op_reg.bits_0.lut_read = 1;
+ lut_op_reg.bits_0.lut_write = 0;
+
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_LUT_ADDR_CTL_REGISTER_ADDR,
+ lut_sel_reg.word_0);
+ if (unlikely(ret))
+ return ret;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_LUT_CTL_REGISTER_ADDR,
+ lut_op_reg.word_0);
+ if (unlikely(ret))
+ return ret;
+
+ memset(packed_record, 0, sizeof(u16) * num_words);
+
+ for (i = 0; i < num_words; i += 2) {
+ ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_LUT_DATA_CTL_REGISTER_ADDR +
+ i,
+ &packed_record[i]);
+ if (unlikely(ret))
+ return ret;
+ ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_LUT_DATA_CTL_REGISTER_ADDR +
+ i + 1,
+ &packed_record[i + 1]);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+set_ingress_prectlf_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_prectlf_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[6];
+
+ if (table_index >= NUMROWS_INGRESSPRECTLFRECORD)
+ return -EINVAL;
+
+ memset(packed_record, 0, sizeof(u16) * 6);
+
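+	/* Record layout across the six 16-bit words:
+	 * w0-w1: sa_da[0], w2: low half of sa_da[1], w3: eth_type,
+	 * w4: match_mask, w5: match_type[3:0] | action << 4.
+	 */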
+ packed_record[0] = rec->sa_da[0] & 0xFFFF;
+ packed_record[1] = (rec->sa_da[0] >> 16) & 0xFFFF;
+ packed_record[2] = rec->sa_da[1] & 0xFFFF;
+ packed_record[3] = rec->eth_type & 0xFFFF;
+ packed_record[4] = rec->match_mask & 0xFFFF;
+ packed_record[5] = rec->match_type & 0xF;
+ packed_record[5] |= (rec->action & 0x1) << 4;
+
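+	/* LUT 0 is the Ingress Pre-Security MAC Control Filter (IGPRCTLF)
+	 * table; see MSS_Ingress_registers.h for the LUT numbering.
+	 */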
+ return set_raw_ingress_record(hw, packed_record, 6, 0,
+ ROWOFFSET_INGRESSPRECTLFRECORD +
+ table_index);
+}
+
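+/* Public entry points: each aq_mss_* wrapper runs the corresponding raw
+ * helper under AQ_API_CALL_SAFE, i.e. with the MDIO semaphore held.
+ */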
+int aq_mss_set_ingress_prectlf_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_prectlf_record *rec,
+ u16 table_index)
+{
+ return AQ_API_CALL_SAFE(set_ingress_prectlf_record, hw, rec,
+ table_index);
+}
+
+static int get_ingress_prectlf_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_prectlf_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[6];
+ int ret;
+
+ if (table_index >= NUMROWS_INGRESSPRECTLFRECORD)
+ return -EINVAL;
+
+ /* If the row that we want to read is odd, first read the previous even
+ * row, throw that value away, and finally read the desired row.
+ * This is a workaround for EUR devices that allows us to read
+ * odd-numbered rows. For HHD devices: this workaround will not work,
+ * so don't bother; odd-numbered rows are not readable.
+ */
+ if ((table_index % 2) > 0) {
+ ret = get_raw_ingress_record(hw, packed_record, 6, 0,
+ ROWOFFSET_INGRESSPRECTLFRECORD +
+ table_index - 1);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ ret = get_raw_ingress_record(hw, packed_record, 6, 0,
+ ROWOFFSET_INGRESSPRECTLFRECORD +
+ table_index);
+ if (unlikely(ret))
+ return ret;
+
+ rec->sa_da[0] = packed_record[0];
+ rec->sa_da[0] |= packed_record[1] << 16;
+
+ rec->sa_da[1] = packed_record[2];
+
+ rec->eth_type = packed_record[3];
+
+ rec->match_mask = packed_record[4];
+
+ rec->match_type = packed_record[5] & 0xF;
+
+ rec->action = (packed_record[5] >> 4) & 0x1;
+
+ return 0;
+}
+
+int aq_mss_get_ingress_prectlf_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_prectlf_record *rec,
+ u16 table_index)
+{
+ memset(rec, 0, sizeof(*rec));
+
+ return AQ_API_CALL_SAFE(get_ingress_prectlf_record, hw, rec,
+ table_index);
+}
+
+static int
+set_ingress_preclass_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_preclass_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[20];
+
+ if (table_index >= NUMROWS_INGRESSPRECLASSRECORD)
+ return -EINVAL;
+
+ memset(packed_record, 0, sizeof(u16) * 20);
+
+ packed_record[0] = rec->sci[0] & 0xFFFF;
+ packed_record[1] = (rec->sci[0] >> 16) & 0xFFFF;
+
+ packed_record[2] = rec->sci[1] & 0xFFFF;
+ packed_record[3] = (rec->sci[1] >> 16) & 0xFFFF;
+
+ packed_record[4] = rec->tci & 0xFF;
+
+ packed_record[4] |= (rec->encr_offset & 0xFF) << 8;
+
+ packed_record[5] = rec->eth_type & 0xFFFF;
+
+ packed_record[6] = rec->snap[0] & 0xFFFF;
+ packed_record[7] = (rec->snap[0] >> 16) & 0xFFFF;
+
+ packed_record[8] = rec->snap[1] & 0xFF;
+
+ packed_record[8] |= (rec->llc & 0xFF) << 8;
+ packed_record[9] = (rec->llc >> 8) & 0xFFFF;
+
+ packed_record[10] = rec->mac_sa[0] & 0xFFFF;
+ packed_record[11] = (rec->mac_sa[0] >> 16) & 0xFFFF;
+
+ packed_record[12] = rec->mac_sa[1] & 0xFFFF;
+
+ packed_record[13] = rec->mac_da[0] & 0xFFFF;
+ packed_record[14] = (rec->mac_da[0] >> 16) & 0xFFFF;
+
+ packed_record[15] = rec->mac_da[1] & 0xFFFF;
+
+ packed_record[16] = rec->lpbk_packet & 0x1;
+
+ packed_record[16] |= (rec->an_mask & 0x3) << 1;
+
+ packed_record[16] |= (rec->tci_mask & 0x3F) << 3;
+
+ packed_record[16] |= (rec->sci_mask & 0x7F) << 9;
+ packed_record[17] = (rec->sci_mask >> 7) & 0x1;
+
+ packed_record[17] |= (rec->eth_type_mask & 0x3) << 1;
+
+ packed_record[17] |= (rec->snap_mask & 0x1F) << 3;
+
+ packed_record[17] |= (rec->llc_mask & 0x7) << 8;
+
+ packed_record[17] |= (rec->_802_2_encapsulate & 0x1) << 11;
+
+ packed_record[17] |= (rec->sa_mask & 0xF) << 12;
+ packed_record[18] = (rec->sa_mask >> 4) & 0x3;
+
+ packed_record[18] |= (rec->da_mask & 0x3F) << 2;
+
+ packed_record[18] |= (rec->lpbk_mask & 0x1) << 8;
+
+ packed_record[18] |= (rec->sc_idx & 0x1F) << 9;
+
+ packed_record[18] |= (rec->proc_dest & 0x1) << 14;
+
+ packed_record[18] |= (rec->action & 0x1) << 15;
+ packed_record[19] = (rec->action >> 1) & 0x1;
+
+ packed_record[19] |= (rec->ctrl_unctrl & 0x1) << 1;
+
+ packed_record[19] |= (rec->sci_from_table & 0x1) << 2;
+
+ packed_record[19] |= (rec->reserved & 0xF) << 3;
+
+ packed_record[19] |= (rec->valid & 0x1) << 7;
+
+ return set_raw_ingress_record(hw, packed_record, 20, 1,
+ ROWOFFSET_INGRESSPRECLASSRECORD +
+ table_index);
+}
+
+int aq_mss_set_ingress_preclass_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_preclass_record *rec,
+ u16 table_index)
+{
+ int err = AQ_API_CALL_SAFE(set_ingress_preclass_record, hw, rec,
+ table_index);
+
+ WARN_ONCE(err, "%s failed with %d\n", __func__, err);
+
+ return err;
+}
+
+static int
+get_ingress_preclass_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_preclass_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[20];
+ int ret;
+
+ if (table_index >= NUMROWS_INGRESSPRECLASSRECORD)
+ return -EINVAL;
+
+ /* If the row that we want to read is odd, first read the previous even
+ * row, throw that value away, and finally read the desired row.
+ */
+ if ((table_index % 2) > 0) {
+ ret = get_raw_ingress_record(hw, packed_record, 20, 1,
+ ROWOFFSET_INGRESSPRECLASSRECORD +
+ table_index - 1);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ ret = get_raw_ingress_record(hw, packed_record, 20, 1,
+ ROWOFFSET_INGRESSPRECLASSRECORD +
+ table_index);
+ if (unlikely(ret))
+ return ret;
+
+ rec->sci[0] = packed_record[0];
+ rec->sci[0] |= packed_record[1] << 16;
+
+ rec->sci[1] = packed_record[2];
+ rec->sci[1] |= packed_record[3] << 16;
+
+ rec->tci = packed_record[4] & 0xFF;
+
+ rec->encr_offset = (packed_record[4] >> 8) & 0xFF;
+
+ rec->eth_type = packed_record[5];
+
+ rec->snap[0] = packed_record[6];
+ rec->snap[0] |= packed_record[7] << 16;
+
+ rec->snap[1] = packed_record[8] & 0xFF;
+
+ rec->llc = (packed_record[8] >> 8) & 0xFF;
+ rec->llc |= packed_record[9] << 8;
+
+ rec->mac_sa[0] = packed_record[10];
+ rec->mac_sa[0] |= packed_record[11] << 16;
+
+ rec->mac_sa[1] = packed_record[12];
+
+ rec->mac_da[0] = packed_record[13];
+ rec->mac_da[0] |= packed_record[14] << 16;
+
+ rec->mac_da[1] = packed_record[15];
+
+ rec->lpbk_packet = packed_record[16] & 0x1;
+
+ rec->an_mask = (packed_record[16] >> 1) & 0x3;
+
+ rec->tci_mask = (packed_record[16] >> 3) & 0x3F;
+
+ rec->sci_mask = (packed_record[16] >> 9) & 0x7F;
+ rec->sci_mask |= (packed_record[17] & 0x1) << 7;
+
+ rec->eth_type_mask = (packed_record[17] >> 1) & 0x3;
+
+ rec->snap_mask = (packed_record[17] >> 3) & 0x1F;
+
+ rec->llc_mask = (packed_record[17] >> 8) & 0x7;
+
+ rec->_802_2_encapsulate = (packed_record[17] >> 11) & 0x1;
+
+ rec->sa_mask = (packed_record[17] >> 12) & 0xF;
+ rec->sa_mask |= (packed_record[18] & 0x3) << 4;
+
+ rec->da_mask = (packed_record[18] >> 2) & 0x3F;
+
+ rec->lpbk_mask = (packed_record[18] >> 8) & 0x1;
+
+ rec->sc_idx = (packed_record[18] >> 9) & 0x1F;
+
+ rec->proc_dest = (packed_record[18] >> 14) & 0x1;
+
+ rec->action = (packed_record[18] >> 15) & 0x1;
+ rec->action |= (packed_record[19] & 0x1) << 1;
+
+ rec->ctrl_unctrl = (packed_record[19] >> 1) & 0x1;
+
+ rec->sci_from_table = (packed_record[19] >> 2) & 0x1;
+
+ rec->reserved = (packed_record[19] >> 3) & 0xF;
+
+ rec->valid = (packed_record[19] >> 7) & 0x1;
+
+ return 0;
+}
+
+int aq_mss_get_ingress_preclass_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_preclass_record *rec,
+ u16 table_index)
+{
+ memset(rec, 0, sizeof(*rec));
+
+ return AQ_API_CALL_SAFE(get_ingress_preclass_record, hw, rec,
+ table_index);
+}
+
+static int set_ingress_sc_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_sc_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[8];
+
+ if (table_index >= NUMROWS_INGRESSSCRECORD)
+ return -EINVAL;
+
+ memset(packed_record, 0, sizeof(u16) * 8);
+
+ packed_record[0] = rec->stop_time & 0xFFFF;
+ packed_record[1] = (rec->stop_time >> 16) & 0xFFFF;
+
+ packed_record[2] = rec->start_time & 0xFFFF;
+ packed_record[3] = (rec->start_time >> 16) & 0xFFFF;
+
+ packed_record[4] = rec->validate_frames & 0x3;
+
+ packed_record[4] |= (rec->replay_protect & 0x1) << 2;
+
+ packed_record[4] |= (rec->anti_replay_window & 0x1FFF) << 3;
+ packed_record[5] = (rec->anti_replay_window >> 13) & 0xFFFF;
+ packed_record[6] = (rec->anti_replay_window >> 29) & 0x7;
+
+ packed_record[6] |= (rec->receiving & 0x1) << 3;
+
+ packed_record[6] |= (rec->fresh & 0x1) << 4;
+
+ packed_record[6] |= (rec->an_rol & 0x1) << 5;
+
+ packed_record[6] |= (rec->reserved & 0x3FF) << 6;
+ packed_record[7] = (rec->reserved >> 10) & 0x7FFF;
+
+ packed_record[7] |= (rec->valid & 0x1) << 15;
+
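+	/* SC and SA records share the IGPFMT SC/SA LUT (table 3), at
+	 * distinct row offsets.
+	 */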
+ return set_raw_ingress_record(hw, packed_record, 8, 3,
+ ROWOFFSET_INGRESSSCRECORD + table_index);
+}
+
+int aq_mss_set_ingress_sc_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_sc_record *rec,
+ u16 table_index)
+{
+ int err = AQ_API_CALL_SAFE(set_ingress_sc_record, hw, rec, table_index);
+
+ WARN_ONCE(err, "%s failed with %d\n", __func__, err);
+
+ return err;
+}
+
+static int get_ingress_sc_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_sc_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[8];
+ int ret;
+
+ if (table_index >= NUMROWS_INGRESSSCRECORD)
+ return -EINVAL;
+
+ ret = get_raw_ingress_record(hw, packed_record, 8, 3,
+ ROWOFFSET_INGRESSSCRECORD + table_index);
+ if (unlikely(ret))
+ return ret;
+
+ rec->stop_time = packed_record[0];
+ rec->stop_time |= packed_record[1] << 16;
+
+ rec->start_time = packed_record[2];
+ rec->start_time |= packed_record[3] << 16;
+
+ rec->validate_frames = packed_record[4] & 0x3;
+
+ rec->replay_protect = (packed_record[4] >> 2) & 0x1;
+
+ rec->anti_replay_window = (packed_record[4] >> 3) & 0x1FFF;
+ rec->anti_replay_window |= packed_record[5] << 13;
+ rec->anti_replay_window |= (packed_record[6] & 0x7) << 29;
+
+ rec->receiving = (packed_record[6] >> 3) & 0x1;
+
+ rec->fresh = (packed_record[6] >> 4) & 0x1;
+
+ rec->an_rol = (packed_record[6] >> 5) & 0x1;
+
+ rec->reserved = (packed_record[6] >> 6) & 0x3FF;
+ rec->reserved |= (packed_record[7] & 0x7FFF) << 10;
+
+ rec->valid = (packed_record[7] >> 15) & 0x1;
+
+ return 0;
+}
+
+int aq_mss_get_ingress_sc_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_sc_record *rec,
+ u16 table_index)
+{
+ memset(rec, 0, sizeof(*rec));
+
+ return AQ_API_CALL_SAFE(get_ingress_sc_record, hw, rec, table_index);
+}
+
+static int set_ingress_sa_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_sa_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[8];
+
+ if (table_index >= NUMROWS_INGRESSSARECORD)
+ return -EINVAL;
+
+ memset(packed_record, 0, sizeof(u16) * 8);
+
+ packed_record[0] = rec->stop_time & 0xFFFF;
+ packed_record[1] = (rec->stop_time >> 16) & 0xFFFF;
+
+ packed_record[2] = rec->start_time & 0xFFFF;
+ packed_record[3] = (rec->start_time >> 16) & 0xFFFF;
+
+ packed_record[4] = rec->next_pn & 0xFFFF;
+ packed_record[5] = (rec->next_pn >> 16) & 0xFFFF;
+
+ packed_record[6] = rec->sat_nextpn & 0x1;
+
+ packed_record[6] |= (rec->in_use & 0x1) << 1;
+
+ packed_record[6] |= (rec->fresh & 0x1) << 2;
+
+ packed_record[6] |= (rec->reserved & 0x1FFF) << 3;
+ packed_record[7] = (rec->reserved >> 13) & 0x7FFF;
+
+ packed_record[7] |= (rec->valid & 0x1) << 15;
+
+ return set_raw_ingress_record(hw, packed_record, 8, 3,
+ ROWOFFSET_INGRESSSARECORD + table_index);
+}
+
+int aq_mss_set_ingress_sa_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_sa_record *rec,
+ u16 table_index)
+{
+ int err = AQ_API_CALL_SAFE(set_ingress_sa_record, hw, rec, table_index);
+
+ WARN_ONCE(err, "%s failed with %d\n", __func__, err);
+
+ return err;
+}
+
+static int get_ingress_sa_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_sa_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[8];
+ int ret;
+
+ if (table_index >= NUMROWS_INGRESSSARECORD)
+ return -EINVAL;
+
+ ret = get_raw_ingress_record(hw, packed_record, 8, 3,
+ ROWOFFSET_INGRESSSARECORD + table_index);
+ if (unlikely(ret))
+ return ret;
+
+ rec->stop_time = packed_record[0];
+ rec->stop_time |= packed_record[1] << 16;
+
+ rec->start_time = packed_record[2];
+ rec->start_time |= packed_record[3] << 16;
+
+ rec->next_pn = packed_record[4];
+ rec->next_pn |= packed_record[5] << 16;
+
+ rec->sat_nextpn = packed_record[6] & 0x1;
+
+ rec->in_use = (packed_record[6] >> 1) & 0x1;
+
+ rec->fresh = (packed_record[6] >> 2) & 0x1;
+
+ rec->reserved = (packed_record[6] >> 3) & 0x1FFF;
+ rec->reserved |= (packed_record[7] & 0x7FFF) << 13;
+
+ rec->valid = (packed_record[7] >> 15) & 0x1;
+
+ return 0;
+}
+
+int aq_mss_get_ingress_sa_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_sa_record *rec,
+ u16 table_index)
+{
+ memset(rec, 0, sizeof(*rec));
+
+ return AQ_API_CALL_SAFE(get_ingress_sa_record, hw, rec, table_index);
+}
+
+static int
+set_ingress_sakey_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_sakey_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[18];
+ int ret;
+
+ if (table_index >= NUMROWS_INGRESSSAKEYRECORD)
+ return -EINVAL;
+
+ memset(packed_record, 0, sizeof(u16) * 18);
+
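+	/* Keys land in the IGPFMT SAKey LUT (table 2): eight 32-bit key
+	 * words followed by the 2-bit key_len encoding.
+	 */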
+ packed_record[0] = rec->key[0] & 0xFFFF;
+ packed_record[1] = (rec->key[0] >> 16) & 0xFFFF;
+
+ packed_record[2] = rec->key[1] & 0xFFFF;
+ packed_record[3] = (rec->key[1] >> 16) & 0xFFFF;
+
+ packed_record[4] = rec->key[2] & 0xFFFF;
+ packed_record[5] = (rec->key[2] >> 16) & 0xFFFF;
+
+ packed_record[6] = rec->key[3] & 0xFFFF;
+ packed_record[7] = (rec->key[3] >> 16) & 0xFFFF;
+
+ packed_record[8] = rec->key[4] & 0xFFFF;
+ packed_record[9] = (rec->key[4] >> 16) & 0xFFFF;
+
+ packed_record[10] = rec->key[5] & 0xFFFF;
+ packed_record[11] = (rec->key[5] >> 16) & 0xFFFF;
+
+ packed_record[12] = rec->key[6] & 0xFFFF;
+ packed_record[13] = (rec->key[6] >> 16) & 0xFFFF;
+
+ packed_record[14] = rec->key[7] & 0xFFFF;
+ packed_record[15] = (rec->key[7] >> 16) & 0xFFFF;
+
+ packed_record[16] = rec->key_len & 0x3;
+
+ ret = set_raw_ingress_record(hw, packed_record, 18, 2,
+ ROWOFFSET_INGRESSSAKEYRECORD +
+ table_index);
+
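+	/* Scrub the key material from the on-stack copy. */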
+ memzero_explicit(packed_record, sizeof(packed_record));
+ return ret;
+}
+
+int aq_mss_set_ingress_sakey_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_sakey_record *rec,
+ u16 table_index)
+{
+ int err = AQ_API_CALL_SAFE(set_ingress_sakey_record, hw, rec,
+ table_index);
+
+ WARN_ONCE(err, "%s failed with %d\n", __func__, err);
+
+ return err;
+}
+
+static int get_ingress_sakey_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_sakey_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[18];
+ int ret;
+
+ if (table_index >= NUMROWS_INGRESSSAKEYRECORD)
+ return -EINVAL;
+
+ ret = get_raw_ingress_record(hw, packed_record, 18, 2,
+ ROWOFFSET_INGRESSSAKEYRECORD +
+ table_index);
+ if (unlikely(ret))
+ return ret;
+
+ rec->key[0] = packed_record[0];
+ rec->key[0] |= packed_record[1] << 16;
+
+ rec->key[1] = packed_record[2];
+ rec->key[1] |= packed_record[3] << 16;
+
+ rec->key[2] = packed_record[4];
+ rec->key[2] |= packed_record[5] << 16;
+
+ rec->key[3] = packed_record[6];
+ rec->key[3] |= packed_record[7] << 16;
+
+ rec->key[4] = packed_record[8];
+ rec->key[4] |= packed_record[9] << 16;
+
+ rec->key[5] = packed_record[10];
+ rec->key[5] |= packed_record[11] << 16;
+
+ rec->key[6] = packed_record[12];
+ rec->key[6] |= packed_record[13] << 16;
+
+ rec->key[7] = packed_record[14];
+ rec->key[7] |= packed_record[15] << 16;
+
+ rec->key_len = packed_record[16] & 0x3;
+
+ return 0;
+}
+
+int aq_mss_get_ingress_sakey_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_sakey_record *rec,
+ u16 table_index)
+{
+ memset(rec, 0, sizeof(*rec));
+
+ return AQ_API_CALL_SAFE(get_ingress_sakey_record, hw, rec, table_index);
+}
+
+static int
+set_ingress_postclass_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_postclass_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[8];
+
+ if (table_index >= NUMROWS_INGRESSPOSTCLASSRECORD)
+ return -EINVAL;
+
+ memset(packed_record, 0, sizeof(u16) * 8);
+
+ packed_record[0] = rec->byte0 & 0xFF;
+
+ packed_record[0] |= (rec->byte1 & 0xFF) << 8;
+
+ packed_record[1] = rec->byte2 & 0xFF;
+
+ packed_record[1] |= (rec->byte3 & 0xFF) << 8;
+
+ packed_record[2] = rec->eth_type & 0xFFFF;
+
+ packed_record[3] = rec->eth_type_valid & 0x1;
+
+ packed_record[3] |= (rec->vlan_id & 0xFFF) << 1;
+
+ packed_record[3] |= (rec->vlan_up & 0x7) << 13;
+
+ packed_record[4] = rec->vlan_valid & 0x1;
+
+ packed_record[4] |= (rec->sai & 0x1F) << 1;
+
+ packed_record[4] |= (rec->sai_hit & 0x1) << 6;
+
+ packed_record[4] |= (rec->eth_type_mask & 0xF) << 7;
+
+ packed_record[4] |= (rec->byte3_location & 0x1F) << 11;
+ packed_record[5] = (rec->byte3_location >> 5) & 0x1;
+
+ packed_record[5] |= (rec->byte3_mask & 0x3) << 1;
+
+ packed_record[5] |= (rec->byte2_location & 0x3F) << 3;
+
+ packed_record[5] |= (rec->byte2_mask & 0x3) << 9;
+
+ packed_record[5] |= (rec->byte1_location & 0x1F) << 11;
+ packed_record[6] = (rec->byte1_location >> 5) & 0x1;
+
+ packed_record[6] |= (rec->byte1_mask & 0x3) << 1;
+
+ packed_record[6] |= (rec->byte0_location & 0x3F) << 3;
+
+ packed_record[6] |= (rec->byte0_mask & 0x3) << 9;
+
+ packed_record[6] |= (rec->eth_type_valid_mask & 0x3) << 11;
+
+ packed_record[6] |= (rec->vlan_id_mask & 0x7) << 13;
+ packed_record[7] = (rec->vlan_id_mask >> 3) & 0x1;
+
+ packed_record[7] |= (rec->vlan_up_mask & 0x3) << 1;
+
+ packed_record[7] |= (rec->vlan_valid_mask & 0x3) << 3;
+
+ packed_record[7] |= (rec->sai_mask & 0x3) << 5;
+
+ packed_record[7] |= (rec->sai_hit_mask & 0x3) << 7;
+
+ packed_record[7] |= (rec->firstlevel_actions & 0x1) << 9;
+
+ packed_record[7] |= (rec->secondlevel_actions & 0x1) << 10;
+
+ packed_record[7] |= (rec->reserved & 0xF) << 11;
+
+ packed_record[7] |= (rec->valid & 0x1) << 15;
+
+ return set_raw_ingress_record(hw, packed_record, 8, 4,
+ ROWOFFSET_INGRESSPOSTCLASSRECORD +
+ table_index);
+}
+
+int aq_mss_set_ingress_postclass_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_postclass_record *rec,
+ u16 table_index)
+{
+ return AQ_API_CALL_SAFE(set_ingress_postclass_record, hw, rec,
+ table_index);
+}
+
+static int
+get_ingress_postclass_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_postclass_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[8];
+ int ret;
+
+ if (table_index >= NUMROWS_INGRESSPOSTCLASSRECORD)
+ return -EINVAL;
+
+ /* If the row that we want to read is odd, first read the previous even
+ * row, throw that value away, and finally read the desired row.
+ */
+ if ((table_index % 2) > 0) {
+ ret = get_raw_ingress_record(hw, packed_record, 8, 4,
+ ROWOFFSET_INGRESSPOSTCLASSRECORD +
+ table_index - 1);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ ret = get_raw_ingress_record(hw, packed_record, 8, 4,
+ ROWOFFSET_INGRESSPOSTCLASSRECORD +
+ table_index);
+ if (unlikely(ret))
+ return ret;
+
+ rec->byte0 = packed_record[0] & 0xFF;
+
+ rec->byte1 = (packed_record[0] >> 8) & 0xFF;
+
+ rec->byte2 = packed_record[1] & 0xFF;
+
+ rec->byte3 = (packed_record[1] >> 8) & 0xFF;
+
+ rec->eth_type = packed_record[2];
+
+ rec->eth_type_valid = packed_record[3] & 0x1;
+
+ rec->vlan_id = (packed_record[3] >> 1) & 0xFFF;
+
+ rec->vlan_up = (packed_record[3] >> 13) & 0x7;
+
+ rec->vlan_valid = packed_record[4] & 0x1;
+
+ rec->sai = (packed_record[4] >> 1) & 0x1F;
+
+ rec->sai_hit = (packed_record[4] >> 6) & 0x1;
+
+ rec->eth_type_mask = (packed_record[4] >> 7) & 0xF;
+
+ rec->byte3_location = (packed_record[4] >> 11) & 0x1F;
+ rec->byte3_location |= (packed_record[5] & 0x1) << 5;
+
+ rec->byte3_mask = (packed_record[5] >> 1) & 0x3;
+
+ rec->byte2_location = (packed_record[5] >> 3) & 0x3F;
+
+ rec->byte2_mask = (packed_record[5] >> 9) & 0x3;
+
+ rec->byte1_location = (packed_record[5] >> 11) & 0x1F;
+ rec->byte1_location |= (packed_record[6] & 0x1) << 5;
+
+ rec->byte1_mask = (packed_record[6] >> 1) & 0x3;
+
+ rec->byte0_location = (packed_record[6] >> 3) & 0x3F;
+
+ rec->byte0_mask = (packed_record[6] >> 9) & 0x3;
+
+ rec->eth_type_valid_mask = (packed_record[6] >> 11) & 0x3;
+
+ rec->vlan_id_mask = (packed_record[6] >> 13) & 0x7;
+ rec->vlan_id_mask |= (packed_record[7] & 0x1) << 3;
+
+ rec->vlan_up_mask = (packed_record[7] >> 1) & 0x3;
+
+ rec->vlan_valid_mask = (packed_record[7] >> 3) & 0x3;
+
+ rec->sai_mask = (packed_record[7] >> 5) & 0x3;
+
+ rec->sai_hit_mask = (packed_record[7] >> 7) & 0x3;
+
+ rec->firstlevel_actions = (packed_record[7] >> 9) & 0x1;
+
+ rec->secondlevel_actions = (packed_record[7] >> 10) & 0x1;
+
+ rec->reserved = (packed_record[7] >> 11) & 0xF;
+
+ rec->valid = (packed_record[7] >> 15) & 0x1;
+
+ return 0;
+}
+
+int aq_mss_get_ingress_postclass_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_postclass_record *rec,
+ u16 table_index)
+{
+ memset(rec, 0, sizeof(*rec));
+
+ return AQ_API_CALL_SAFE(get_ingress_postclass_record, hw, rec,
+ table_index);
+}
+
+static int
+set_ingress_postctlf_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_postctlf_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[6];
+
+ if (table_index >= NUMROWS_INGRESSPOSTCTLFRECORD)
+ return -EINVAL;
+
+ memset(packed_record, 0, sizeof(u16) * 6);
+
+ packed_record[0] = rec->sa_da[0] & 0xFFFF;
+ packed_record[1] = (rec->sa_da[0] >> 16) & 0xFFFF;
+
+ packed_record[2] = rec->sa_da[1] & 0xFFFF;
+
+ packed_record[3] = rec->eth_type & 0xFFFF;
+
+ packed_record[4] = rec->match_mask & 0xFFFF;
+
+ packed_record[5] = rec->match_type & 0xF;
+
+ packed_record[5] |= (rec->action & 0x1) << 4;
+
+ return set_raw_ingress_record(hw, packed_record, 6, 5,
+ ROWOFFSET_INGRESSPOSTCTLFRECORD +
+ table_index);
+}
+
+int aq_mss_set_ingress_postctlf_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_postctlf_record *rec,
+ u16 table_index)
+{
+ return AQ_API_CALL_SAFE(set_ingress_postctlf_record, hw, rec,
+ table_index);
+}
+
+static int
+get_ingress_postctlf_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_postctlf_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[6];
+ int ret;
+
+ if (table_index >= NUMROWS_INGRESSPOSTCTLFRECORD)
+ return -EINVAL;
+
+ /* If the row that we want to read is odd, first read the previous even
+ * row, throw that value away, and finally read the desired row.
+ */
+ if ((table_index % 2) > 0) {
+ ret = get_raw_ingress_record(hw, packed_record, 6, 5,
+ ROWOFFSET_INGRESSPOSTCTLFRECORD +
+ table_index - 1);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ ret = get_raw_ingress_record(hw, packed_record, 6, 5,
+ ROWOFFSET_INGRESSPOSTCTLFRECORD +
+ table_index);
+ if (unlikely(ret))
+ return ret;
+
+ rec->sa_da[0] = packed_record[0];
+ rec->sa_da[0] |= packed_record[1] << 16;
+
+ rec->sa_da[1] = packed_record[2];
+
+ rec->eth_type = packed_record[3];
+
+ rec->match_mask = packed_record[4];
+
+ rec->match_type = packed_record[5] & 0xF;
+
+ rec->action = (packed_record[5] >> 4) & 0x1;
+
+ return 0;
+}
+
+int aq_mss_get_ingress_postctlf_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_postctlf_record *rec,
+ u16 table_index)
+{
+ memset(rec, 0, sizeof(*rec));
+
+ return AQ_API_CALL_SAFE(get_ingress_postctlf_record, hw, rec,
+ table_index);
+}
+
+static int set_egress_ctlf_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_ctlf_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[6];
+
+ if (table_index >= NUMROWS_EGRESSCTLFRECORD)
+ return -EINVAL;
+
+ memset(packed_record, 0, sizeof(u16) * 6);
+
+ packed_record[0] = rec->sa_da[0] & 0xFFFF;
+ packed_record[1] = (rec->sa_da[0] >> 16) & 0xFFFF;
+
+ packed_record[2] = rec->sa_da[1] & 0xFFFF;
+
+ packed_record[3] = rec->eth_type & 0xFFFF;
+
+ packed_record[4] = rec->match_mask & 0xFFFF;
+
+ packed_record[5] = rec->match_type & 0xF;
+
+ packed_record[5] |= (rec->action & 0x1) << 4;
+
+ return set_raw_egress_record(hw, packed_record, 6, 0,
+ ROWOFFSET_EGRESSCTLFRECORD + table_index);
+}
+
+int aq_mss_set_egress_ctlf_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_ctlf_record *rec,
+ u16 table_index)
+{
+ return AQ_API_CALL_SAFE(set_egress_ctlf_record, hw, rec, table_index);
+}
+
+static int get_egress_ctlf_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_ctlf_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[6];
+ int ret;
+
+ if (table_index >= NUMROWS_EGRESSCTLFRECORD)
+ return -EINVAL;
+
+ /* If the row that we want to read is odd, first read the previous even
+ * row, throw that value away, and finally read the desired row.
+ */
+ if ((table_index % 2) > 0) {
+ ret = get_raw_egress_record(hw, packed_record, 6, 0,
+ ROWOFFSET_EGRESSCTLFRECORD +
+ table_index - 1);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ ret = get_raw_egress_record(hw, packed_record, 6, 0,
+ ROWOFFSET_EGRESSCTLFRECORD + table_index);
+ if (unlikely(ret))
+ return ret;
+
+ rec->sa_da[0] = packed_record[0];
+ rec->sa_da[0] |= packed_record[1] << 16;
+
+ rec->sa_da[1] = packed_record[2];
+
+ rec->eth_type = packed_record[3];
+
+ rec->match_mask = packed_record[4];
+
+ rec->match_type = packed_record[5] & 0xF;
+
+ rec->action = (packed_record[5] >> 4) & 0x1;
+
+ return 0;
+}
+
+int aq_mss_get_egress_ctlf_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_ctlf_record *rec,
+ u16 table_index)
+{
+ memset(rec, 0, sizeof(*rec));
+
+ return AQ_API_CALL_SAFE(get_egress_ctlf_record, hw, rec, table_index);
+}
+
+static int set_egress_class_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_class_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[28];
+
+ if (table_index >= NUMROWS_EGRESSCLASSRECORD)
+ return -EINVAL;
+
+ memset(packed_record, 0, sizeof(u16) * 28);
+
+ packed_record[0] = rec->vlan_id & 0xFFF;
+
+ packed_record[0] |= (rec->vlan_up & 0x7) << 12;
+
+ packed_record[0] |= (rec->vlan_valid & 0x1) << 15;
+
+ packed_record[1] = rec->byte3 & 0xFF;
+
+ packed_record[1] |= (rec->byte2 & 0xFF) << 8;
+
+ packed_record[2] = rec->byte1 & 0xFF;
+
+ packed_record[2] |= (rec->byte0 & 0xFF) << 8;
+
+ packed_record[3] = rec->tci & 0xFF;
+
+ packed_record[3] |= (rec->sci[0] & 0xFF) << 8;
+ packed_record[4] = (rec->sci[0] >> 8) & 0xFFFF;
+ packed_record[5] = (rec->sci[0] >> 24) & 0xFF;
+
+ packed_record[5] |= (rec->sci[1] & 0xFF) << 8;
+ packed_record[6] = (rec->sci[1] >> 8) & 0xFFFF;
+ packed_record[7] = (rec->sci[1] >> 24) & 0xFF;
+
+ packed_record[7] |= (rec->eth_type & 0xFF) << 8;
+ packed_record[8] = (rec->eth_type >> 8) & 0xFF;
+
+ packed_record[8] |= (rec->snap[0] & 0xFF) << 8;
+ packed_record[9] = (rec->snap[0] >> 8) & 0xFFFF;
+ packed_record[10] = (rec->snap[0] >> 24) & 0xFF;
+
+ packed_record[10] |= (rec->snap[1] & 0xFF) << 8;
+
+ packed_record[11] = rec->llc & 0xFFFF;
+ packed_record[12] = (rec->llc >> 16) & 0xFF;
+
+ packed_record[12] |= (rec->mac_sa[0] & 0xFF) << 8;
+ packed_record[13] = (rec->mac_sa[0] >> 8) & 0xFFFF;
+ packed_record[14] = (rec->mac_sa[0] >> 24) & 0xFF;
+
+ packed_record[14] |= (rec->mac_sa[1] & 0xFF) << 8;
+ packed_record[15] = (rec->mac_sa[1] >> 8) & 0xFF;
+
+ packed_record[15] |= (rec->mac_da[0] & 0xFF) << 8;
+ packed_record[16] = (rec->mac_da[0] >> 8) & 0xFFFF;
+ packed_record[17] = (rec->mac_da[0] >> 24) & 0xFF;
+
+ packed_record[17] |= (rec->mac_da[1] & 0xFF) << 8;
+ packed_record[18] = (rec->mac_da[1] >> 8) & 0xFF;
+
+ packed_record[18] |= (rec->pn & 0xFF) << 8;
+ packed_record[19] = (rec->pn >> 8) & 0xFFFF;
+ packed_record[20] = (rec->pn >> 24) & 0xFF;
+
+ packed_record[20] |= (rec->byte3_location & 0x3F) << 8;
+
+ packed_record[20] |= (rec->byte3_mask & 0x1) << 14;
+
+ packed_record[20] |= (rec->byte2_location & 0x1) << 15;
+ packed_record[21] = (rec->byte2_location >> 1) & 0x1F;
+
+ packed_record[21] |= (rec->byte2_mask & 0x1) << 5;
+
+ packed_record[21] |= (rec->byte1_location & 0x3F) << 6;
+
+ packed_record[21] |= (rec->byte1_mask & 0x1) << 12;
+
+ packed_record[21] |= (rec->byte0_location & 0x7) << 13;
+ packed_record[22] = (rec->byte0_location >> 3) & 0x7;
+
+ packed_record[22] |= (rec->byte0_mask & 0x1) << 3;
+
+ packed_record[22] |= (rec->vlan_id_mask & 0x3) << 4;
+
+ packed_record[22] |= (rec->vlan_up_mask & 0x1) << 6;
+
+ packed_record[22] |= (rec->vlan_valid_mask & 0x1) << 7;
+
+ packed_record[22] |= (rec->tci_mask & 0xFF) << 8;
+
+ packed_record[23] = rec->sci_mask & 0xFF;
+
+ packed_record[23] |= (rec->eth_type_mask & 0x3) << 8;
+
+ packed_record[23] |= (rec->snap_mask & 0x1F) << 10;
+
+ packed_record[23] |= (rec->llc_mask & 0x1) << 15;
+ packed_record[24] = (rec->llc_mask >> 1) & 0x3;
+
+ packed_record[24] |= (rec->sa_mask & 0x3F) << 2;
+
+ packed_record[24] |= (rec->da_mask & 0x3F) << 8;
+
+ packed_record[24] |= (rec->pn_mask & 0x3) << 14;
+ packed_record[25] = (rec->pn_mask >> 2) & 0x3;
+
+ packed_record[25] |= (rec->eight02dot2 & 0x1) << 2;
+
+ packed_record[25] |= (rec->tci_sc & 0x1) << 3;
+
+ packed_record[25] |= (rec->tci_87543 & 0x1) << 4;
+
+ packed_record[25] |= (rec->exp_sectag_en & 0x1) << 5;
+
+ packed_record[25] |= (rec->sc_idx & 0x1F) << 6;
+
+ packed_record[25] |= (rec->sc_sa & 0x3) << 11;
+
+ packed_record[25] |= (rec->debug & 0x1) << 13;
+
+ packed_record[25] |= (rec->action & 0x3) << 14;
+
+ packed_record[26] = (rec->valid & 0x1) << 3;
+
+ return set_raw_egress_record(hw, packed_record, 28, 1,
+ ROWOFFSET_EGRESSCLASSRECORD + table_index);
+}
+
+int aq_mss_set_egress_class_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_class_record *rec,
+ u16 table_index)
+{
+ return AQ_API_CALL_SAFE(set_egress_class_record, hw, rec, table_index);
+}
+
+static int get_egress_class_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_class_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[28];
+ int ret;
+
+ if (table_index >= NUMROWS_EGRESSCLASSRECORD)
+ return -EINVAL;
+
+ /* If the row that we want to read is odd, first read the previous even
+ * row, throw that value away, and finally read the desired row.
+ */
+ if ((table_index % 2) > 0) {
+ ret = get_raw_egress_record(hw, packed_record, 28, 1,
+ ROWOFFSET_EGRESSCLASSRECORD +
+ table_index - 1);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ ret = get_raw_egress_record(hw, packed_record, 28, 1,
+ ROWOFFSET_EGRESSCLASSRECORD + table_index);
+ if (unlikely(ret))
+ return ret;
+
+ rec->vlan_id = packed_record[0] & 0xFFF;
+
+ rec->vlan_up = (packed_record[0] >> 12) & 0x7;
+
+ rec->vlan_valid = (packed_record[0] >> 15) & 0x1;
+
+ rec->byte3 = packed_record[1] & 0xFF;
+
+ rec->byte2 = (packed_record[1] >> 8) & 0xFF;
+
+ rec->byte1 = packed_record[2] & 0xFF;
+
+ rec->byte0 = (packed_record[2] >> 8) & 0xFF;
+
+ rec->tci = packed_record[3] & 0xFF;
+
+ rec->sci[0] = (packed_record[3] >> 8) & 0xFF;
+ rec->sci[0] |= packed_record[4] << 8;
+ rec->sci[0] |= (packed_record[5] & 0xFF) << 24;
+
+ rec->sci[1] = (packed_record[5] >> 8) & 0xFF;
+ rec->sci[1] |= packed_record[6] << 8;
+ rec->sci[1] |= (packed_record[7] & 0xFF) << 24;
+
+ rec->eth_type = (packed_record[7] >> 8) & 0xFF;
+ rec->eth_type |= (packed_record[8] & 0xFF) << 8;
+
+ rec->snap[0] = (packed_record[8] >> 8) & 0xFF;
+ rec->snap[0] |= packed_record[9] << 8;
+ rec->snap[0] |= (packed_record[10] & 0xFF) << 24;
+
+ rec->snap[1] = (packed_record[10] >> 8) & 0xFF;
+
+ rec->llc = packed_record[11];
+ rec->llc |= (packed_record[12] & 0xFF) << 16;
+
+ rec->mac_sa[0] = (packed_record[12] >> 8) & 0xFF;
+ rec->mac_sa[0] |= packed_record[13] << 8;
+ rec->mac_sa[0] |= (packed_record[14] & 0xFF) << 24;
+
+ rec->mac_sa[1] = (packed_record[14] >> 8) & 0xFF;
+ rec->mac_sa[1] |= (packed_record[15] & 0xFF) << 8;
+
+ rec->mac_da[0] = (packed_record[15] >> 8) & 0xFF;
+ rec->mac_da[0] |= packed_record[16] << 8;
+ rec->mac_da[0] |= (packed_record[17] & 0xFF) << 24;
+
+ rec->mac_da[1] = (packed_record[17] >> 8) & 0xFF;
+ rec->mac_da[1] |= (packed_record[18] & 0xFF) << 8;
+
+ rec->pn = (packed_record[18] >> 8) & 0xFF;
+ rec->pn |= packed_record[19] << 8;
+ rec->pn |= (packed_record[20] & 0xFF) << 24;
+
+ rec->byte3_location = (packed_record[20] >> 8) & 0x3F;
+
+ rec->byte3_mask = (packed_record[20] >> 14) & 0x1;
+
+ rec->byte2_location = (packed_record[20] >> 15) & 0x1;
+ rec->byte2_location |= (packed_record[21] & 0x1F) << 1;
+
+ rec->byte2_mask = (packed_record[21] >> 5) & 0x1;
+
+ rec->byte1_location = (packed_record[21] >> 6) & 0x3F;
+
+ rec->byte1_mask = (packed_record[21] >> 12) & 0x1;
+
+ rec->byte0_location = (packed_record[21] >> 13) & 0x7;
+ rec->byte0_location |= (packed_record[22] & 0x7) << 3;
+
+ rec->byte0_mask = (packed_record[22] >> 3) & 0x1;
+
+ rec->vlan_id_mask = (packed_record[22] >> 4) & 0x3;
+
+ rec->vlan_up_mask = (packed_record[22] >> 6) & 0x1;
+
+ rec->vlan_valid_mask = (packed_record[22] >> 7) & 0x1;
+
+ rec->tci_mask = (packed_record[22] >> 8) & 0xFF;
+
+ rec->sci_mask = packed_record[23] & 0xFF;
+
+ rec->eth_type_mask = (packed_record[23] >> 8) & 0x3;
+
+ rec->snap_mask = (packed_record[23] >> 10) & 0x1F;
+
+ rec->llc_mask = (packed_record[23] >> 15) & 0x1;
+ rec->llc_mask |= (packed_record[24] & 0x3) << 1;
+
+ rec->sa_mask = (packed_record[24] >> 2) & 0x3F;
+
+ rec->da_mask = (packed_record[24] >> 8) & 0x3F;
+
+ rec->pn_mask = (packed_record[24] >> 14) & 0x3;
+ rec->pn_mask |= (packed_record[25] & 0x3) << 2;
+
+ rec->eight02dot2 = (packed_record[25] >> 2) & 0x1;
+
+ rec->tci_sc = (packed_record[25] >> 3) & 0x1;
+
+ rec->tci_87543 = (packed_record[25] >> 4) & 0x1;
+
+ rec->exp_sectag_en = (packed_record[25] >> 5) & 0x1;
+
+ rec->sc_idx = (packed_record[25] >> 6) & 0x1F;
+
+ rec->sc_sa = (packed_record[25] >> 11) & 0x3;
+
+ rec->debug = (packed_record[25] >> 13) & 0x1;
+
+ rec->action = (packed_record[25] >> 14) & 0x3;
+
+ rec->valid = (packed_record[26] >> 3) & 0x1;
+
+ return 0;
+}
+
+int aq_mss_get_egress_class_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_class_record *rec,
+ u16 table_index)
+{
+ memset(rec, 0, sizeof(*rec));
+
+ return AQ_API_CALL_SAFE(get_egress_class_record, hw, rec, table_index);
+}
+
+static int set_egress_sc_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_sc_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[8];
+
+ if (table_index >= NUMROWS_EGRESSSCRECORD)
+ return -EINVAL;
+
+ memset(packed_record, 0, sizeof(u16) * 8);
+
+ packed_record[0] = rec->start_time & 0xFFFF;
+ packed_record[1] = (rec->start_time >> 16) & 0xFFFF;
+
+ packed_record[2] = rec->stop_time & 0xFFFF;
+ packed_record[3] = (rec->stop_time >> 16) & 0xFFFF;
+
+ packed_record[4] = rec->curr_an & 0x3;
+
+ packed_record[4] |= (rec->an_roll & 0x1) << 2;
+
+ packed_record[4] |= (rec->tci & 0x3F) << 3;
+
+ packed_record[4] |= (rec->enc_off & 0x7F) << 9;
+ packed_record[5] = (rec->enc_off >> 7) & 0x1;
+
+ packed_record[5] |= (rec->protect & 0x1) << 1;
+
+ packed_record[5] |= (rec->recv & 0x1) << 2;
+
+ packed_record[5] |= (rec->fresh & 0x1) << 3;
+
+ packed_record[5] |= (rec->sak_len & 0x3) << 4;
+
+ packed_record[7] = (rec->valid & 0x1) << 15;
+
+ return set_raw_egress_record(hw, packed_record, 8, 2,
+ ROWOFFSET_EGRESSSCRECORD + table_index);
+}
+
+int aq_mss_set_egress_sc_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_sc_record *rec,
+ u16 table_index)
+{
+ return AQ_API_CALL_SAFE(set_egress_sc_record, hw, rec, table_index);
+}
+
+static int get_egress_sc_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_sc_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[8];
+ int ret;
+
+ if (table_index >= NUMROWS_EGRESSSCRECORD)
+ return -EINVAL;
+
+ ret = get_raw_egress_record(hw, packed_record, 8, 2,
+ ROWOFFSET_EGRESSSCRECORD + table_index);
+ if (unlikely(ret))
+ return ret;
+
+ rec->start_time = packed_record[0];
+ rec->start_time |= packed_record[1] << 16;
+
+ rec->stop_time = packed_record[2];
+ rec->stop_time |= packed_record[3] << 16;
+
+ rec->curr_an = packed_record[4] & 0x3;
+
+ rec->an_roll = (packed_record[4] >> 2) & 0x1;
+
+ rec->tci = (packed_record[4] >> 3) & 0x3F;
+
+ rec->enc_off = (packed_record[4] >> 9) & 0x7F;
+ rec->enc_off |= (packed_record[5] & 0x1) << 7;
+
+ rec->protect = (packed_record[5] >> 1) & 0x1;
+
+ rec->recv = (packed_record[5] >> 2) & 0x1;
+
+ rec->fresh = (packed_record[5] >> 3) & 0x1;
+
+ rec->sak_len = (packed_record[5] >> 4) & 0x3;
+
+ rec->valid = (packed_record[7] >> 15) & 0x1;
+
+ return 0;
+}
+
+int aq_mss_get_egress_sc_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_sc_record *rec,
+ u16 table_index)
+{
+ memset(rec, 0, sizeof(*rec));
+
+ return AQ_API_CALL_SAFE(get_egress_sc_record, hw, rec, table_index);
+}
+
+static int set_egress_sa_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_sa_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[8];
+
+ if (table_index >= NUMROWS_EGRESSSARECORD)
+ return -EINVAL;
+
+ memset(packed_record, 0, sizeof(u16) * 8);
+
+ packed_record[0] = rec->start_time & 0xFFFF;
+ packed_record[1] = (rec->start_time >> 16) & 0xFFFF;
+
+ packed_record[2] = rec->stop_time & 0xFFFF;
+ packed_record[3] = (rec->stop_time >> 16) & 0xFFFF;
+
+ packed_record[4] = rec->next_pn & 0xFFFF;
+ packed_record[5] = (rec->next_pn >> 16) & 0xFFFF;
+
+ packed_record[6] = rec->sat_pn & 0x1;
+
+ packed_record[6] |= (rec->fresh & 0x1) << 1;
+
+ packed_record[7] = (rec->valid & 0x1) << 15;
+
+ return set_raw_egress_record(hw, packed_record, 8, 2,
+ ROWOFFSET_EGRESSSARECORD + table_index);
+}
+
+int aq_mss_set_egress_sa_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_sa_record *rec,
+ u16 table_index)
+{
+ int err = AQ_API_CALL_SAFE(set_egress_sa_record, hw, rec, table_index);
+
+ WARN_ONCE(err, "%s failed with %d\n", __func__, err);
+
+ return err;
+}
+
+static int get_egress_sa_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_sa_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[8];
+ int ret;
+
+ if (table_index >= NUMROWS_EGRESSSARECORD)
+ return -EINVAL;
+
+ ret = get_raw_egress_record(hw, packed_record, 8, 2,
+ ROWOFFSET_EGRESSSARECORD + table_index);
+ if (unlikely(ret))
+ return ret;
+
+ rec->start_time = packed_record[0];
+ rec->start_time |= packed_record[1] << 16;
+
+ rec->stop_time = packed_record[2];
+ rec->stop_time |= packed_record[3] << 16;
+
+ rec->next_pn = packed_record[4];
+ rec->next_pn |= packed_record[5] << 16;
+
+ rec->sat_pn = packed_record[6] & 0x1;
+
+ rec->fresh = (packed_record[6] >> 1) & 0x1;
+
+ rec->valid = (packed_record[7] >> 15) & 0x1;
+
+ return 0;
+}
+
+int aq_mss_get_egress_sa_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_sa_record *rec,
+ u16 table_index)
+{
+ memset(rec, 0, sizeof(*rec));
+
+ return AQ_API_CALL_SAFE(get_egress_sa_record, hw, rec, table_index);
+}
+
+static int set_egress_sakey_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_sakey_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[16];
+ int ret;
+
+ if (table_index >= NUMROWS_EGRESSSAKEYRECORD)
+ return -EINVAL;
+
+ memset(packed_record, 0, sizeof(u16) * 16);
+
+ packed_record[0] = rec->key[0] & 0xFFFF;
+ packed_record[1] = (rec->key[0] >> 16) & 0xFFFF;
+
+ packed_record[2] = rec->key[1] & 0xFFFF;
+ packed_record[3] = (rec->key[1] >> 16) & 0xFFFF;
+
+ packed_record[4] = rec->key[2] & 0xFFFF;
+ packed_record[5] = (rec->key[2] >> 16) & 0xFFFF;
+
+ packed_record[6] = rec->key[3] & 0xFFFF;
+ packed_record[7] = (rec->key[3] >> 16) & 0xFFFF;
+
+ packed_record[8] = rec->key[4] & 0xFFFF;
+ packed_record[9] = (rec->key[4] >> 16) & 0xFFFF;
+
+ packed_record[10] = rec->key[5] & 0xFFFF;
+ packed_record[11] = (rec->key[5] >> 16) & 0xFFFF;
+
+ packed_record[12] = rec->key[6] & 0xFFFF;
+ packed_record[13] = (rec->key[6] >> 16) & 0xFFFF;
+
+ packed_record[14] = rec->key[7] & 0xFFFF;
+ packed_record[15] = (rec->key[7] >> 16) & 0xFFFF;
+
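+	/* The key spans two 8-word rows of the egress SC/SA LUT (table 2);
+	 * the upper half is stored 32 rows earlier (table_index - 32).
+	 */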
+ ret = set_raw_egress_record(hw, packed_record, 8, 2,
+ ROWOFFSET_EGRESSSAKEYRECORD + table_index);
+ if (unlikely(ret))
+ goto clear_key;
+ ret = set_raw_egress_record(hw, packed_record + 8, 8, 2,
+ ROWOFFSET_EGRESSSAKEYRECORD + table_index -
+ 32);
+
+clear_key:
+ memzero_explicit(packed_record, sizeof(packed_record));
+ return ret;
+}
+
+int aq_mss_set_egress_sakey_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_sakey_record *rec,
+ u16 table_index)
+{
+ int err = AQ_API_CALL_SAFE(set_egress_sakey_record, hw, rec,
+ table_index);
+
+ WARN_ONCE(err, "%s failed with %d\n", __func__, err);
+
+ return err;
+}
+
+static int get_egress_sakey_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_sakey_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[16];
+ int ret;
+
+ if (table_index >= NUMROWS_EGRESSSAKEYRECORD)
+ return -EINVAL;
+
+ ret = get_raw_egress_record(hw, packed_record, 8, 2,
+ ROWOFFSET_EGRESSSAKEYRECORD + table_index);
+ if (unlikely(ret))
+ return ret;
+ ret = get_raw_egress_record(hw, packed_record + 8, 8, 2,
+ ROWOFFSET_EGRESSSAKEYRECORD + table_index -
+ 32);
+ if (unlikely(ret))
+ return ret;
+
+ rec->key[0] = packed_record[0];
+ rec->key[0] |= packed_record[1] << 16;
+
+ rec->key[1] = packed_record[2];
+ rec->key[1] |= packed_record[3] << 16;
+
+ rec->key[2] = packed_record[4];
+ rec->key[2] |= packed_record[5] << 16;
+
+ rec->key[3] = packed_record[6];
+ rec->key[3] |= packed_record[7] << 16;
+
+ rec->key[4] = packed_record[8];
+ rec->key[4] |= packed_record[9] << 16;
+
+ rec->key[5] = packed_record[10];
+ rec->key[5] |= packed_record[11] << 16;
+
+ rec->key[6] = packed_record[12];
+ rec->key[6] |= packed_record[13] << 16;
+
+ rec->key[7] = packed_record[14];
+ rec->key[7] |= packed_record[15] << 16;
+
+ return 0;
+}
+
+int aq_mss_get_egress_sakey_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_sakey_record *rec,
+ u16 table_index)
+{
+ memset(rec, 0, sizeof(*rec));
+
+ return AQ_API_CALL_SAFE(get_egress_sakey_record, hw, rec, table_index);
+}
+
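+/* Egress counters are read from the SMIB (table 3): each SC/SA index owns
+ * eight 4-word rows (SA counters in rows 0-3, SC counters in rows 4-7),
+ * and the common counters start at row 256. Each counter is 64 bits wide,
+ * returned as two 32-bit halves.
+ */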
+static int get_egress_sc_counters(struct aq_hw_s *hw,
+ struct aq_mss_egress_sc_counters *counters,
+ u16 sc_index)
+{
+ u16 packed_record[4];
+ int ret;
+
+ if (sc_index >= NUMROWS_EGRESSSCRECORD)
+ return -EINVAL;
+
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, sc_index * 8 + 4);
+ if (unlikely(ret))
+ return ret;
+ counters->sc_protected_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->sc_protected_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, sc_index * 8 + 5);
+ if (unlikely(ret))
+ return ret;
+ counters->sc_encrypted_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->sc_encrypted_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, sc_index * 8 + 6);
+ if (unlikely(ret))
+ return ret;
+ counters->sc_protected_octets[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->sc_protected_octets[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, sc_index * 8 + 7);
+ if (unlikely(ret))
+ return ret;
+ counters->sc_encrypted_octets[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->sc_encrypted_octets[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ return 0;
+}
+
+int aq_mss_get_egress_sc_counters(struct aq_hw_s *hw,
+ struct aq_mss_egress_sc_counters *counters,
+ u16 sc_index)
+{
+ memset(counters, 0, sizeof(*counters));
+
+ return AQ_API_CALL_SAFE(get_egress_sc_counters, hw, counters, sc_index);
+}
+
+static int get_egress_sa_counters(struct aq_hw_s *hw,
+ struct aq_mss_egress_sa_counters *counters,
+ u16 sa_index)
+{
+ u16 packed_record[4];
+ int ret;
+
+ if (sa_index >= NUMROWS_EGRESSSARECORD)
+ return -EINVAL;
+
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, sa_index * 8 + 0);
+ if (unlikely(ret))
+ return ret;
+ counters->sa_hit_drop_redirect[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->sa_hit_drop_redirect[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, sa_index * 8 + 1);
+ if (unlikely(ret))
+ return ret;
+ counters->sa_protected2_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->sa_protected2_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, sa_index * 8 + 2);
+ if (unlikely(ret))
+ return ret;
+ counters->sa_protected_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->sa_protected_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, sa_index * 8 + 3);
+ if (unlikely(ret))
+ return ret;
+ counters->sa_encrypted_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->sa_encrypted_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ return 0;
+}
+
+int aq_mss_get_egress_sa_counters(struct aq_hw_s *hw,
+ struct aq_mss_egress_sa_counters *counters,
+ u16 sa_index)
+{
+ memset(counters, 0, sizeof(*counters));
+
+ return AQ_API_CALL_SAFE(get_egress_sa_counters, hw, counters, sa_index);
+}
+
+static int
+get_egress_common_counters(struct aq_hw_s *hw,
+ struct aq_mss_egress_common_counters *counters)
+{
+ u16 packed_record[4];
+ int ret;
+
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, 256 + 0);
+ if (unlikely(ret))
+ return ret;
+ counters->ctl_pkt[0] = packed_record[0] | (packed_record[1] << 16);
+ counters->ctl_pkt[1] = packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, 256 + 1);
+ if (unlikely(ret))
+ return ret;
+ counters->unknown_sa_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->unknown_sa_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, 256 + 2);
+ if (unlikely(ret))
+ return ret;
+ counters->untagged_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->untagged_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, 256 + 3);
+ if (unlikely(ret))
+ return ret;
+ counters->too_long[0] = packed_record[0] | (packed_record[1] << 16);
+ counters->too_long[1] = packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, 256 + 4);
+ if (unlikely(ret))
+ return ret;
+ counters->ecc_error_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->ecc_error_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, 256 + 5);
+ if (unlikely(ret))
+ return ret;
+ counters->unctrl_hit_drop_redir[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->unctrl_hit_drop_redir[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ return 0;
+}
+
+int aq_mss_get_egress_common_counters(struct aq_hw_s *hw,
+ struct aq_mss_egress_common_counters *counters)
+{
+ memset(counters, 0, sizeof(*counters));
+
+ return AQ_API_CALL_SAFE(get_egress_common_counters, hw, counters);
+}
+
+static int clear_egress_counters(struct aq_hw_s *hw)
+{
+ struct mss_egress_ctl_register ctl_reg;
+ int ret;
+
+ memset(&ctl_reg, 0, sizeof(ctl_reg));
+
+ ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1, MSS_EGRESS_CTL_REGISTER_ADDR,
+ &ctl_reg.word_0);
+ if (unlikely(ret))
+ return ret;
+ ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_CTL_REGISTER_ADDR + 4,
+ &ctl_reg.word_1);
+ if (unlikely(ret))
+ return ret;
+
+ /* Toggle the Egress MIB clear bit 0->1->0 */
+ ctl_reg.bits_0.clear_counter = 0;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_CTL_REGISTER_ADDR, ctl_reg.word_0);
+ if (unlikely(ret))
+ return ret;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_CTL_REGISTER_ADDR + 4,
+ ctl_reg.word_1);
+ if (unlikely(ret))
+ return ret;
+
+ ctl_reg.bits_0.clear_counter = 1;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_CTL_REGISTER_ADDR, ctl_reg.word_0);
+ if (unlikely(ret))
+ return ret;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_CTL_REGISTER_ADDR + 4,
+ ctl_reg.word_1);
+ if (unlikely(ret))
+ return ret;
+
+ ctl_reg.bits_0.clear_counter = 0;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_CTL_REGISTER_ADDR, ctl_reg.word_0);
+ if (unlikely(ret))
+ return ret;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_CTL_REGISTER_ADDR + 4,
+ ctl_reg.word_1);
+ if (unlikely(ret))
+ return ret;
+
+ return 0;
+}
+
+int aq_mss_clear_egress_counters(struct aq_hw_s *hw)
+{
+ return AQ_API_CALL_SAFE(clear_egress_counters, hw);
+}
+
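+/* Ingress counters are read from the IGMIB (table 6); each SA owns twelve
+ * 4-word rows, one row per 64-bit counter.
+ */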
+static int get_ingress_sa_counters(struct aq_hw_s *hw,
+ struct aq_mss_ingress_sa_counters *counters,
+ u16 sa_index)
+{
+ u16 packed_record[4];
+ int ret;
+
+ if (sa_index >= NUMROWS_INGRESSSARECORD)
+ return -EINVAL;
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6,
+ sa_index * 12 + 0);
+ if (unlikely(ret))
+ return ret;
+ counters->untagged_hit_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->untagged_hit_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6,
+ sa_index * 12 + 1);
+ if (unlikely(ret))
+ return ret;
+ counters->ctrl_hit_drop_redir_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->ctrl_hit_drop_redir_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6,
+ sa_index * 12 + 2);
+ if (unlikely(ret))
+ return ret;
+ counters->not_using_sa[0] = packed_record[0] | (packed_record[1] << 16);
+ counters->not_using_sa[1] = packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6,
+ sa_index * 12 + 3);
+ if (unlikely(ret))
+ return ret;
+ counters->unused_sa[0] = packed_record[0] | (packed_record[1] << 16);
+ counters->unused_sa[1] = packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6,
+ sa_index * 12 + 4);
+ if (unlikely(ret))
+ return ret;
+ counters->not_valid_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->not_valid_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6,
+ sa_index * 12 + 5);
+ if (unlikely(ret))
+ return ret;
+ counters->invalid_pkts[0] = packed_record[0] | (packed_record[1] << 16);
+ counters->invalid_pkts[1] = packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6,
+ sa_index * 12 + 6);
+ if (unlikely(ret))
+ return ret;
+ counters->ok_pkts[0] = packed_record[0] | (packed_record[1] << 16);
+ counters->ok_pkts[1] = packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6,
+ sa_index * 12 + 7);
+ if (unlikely(ret))
+ return ret;
+ counters->late_pkts[0] = packed_record[0] | (packed_record[1] << 16);
+ counters->late_pkts[1] = packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6,
+ sa_index * 12 + 8);
+ if (unlikely(ret))
+ return ret;
+ counters->delayed_pkts[0] = packed_record[0] | (packed_record[1] << 16);
+ counters->delayed_pkts[1] = packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6,
+ sa_index * 12 + 9);
+ if (unlikely(ret))
+ return ret;
+ counters->unchecked_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->unchecked_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6,
+ sa_index * 12 + 10);
+ if (unlikely(ret))
+ return ret;
+ counters->validated_octets[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->validated_octets[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6,
+ sa_index * 12 + 11);
+ if (unlikely(ret))
+ return ret;
+ counters->decrypted_octets[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->decrypted_octets[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ return 0;
+}
+
+int aq_mss_get_ingress_sa_counters(struct aq_hw_s *hw,
+ struct aq_mss_ingress_sa_counters *counters,
+ u16 sa_index)
+{
+ memset(counters, 0, sizeof(*counters));
+
+ return AQ_API_CALL_SAFE(get_ingress_sa_counters, hw, counters,
+ sa_index);
+}
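
The ingress SA counters occupy twelve raw rows per SA, so gathering statistics for every SA is a simple loop over table indices bounded by NUMROWS_INGRESSSARECORD (defined in macsec_api.h below). A sketch that totals the validated packets across all SAs; the aggregation itself is illustrative:

	u64 total_ok = 0;
	u16 i;

	for (i = 0; i < NUMROWS_INGRESSSARECORD; i++) {
		struct aq_mss_ingress_sa_counters c;

		if (aq_mss_get_ingress_sa_counters(hw, &c, i))
			break;
		/* ok_pkts is a 64-bit counter split across two u32s. */
		total_ok += ((u64)c.ok_pkts[1] << 32) | c.ok_pkts[0];
	}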
+
+static int
+get_ingress_common_counters(struct aq_hw_s *hw,
+ struct aq_mss_ingress_common_counters *counters)
+{
+ u16 packed_record[4];
+ int ret;
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 0);
+ if (unlikely(ret))
+ return ret;
+ counters->ctl_pkts[0] = packed_record[0] | (packed_record[1] << 16);
+ counters->ctl_pkts[1] = packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 1);
+ if (unlikely(ret))
+ return ret;
+ counters->tagged_miss_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->tagged_miss_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 2);
+ if (unlikely(ret))
+ return ret;
+ counters->untagged_miss_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->untagged_miss_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 3);
+ if (unlikely(ret))
+ return ret;
+ counters->notag_pkts[0] = packed_record[0] | (packed_record[1] << 16);
+ counters->notag_pkts[1] = packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 4);
+ if (unlikely(ret))
+ return ret;
+ counters->untagged_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->untagged_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 5);
+ if (unlikely(ret))
+ return ret;
+ counters->bad_tag_pkts[0] = packed_record[0] | (packed_record[1] << 16);
+ counters->bad_tag_pkts[1] = packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 6);
+ if (unlikely(ret))
+ return ret;
+ counters->no_sci_pkts[0] = packed_record[0] | (packed_record[1] << 16);
+ counters->no_sci_pkts[1] = packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 7);
+ if (unlikely(ret))
+ return ret;
+ counters->unknown_sci_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->unknown_sci_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 8);
+ if (unlikely(ret))
+ return ret;
+ counters->ctrl_prt_pass_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->ctrl_prt_pass_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 9);
+ if (unlikely(ret))
+ return ret;
+ counters->unctrl_prt_pass_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->unctrl_prt_pass_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 10);
+ if (unlikely(ret))
+ return ret;
+ counters->ctrl_prt_fail_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->ctrl_prt_fail_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 11);
+ if (unlikely(ret))
+ return ret;
+ counters->unctrl_prt_fail_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->unctrl_prt_fail_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 12);
+ if (unlikely(ret))
+ return ret;
+ counters->too_long_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->too_long_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 13);
+ if (unlikely(ret))
+ return ret;
+ counters->igpoc_ctl_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->igpoc_ctl_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 14);
+ if (unlikely(ret))
+ return ret;
+ counters->ecc_error_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->ecc_error_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 15);
+ if (unlikely(ret))
+ return ret;
+ counters->unctrl_hit_drop_redir[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->unctrl_hit_drop_redir[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ return 0;
+}
+
+int aq_mss_get_ingress_common_counters(struct aq_hw_s *hw,
+ struct aq_mss_ingress_common_counters *counters)
+{
+ memset(counters, 0, sizeof(*counters));
+
+ return AQ_API_CALL_SAFE(get_ingress_common_counters, hw, counters);
+}
+
+static int clear_ingress_counters(struct aq_hw_s *hw)
+{
+ struct mss_ingress_ctl_register ctl_reg;
+ int ret;
+
+ memset(&ctl_reg, 0, sizeof(ctl_reg));
+
+ ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_CTL_REGISTER_ADDR, &ctl_reg.word_0);
+ if (unlikely(ret))
+ return ret;
+ ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_CTL_REGISTER_ADDR + 4,
+ &ctl_reg.word_1);
+ if (unlikely(ret))
+ return ret;
+
+ /* Toggle the Ingress MIB clear bit 0->1->0 */
+ ctl_reg.bits_0.clear_count = 0;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_CTL_REGISTER_ADDR, ctl_reg.word_0);
+ if (unlikely(ret))
+ return ret;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_CTL_REGISTER_ADDR + 4,
+ ctl_reg.word_1);
+ if (unlikely(ret))
+ return ret;
+
+ ctl_reg.bits_0.clear_count = 1;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_CTL_REGISTER_ADDR, ctl_reg.word_0);
+ if (unlikely(ret))
+ return ret;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_CTL_REGISTER_ADDR + 4,
+ ctl_reg.word_1);
+ if (unlikely(ret))
+ return ret;
+
+ ctl_reg.bits_0.clear_count = 0;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_CTL_REGISTER_ADDR, ctl_reg.word_0);
+ if (unlikely(ret))
+ return ret;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_CTL_REGISTER_ADDR + 4,
+ ctl_reg.word_1);
+ if (unlikely(ret))
+ return ret;
+
+ return 0;
+}
+
+int aq_mss_clear_ingress_counters(struct aq_hw_s *hw)
+{
+ return AQ_API_CALL_SAFE(clear_ingress_counters, hw);
+}
+
+static int get_egress_sa_expired(struct aq_hw_s *hw, u32 *expired)
+{
+ u16 val;
+ int ret;
+
+ ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_SA_EXPIRED_STATUS_REGISTER_ADDR,
+ &val);
+ if (unlikely(ret))
+ return ret;
+
+ *expired = val;
+
+ ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_SA_EXPIRED_STATUS_REGISTER_ADDR + 1,
+ &val);
+ if (unlikely(ret))
+ return ret;
+
+ *expired |= val << 16;
+
+ return 0;
+}
+
+int aq_mss_get_egress_sa_expired(struct aq_hw_s *hw, u32 *expired)
+{
+ *expired = 0;
+
+ return AQ_API_CALL_SAFE(get_egress_sa_expired, hw, expired);
+}
+
+static int get_egress_sa_threshold_expired(struct aq_hw_s *hw,
+ u32 *expired)
+{
+ u16 val;
+ int ret;
+
+ ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_SA_THRESHOLD_EXPIRED_STATUS_REGISTER_ADDR, &val);
+ if (unlikely(ret))
+ return ret;
+
+ *expired = val;
+
+ ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_SA_THRESHOLD_EXPIRED_STATUS_REGISTER_ADDR + 1, &val);
+ if (unlikely(ret))
+ return ret;
+
+ *expired |= val << 16;
+
+ return 0;
+}
+
+int aq_mss_get_egress_sa_threshold_expired(struct aq_hw_s *hw,
+ u32 *expired)
+{
+ *expired = 0;
+
+ return AQ_API_CALL_SAFE(get_egress_sa_threshold_expired, hw, expired);
+}
+
+static int set_egress_sa_expired(struct aq_hw_s *hw, u32 expired)
+{
+ int ret;
+
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_SA_EXPIRED_STATUS_REGISTER_ADDR,
+ expired & 0xFFFF);
+ if (unlikely(ret))
+ return ret;
+
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_SA_EXPIRED_STATUS_REGISTER_ADDR + 1,
+ expired >> 16);
+ if (unlikely(ret))
+ return ret;
+
+ return 0;
+}
+
+int aq_mss_set_egress_sa_expired(struct aq_hw_s *hw, u32 expired)
+{
+ return AQ_API_CALL_SAFE(set_egress_sa_expired, hw, expired);
+}
+
+static int set_egress_sa_threshold_expired(struct aq_hw_s *hw, u32 expired)
+{
+ int ret;
+
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_SA_THRESHOLD_EXPIRED_STATUS_REGISTER_ADDR,
+ expired & 0xFFFF);
+ if (unlikely(ret))
+ return ret;
+
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_SA_THRESHOLD_EXPIRED_STATUS_REGISTER_ADDR + 1,
+ expired >> 16);
+ if (unlikely(ret))
+ return ret;
+
+ return 0;
+}
+
+int aq_mss_set_egress_sa_threshold_expired(struct aq_hw_s *hw, u32 expired)
+{
+ return AQ_API_CALL_SAFE(set_egress_sa_threshold_expired, hw, expired);
+}
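
The expired and threshold-expired status registers are 32-bit masks with one bit per egress SA. A hedged poll-and-acknowledge sketch, assuming that writing the mask back clears the reported bits, as the symmetric get/set pair suggests; handle_expired_sa() is a hypothetical per-SA handler:

	u32 expired = 0;
	unsigned int sa;
	int ret;

	ret = aq_mss_get_egress_sa_expired(hw, &expired);
	if (ret)
		return ret;

	for (sa = 0; sa < 32; sa++)
		if (expired & BIT(sa))
			handle_expired_sa(sa);	/* hypothetical handler */

	/* Acknowledge what was handled (assumed write-to-clear). */
	return aq_mss_set_egress_sa_expired(hw, expired);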
diff --git a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.h b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.h
new file mode 100644
index 000000000..ff03cc462
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.h
@@ -0,0 +1,323 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef __MACSEC_API_H__
+#define __MACSEC_API_H__
+
+#include "aq_hw.h"
+#include "macsec_struct.h"
+
+#define NUMROWS_INGRESSPRECTLFRECORD 24
+#define ROWOFFSET_INGRESSPRECTLFRECORD 0
+
+#define NUMROWS_INGRESSPRECLASSRECORD 48
+#define ROWOFFSET_INGRESSPRECLASSRECORD 0
+
+#define NUMROWS_INGRESSPOSTCLASSRECORD 48
+#define ROWOFFSET_INGRESSPOSTCLASSRECORD 0
+
+#define NUMROWS_INGRESSSCRECORD 32
+#define ROWOFFSET_INGRESSSCRECORD 0
+
+#define NUMROWS_INGRESSSARECORD 32
+#define ROWOFFSET_INGRESSSARECORD 32
+
+#define NUMROWS_INGRESSSAKEYRECORD 32
+#define ROWOFFSET_INGRESSSAKEYRECORD 0
+
+#define NUMROWS_INGRESSPOSTCTLFRECORD 24
+#define ROWOFFSET_INGRESSPOSTCTLFRECORD 0
+
+#define NUMROWS_EGRESSCTLFRECORD 24
+#define ROWOFFSET_EGRESSCTLFRECORD 0
+
+#define NUMROWS_EGRESSCLASSRECORD 48
+#define ROWOFFSET_EGRESSCLASSRECORD 0
+
+#define NUMROWS_EGRESSSCRECORD 32
+#define ROWOFFSET_EGRESSSCRECORD 0
+
+#define NUMROWS_EGRESSSARECORD 32
+#define ROWOFFSET_EGRESSSARECORD 32
+
+#define NUMROWS_EGRESSSAKEYRECORD 32
+#define ROWOFFSET_EGRESSSAKEYRECORD 96
+
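The NUMROWS_* values bound the table_index arguments accepted by the accessors below, while the ROWOFFSET_* values place each logical table inside the shared lookup memory. A tiny sketch of the implied addressing; the helper is hypothetical:

	/* Absolute row of an egress SA record, assuming rows sit at
	 * ROWOFFSET + table_index as the defines above suggest.
	 */
	static inline u16 egress_sa_abs_row(u16 table_index)
	{
		return ROWOFFSET_EGRESSSARECORD + table_index;
	}
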
+/*! Read the raw table data from the specified row of the Egress CTL
+ * Filter table, and unpack it into the fields of rec.
+ * rec - [OUT] The raw table row data will be unpacked into the fields of rec.
+ * table_index - The table row to read (max 23).
+ */
+int aq_mss_get_egress_ctlf_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_ctlf_record *rec,
+ u16 table_index);
+
+/*! Pack the fields of rec, and write the packed data into the
+ * specified row of the Egress CTL Filter table.
+ * rec - [IN] The bitfield values to write to the table row.
+ * table_index - The table row to write (max 23).
+ */
+int aq_mss_set_egress_ctlf_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_ctlf_record *rec,
+ u16 table_index);
+
+/*! Read the raw table data from the specified row of the Egress
+ * Packet Classifier table, and unpack it into the fields of rec.
+ * rec - [OUT] The raw table row data will be unpacked into the fields of rec.
+ * table_index - The table row to read (max 47).
+ */
+int aq_mss_get_egress_class_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_class_record *rec,
+ u16 table_index);
+
+/*! Pack the fields of rec, and write the packed data into the
+ * specified row of the Egress Packet Classifier table.
+ * rec - [IN] The bitfield values to write to the table row.
+ * table_index - The table row to write (max 47).
+ */
+int aq_mss_set_egress_class_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_class_record *rec,
+ u16 table_index);
+
+/*! Read the raw table data from the specified row of the Egress SC
+ * Lookup table, and unpack it into the fields of rec.
+ * rec - [OUT] The raw table row data will be unpacked into the fields of rec.
+ * table_index - The table row to read (max 31).
+ */
+int aq_mss_get_egress_sc_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_sc_record *rec,
+ u16 table_index);
+
+/*! Pack the fields of rec, and write the packed data into the
+ * specified row of the Egress SC Lookup table.
+ * rec - [IN] The bitfield values to write to the table row.
+ * table_index - The table row to write (max 31).
+ */
+int aq_mss_set_egress_sc_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_sc_record *rec,
+ u16 table_index);
+
+/*! Read the raw table data from the specified row of the Egress SA
+ * Lookup table, and unpack it into the fields of rec.
+ * rec - [OUT] The raw table row data will be unpacked into the fields of rec.
+ * table_index - The table row to read (max 31).
+ */
+int aq_mss_get_egress_sa_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_sa_record *rec,
+ u16 table_index);
+
+/*! Pack the fields of rec, and write the packed data into the
+ * specified row of the Egress SA Lookup table.
+ * rec - [IN] The bitfield values to write to the table row.
+ * table_index - The table row to write (max 31).
+ */
+int aq_mss_set_egress_sa_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_sa_record *rec,
+ u16 table_index);
+
+/*! Read the raw table data from the specified row of the Egress SA
+ * Key Lookup table, and unpack it into the fields of rec.
+ * rec - [OUT] The raw table row data will be unpacked into the fields of rec.
+ * table_index - The table row to read (max 31).
+ */
+int aq_mss_get_egress_sakey_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_sakey_record *rec,
+ u16 table_index);
+
+/*! Pack the fields of rec, and write the packed data into the
+ * specified row of the Egress SA Key Lookup table.
+ * rec - [IN] The bitfield values to write to the table row.
+ * table_index - The table row to write (max 31).
+ */
+int aq_mss_set_egress_sakey_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_sakey_record *rec,
+ u16 table_index);
+
+/*! Read the raw table data from the specified row of the Ingress
+ * Pre-MACSec CTL Filter table, and unpack it into the fields of rec.
+ * rec - [OUT] The raw table row data will be unpacked into the fields of rec.
+ * table_index - The table row to read (max 23).
+ */
+int aq_mss_get_ingress_prectlf_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_prectlf_record *rec,
+ u16 table_index);
+
+/*! Pack the fields of rec, and write the packed data into the
+ * specified row of the Ingress Pre-MACSec CTL Filter table.
+ * rec - [IN] The bitfield values to write to the table row.
+ * table_index - The table row to write (max 23).
+ */
+int aq_mss_set_ingress_prectlf_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_prectlf_record *rec,
+ u16 table_index);
+
+/*! Read the raw table data from the specified row of the Ingress
+ * Pre-MACSec Packet Classifier table, and unpack it into the fields of rec.
+ * rec - [OUT] The raw table row data will be unpacked into the fields of rec.
+ * table_index - The table row to read (max 47).
+ */
+int aq_mss_get_ingress_preclass_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_preclass_record *rec,
+ u16 table_index);
+
+/*! Pack the fields of rec, and write the packed data into the
+ * specified row of the Ingress Pre-MACSec Packet Classifier table.
+ * rec - [IN] The bitfield values to write to the table row.
+ * table_index - The table row to write (max 47).
+ */
+int aq_mss_set_ingress_preclass_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_preclass_record *rec,
+ u16 table_index);
+
+/*! Read the raw table data from the specified row of the Ingress SC
+ * Lookup table, and unpack it into the fields of rec.
+ * rec - [OUT] The raw table row data will be unpacked into the fields of rec.
+ * table_index - The table row to read (max 31).
+ */
+int aq_mss_get_ingress_sc_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_sc_record *rec,
+ u16 table_index);
+
+/*! Pack the fields of rec, and write the packed data into the
+ * specified row of the Ingress SC Lookup table.
+ * rec - [IN] The bitfield values to write to the table row.
+ * table_index - The table row to write (max 31).
+ */
+int aq_mss_set_ingress_sc_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_sc_record *rec,
+ u16 table_index);
+
+/*! Read the raw table data from the specified row of the Ingress SA
+ * Lookup table, and unpack it into the fields of rec.
+ * rec - [OUT] The raw table row data will be unpacked into the fields of rec.
+ * table_index - The table row to read (max 31).
+ */
+int aq_mss_get_ingress_sa_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_sa_record *rec,
+ u16 table_index);
+
+/*! Pack the fields of rec, and write the packed data into the
+ * specified row of the Ingress SA Lookup table.
+ * rec - [IN] The bitfield values to write to the table row.
+ * table_index - The table row to write (max 31).
+ */
+int aq_mss_set_ingress_sa_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_sa_record *rec,
+ u16 table_index);
+
+/*! Read the raw table data from the specified row of the Ingress SA
+ * Key Lookup table, and unpack it into the fields of rec.
+ * rec - [OUT] The raw table row data will be unpacked into the fields of rec.
+ * table_index - The table row to read (max 31).
+ */
+int aq_mss_get_ingress_sakey_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_sakey_record *rec,
+ u16 table_index);
+
+/*! Pack the fields of rec, and write the packed data into the
+ * specified row of the Ingress SA Key Lookup table.
+ * rec - [IN] The bitfield values to write to the table row.
+ * table_index - The table row to write (max 31).
+ */
+int aq_mss_set_ingress_sakey_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_sakey_record *rec,
+ u16 table_index);
+
+/*! Read the raw table data from the specified row of the Ingress
+ * Post-MACSec Packet Classifier table, and unpack it into the
+ * fields of rec.
+ * rec - [OUT] The raw table row data will be unpacked into the fields of rec.
+ * table_index - The table row to read (max 47).
+ */
+int aq_mss_get_ingress_postclass_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_postclass_record *rec,
+ u16 table_index);
+
+/*! Pack the fields of rec, and write the packed data into the
+ * specified row of the Ingress Post-MACSec Packet Classifier table.
+ * rec - [IN] The bitfield values to write to the table row.
+ * table_index - The table row to write (max 47).
+ */
+int aq_mss_set_ingress_postclass_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_postclass_record *rec,
+ u16 table_index);
+
+/*! Read the raw table data from the specified row of the Ingress
+ * Post-MACSec CTL Filter table, and unpack it into the fields of rec.
+ * rec - [OUT] The raw table row data will be unpacked into the fields of rec.
+ * table_index - The table row to read (max 23).
+ */
+int aq_mss_get_ingress_postctlf_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_postctlf_record *rec,
+ u16 table_index);
+
+/*! Pack the fields of rec, and write the packed data into the
+ * specified row of the Ingress Post-MACSec CTL Filter table.
+ * rec - [IN] The bitfield values to write to the table row.
+ * table_index - The table row to write (max 23).
+ */
+int aq_mss_set_ingress_postctlf_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_postctlf_record *rec,
+ u16 table_index);
+
+/*! Read the counters for the specified SC, and unpack them into the
+ * fields of counters.
+ * counters - [OUT] The raw table row data will be unpacked here.
+ * sc_index - The table row to read (max 31).
+ */
+int aq_mss_get_egress_sc_counters(struct aq_hw_s *hw,
+ struct aq_mss_egress_sc_counters *counters,
+ u16 sc_index);
+
+/*! Read the counters for the specified SA, and unpack them into the
+ * fields of counters.
+ * counters - [OUT] The raw table row data will be unpacked here.
+ * sa_index - The table row to read (max 31).
+ */
+int aq_mss_get_egress_sa_counters(struct aq_hw_s *hw,
+ struct aq_mss_egress_sa_counters *counters,
+ u16 sa_index);
+
+/*! Read the counters for the common egress counters, and unpack them
+ * into the fields of counters.
+ * counters - [OUT] The raw table row data will be unpacked here.
+ */
+int aq_mss_get_egress_common_counters(struct aq_hw_s *hw,
+ struct aq_mss_egress_common_counters *counters);
+
+/*! Clear all Egress counters to 0. */
+int aq_mss_clear_egress_counters(struct aq_hw_s *hw);
+
+/*! Read the counters for the specified SA, and unpack them into the
+ * fields of counters.
+ * counters - [OUT] The raw table row data will be unpacked here.
+ * sa_index - The table row to read (max 31).
+ */
+int aq_mss_get_ingress_sa_counters(struct aq_hw_s *hw,
+ struct aq_mss_ingress_sa_counters *counters,
+ u16 sa_index);
+
+/*! Read the counters for the common ingress counters, and unpack them
+ * into the fields of counters.
+ * counters - [OUT] The raw table row data will be unpacked here.
+ */
+int aq_mss_get_ingress_common_counters(struct aq_hw_s *hw,
+ struct aq_mss_ingress_common_counters *counters);
+
+/*! Clear all Ingress counters to 0. */
+int aq_mss_clear_ingress_counters(struct aq_hw_s *hw);
+
+/*! Get Egress SA expired. */
+int aq_mss_get_egress_sa_expired(struct aq_hw_s *hw, u32 *expired);
+/*! Get Egress SA threshold expired. */
+int aq_mss_get_egress_sa_threshold_expired(struct aq_hw_s *hw,
+ u32 *expired);
+/*! Set Egress SA expired. */
+int aq_mss_set_egress_sa_expired(struct aq_hw_s *hw, u32 expired);
+/*! Set Egress SA threshold expired. */
+int aq_mss_set_egress_sa_threshold_expired(struct aq_hw_s *hw,
+ u32 expired);
+
+#endif /* __MACSEC_API_H__ */
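
The get/set pairs above lend themselves to read-modify-write of a single table row. A short usage sketch for the Egress Packet Classifier; the field edit is purely illustrative and error handling is abbreviated:

	struct aq_mss_egress_class_record rec;
	int ret;

	ret = aq_mss_get_egress_class_record(hw, &rec, 0);
	if (ret)
		return ret;

	rec.valid = 1;	/* illustrative: mark row 0 as a valid entry */
	ret = aq_mss_set_egress_class_record(hw, &rec, 0);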
diff --git a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h
new file mode 100644
index 000000000..c2fda80fe
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h
@@ -0,0 +1,914 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef _MACSEC_STRUCT_H_
+#define _MACSEC_STRUCT_H_
+
+/*! Represents the bitfields of a single row in the Egress CTL Filter
+ * table.
+ */
+struct aq_mss_egress_ctlf_record {
+ /*! This is used to store the 48 bit value used to compare the SA,
+ * DA, or half-DA + half-SA value.
+ */
+ u32 sa_da[2];
+ /*! This is used to store the 16 bit ethertype value used for
+ * comparison.
+ */
+ u32 eth_type;
+ /*! The match mask is per-nibble. 0 means don't care, i.e. every value
+ * will match successfully. The total data is 64 bit, i.e. 16 nibbles
+ * masks.
+ */
+ u32 match_mask;
+ /*! 0: No compare, i.e. this entry is not used
+ * 1: compare DA only
+ * 2: compare SA only
+ * 3: compare half DA + half SA
+ * 4: compare ether type only
+ * 5: compare DA + ethertype
+ * 6: compare SA + ethertype
+ * 7: compare DA + range.
+ */
+ u32 match_type;
+ /*! 0: Bypass the remaining modules if matched.
+ * 1: Forward to next module for more classifications.
+ */
+ u32 action;
+};
+
+/*! Represents the bitfields of a single row in the Egress Packet
+ * Classifier table.
+ */
+struct aq_mss_egress_class_record {
+ /*! VLAN ID field. */
+ u32 vlan_id;
+ /*! VLAN UP field. */
+ u32 vlan_up;
+ /*! VLAN Present in the Packet. */
+ u32 vlan_valid;
+ /*! The 8 bit value used to compare with extracted value for byte 3. */
+ u32 byte3;
+ /*! The 8 bit value used to compare with extracted value for byte 2. */
+ u32 byte2;
+ /*! The 8 bit value used to compare with extracted value for byte 1. */
+ u32 byte1;
+ /*! The 8 bit value used to compare with extracted value for byte 0. */
+ u32 byte0;
+ /*! The 8 bit TCI field used to compare with extracted value. */
+ u32 tci;
+ /*! The 64 bit SCI field in the SecTAG. */
+ u32 sci[2];
+ /*! The 16 bit Ethertype (in the clear) field used to compare with
+ * extracted value.
+ */
+ u32 eth_type;
+ /*! This is to specify the 40 bit SNAP header if the SNAP header's mask
+ * is enabled.
+ */
+ u32 snap[2];
+ /*! This is to specify the 24 bit LLC header if the LLC header's mask is
+ * enabled.
+ */
+ u32 llc;
+ /*! The 48 bit MAC_SA field used to compare with extracted value. */
+ u32 mac_sa[2];
+ /*! The 48 bit MAC_DA field used to compare with extracted value. */
+ u32 mac_da[2];
+ /*! The 32 bit Packet number used to compare with extracted value. */
+ u32 pn;
+ /*! 0~63: byte location extracted by the packet comparator, which
+ * can be anywhere in the first 64 bytes of the MAC packet.
+ * This byte location is counted from the MAC DA address, i.e. 0
+ * points to byte 0 of the DA address.
+ */
+ u32 byte3_location;
+ /*! 0: don't care
+ * 1: enable comparison of extracted byte pointed by byte 3 location.
+ */
+ u32 byte3_mask;
+ /*! 0~63: byte location extracted by the packet comparator, which
+ * can be anywhere in the first 64 bytes of the MAC packet.
+ * This byte location is counted from the MAC DA address, i.e. 0
+ * points to byte 0 of the DA address.
+ */
+ u32 byte2_location;
+ /*! 0: don't care
+ * 1: enable comparison of extracted byte pointed by byte 2 location.
+ */
+ u32 byte2_mask;
+ /*! 0~63: byte location extracted by the packet comparator, which
+ * can be anywhere in the first 64 bytes of the MAC packet.
+ * This byte location is counted from the MAC DA address, i.e. 0
+ * points to byte 0 of the DA address.
+ */
+ u32 byte1_location;
+ /*! 0: don't care
+ * 1: enable comparison of extracted byte pointed by byte 1 location.
+ */
+ u32 byte1_mask;
+ /*! 0~63: byte location extracted by the packet comparator, which
+ * can be anywhere in the first 64 bytes of the MAC packet.
+ * This byte location is counted from the MAC DA address, i.e. 0
+ * points to byte 0 of the DA address.
+ */
+ u32 byte0_location;
+ /*! 0: don't care
+ * 1: enable comparison of extracted byte pointed by byte 0 location.
+ */
+ u32 byte0_mask;
+ /*! Mask is per-byte.
+ * 0: don't care
+ * 1: enable comparison of extracted VLAN ID field.
+ */
+ u32 vlan_id_mask;
+ /*! 0: don't care
+ * 1: enable comparison of extracted VLAN UP field.
+ */
+ u32 vlan_up_mask;
+ /*! 0: don't care
+ * 1: enable comparison of extracted VLAN Valid field.
+ */
+ u32 vlan_valid_mask;
+ /*! This is a bit mask enabling comparison of the 8 bit TCI field,
+ * including the AN field.
+ * For an explicit SECTAG, the AN is hardware controlled. When
+ * sending a packet with an explicit SECTAG, the rest of the TCI
+ * fields come directly from the SECTAG.
+ */
+ u32 tci_mask;
+ /*! Mask is per-byte.
+ * 0: don't care
+ * 1: enable comparison of SCI
+ * Note: If this field is not 0, the input packet's SECTAG is
+ * explicitly tagged and the MACSEC module will only update the
+ * MSDU. The PN is hardware controlled.
+ */
+ u32 sci_mask;
+ /*! Mask is per-byte.
+ * 0: don't care
+ * 1: enable comparison of Ethertype.
+ */
+ u32 eth_type_mask;
+ /*! Mask is per-byte.
+ * 0: don't care and no SNAP header exists.
+ * 1: compare the SNAP header.
+ * If this bit is set to 1, extraction will assume a SNAP header
+ * exists as encapsulated in 802.3 (RFC 1042), i.e. the 5 bytes
+ * after the LLC header are the SNAP header.
+ */
+ u32 snap_mask;
+ /*! 0: don't care and no LLC header exists.
+ * 1: compare the LLC header.
+ * If this bit is set to 1, extraction will assume an LLC header
+ * exists as encapsulated in 802.3 (RFC 1042), i.e. the three
+ * bytes after the 802.3 MAC header are the LLC header.
+ */
+ u32 llc_mask;
+ /*! Mask is per-byte.
+ * 0: don't care
+ * 1: enable comparison of MAC_SA.
+ */
+ u32 sa_mask;
+ /*! Mask is per-byte.
+ * 0: don't care
+ * 1: enable comparison of MAC_DA.
+ */
+ u32 da_mask;
+ /*! Mask is per-byte. */
+ u32 pn_mask;
+ /*! Reserved. This bit should always be 0. */
+ u32 eight02dot2;
+ /*! 1: For the explicit sectag case, use TCI_SC from the table
+ * 0: use TCI_SC from explicit sectag.
+ */
+ u32 tci_sc;
+ /*! 1: For the explicit sectag case, use TCI_V,ES,SCB,E,C from the table
+ * 0: use TCI_V,ES,SCB,E,C from explicit sectag.
+ */
+ u32 tci_87543;
+ /*! 1: indicates that the incoming packet has an explicit sectag. */
+ u32 exp_sectag_en;
+ /*! If the packet matches and is tagged as a controlled-packet, this
+ * SC/SA
+ * index is used for later SC and SA table lookup.
+ */
+ u32 sc_idx;
+ /*! This field is used to specify how many SA entries are
+ * associated with 1 SC entry.
+ * 2'b00: 1 SC has 4 SA.
+ * SC index is equivalent to {SC_Index[4:2], 1'b0}.
+ * SA index is equivalent to {SC_Index[4:2], SC entry's current AN[1:0]}
+ * 2'b10: 1 SC has 2 SA.
+ * SC index is equivalent to SC_Index[4:1]
+ * SA index is equivalent to {SC_Index[4:1], SC entry's current AN[0]}
+ * 2'b11: 1 SC has 1 SA. No SC entry exists for the specific SA.
+ * SA index is equivalent to SC_Index[4:0]
+ * Note: if specified as 2'b11, hardware AN roll over is not
+ * supported.
+ */
+ u32 sc_sa;
+ /*! 0: the packets will be sent to MAC FIFO
+ * 1: The packets will be sent to Debug/Loopback FIFO.
+ * If the action above is drop, this bit has no meaning.
+ */
+ u32 debug;
+ /*! 0: forward to remaining modules
+ * 1: bypass the next encryption modules. The packet is considered
+ * an uncontrolled packet.
+ * 2: drop
+ * 3: Reserved.
+ */
+ u32 action;
+ /*! 0: Not valid entry. This entry is not used
+ * 1: valid entry.
+ */
+ u32 valid;
+};
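
The sc_sa encoding above derives the SA index from the SC index and the current AN. A hedged transcription of the 2'b00 case (1 SC has 4 SA), taken directly from the bit compositions in the comment:

	/* 2'b00: SA index = {SC_Index[4:2], AN[1:0]} per the comment above.
	 * Hypothetical helper for illustration only.
	 */
	static inline u32 sa_idx_4sa_per_sc(u32 sc_index, u32 an)
	{
		return (sc_index & 0x1c) | (an & 0x3);
	}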
+
+/*! Represents the bitfields of a single row in the Egress SC Lookup table. */
+struct aq_mss_egress_sc_record {
+ /*! This is to specify when the SC was first used. Set by HW. */
+ u32 start_time;
+ /*! This is to specify when the SC was last used. Set by HW. */
+ u32 stop_time;
+ /*! This is to specify which of the SA entries is used by the
+ * current HW. Note: This value needs to be set by SW after reset.
+ * It will be automatically updated by HW if AN roll over is enabled.
+ */
+ u32 curr_an;
+ /*! 0: Clear the SA Valid Bit after PN expiry.
+ * 1: Do not Clear the SA Valid bit after PN expiry of the current SA.
+ * When Enable AN roll over is set, S/W does not need to
+ * program the new SAs and the H/W will automatically roll over
+ * between the SAs without session expiry.
+ * For normal operation, Enable AN Roll over will be set to '0',
+ * in which case the SW needs to program the new SA values
+ * after the current PN expires.
+ */
+ u32 an_roll;
+ /*! This is the TCI field used if packet is not explicitly tagged. */
+ u32 tci;
+ /*! This value indicates the offset where encryption will start.
+ * Values of 0, 4, 8-50.
+ */
+ u32 enc_off;
+ /*! 0: Do not protect frames, all the packets will be forwarded
+ * unchanged. MIB counter (OutPktsUntagged) will be updated.
+ * 1: Protect.
+ */
+ u32 protect;
+ /*! 0: when none of the SAs related to the SC has inUse set.
+ * 1: when any of the SAs related to the SC has inUse set.
+ * This bit is set by HW.
+ */
+ u32 recv;
+ /*! 0: H/W Clears this bit on the first use.
+ * 1: SW updates this entry, when programming the SC Table.
+ */
+ u32 fresh;
+ /*! AES Key size
+ * 00 - 128bits
+ * 01 - 192bits
+ * 10 - 256bits
+ * 11 - Reserved.
+ */
+ u32 sak_len;
+ /*! 0: Invalid SC
+ * 1: Valid SC.
+ */
+ u32 valid;
+};
+
+/*! Represents the bitfields of a single row in the Egress SA Lookup table. */
+struct aq_mss_egress_sa_record {
+ /*! This is to specify when the SA was first used. Set by HW. */
+ u32 start_time;
+ /*! This is to specify when the SA was last used. Set by HW. */
+ u32 stop_time;
+ /*! This is set by SW and updated by HW to store the Next PN number
+ * used for encryption.
+ */
+ u32 next_pn;
+ /*! Set by HW when the Next_PN number is going to wrap around from
+ * 0xFFFF_FFFF to 0.
+ */
+ u32 sat_pn;
+ /*! 0: This SA is in use.
+ * 1: This SA is Fresh and set by SW.
+ */
+ u32 fresh;
+ /*! 0: Invalid SA
+ * 1: Valid SA.
+ */
+ u32 valid;
+};
+
+/*! Represents the bitfields of a single row in the Egress SA Key
+ * Lookup table.
+ */
+struct aq_mss_egress_sakey_record {
+ /*! Key for AES-GCM processing. */
+ u32 key[8];
+};
+
+/*! Represents the bitfields of a single row in the Ingress Pre-MACSec
+ * CTL Filter table.
+ */
+struct aq_mss_ingress_prectlf_record {
+ /*! This is used to store the 48 bit value used to compare the SA,
+ * DA, or half-DA + half-SA value.
+ */
+ u32 sa_da[2];
+ /*! This is used to store the 16 bit ethertype value used for
+ * comparison.
+ */
+ u32 eth_type;
+ /*! The match mask is per-nibble. 0 means don't care, i.e. every
+ * value will match successfully. The total data is 64 bit, i.e.
+ * 16 nibbles masks.
+ */
+ u32 match_mask;
+ /*! 0: No compare, i.e. this entry is not used
+ * 1: compare DA only
+ * 2: compare SA only
+ * 3: compare half DA + half SA
+ * 4: compare ether type only
+ * 5: compare DA + ethertype
+ * 6: compare SA + ethertype
+ * 7: compare DA + range.
+ */
+ u32 match_type;
+ /*! 0: Bypass the remaining modules if matched.
+ * 1: Forward to next module for more classifications.
+ */
+ u32 action;
+};
+
+/*! Represents the bitfields of a single row in the Ingress Pre-MACSec
+ * Packet Classifier table.
+ */
+struct aq_mss_ingress_preclass_record {
+ /*! The 64 bit SCI field used to compare with extracted value.
+ * Should have SCI value in case TCI[SCI_SEND] == 0. This will be
+ * used for ICV calculation.
+ */
+ u32 sci[2];
+ /*! The 8 bit TCI field used to compare with extracted value. */
+ u32 tci;
+ /*! 8 bit encryption offset. */
+ u32 encr_offset;
+ /*! The 16 bit Ethertype (in the clear) field used to compare with
+ * extracted value.
+ */
+ u32 eth_type;
+ /*! This is to specify the 40 bit SNAP header if the SNAP header's
+ * mask is enabled.
+ */
+ u32 snap[2];
+ /*! This is to specify the 24 bit LLC header if the LLC header's
+ * mask is enabled.
+ */
+ u32 llc;
+ /*! The 48 bit MAC_SA field used to compare with extracted value. */
+ u32 mac_sa[2];
+ /*! The 48 bit MAC_DA field used to compare with extracted value. */
+ u32 mac_da[2];
+ /*! 0: this is to compare with a non-LPBK packet
+ * 1: this is to compare with an LPBK packet.
+ * This value is compared with a controlled-tag which goes
+ * with the packet when looped back from the Egress port.
+ */
+ u32 lpbk_packet;
+ /*! The value of this bit mask affects how the SC index and SA
+ * index are created.
+ * 2'b00: 1 SC has 4 SA.
+ * SC index is equivalent to {SC_Index[4:2], 1'b0}.
+ * SA index is equivalent to {SC_Index[4:2], SECTAG's AN[1:0]}
+ * Here AN bits are not compared.
+ * 2'b10: 1 SC has 2 SA.
+ * SC index is equivalent to SC_Index[4:1]
+ * SA index is equivalent to {SC_Index[4:1], SECTAG's AN[0]}
+ * Compare AN[1] field only
+ * 2'b11: 1 SC has 1 SA. No SC entry exists for the specific SA.
+ * SA index is equivalent to SC_Index[4:0]
+ * AN[1:0] bits are compared.
+ * NOTE: This design supports different usages of AN. The user
+ * can ping-pong between 2 SAs by using only the AN[0] bit, use
+ * 4 SAs per SC by using the AN[1:0] bits, or even treat each SA
+ * as independent, i.e. AN[1:0] is just another matching pointer
+ * to select the SA.
+ */
+ u32 an_mask;
+ /*! This is a bit mask enabling comparison of the upper 6 bits of
+ * the TCI field, which do not include the AN field.
+ * 0: don't compare
+ * 1: enable comparison of the bits.
+ */
+ u32 tci_mask;
+ /*! 0: don't care
+ * 1: enable comparison of SCI.
+ */
+ u32 sci_mask;
+ /*! Mask is per-byte.
+ * 0: don't care
+ * 1: enable comparison of Ethertype.
+ */
+ u32 eth_type_mask;
+ /*! Mask is per-byte.
+ * 0: don't care and no SNAP header exists.
+ * 1: compare the SNAP header.
+ * If this bit is set to 1, extraction will assume a SNAP header
+ * exists as encapsulated in 802.3 (RFC 1042), i.e. the 5 bytes
+ * after the LLC header are the SNAP header.
+ */
+ u32 snap_mask;
+ /*! Mask is per-byte.
+ * 0: don't care and no LLC header exists.
+ * 1: compare the LLC header.
+ * If this bit is set to 1, extraction will assume an LLC header
+ * exists as encapsulated in 802.3 (RFC 1042), i.e. the three
+ * bytes after the 802.3 MAC header are the LLC header.
+ */
+ u32 llc_mask;
+ /*! Reserved. This bit should always be 0. */
+ u32 _802_2_encapsulate;
+ /*! Mask is per-byte.
+ * 0: don't care
+ * 1: enable comparison of MAC_SA.
+ */
+ u32 sa_mask;
+ /*! Mask is per-byte.
+ * 0: don't care
+ * 1: enable comparison of MAC_DA.
+ */
+ u32 da_mask;
+ /*! 0: don't care
+ * 1: enable checking if this is loopback packet or not.
+ */
+ u32 lpbk_mask;
+ /*! If the packet matches and is tagged as a controlled-packet, this
+ * SC/SA
+ * index is used for later SC and SA table lookup.
+ */
+ u32 sc_idx;
+ /*! 0: the packets will be sent to MAC FIFO
+ * 1: The packets will be sent to Debug/Loopback FIFO.
+ * If the action above is drop, this bit has no meaning.
+ */
+ u32 proc_dest;
+ /*! 0: Process: Forward to next two modules for 802.1AE decryption.
+ * 1: Process but keep SECTAG: Forward to next two modules for
+ * 802.1AE decryption but keep the MACSEC header with added error
+ * code information. ICV will be stripped for all control packets.
+ * 2: Bypass: Bypass the next two decryption modules but processed
+ * by post-classification.
+ * 3: Drop: drop this packet and update counts accordingly.
+ */
+ u32 action;
+ /*! 0: This is a controlled-port packet if matched.
+ * 1: This is an uncontrolled-port packet if matched.
+ */
+ u32 ctrl_unctrl;
+ /*! Use the SCI value from the table if the 'SC' bit of the input
+ * packet is not present.
+ */
+ u32 sci_from_table;
+ /*! Reserved. */
+ u32 reserved;
+ /*! 0: Not valid entry. This entry is not used
+ * 1: valid entry.
+ */
+ u32 valid;
+};
+
+/*! Represents the bitfields of a single row in the Ingress SC Lookup table. */
+struct aq_mss_ingress_sc_record {
+ /*! This is to specify when the SC was last used. Set by HW. */
+ u32 stop_time;
+ /*! This is to specify when the SC was first used. Set by HW. */
+ u32 start_time;
+ /*! 0: Strict
+ * 1: Check
+ * 2: Disabled.
+ */
+ u32 validate_frames;
+ /*! 1: Replay control enabled.
+ * 0: replay control disabled.
+ */
+ u32 replay_protect;
+ /*! This is to specify the window range for anti-replay. Default is 0.
+ * 0 means strict order enforcement.
+ */
+ u32 anti_replay_window;
+ /*! 0: when none of the SAs related to the SC has inUse set.
+ * 1: when any of the SAs related to the SC has inUse set.
+ * This bit is set by HW.
+ */
+ u32 receiving;
+ /*! 0: when hardware processed the SC for the first time, it clears
+ * this bit
+ * 1: This bit is set by SW, when it sets up the SC.
+ */
+ u32 fresh;
+ /*! 0: The AN number will not automatically roll over if Next_PN is
+ * saturated.
+ * 1: The AN number will automatically roll over if Next_PN is
+ * saturated.
+ * Rollover is valid only after expiry. Normal rollover between
+ * SAs should follow the normal process.
+ */
+ u32 an_rol;
+ /*! Reserved. */
+ u32 reserved;
+ /*! 0: Invalid SC
+ * 1: Valid SC.
+ */
+ u32 valid;
+};
+
+/*! Represents the bitfields of a single row in the Ingress SA Lookup table. */
+struct aq_mss_ingress_sa_record {
+ /*! This is to specify when the SA was last used. Set by HW. */
+ u32 stop_time;
+ /*! This is to specify when the SA was first used. Set by HW. */
+ u32 start_time;
+ /*! This is updated by HW to store the expected NextPN number for
+ * anti-replay.
+ */
+ u32 next_pn;
+ /*! Set by HW when the Next_PN number is going to wrap around from
+ * 0xFFFF_FFFF to 0.
+ */
+ u32 sat_nextpn;
+ /*! 0: This SA is not yet used.
+ * 1: This SA is inUse.
+ */
+ u32 in_use;
+ /*! 0: when hardware processed the SC for the first time, it clears
+ * this bit
+ * 1: This bit is set by SW, when it sets up the SC.
+ */
+ u32 fresh;
+ /*! Reserved. */
+ u32 reserved;
+ /*! 0: Invalid SA.
+ * 1: Valid SA.
+ */
+ u32 valid;
+};
+
+/*! Represents the bitfields of a single row in the Ingress SA Key
+ * Lookup table.
+ */
+struct aq_mss_ingress_sakey_record {
+ /*! Key for AES-GCM processing. */
+ u32 key[8];
+ /*! AES key size
+ * 00 - 128bits
+ * 01 - 192bits
+ * 10 - 256bits
+ * 11 - reserved.
+ */
+ u32 key_len;
+};
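
The two-bit key-size encodings used by sak_len and key_len map directly onto AES key byte lengths. A small hedged decode helper; the name and error convention are illustrative:

	/* 0 -> AES-128 (16 bytes), 1 -> AES-192 (24), 2 -> AES-256 (32). */
	static inline int aq_key_len_to_bytes(u32 key_len)
	{
		switch (key_len) {
		case 0: return 16;
		case 1: return 24;
		case 2: return 32;
		default: return -1;	/* 3 is reserved */
		}
	}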
+
+/*! Represents the bitfields of a single row in the Ingress Post-
+ * MACSec Packet Classifier table.
+ */
+struct aq_mss_ingress_postclass_record {
+ /*! The 8 bit value used to compare with extracted value for byte 0. */
+ u32 byte0;
+ /*! The 8 bit value used to compare with extracted value for byte 1. */
+ u32 byte1;
+ /*! The 8 bit value used to compare with extracted value for byte 2. */
+ u32 byte2;
+ /*! The 8 bit value used to compare with extracted value for byte 3. */
+ u32 byte3;
+ /*! Ethertype in the packet. */
+ u32 eth_type;
+ /*! Ether Type value > 1500 (0x5dc). */
+ u32 eth_type_valid;
+ /*! VLAN ID after parsing. */
+ u32 vlan_id;
+ /*! VLAN priority after parsing. */
+ u32 vlan_up;
+ /*! Valid VLAN coding. */
+ u32 vlan_valid;
+ /*! SA index. */
+ u32 sai;
+ /*! SAI hit, i.e. controlled packet. */
+ u32 sai_hit;
+ /*! Mask for payload ethertype field. */
+ u32 eth_type_mask;
+ /*! 0~63: byte location extracted by the packet comparator, which
+ * can be anywhere in the first 64 bytes of the MAC packet.
+ * This byte location is counted from the MAC DA address, i.e. 0
+ * points to byte 0 of the DA address.
+ */
+ u32 byte3_location;
+ /*! Mask for Byte Offset 3. */
+ u32 byte3_mask;
+ /*! 0~63: byte location extracted by the packet comparator, which
+ * can be anywhere in the first 64 bytes of the MAC packet.
+ * This byte location is counted from the MAC DA address, i.e. 0
+ * points to byte 0 of the DA address.
+ */
+ u32 byte2_location;
+ /*! Mask for Byte Offset 2. */
+ u32 byte2_mask;
+ /*! 0~63: byte location extracted by the packet comparator, which
+ * can be anywhere in the first 64 bytes of the MAC packet.
+ * This byte location is counted from the MAC DA address, i.e. 0
+ * points to byte 0 of the DA address.
+ */
+ u32 byte1_location;
+ /*! Mask for Byte Offset 1. */
+ u32 byte1_mask;
+ /*! 0~63: byte location extracted by the packet comparator, which
+ * can be anywhere in the first 64 bytes of the MAC packet.
+ * This byte location is counted from the MAC DA address, i.e. 0
+ * points to byte 0 of the DA address.
+ */
+ u32 byte0_location;
+ /*! Mask for Byte Offset 0. */
+ u32 byte0_mask;
+ /*! Mask for Ethertype valid field. Indicates 802.3 vs. Other. */
+ u32 eth_type_valid_mask;
+ /*! Mask for VLAN ID field. */
+ u32 vlan_id_mask;
+ /*! Mask for VLAN UP field. */
+ u32 vlan_up_mask;
+ /*! Mask for VLAN valid field. */
+ u32 vlan_valid_mask;
+ /*! Mask for SAI. */
+ u32 sai_mask;
+ /*! Mask for SAI_HIT. */
+ u32 sai_hit_mask;
+ /*! Action if only first level matches and second level does not.
+ * 0: pass
+ * 1: drop (fail).
+ */
+ u32 firstlevel_actions;
+ /*! Action if both first and second level matched.
+ * 0: pass
+ * 1: drop (fail).
+ */
+ u32 secondlevel_actions;
+ /*! Reserved. */
+ u32 reserved;
+ /*! 0: Not valid entry. This entry is not used
+ * 1: valid entry.
+ */
+ u32 valid;
+};
+
+/*! Represents the bitfields of a single row in the Ingress Post-
+ * MACSec CTL Filter table.
+ */
+struct aq_mss_ingress_postctlf_record {
+ /*! This is used to store the 48 bit value used to compare the SA,
+ * DA, or half-DA + half-SA value.
+ */
+ u32 sa_da[2];
+ /*! This is used to store the 16 bit ethertype value used for
+ * comparison.
+ */
+ u32 eth_type;
+ /*! The match mask is per-nibble. 0 means don't care, i.e. every
+ * value will match successfully. The total data is 64 bit, i.e.
+ * 16 nibbles masks.
+ */
+ u32 match_mask;
+ /*! 0: No compare, i.e. this entry is not used
+ * 1: compare DA only
+ * 2: compare SA only
+ * 3: compare half DA + half SA
+ * 4: compare ether type only
+ * 5: compare DA + ethertype
+ * 6: compare SA + ethertype
+ * 7: compare DA + range.
+ */
+ u32 match_type;
+ /*! 0: Bypass the remaining modules if matched.
+ * 1: Forward to next module for more classifications.
+ */
+ u32 action;
+};
+
+/*! Represents the Egress MIB counters for a single SC. Counters are
+ * 64 bits, lower 32 bits in field[0].
+ */
+struct aq_mss_egress_sc_counters {
+ /*! The number of integrity protected but not encrypted packets
+ * for this transmitting SC.
+ */
+ u32 sc_protected_pkts[2];
+ /*! The number of integrity protected and encrypted packets for
+ * this transmitting SC.
+ */
+ u32 sc_encrypted_pkts[2];
+ /*! The number of plain text octets that are integrity protected
+ * but not encrypted on the transmitting SC.
+ */
+ u32 sc_protected_octets[2];
+ /*! The number of plain text octets that are integrity protected
+ * and encrypted on the transmitting SC.
+ */
+ u32 sc_encrypted_octets[2];
+};
+
+/*! Represents the Egress MIB counters for a single SA. Counters are
+ * 64 bits, lower 32 bits in field[0].
+ */
+struct aq_mss_egress_sa_counters {
+ /*! The number of dropped packets for this transmitting SA. */
+ u32 sa_hit_drop_redirect[2];
+ /*! TODO */
+ u32 sa_protected2_pkts[2];
+ /*! The number of integrity protected but not encrypted packets
+ * for this transmitting SA.
+ */
+ u32 sa_protected_pkts[2];
+ /*! The number of integrity protected and encrypted packets for
+ * this transmitting SA.
+ */
+ u32 sa_encrypted_pkts[2];
+};
+
+/*! Represents the common Egress MIB counters; the counters not
+ * associated with a particular SC/SA. Counters are 64 bits, lower 32
+ * bits in field[0].
+ */
+struct aq_mss_egress_common_counters {
+ /*! The number of transmitted packets classified as MAC_CTL packets. */
+ u32 ctl_pkt[2];
+ /*! The number of transmitted packets that did not match any rows
+ * in the Egress Packet Classifier table.
+ */
+ u32 unknown_sa_pkts[2];
+ /*! The number of transmitted packets where the SC table entry has
+ * protect=0 (so packets are forwarded unchanged).
+ */
+ u32 untagged_pkts[2];
+ /*! The number of transmitted packets discarded because the packet
+ * length is greater than the ifMtu of the Common Port interface.
+ */
+ u32 too_long[2];
+ /*! The number of transmitted packets for which table memory was
+ * affected by an ECC error during processing.
+ */
+ u32 ecc_error_pkts[2];
+ /*! The number of transmitted packets for which the matched row in
+ * the Egress Packet Classifier table has action=drop.
+ */
+ u32 unctrl_hit_drop_redir[2];
+};
+
+/*! Represents the Ingress MIB counters for a single SA. Counters are
+ * 64 bits, lower 32 bits in field[0].
+ */
+struct aq_mss_ingress_sa_counters {
+ /*! For this SA, the number of received packets without a SecTAG. */
+ u32 untagged_hit_pkts[2];
+ /*! For this SA, the number of received packets that were dropped. */
+ u32 ctrl_hit_drop_redir_pkts[2];
+ /*! For this SA which is not currently in use, the number of
+ * received packets that have been discarded because either the
+ * packets were encrypted or the matched row in the Ingress SC
+ * Lookup table has validate_frames=Strict.
+ */
+ u32 not_using_sa[2];
+ /*! For this SA which is not currently in use, the number of
+ * received, unencrypted packets where the matched row in the
+ * Ingress SC Lookup table has validate_frames!=Strict.
+ */
+ u32 unused_sa[2];
+ /*! For this SA, the number of discarded packets, with the
+ * condition that the packets are not valid and one of the
+ * following conditions is true: either the matched row in the
+ * Ingress SC Lookup table has validate_frames=Strict or the
+ * packets are encrypted.
+ */
+ u32 not_valid_pkts[2];
+ /*! For this SA, the number of packets with the condition that the
+ * packets are not valid and the matched row in the Ingress SC
+ * Lookup table has validate_frames=Check.
+ */
+ u32 invalid_pkts[2];
+ /*! For this SA, the number of validated packets. */
+ u32 ok_pkts[2];
+ /*! For this SC, the number of received packets that have been
+ * discarded with the condition: the matched row in the Ingress
+ * SC Lookup table has replay_protect=1 and the PN of the packet
+ * is lower than the lower bound replay check PN.
+ */
+ u32 late_pkts[2];
+ /*! For this SA, the number of packets with the condition that the
+ * PN of the packets is lower than the lower bound replay
+ * protection PN.
+ */
+ u32 delayed_pkts[2];
+ /*! For this SC, the number of packets with the following condition:
+ * - the matched row in the Ingress SC Lookup table has
+ * replay_protect=0 or
+ * - the matched row in the Ingress SC Lookup table has
+ * replay_protect=1 and the packet is not encrypted and the
+ * integrity check has failed or
+ * - the matched row in the Ingress SC Lookup table has
+ * replay_protect=1 and the packet is encrypted and integrity
+ * check has failed.
+ */
+ u32 unchecked_pkts[2];
+ /*! The number of octets of plaintext recovered from received
+ * packets that were integrity protected but not encrypted.
+ */
+ u32 validated_octets[2];
+ /*! The number of octets of plaintext recovered from received
+ * packets that were integrity protected and encrypted.
+ */
+ u32 decrypted_octets[2];
+};
+
+/*! Represents the common Ingress MIB counters; the counters not
+ * associated with a particular SA. Counters are 64 bits, lower 32
+ * bits in field[0].
+ */
+struct aq_mss_ingress_common_counters {
+ /*! The number of received packets classified as MAC_CTL packets. */
+ u32 ctl_pkts[2];
+ /*! The number of received packets with the MAC security tag
+ * (SecTAG), not matching any rows in the Ingress Pre-MACSec
+ * Packet Classifier table.
+ */
+ u32 tagged_miss_pkts[2];
+ /*! The number of received packets without the MAC security tag
+ * (SecTAG), not matching any rows in the Ingress Pre-MACSec
+ * Packet Classifier table.
+ */
+ u32 untagged_miss_pkts[2];
+ /*! The number of received packets discarded without the MAC
+ * security tag (SecTAG) and with the matched row in the Ingress
+ * SC Lookup table having validate_frames=Strict.
+ */
+ u32 notag_pkts[2];
+ /*! The number of received packets without the MAC security tag
+ * (SecTAG) and with the matched row in the Ingress SC Lookup
+ * table having validate_frames!=Strict.
+ */
+ u32 untagged_pkts[2];
+ /*! The number of received packets discarded with an invalid
+ * SecTAG or a zero value PN or an invalid ICV.
+ */
+ u32 bad_tag_pkts[2];
+ /*! The number of received packets discarded with unknown SCI
+ * information with the condition:
+ * the matched row in the Ingress SC Lookup table has
+ * validate_frames=Strict or the C bit in the SecTAG is set.
+ */
+ u32 no_sci_pkts[2];
+ /*! The number of received packets with unknown SCI with the condition:
+ * The matched row in the Ingress SC Lookup table has
+ * validate_frames!=Strict and the C bit in the SecTAG is not set.
+ */
+ u32 unknown_sci_pkts[2];
+ /*! The number of received packets by the controlled port service
+ * that passed the Ingress Post-MACSec Packet Classifier table
+ * check.
+ */
+ u32 ctrl_prt_pass_pkts[2];
+ /*! The number of received packets by the uncontrolled port
+ * service that passed the Ingress Post-MACSec Packet Classifier
+ * table check.
+ */
+ u32 unctrl_prt_pass_pkts[2];
+ /*! The number of received packets by the controlled port service
+ * that failed the Ingress Post-MACSec Packet Classifier table
+ * check.
+ */
+ u32 ctrl_prt_fail_pkts[2];
+ /*! The number of received packets by the uncontrolled port
+ * service that failed the Ingress Post-MACSec Packet Classifier
+ * table check.
+ */
+ u32 unctrl_prt_fail_pkts[2];
+ /*! The number of received packets discarded because the packet
+ * length is greater than the ifMtu of the Common Port interface.
+ */
+ u32 too_long_pkts[2];
+ /*! The number of received packets classified as MAC_CTL by the
+ * Ingress Post-MACSec CTL Filter table.
+ */
+ u32 igpoc_ctl_pkts[2];
+ /*! The number of received packets for which table memory was
+ * affected by an ECC error during processing.
+ */
+ u32 ecc_error_pkts[2];
+ /*! The number of received packets by the uncontrolled port
+ * service that were dropped.
+ */
+ u32 unctrl_hit_drop_redir[2];
+};
+
+#endif